1 /*
2 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2013, 2015 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "precompiled.hpp"
27 #include "asm/macroAssembler.inline.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "interpreter/interpreterRuntime.hpp"
30 #include "interpreter/interp_masm.hpp"
31 #include "interpreter/templateInterpreter.hpp"
32 #include "interpreter/templateTable.hpp"
33 #include "memory/universe.inline.hpp"
34 #include "oops/objArrayKlass.hpp"
35 #include "oops/oop.inline.hpp"
36 #include "prims/methodHandles.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "runtime/synchronizer.hpp"
40 #include "utilities/macros.hpp"
41
42 #ifndef CC_INTERP
43
44 #undef __
45 #define __ _masm->
46
47 // ============================================================================
48 // Misc helpers
49
50 // Do an oop store like *(base + index) = val OR *(base + offset) = val
51 // (only one of both variants is possible at the same time).
52 // Index can be noreg.
53 // Kills:
54 // Rbase, Rtmp
55 static void do_oop_store(InterpreterMacroAssembler* _masm,
56 Register Rbase,
57 RegisterOrConstant offset,
58 Register Rval, // Noreg means always null.
59 Register Rtmp1,
60 Register Rtmp2,
61 Register Rtmp3,
62 BarrierSet::Name barrier,
63 bool precise,
64 bool check_null) {
65 assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);
66
67 switch (barrier) {
68 #if INCLUDE_ALL_GCS
69 case BarrierSet::G1SATBCT:
70 case BarrierSet::G1SATBCTLogging:
71 {
72 // Load and record the previous value.
73 __ g1_write_barrier_pre(Rbase, offset,
74 Rtmp3, /* holder of pre_val ? */
75 Rtmp1, Rtmp2, false /* frame */);
76
77 Label Lnull, Ldone;
78 if (Rval != noreg) {
79 if (check_null) {
80 __ cmpdi(CCR0, Rval, 0);
81 __ beq(CCR0, Lnull);
82 }
83 __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
84 // Mark the card.
85 if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
86 __ add(Rbase, offset, Rbase);
87 }
88 __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
89 if (check_null) { __ b(Ldone); }
90 }
91
92 if (Rval == noreg || check_null) { // Store null oop.
93 Register Rnull = Rval;
94 __ bind(Lnull);
95 if (Rval == noreg) {
96 Rnull = Rtmp1;
97 __ li(Rnull, 0);
98 }
99 if (UseCompressedOops) {
100 __ stw(Rnull, offset, Rbase);
101 } else {
102 __ std(Rnull, offset, Rbase);
103 }
104 }
105 __ bind(Ldone);
106 }
107 break;
108 #endif // INCLUDE_ALL_GCS
109 case BarrierSet::CardTableModRef:
110 case BarrierSet::CardTableExtension:
111 {
112 Label Lnull, Ldone;
113 if (Rval != noreg) {
114 if (check_null) {
115 __ cmpdi(CCR0, Rval, 0);
116 __ beq(CCR0, Lnull);
117 }
118 __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should better stay uncompressed.*/ Rtmp1);
119 // Mark the card.
120 if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
121 __ add(Rbase, offset, Rbase);
122 }
123 __ card_write_barrier_post(Rbase, Rval, Rtmp1);
124 if (check_null) {
125 __ b(Ldone);
126 }
127 }
128
129 if (Rval == noreg || check_null) { // Store null oop.
130 Register Rnull = Rval;
131 __ bind(Lnull);
132 if (Rval == noreg) {
133 Rnull = Rtmp1;
134 __ li(Rnull, 0);
135 }
136 if (UseCompressedOops) {
137 __ stw(Rnull, offset, Rbase);
138 } else {
139 __ std(Rnull, offset, Rbase);
140 }
141 }
142 __ bind(Ldone);
143 }
144 break;
145 case BarrierSet::ModRef:
146 ShouldNotReachHere();
147 break;
148 default:
149 ShouldNotReachHere();
150 }
151 }
152
153 // ============================================================================
154 // Platform-dependent initialization
155
156 void TemplateTable::pd_initialize() {
157 // No ppc64 specific initialization.
158 }
159
160 Address TemplateTable::at_bcp(int offset) {
161 // Not used on ppc.
162 ShouldNotReachHere();
163 return Address();
164 }
165
166 // Patches the current bytecode (ptr to it located in bcp)
167 // in the bytecode stream with a new one.
168 void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
169 // With sharing on, may need to test method flag.
170 if (!RewriteBytecodes) return;
171 Label L_patch_done;
172
173 switch (new_bc) {
174 case Bytecodes::_fast_aputfield:
175 case Bytecodes::_fast_bputfield:
176 case Bytecodes::_fast_cputfield:
177 case Bytecodes::_fast_dputfield:
178 case Bytecodes::_fast_fputfield:
179 case Bytecodes::_fast_iputfield:
180 case Bytecodes::_fast_lputfield:
181 case Bytecodes::_fast_sputfield:
182 {
183 // We skip bytecode quickening for putfield instructions when
184 // the put_code written to the constant pool cache is zero.
185 // This is required so that every execution of this instruction
186 // calls out to InterpreterRuntime::resolve_get_put to do
187 // additional, required work.
188 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
189 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
190 __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
191 // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
192 #if defined(VM_LITTLE_ENDIAN)
193 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
194 #else
195 __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
196 #endif
197 __ cmpwi(CCR0, Rnew_bc, 0);
198 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
199 __ beq(CCR0, L_patch_done);
200 // __ isync(); // acquire not needed
201 break;
202 }
203
204 default:
205 assert(byte_no == -1, "sanity");
206 if (load_bc_into_bc_reg) {
207 __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
208 }
209 }
210
211 if (JvmtiExport::can_post_breakpoint()) {
212 Label L_fast_patch;
213 __ lbz(Rtemp, 0, R14_bcp);
214 __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
215 __ bne(CCR0, L_fast_patch);
216 // Perform the quickening, slowly, in the bowels of the breakpoint table.
217 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
218 __ b(L_patch_done);
219 __ bind(L_fast_patch);
220 }
221
222 // Patch bytecode.
223 __ stb(Rnew_bc, 0, R14_bcp);
224
225 __ bind(L_patch_done);
226 }
227
228 // ============================================================================
229 // Individual instructions
230
231 void TemplateTable::nop() {
232 transition(vtos, vtos);
233 // Nothing to do.
234 }
235
236 void TemplateTable::shouldnotreachhere() {
237 transition(vtos, vtos);
238 __ stop("shouldnotreachhere bytecode");
239 }
240
241 void TemplateTable::aconst_null() {
242 transition(vtos, atos);
243 __ li(R17_tos, 0);
244 }
245
246 void TemplateTable::iconst(int value) {
247 transition(vtos, itos);
248 assert(value >= -1 && value <= 5, "");
249 __ li(R17_tos, value);
250 }
251
252 void TemplateTable::lconst(int value) {
253 transition(vtos, ltos);
254 assert(value >= -1 && value <= 5, "");
255 __ li(R17_tos, value);
256 }
257
258 void TemplateTable::fconst(int value) {
259 transition(vtos, ftos);
260 static float zero = 0.0;
261 static float one = 1.0;
262 static float two = 2.0;
263 switch (value) {
264 default: ShouldNotReachHere();
265 case 0: {
266 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
267 __ lfs(F15_ftos, simm16_offset, R11_scratch1);
268 break;
269 }
270 case 1: {
271 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
272 __ lfs(F15_ftos, simm16_offset, R11_scratch1);
273 break;
274 }
275 case 2: {
276 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
277 __ lfs(F15_ftos, simm16_offset, R11_scratch1);
278 break;
279 }
280 }
281 }
282
283 void TemplateTable::dconst(int value) {
284 transition(vtos, dtos);
285 static double zero = 0.0;
286 static double one = 1.0;
287 switch (value) {
288 case 0: {
289 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
290 __ lfd(F15_ftos, simm16_offset, R11_scratch1);
291 break;
292 }
293 case 1: {
294 int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
295 __ lfd(F15_ftos, simm16_offset, R11_scratch1);
296 break;
297 }
298 default: ShouldNotReachHere();
299 }
300 }
301
302 void TemplateTable::bipush() {
303 transition(vtos, itos);
304 __ lbz(R17_tos, 1, R14_bcp);
305 __ extsb(R17_tos, R17_tos);
306 }
307
308 void TemplateTable::sipush() {
309 transition(vtos, itos);
310 __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
311 }
312
313 void TemplateTable::ldc(bool wide) {
314 Register Rscratch1 = R11_scratch1,
315 Rscratch2 = R12_scratch2,
316 Rcpool = R3_ARG1;
317
318 transition(vtos, vtos);
319 Label notInt, notClass, exit;
320
321 __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
322 if (wide) { // Read index.
323 __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
324 } else {
325 __ lbz(Rscratch1, 1, R14_bcp);
326 }
327
328 const int base_offset = ConstantPool::header_size() * wordSize;
329 const int tags_offset = Array<u1>::base_offset_in_bytes();
330
331 // Get type from tags.
332 __ addi(Rscratch2, Rscratch2, tags_offset);
333 __ lbzx(Rscratch2, Rscratch2, Rscratch1);
334
335 __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
336 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
337 __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
338
339 // Resolved class - need to call vm to get java mirror of the class.
340 __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
341 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
342 __ beq(CCR0, notClass);
343
344 __ li(R4, wide ? 1 : 0);
345 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
346 __ push(atos);
347 __ b(exit);
348
349 __ align(32, 12);
350 __ bind(notClass);
351 __ addi(Rcpool, Rcpool, base_offset);
352 __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
353 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
354 __ bne(CCR0, notInt);
355 __ lwax(R17_tos, Rcpool, Rscratch1);
356 __ push(itos);
357 __ b(exit);
358
359 __ align(32, 12);
360 __ bind(notInt);
361 #ifdef ASSERT
362 // String and Object are rewritten to fast_aldc
363 __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
364 __ asm_assert_eq("unexpected type", 0x8765);
365 #endif
366 __ lfsx(F15_ftos, Rcpool, Rscratch1);
367 __ push(ftos);
368
369 __ align(32, 12);
370 __ bind(exit);
371 }
372
373 // Fast path for caching oop constants.
374 void TemplateTable::fast_aldc(bool wide) {
375 transition(vtos, atos);
376
377 int index_size = wide ? sizeof(u2) : sizeof(u1);
378 const Register Rscratch = R11_scratch1;
379 Label resolved;
380
381 // We are resolved if the resolved reference cache entry contains a
382 // non-null object (CallSite, etc.)
383 __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index.
384 __ load_resolved_reference_at_index(R17_tos, Rscratch);
385 __ cmpdi(CCR0, R17_tos, 0);
386 __ bne(CCR0, resolved);
387 __ load_const_optimized(R3_ARG1, (int)bytecode());
388
389 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
390
391 // First time invocation - must resolve first.
392 __ call_VM(R17_tos, entry, R3_ARG1);
393
394 __ align(32, 12);
395 __ bind(resolved);
396 __ verify_oop(R17_tos);
397 }
398
399 void TemplateTable::ldc2_w() {
400 transition(vtos, vtos);
401 Label Llong, Lexit;
402
403 Register Rindex = R11_scratch1,
404 Rcpool = R12_scratch2,
405 Rtag = R3_ARG1;
406 __ get_cpool_and_tags(Rcpool, Rtag);
407 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
408
409 const int base_offset = ConstantPool::header_size() * wordSize;
410 const int tags_offset = Array<u1>::base_offset_in_bytes();
411 // Get type from tags.
412 __ addi(Rcpool, Rcpool, base_offset);
413 __ addi(Rtag, Rtag, tags_offset);
414
415 __ lbzx(Rtag, Rtag, Rindex);
416
417 __ sldi(Rindex, Rindex, LogBytesPerWord);
418 __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
419 __ bne(CCR0, Llong);
420 // A double can be placed at word-aligned locations in the constant pool.
421 // Check out Conversions.java for an example.
422 // Also ConstantPool::header_size() is 20, which makes it very difficult
423 // to double-align double on the constant pool. SG, 11/7/97
424 __ lfdx(F15_ftos, Rcpool, Rindex);
425 __ push(dtos);
426 __ b(Lexit);
427
428 __ bind(Llong);
429 __ ldx(R17_tos, Rcpool, Rindex);
430 __ push(ltos);
431
432 __ bind(Lexit);
433 }
434
435 // Get the locals index located in the bytecode stream at bcp + offset.
436 void TemplateTable::locals_index(Register Rdst, int offset) {
437 __ lbz(Rdst, offset, R14_bcp);
438 }
439
440 void TemplateTable::iload() {
441 transition(vtos, itos);
442
443 // Get the local value into tos
444 const Register Rindex = R22_tmp2;
445 locals_index(Rindex);
446
447 // Rewrite iload,iload pair into fast_iload2
448 // iload,caload pair into fast_icaload
449 if (RewriteFrequentPairs) {
450 Label Lrewrite, Ldone;
451 Register Rnext_byte = R3_ARG1,
452 Rrewrite_to = R6_ARG4,
453 Rscratch = R11_scratch1;
454
455 // get next byte
456 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
457
458 // if _iload, wait to rewrite to iload2. We only want to rewrite the
459 // last two iloads in a pair. Comparing against fast_iload means that
460 // the next bytecode is neither an iload or a caload, and therefore
461 // an iload pair.
462 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
463 __ beq(CCR0, Ldone);
464
465 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
466 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
467 __ beq(CCR1, Lrewrite);
468
469 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
470 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
471 __ beq(CCR0, Lrewrite);
472
473 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
474
475 __ bind(Lrewrite);
476 patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
477 __ bind(Ldone);
478 }
479
480 __ load_local_int(R17_tos, Rindex, Rindex);
481 }
482
483 // Load 2 integers in a row without dispatching
484 void TemplateTable::fast_iload2() {
485 transition(vtos, itos);
486
487 __ lbz(R3_ARG1, 1, R14_bcp);
488 __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);
489
490 __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
491 __ load_local_int(R17_tos, R12_scratch2, R17_tos);
492 __ push_i(R3_ARG1);
493 }
494
495 void TemplateTable::fast_iload() {
496 transition(vtos, itos);
497 // Get the local value into tos
498
499 const Register Rindex = R11_scratch1;
500 locals_index(Rindex);
501 __ load_local_int(R17_tos, Rindex, Rindex);
502 }
503
504 // Load a local variable type long from locals area to TOS cache register.
505 // Local index resides in bytecodestream.
506 void TemplateTable::lload() {
507 transition(vtos, ltos);
508
509 const Register Rindex = R11_scratch1;
510 locals_index(Rindex);
511 __ load_local_long(R17_tos, Rindex, Rindex);
512 }
513
514 void TemplateTable::fload() {
515 transition(vtos, ftos);
516
517 const Register Rindex = R11_scratch1;
518 locals_index(Rindex);
519 __ load_local_float(F15_ftos, Rindex, Rindex);
520 }
521
522 void TemplateTable::dload() {
523 transition(vtos, dtos);
524
525 const Register Rindex = R11_scratch1;
526 locals_index(Rindex);
527 __ load_local_double(F15_ftos, Rindex, Rindex);
528 }
529
530 void TemplateTable::aload() {
531 transition(vtos, atos);
532
533 const Register Rindex = R11_scratch1;
534 locals_index(Rindex);
535 __ load_local_ptr(R17_tos, Rindex, Rindex);
536 }
537
538 void TemplateTable::locals_index_wide(Register Rdst) {
539 // Offset is 2, not 1, because Lbcp points to wide prefix code.
540 __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
541 }
542
543 void TemplateTable::wide_iload() {
544 // Get the local value into tos.
545
546 const Register Rindex = R11_scratch1;
547 locals_index_wide(Rindex);
548 __ load_local_int(R17_tos, Rindex, Rindex);
549 }
550
551 void TemplateTable::wide_lload() {
552 transition(vtos, ltos);
553
554 const Register Rindex = R11_scratch1;
555 locals_index_wide(Rindex);
556 __ load_local_long(R17_tos, Rindex, Rindex);
557 }
558
559 void TemplateTable::wide_fload() {
560 transition(vtos, ftos);
561
562 const Register Rindex = R11_scratch1;
563 locals_index_wide(Rindex);
564 __ load_local_float(F15_ftos, Rindex, Rindex);
565 }
566
567 void TemplateTable::wide_dload() {
568 transition(vtos, dtos);
569
570 const Register Rindex = R11_scratch1;
571 locals_index_wide(Rindex);
572 __ load_local_double(F15_ftos, Rindex, Rindex);
573 }
574
575 void TemplateTable::wide_aload() {
576 transition(vtos, atos);
577
578 const Register Rindex = R11_scratch1;
579 locals_index_wide(Rindex);
580 __ load_local_ptr(R17_tos, Rindex, Rindex);
581 }
582
583 void TemplateTable::iaload() {
584 transition(itos, itos);
585
586 const Register Rload_addr = R3_ARG1,
587 Rarray = R4_ARG2,
588 Rtemp = R5_ARG3;
589 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
590 __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
591 }
592
593 void TemplateTable::laload() {
594 transition(itos, ltos);
595
596 const Register Rload_addr = R3_ARG1,
597 Rarray = R4_ARG2,
598 Rtemp = R5_ARG3;
599 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
600 __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
601 }
602
603 void TemplateTable::faload() {
604 transition(itos, ftos);
605
606 const Register Rload_addr = R3_ARG1,
607 Rarray = R4_ARG2,
608 Rtemp = R5_ARG3;
609 __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
610 __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
611 }
612
613 void TemplateTable::daload() {
614 transition(itos, dtos);
615
616 const Register Rload_addr = R3_ARG1,
617 Rarray = R4_ARG2,
618 Rtemp = R5_ARG3;
619 __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
620 __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
621 }
622
623 void TemplateTable::aaload() {
624 transition(itos, atos);
625
626 // tos: index
627 // result tos: array
628 const Register Rload_addr = R3_ARG1,
629 Rarray = R4_ARG2,
630 Rtemp = R5_ARG3;
631 __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
632 __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
633 __ verify_oop(R17_tos);
634 //__ dcbt(R17_tos); // prefetch
635 }
636
637 void TemplateTable::baload() {
638 transition(itos, itos);
639
640 const Register Rload_addr = R3_ARG1,
641 Rarray = R4_ARG2,
642 Rtemp = R5_ARG3;
643 __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
644 __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
645 __ extsb(R17_tos, R17_tos);
646 }
647
648 void TemplateTable::caload() {
649 transition(itos, itos);
650
651 const Register Rload_addr = R3_ARG1,
652 Rarray = R4_ARG2,
653 Rtemp = R5_ARG3;
654 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
655 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
656 }
657
658 // Iload followed by caload frequent pair.
659 void TemplateTable::fast_icaload() {
660 transition(vtos, itos);
661
662 const Register Rload_addr = R3_ARG1,
663 Rarray = R4_ARG2,
664 Rtemp = R11_scratch1;
665
666 locals_index(R17_tos);
667 __ load_local_int(R17_tos, Rtemp, R17_tos);
668 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
669 __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
670 }
671
672 void TemplateTable::saload() {
673 transition(itos, itos);
674
675 const Register Rload_addr = R11_scratch1,
676 Rarray = R12_scratch2,
677 Rtemp = R3_ARG1;
678 __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
679 __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
680 }
681
682 void TemplateTable::iload(int n) {
683 transition(vtos, itos);
684
685 __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
686 }
687
688 void TemplateTable::lload(int n) {
689 transition(vtos, ltos);
690
691 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
692 }
693
694 void TemplateTable::fload(int n) {
695 transition(vtos, ftos);
696
697 __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
698 }
699
700 void TemplateTable::dload(int n) {
701 transition(vtos, dtos);
702
703 __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
704 }
705
706 void TemplateTable::aload(int n) {
707 transition(vtos, atos);
708
709 __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
710 }
711
712 void TemplateTable::aload_0() {
713 transition(vtos, atos);
714 // According to bytecode histograms, the pairs:
715 //
716 // _aload_0, _fast_igetfield
717 // _aload_0, _fast_agetfield
718 // _aload_0, _fast_fgetfield
719 //
720 // occur frequently. If RewriteFrequentPairs is set, the (slow)
721 // _aload_0 bytecode checks if the next bytecode is either
722 // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
723 // rewrites the current bytecode into a pair bytecode; otherwise it
724 // rewrites the current bytecode into _0 that doesn't do
725 // the pair check anymore.
726 //
727 // Note: If the next bytecode is _getfield, the rewrite must be
728 // delayed, otherwise we may miss an opportunity for a pair.
729 //
730 // Also rewrite frequent pairs
731 // aload_0, aload_1
732 // aload_0, iload_1
733 // These bytecodes with a small amount of code are most profitable
734 // to rewrite.
735
736 if (RewriteFrequentPairs) {
737
738 Label Lrewrite, Ldont_rewrite;
739 Register Rnext_byte = R3_ARG1,
740 Rrewrite_to = R6_ARG4,
741 Rscratch = R11_scratch1;
742
743 // Get next byte.
744 __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
745
746 // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
747 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
748 __ beq(CCR0, Ldont_rewrite);
749
750 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
751 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
752 __ beq(CCR1, Lrewrite);
753
754 __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
755 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
756 __ beq(CCR0, Lrewrite);
757
758 __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
759 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
760 __ beq(CCR1, Lrewrite);
761
762 __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);
763
764 __ bind(Lrewrite);
765 patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
766 __ bind(Ldont_rewrite);
767 }
768
769 // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
770 aload(0);
771 }
772
773 void TemplateTable::istore() {
774 transition(itos, vtos);
775
776 const Register Rindex = R11_scratch1;
777 locals_index(Rindex);
778 __ store_local_int(R17_tos, Rindex);
779 }
780
781 void TemplateTable::lstore() {
782 transition(ltos, vtos);
783 const Register Rindex = R11_scratch1;
784 locals_index(Rindex);
785 __ store_local_long(R17_tos, Rindex);
786 }
787
788 void TemplateTable::fstore() {
789 transition(ftos, vtos);
790
791 const Register Rindex = R11_scratch1;
792 locals_index(Rindex);
793 __ store_local_float(F15_ftos, Rindex);
794 }
795
796 void TemplateTable::dstore() {
797 transition(dtos, vtos);
798
799 const Register Rindex = R11_scratch1;
800 locals_index(Rindex);
801 __ store_local_double(F15_ftos, Rindex);
802 }
803
804 void TemplateTable::astore() {
805 transition(vtos, vtos);
806
807 const Register Rindex = R11_scratch1;
808 __ pop_ptr();
809 __ verify_oop_or_return_address(R17_tos, Rindex);
810 locals_index(Rindex);
811 __ store_local_ptr(R17_tos, Rindex);
812 }
813
814 void TemplateTable::wide_istore() {
815 transition(vtos, vtos);
816
817 const Register Rindex = R11_scratch1;
818 __ pop_i();
819 locals_index_wide(Rindex);
820 __ store_local_int(R17_tos, Rindex);
821 }
822
823 void TemplateTable::wide_lstore() {
824 transition(vtos, vtos);
825
826 const Register Rindex = R11_scratch1;
827 __ pop_l();
828 locals_index_wide(Rindex);
829 __ store_local_long(R17_tos, Rindex);
830 }
831
832 void TemplateTable::wide_fstore() {
833 transition(vtos, vtos);
834
835 const Register Rindex = R11_scratch1;
836 __ pop_f();
837 locals_index_wide(Rindex);
838 __ store_local_float(F15_ftos, Rindex);
839 }
840
841 void TemplateTable::wide_dstore() {
842 transition(vtos, vtos);
843
844 const Register Rindex = R11_scratch1;
845 __ pop_d();
846 locals_index_wide(Rindex);
847 __ store_local_double(F15_ftos, Rindex);
848 }
849
850 void TemplateTable::wide_astore() {
851 transition(vtos, vtos);
852
853 const Register Rindex = R11_scratch1;
854 __ pop_ptr();
855 __ verify_oop_or_return_address(R17_tos, Rindex);
856 locals_index_wide(Rindex);
857 __ store_local_ptr(R17_tos, Rindex);
858 }
859
860 void TemplateTable::iastore() {
861 transition(itos, vtos);
862
863 const Register Rindex = R3_ARG1,
864 Rstore_addr = R4_ARG2,
865 Rarray = R5_ARG3,
866 Rtemp = R6_ARG4;
867 __ pop_i(Rindex);
868 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
869 __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
870 }
871
872 void TemplateTable::lastore() {
873 transition(ltos, vtos);
874
875 const Register Rindex = R3_ARG1,
876 Rstore_addr = R4_ARG2,
877 Rarray = R5_ARG3,
878 Rtemp = R6_ARG4;
879 __ pop_i(Rindex);
880 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
881 __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
882 }
883
884 void TemplateTable::fastore() {
885 transition(ftos, vtos);
886
887 const Register Rindex = R3_ARG1,
888 Rstore_addr = R4_ARG2,
889 Rarray = R5_ARG3,
890 Rtemp = R6_ARG4;
891 __ pop_i(Rindex);
892 __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
893 __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
894 }
895
896 void TemplateTable::dastore() {
897 transition(dtos, vtos);
898
899 const Register Rindex = R3_ARG1,
900 Rstore_addr = R4_ARG2,
901 Rarray = R5_ARG3,
902 Rtemp = R6_ARG4;
903 __ pop_i(Rindex);
904 __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
905 __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
906 }
907
908 // Pop 3 values from the stack and...
909 void TemplateTable::aastore() {
910 transition(vtos, vtos);
911
912 Label Lstore_ok, Lis_null, Ldone;
913 const Register Rindex = R3_ARG1,
914 Rarray = R4_ARG2,
915 Rscratch = R11_scratch1,
916 Rscratch2 = R12_scratch2,
917 Rarray_klass = R5_ARG3,
918 Rarray_element_klass = Rarray_klass,
919 Rvalue_klass = R6_ARG4,
920 Rstore_addr = R31; // Use register which survives VM call.
921
922 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
923 __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
924 __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp); // Get array.
925
926 __ verify_oop(R17_tos);
927 __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
928 // Rindex is dead!
929 Register Rscratch3 = Rindex;
930
931 // Do array store check - check for NULL value first.
932 __ cmpdi(CCR0, R17_tos, 0);
933 __ beq(CCR0, Lis_null);
934
935 __ load_klass(Rarray_klass, Rarray);
936 __ load_klass(Rvalue_klass, R17_tos);
937
938 // Do fast instanceof cache test.
939 __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);
940
941 // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
942 __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);
943
944 // Fell through: subtype check failed => throw an exception.
945 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
946 __ mtctr(R11_scratch1);
947 __ bctr();
948
949 __ bind(Lis_null);
950 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
951 Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
952 __ profile_null_seen(Rscratch, Rscratch2);
953 __ b(Ldone);
954
955 // Store is OK.
956 __ bind(Lstore_ok);
957 do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
958 Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
959
960 __ bind(Ldone);
961 // Adjust sp (pops array, index and value).
962 __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
963 }
964
965 void TemplateTable::bastore() {
966 transition(itos, vtos);
967
968 const Register Rindex = R11_scratch1,
969 Rarray = R12_scratch2,
970 Rscratch = R3_ARG1;
971 __ pop_i(Rindex);
972 // tos: val
973 // Rarray: array ptr (popped by index_check)
974 __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
975 __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
976 }
977
978 void TemplateTable::castore() {
979 transition(itos, vtos);
980
981 const Register Rindex = R11_scratch1,
982 Rarray = R12_scratch2,
983 Rscratch = R3_ARG1;
984 __ pop_i(Rindex);
985 // tos: val
986 // Rarray: array ptr (popped by index_check)
987 __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
988 __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
989 }
990
991 void TemplateTable::sastore() {
992 castore();
993 }
994
995 void TemplateTable::istore(int n) {
996 transition(itos, vtos);
997 __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
998 }
999
1000 void TemplateTable::lstore(int n) {
1001 transition(ltos, vtos);
1002 __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
1003 }
1004
1005 void TemplateTable::fstore(int n) {
1006 transition(ftos, vtos);
1007 __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
1008 }
1009
1010 void TemplateTable::dstore(int n) {
1011 transition(dtos, vtos);
1012 __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
1013 }
1014
1015 void TemplateTable::astore(int n) {
1016 transition(vtos, vtos);
1017
1018 __ pop_ptr();
1019 __ verify_oop_or_return_address(R17_tos, R11_scratch1);
1020 __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
1021 }
1022
1023 void TemplateTable::pop() {
1024 transition(vtos, vtos);
1025
1026 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
1027 }
1028
1029 void TemplateTable::pop2() {
1030 transition(vtos, vtos);
1031
1032 __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
1033 }
1034
1035 void TemplateTable::dup() {
1036 transition(vtos, vtos);
1037
1038 __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
1039 __ push_ptr(R11_scratch1);
1040 }
1041
1042 void TemplateTable::dup_x1() {
1043 transition(vtos, vtos);
1044
1045 Register Ra = R11_scratch1,
1046 Rb = R12_scratch2;
1047 // stack: ..., a, b
1048 __ ld(Rb, Interpreter::stackElementSize, R15_esp);
1049 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
1050 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
1051 __ std(Ra, Interpreter::stackElementSize, R15_esp);
1052 __ push_ptr(Rb);
1053 // stack: ..., b, a, b
1054 }
1055
1056 void TemplateTable::dup_x2() {
1057 transition(vtos, vtos);
1058
1059 Register Ra = R11_scratch1,
1060 Rb = R12_scratch2,
1061 Rc = R3_ARG1;
1062
1063 // stack: ..., a, b, c
1064 __ ld(Rc, Interpreter::stackElementSize, R15_esp); // load c
1065 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
1066 __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
1067 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); // load b
1068 // stack: ..., c, b, c
1069 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
1070 // stack: ..., c, a, c
1071 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in c
1072 __ push_ptr(Rc); // push c
1073 // stack: ..., c, a, b, c
1074 }
1075
1076 void TemplateTable::dup2() {
1077 transition(vtos, vtos);
1078
1079 Register Ra = R11_scratch1,
1080 Rb = R12_scratch2;
1081 // stack: ..., a, b
1082 __ ld(Rb, Interpreter::stackElementSize, R15_esp);
1083 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
1084 __ push_2ptrs(Ra, Rb);
1085 // stack: ..., a, b, a, b
1086 }
1087
1088 void TemplateTable::dup2_x1() {
1089 transition(vtos, vtos);
1090
1091 Register Ra = R11_scratch1,
1092 Rb = R12_scratch2,
1093 Rc = R3_ARG1;
1094 // stack: ..., a, b, c
1095 __ ld(Rc, Interpreter::stackElementSize, R15_esp);
1096 __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
1097 __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
1098 __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
1099 __ std(Ra, Interpreter::stackElementSize, R15_esp);
1100 __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
1101 // stack: ..., b, c, a
1102 __ push_2ptrs(Rb, Rc);
1103 // stack: ..., b, c, a, b, c
1104 }
1105
1106 void TemplateTable::dup2_x2() {
1107 transition(vtos, vtos);
1108
1109 Register Ra = R11_scratch1,
1110 Rb = R12_scratch2,
1111 Rc = R3_ARG1,
1112 Rd = R4_ARG2;
1113 // stack: ..., a, b, c, d
1114 __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
1115 __ ld(Rd, Interpreter::stackElementSize, R15_esp);
1116 __ std(Rb, Interpreter::stackElementSize, R15_esp); // store b in d
1117 __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
1118 __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
1119 __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
1120 __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
1121 __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
1122 // stack: ..., c, d, a, b
1123 __ push_2ptrs(Rc, Rd);
1124 // stack: ..., c, d, a, b, c, d
1125 }
1126
1127 void TemplateTable::swap() {
1128 transition(vtos, vtos);
1129 // stack: ..., a, b
1130
1131 Register Ra = R11_scratch1,
1132 Rb = R12_scratch2;
1133 // stack: ..., a, b
1134 __ ld(Rb, Interpreter::stackElementSize, R15_esp);
1135 __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
1136 __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
1137 __ std(Ra, Interpreter::stackElementSize, R15_esp);
1138 // stack: ..., b, a
1139 }
1140
1141 void TemplateTable::iop2(Operation op) {
1142 transition(itos, itos);
1143
1144 Register Rscratch = R11_scratch1;
1145
1146 __ pop_i(Rscratch);
1147 // tos = number of bits to shift
1148 // Rscratch = value to shift
1149 switch (op) {
1150 case add: __ add(R17_tos, Rscratch, R17_tos); break;
1151 case sub: __ sub(R17_tos, Rscratch, R17_tos); break;
1152 case mul: __ mullw(R17_tos, Rscratch, R17_tos); break;
1153 case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
1154 case _or: __ orr(R17_tos, Rscratch, R17_tos); break;
1155 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
1156 case shl: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
1157 case shr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
1158 case ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
1159 default: ShouldNotReachHere();
1160 }
1161 }
1162
1163 void TemplateTable::lop2(Operation op) {
1164 transition(ltos, ltos);
1165
1166 Register Rscratch = R11_scratch1;
1167 __ pop_l(Rscratch);
1168 switch (op) {
1169 case add: __ add(R17_tos, Rscratch, R17_tos); break;
1170 case sub: __ sub(R17_tos, Rscratch, R17_tos); break;
1171 case _and: __ andr(R17_tos, Rscratch, R17_tos); break;
1172 case _or: __ orr(R17_tos, Rscratch, R17_tos); break;
1173 case _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
1174 default: ShouldNotReachHere();
1175 }
1176 }
1177
1178 void TemplateTable::idiv() {
1179 transition(itos, itos);
1180
1181 Label Lnormal, Lexception, Ldone;
1182 Register Rdividend = R11_scratch1; // Used by irem.
1183
1184 __ addi(R0, R17_tos, 1);
1185 __ cmplwi(CCR0, R0, 2);
1186 __ bgt(CCR0, Lnormal); // divisor <-1 or >1
1187
1188 __ cmpwi(CCR1, R17_tos, 0);
1189 __ beq(CCR1, Lexception); // divisor == 0
1190
1191 __ pop_i(Rdividend);
1192 __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
1193 __ b(Ldone);
1194
1195 __ bind(Lexception);
1196 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
1197 __ mtctr(R11_scratch1);
1198 __ bctr();
1199
1200 __ align(32, 12);
1201 __ bind(Lnormal);
1202 __ pop_i(Rdividend);
1203 __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
1204 __ bind(Ldone);
1205 }
1206
1207 void TemplateTable::irem() {
1208 transition(itos, itos);
1209
1210 __ mr(R12_scratch2, R17_tos);
1211 idiv();
1212 __ mullw(R17_tos, R17_tos, R12_scratch2);
1213 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
1214 }
1215
1216 void TemplateTable::lmul() {
1217 transition(ltos, ltos);
1218
1219 __ pop_l(R11_scratch1);
1220 __ mulld(R17_tos, R11_scratch1, R17_tos);
1221 }
1222
1223 void TemplateTable::ldiv() {
1224 transition(ltos, ltos);
1225
1226 Label Lnormal, Lexception, Ldone;
1227 Register Rdividend = R11_scratch1; // Used by lrem.
1228
1229 __ addi(R0, R17_tos, 1);
1230 __ cmpldi(CCR0, R0, 2);
1231 __ bgt(CCR0, Lnormal); // divisor <-1 or >1
1232
1233 __ cmpdi(CCR1, R17_tos, 0);
1234 __ beq(CCR1, Lexception); // divisor == 0
1235
1236 __ pop_l(Rdividend);
1237 __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
1238 __ b(Ldone);
1239
1240 __ bind(Lexception);
1241 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
1242 __ mtctr(R11_scratch1);
1243 __ bctr();
1244
1245 __ align(32, 12);
1246 __ bind(Lnormal);
1247 __ pop_l(Rdividend);
1248 __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
1249 __ bind(Ldone);
1250 }
1251
1252 void TemplateTable::lrem() {
1253 transition(ltos, ltos);
1254
1255 __ mr(R12_scratch2, R17_tos);
1256 ldiv();
1257 __ mulld(R17_tos, R17_tos, R12_scratch2);
1258 __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
1259 }
1260
1261 void TemplateTable::lshl() {
1262 transition(itos, ltos);
1263
1264 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
1265 __ pop_l(R11_scratch1);
1266 __ sld(R17_tos, R11_scratch1, R17_tos);
1267 }
1268
1269 void TemplateTable::lshr() {
1270 transition(itos, ltos);
1271
1272 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
1273 __ pop_l(R11_scratch1);
1274 __ srad(R17_tos, R11_scratch1, R17_tos);
1275 }
1276
1277 void TemplateTable::lushr() {
1278 transition(itos, ltos);
1279
1280 __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
1281 __ pop_l(R11_scratch1);
1282 __ srd(R17_tos, R11_scratch1, R17_tos);
1283 }
1284
1285 void TemplateTable::fop2(Operation op) {
1286 transition(ftos, ftos);
1287
1288 switch (op) {
1289 case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
1290 case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
1291 case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
1292 case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
1293 case rem:
1294 __ pop_f(F1_ARG1);
1295 __ fmr(F2_ARG2, F15_ftos);
1296 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1297 __ fmr(F15_ftos, F1_RET);
1298 break;
1299
1300 default: ShouldNotReachHere();
1301 }
1302 }
1303
1304 void TemplateTable::dop2(Operation op) {
1305 transition(dtos, dtos);
1306
1307 switch (op) {
1308 case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
1309 case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
1310 case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
1311 case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
1312 case rem:
1313 __ pop_d(F1_ARG1);
1314 __ fmr(F2_ARG2, F15_ftos);
1315 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1316 __ fmr(F15_ftos, F1_RET);
1317 break;
1318
1319 default: ShouldNotReachHere();
1320 }
1321 }
1322
1323 // Negate the value in the TOS cache.
1324 void TemplateTable::ineg() {
1325 transition(itos, itos);
1326
1327 __ neg(R17_tos, R17_tos);
1328 }
1329
1330 // Negate the value in the TOS cache.
1331 void TemplateTable::lneg() {
1332 transition(ltos, ltos);
1333
1334 __ neg(R17_tos, R17_tos);
1335 }
1336
1337 void TemplateTable::fneg() {
1338 transition(ftos, ftos);
1339
1340 __ fneg(F15_ftos, F15_ftos);
1341 }
1342
1343 void TemplateTable::dneg() {
1344 transition(dtos, dtos);
1345
1346 __ fneg(F15_ftos, F15_ftos);
1347 }
1348
1349 // Increments a local variable in place.
1350 void TemplateTable::iinc() {
1351 transition(vtos, vtos);
1352
1353 const Register Rindex = R11_scratch1,
1354 Rincrement = R0,
1355 Rvalue = R12_scratch2;
1356
1357 locals_index(Rindex); // Load locals index from bytecode stream.
1358 __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
1359 __ extsb(Rincrement, Rincrement);
1360
1361 __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.
1362
1363 __ add(Rvalue, Rincrement, Rvalue);
1364 __ stw(Rvalue, 0, Rindex);
1365 }
1366
1367 void TemplateTable::wide_iinc() {
1368 transition(vtos, vtos);
1369
1370 Register Rindex = R11_scratch1,
1371 Rlocals_addr = Rindex,
1372 Rincr = R12_scratch2;
1373 locals_index_wide(Rindex);
1374 __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
1375 __ load_local_int(R17_tos, Rlocals_addr, Rindex);
1376 __ add(R17_tos, Rincr, R17_tos);
1377 __ stw(R17_tos, 0, Rlocals_addr);
1378 }
1379
1380 void TemplateTable::convert() {
1381 // %%%%% Factor this first part accross platforms
1382 #ifdef ASSERT
1383 TosState tos_in = ilgl;
1384 TosState tos_out = ilgl;
1385 switch (bytecode()) {
1386 case Bytecodes::_i2l: // fall through
1387 case Bytecodes::_i2f: // fall through
1388 case Bytecodes::_i2d: // fall through
1389 case Bytecodes::_i2b: // fall through
1390 case Bytecodes::_i2c: // fall through
1391 case Bytecodes::_i2s: tos_in = itos; break;
1392 case Bytecodes::_l2i: // fall through
1393 case Bytecodes::_l2f: // fall through
1394 case Bytecodes::_l2d: tos_in = ltos; break;
1395 case Bytecodes::_f2i: // fall through
1396 case Bytecodes::_f2l: // fall through
1397 case Bytecodes::_f2d: tos_in = ftos; break;
1398 case Bytecodes::_d2i: // fall through
1399 case Bytecodes::_d2l: // fall through
1400 case Bytecodes::_d2f: tos_in = dtos; break;
1401 default : ShouldNotReachHere();
1402 }
1403 switch (bytecode()) {
1404 case Bytecodes::_l2i: // fall through
1405 case Bytecodes::_f2i: // fall through
1406 case Bytecodes::_d2i: // fall through
1407 case Bytecodes::_i2b: // fall through
1408 case Bytecodes::_i2c: // fall through
1409 case Bytecodes::_i2s: tos_out = itos; break;
1410 case Bytecodes::_i2l: // fall through
1411 case Bytecodes::_f2l: // fall through
1412 case Bytecodes::_d2l: tos_out = ltos; break;
1413 case Bytecodes::_i2f: // fall through
1414 case Bytecodes::_l2f: // fall through
1415 case Bytecodes::_d2f: tos_out = ftos; break;
1416 case Bytecodes::_i2d: // fall through
1417 case Bytecodes::_l2d: // fall through
1418 case Bytecodes::_f2d: tos_out = dtos; break;
1419 default : ShouldNotReachHere();
1420 }
1421 transition(tos_in, tos_out);
1422 #endif
1423
1424 // Conversion
1425 Label done;
1426 switch (bytecode()) {
1427 case Bytecodes::_i2l:
1428 __ extsw(R17_tos, R17_tos);
1429 break;
1430
1431 case Bytecodes::_l2i:
1432 // Nothing to do, we'll continue to work with the lower bits.
1433 break;
1434
1435 case Bytecodes::_i2b:
1436 __ extsb(R17_tos, R17_tos);
1437 break;
1438
1439 case Bytecodes::_i2c:
1440 __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
1441 break;
1442
1443 case Bytecodes::_i2s:
1444 __ extsh(R17_tos, R17_tos);
1445 break;
1446
1447 case Bytecodes::_i2d:
1448 __ extsw(R17_tos, R17_tos);
1449 case Bytecodes::_l2d:
1450 __ push_l_pop_d();
1451 __ fcfid(F15_ftos, F15_ftos);
1452 break;
1453
1454 case Bytecodes::_i2f:
1455 __ extsw(R17_tos, R17_tos);
1456 __ push_l_pop_d();
1457 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
1458 // Comment: alternatively, load with sign extend could be done by lfiwax.
1459 __ fcfids(F15_ftos, F15_ftos);
1460 } else {
1461 __ fcfid(F15_ftos, F15_ftos);
1462 __ frsp(F15_ftos, F15_ftos);
1463 }
1464 break;
1465
1466 case Bytecodes::_l2f:
1467 if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
1468 __ push_l_pop_d();
1469 __ fcfids(F15_ftos, F15_ftos);
1470 } else {
1471 // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
1472 __ mr(R3_ARG1, R17_tos);
1473 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
1474 __ fmr(F15_ftos, F1_RET);
1475 }
1476 break;
1477
1478 case Bytecodes::_f2d:
1479 // empty
1480 break;
1481
1482 case Bytecodes::_d2f:
1483 __ frsp(F15_ftos, F15_ftos);
1484 break;
1485
1486 case Bytecodes::_d2i:
1487 case Bytecodes::_f2i:
1488 __ fcmpu(CCR0, F15_ftos, F15_ftos);
1489 __ li(R17_tos, 0); // 0 in case of NAN
1490 __ bso(CCR0, done);
1491 __ fctiwz(F15_ftos, F15_ftos);
1492 __ push_d_pop_l();
1493 break;
1494
1495 case Bytecodes::_d2l:
1496 case Bytecodes::_f2l:
1497 __ fcmpu(CCR0, F15_ftos, F15_ftos);
1498 __ li(R17_tos, 0); // 0 in case of NAN
1499 __ bso(CCR0, done);
1500 __ fctidz(F15_ftos, F15_ftos);
1501 __ push_d_pop_l();
1502 break;
1503
1504 default: ShouldNotReachHere();
1505 }
1506 __ bind(done);
1507 }
1508
1509 // Long compare
1510 void TemplateTable::lcmp() {
1511 transition(ltos, itos);
1512
1513 const Register Rscratch = R11_scratch1;
1514 __ pop_l(Rscratch); // first operand, deeper in stack
1515
1516 __ cmpd(CCR0, Rscratch, R17_tos); // compare
1517 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
1518 __ srwi(Rscratch, R17_tos, 30);
1519 __ srawi(R17_tos, R17_tos, 31);
1520 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
1521 }
1522
1523 // fcmpl/fcmpg and dcmpl/dcmpg bytecodes
1524 // unordered_result == -1 => fcmpl or dcmpl
1525 // unordered_result == 1 => fcmpg or dcmpg
1526 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1527 const FloatRegister Rfirst = F0_SCRATCH,
1528 Rsecond = F15_ftos;
1529 const Register Rscratch = R11_scratch1;
1530
1531 if (is_float) {
1532 __ pop_f(Rfirst);
1533 } else {
1534 __ pop_d(Rfirst);
1535 }
1536
1537 Label Lunordered, Ldone;
1538 __ fcmpu(CCR0, Rfirst, Rsecond); // compare
1539 if (unordered_result) {
1540 __ bso(CCR0, Lunordered);
1541 }
1542 __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
1543 __ srwi(Rscratch, R17_tos, 30);
1544 __ srawi(R17_tos, R17_tos, 31);
1545 __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
1546 if (unordered_result) {
1547 __ b(Ldone);
1548 __ bind(Lunordered);
1549 __ load_const_optimized(R17_tos, unordered_result);
1550 }
1551 __ bind(Ldone);
1552 }
1553
1554 // Branch_conditional which takes TemplateTable::Condition.
1555 void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
1556 bool positive = false;
1557 Assembler::Condition cond = Assembler::equal;
1558 switch (cc) {
1559 case TemplateTable::equal: positive = true ; cond = Assembler::equal ; break;
1560 case TemplateTable::not_equal: positive = false; cond = Assembler::equal ; break;
1561 case TemplateTable::less: positive = true ; cond = Assembler::less ; break;
1562 case TemplateTable::less_equal: positive = false; cond = Assembler::greater; break;
1563 case TemplateTable::greater: positive = true ; cond = Assembler::greater; break;
1564 case TemplateTable::greater_equal: positive = false; cond = Assembler::less ; break;
1565 default: ShouldNotReachHere();
1566 }
1567 int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
1568 int bi = Assembler::bi0(crx, cond);
1569 __ bc(bo, bi, L);
1570 }
1571
1572 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1573
1574 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1575 __ verify_thread();
1576
1577 const Register Rscratch1 = R11_scratch1,
1578 Rscratch2 = R12_scratch2,
1579 Rscratch3 = R3_ARG1,
1580 R4_counters = R4_ARG2,
1581 bumped_count = R31,
1582 Rdisp = R22_tmp2;
1583
1584 __ profile_taken_branch(Rscratch1, bumped_count);
1585
1586 // Get (wide) offset.
1587 if (is_wide) {
1588 __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
1589 } else {
1590 __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
1591 }
1592
1593 // --------------------------------------------------------------------------
1594 // Handle all the JSR stuff here, then exit.
1595 // It's much shorter and cleaner than intermingling with the
1596 // non-JSR normal-branch stuff occurring below.
1597 if (is_jsr) {
1598 // Compute return address as bci in Otos_i.
1599 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1600 __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
1601 __ subf(R17_tos, Rscratch1, Rscratch2);
1602
1603 // Bump bcp to target of JSR.
1604 __ add(R14_bcp, Rdisp, R14_bcp);
1605 // Push returnAddress for "ret" on stack.
1606 __ push_ptr(R17_tos);
1607 // And away we go!
1608 __ dispatch_next(vtos);
1609 return;
1610 }
1611
1612 // --------------------------------------------------------------------------
1613 // Normal (non-jsr) branch handling
1614
1615 const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1616 if (increment_invocation_counter_for_backward_branches) {
1617 //__ unimplemented("branch invocation counter");
1618
1619 Label Lforward;
1620 __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1621
1622 // Check branch direction.
1623 __ cmpdi(CCR0, Rdisp, 0);
1624 __ bgt(CCR0, Lforward);
1625
1626 __ get_method_counters(R19_method, R4_counters, Lforward);
1627
1628 if (TieredCompilation) {
1629 Label Lno_mdo, Loverflow;
1630 const int increment = InvocationCounter::count_increment;
1631 const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1632 if (ProfileInterpreter) {
1633 Register Rmdo = Rscratch1;
1634
1635 // If no method data exists, go to profile_continue.
1636 __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
1637 __ cmpdi(CCR0, Rmdo, 0);
1638 __ beq(CCR0, Lno_mdo);
1639
1640 // Increment backedge counter in the MDO.
1641 const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1642 __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
1643 __ load_const_optimized(Rscratch3, mask, R0);
1644 __ addi(Rscratch2, Rscratch2, increment);
1645 __ stw(Rscratch2, mdo_bc_offs, Rmdo);
1646 __ and_(Rscratch3, Rscratch2, Rscratch3);
1647 __ bne(CCR0, Lforward);
1648 __ b(Loverflow);
1649 }
1650
1651 // If there's no MDO, increment counter in method.
1652 const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1653 __ bind(Lno_mdo);
1654 __ lwz(Rscratch2, mo_bc_offs, R4_counters);
1655 __ load_const_optimized(Rscratch3, mask, R0);
1656 __ addi(Rscratch2, Rscratch2, increment);
1657 __ stw(Rscratch2, mo_bc_offs, R19_method);
1658 __ and_(Rscratch3, Rscratch2, Rscratch3);
1659 __ bne(CCR0, Lforward);
1660
1661 __ bind(Loverflow);
1662
1663 // Notify point for loop, pass branch bytecode.
1664 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);
1665
1666 // Was an OSR adapter generated?
1667 // O0 = osr nmethod
1668 __ cmpdi(CCR0, R3_RET, 0);
1669 __ beq(CCR0, Lforward);
1670
1671 // Has the nmethod been invalidated already?
1672 __ lbz(R0, nmethod::state_offset(), R3_RET);
1673 __ cmpwi(CCR0, R0, nmethod::in_use);
1674 __ bne(CCR0, Lforward);
1675
1676 // Migrate the interpreter frame off of the stack.
1677 // We can use all registers because we will not return to interpreter from this point.
1678
1679 // Save nmethod.
1680 const Register osr_nmethod = R31;
1681 __ mr(osr_nmethod, R3_RET);
1682 __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1683 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1684 __ reset_last_Java_frame();
1685 // OSR buffer is in ARG1.
1686
1687 // Remove the interpreter frame.
1688 __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1689
1690 // Jump to the osr code.
1691 __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1692 __ mtlr(R0);
1693 __ mtctr(R11_scratch1);
1694 __ bctr();
1695
1696 } else {
1697
1698 const Register invoke_ctr = Rscratch1;
1699 // Update Backedge branch separately from invocations.
1700 __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
1701
1702 if (ProfileInterpreter) {
1703 __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
1704 if (UseOnStackReplacement) {
1705 __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
1706 }
1707 } else {
1708 if (UseOnStackReplacement) {
1709 __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
1710 }
1711 }
1712 }
1713
1714 __ bind(Lforward);
1715
1716 } else {
1717 // Bump bytecode pointer by displacement (take the branch).
1718 __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1719 }
1720 // Continue with bytecode @ target.
1721 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1722 // %%%%% and changing dispatch_next to dispatch_only.
1723 __ dispatch_next(vtos);
1724 }
1725
1726 // Helper function for if_cmp* methods below.
1727 // Factored out common compare and branch code.
1728 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
1729 Label Lnot_taken;
1730 // Note: The condition code we get is the condition under which we
1731 // *fall through*! So we have to inverse the CC here.
1732
1733 if (is_jint) {
1734 if (cmp0) {
1735 __ cmpwi(CCR0, Rfirst, 0);
1736 } else {
1737 __ cmpw(CCR0, Rfirst, Rsecond);
1738 }
1739 } else {
1740 if (cmp0) {
1741 __ cmpdi(CCR0, Rfirst, 0);
1742 } else {
1743 __ cmpd(CCR0, Rfirst, Rsecond);
1744 }
1745 }
1746 branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
1747
1748 // Conition is false => Jump!
1749 branch(false, false);
1750
1751 // Condition is not true => Continue.
1752 __ align(32, 12);
1753 __ bind(Lnot_taken);
1754 __ profile_not_taken_branch(Rscratch1, Rscratch2);
1755 }
1756
1757 // Compare integer values with zero and fall through if CC holds, branch away otherwise.
1758 void TemplateTable::if_0cmp(Condition cc) {
1759 transition(itos, vtos);
1760
1761 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
1762 }
1763
1764 // Compare integer values and fall through if CC holds, branch away otherwise.
1765 //
1766 // Interface:
1767 // - Rfirst: First operand (older stack value)
1768 // - tos: Second operand (younger stack value)
1769 void TemplateTable::if_icmp(Condition cc) {
1770 transition(itos, vtos);
1771
1772 const Register Rfirst = R0,
1773 Rsecond = R17_tos;
1774
1775 __ pop_i(Rfirst);
1776 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
1777 }
1778
1779 void TemplateTable::if_nullcmp(Condition cc) {
1780 transition(atos, vtos);
1781
1782 if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
1783 }
1784
1785 void TemplateTable::if_acmp(Condition cc) {
1786 transition(atos, vtos);
1787
1788 const Register Rfirst = R0,
1789 Rsecond = R17_tos;
1790
1791 __ pop_ptr(Rfirst);
1792 if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
1793 }
1794
1795 void TemplateTable::ret() {
1796 locals_index(R11_scratch1);
1797 __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
1798
1799 __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
1800
1801 __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1802 __ add(R11_scratch1, R17_tos, R11_scratch1);
1803 __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1804 __ dispatch_next(vtos);
1805 }
1806
1807 void TemplateTable::wide_ret() {
1808 transition(vtos, vtos);
1809
1810 const Register Rindex = R3_ARG1,
1811 Rscratch1 = R11_scratch1,
1812 Rscratch2 = R12_scratch2;
1813
1814 locals_index_wide(Rindex);
1815 __ load_local_ptr(R17_tos, R17_tos, Rindex);
1816 __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
1817 // Tos now contains the bci, compute the bcp from that.
1818 __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1819 __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
1820 __ add(R14_bcp, Rscratch1, Rscratch2);
1821 __ dispatch_next(vtos);
1822 }
1823
1824 void TemplateTable::tableswitch() {
1825 transition(itos, vtos);
1826
1827 Label Ldispatch, Ldefault_case;
1828 Register Rlow_byte = R3_ARG1,
1829 Rindex = Rlow_byte,
1830 Rhigh_byte = R4_ARG2,
1831 Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
1832 Rscratch1 = R11_scratch1,
1833 Rscratch2 = R12_scratch2,
1834 Roffset = R6_ARG4;
1835
1836 // Align bcp.
1837 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1838 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1839
1840 // Load lo & hi.
1841 __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1842 __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned);
1843
1844 // Check for default case (=index outside [low,high]).
1845 __ cmpw(CCR0, R17_tos, Rlow_byte);
1846 __ cmpw(CCR1, R17_tos, Rhigh_byte);
1847 __ blt(CCR0, Ldefault_case);
1848 __ bgt(CCR1, Ldefault_case);
1849
1850 // Lookup dispatch offset.
1851 __ sub(Rindex, R17_tos, Rlow_byte);
1852 __ extsw(Rindex, Rindex);
1853 __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
1854 __ sldi(Rindex, Rindex, LogBytesPerInt);
1855 __ addi(Rindex, Rindex, 3 * BytesPerInt);
1856 #if defined(VM_LITTLE_ENDIAN)
1857 __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
1858 __ extsw(Roffset, Roffset);
1859 #else
1860 __ lwax(Roffset, Rdef_offset_addr, Rindex);
1861 #endif
1862 __ b(Ldispatch);
1863
1864 __ bind(Ldefault_case);
1865 __ profile_switch_default(Rhigh_byte, Rscratch1);
1866 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1867
1868 __ bind(Ldispatch);
1869
1870 __ add(R14_bcp, Roffset, R14_bcp);
1871 __ dispatch_next(vtos);
1872 }
1873
1874 void TemplateTable::lookupswitch() {
1875 transition(itos, itos);
1876 __ stop("lookupswitch bytecode should have been rewritten");
1877 }
1878
1879 // Table switch using linear search through cases.
1880 // Bytecode stream format:
1881 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1882 // Note: Everything is big-endian format here.
1883 void TemplateTable::fast_linearswitch() {
1884 transition(itos, vtos);
1885
1886 Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
1887 Register Rcount = R3_ARG1,
1888 Rcurrent_pair = R4_ARG2,
1889 Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
1890 Roffset = R31, // Might need to survive C call.
1891 Rvalue = R12_scratch2,
1892 Rscratch = R11_scratch1,
1893 Rcmp_value = R17_tos;
1894
1895 // Align bcp.
1896 __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1897 __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1898
1899 // Setup loop counter and limit.
1900 __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1901 __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
1902
1903 __ mtctr(Rcount);
1904 __ cmpwi(CCR0, Rcount, 0);
1905 __ bne(CCR0, Lloop_entry);
1906
1907 // Default case
1908 __ bind(Ldefault_case);
1909 __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1910 if (ProfileInterpreter) {
1911 __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
1912 }
1913 __ b(Lcontinue_execution);
1914
1915 // Next iteration
1916 __ bind(Lsearch_loop);
1917 __ bdz(Ldefault_case);
1918 __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
1919 __ bind(Lloop_entry);
1920 __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
1921 __ cmpw(CCR0, Rvalue, Rcmp_value);
1922 __ bne(CCR0, Lsearch_loop);
1923
1924 // Found, load offset.
1925 __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
1926 // Calculate case index and profile
1927 __ mfctr(Rcurrent_pair);
1928 if (ProfileInterpreter) {
1929 __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
1930 __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
1931 }
1932
1933 __ bind(Lcontinue_execution);
1934 __ add(R14_bcp, Roffset, R14_bcp);
1935 __ dispatch_next(vtos);
1936 }
1937
1938 // Table switch using binary search (value/offset pairs are ordered).
1939 // Bytecode stream format:
1940 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1941 // Note: Everything is big-endian format here. So on little endian machines, we have to revers offset and count and cmp value.
1942 void TemplateTable::fast_binaryswitch() {
1943
1944 transition(itos, vtos);
1945 // Implementation using the following core algorithm: (copied from Intel)
1946 //
1947 // int binary_search(int key, LookupswitchPair* array, int n) {
1948 // // Binary search according to "Methodik des Programmierens" by
1949 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1950 // int i = 0;
1951 // int j = n;
1952 // while (i+1 < j) {
1953 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1954 // // with Q: for all i: 0 <= i < n: key < a[i]
1955 // // where a stands for the array and assuming that the (inexisting)
1956 // // element a[n] is infinitely big.
1957 // int h = (i + j) >> 1;
1958 // // i < h < j
1959 // if (key < array[h].fast_match()) {
1960 // j = h;
1961 // } else {
1962 // i = h;
1963 // }
1964 // }
1965 // // R: a[i] <= key < a[i+1] or Q
1966 // // (i.e., if key is within array, i is the correct index)
1967 // return i;
1968 // }
1969
1970 // register allocation
1971 const Register Rkey = R17_tos; // already set (tosca)
1972 const Register Rarray = R3_ARG1;
1973 const Register Ri = R4_ARG2;
1974 const Register Rj = R5_ARG3;
1975 const Register Rh = R6_ARG4;
1976 const Register Rscratch = R11_scratch1;
1977
1978 const int log_entry_size = 3;
1979 const int entry_size = 1 << log_entry_size;
1980
1981 Label found;
1982
1983 // Find Array start,
1984 __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
1985 __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
1986
1987 // initialize i & j
1988 __ li(Ri,0);
1989 __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
1990
1991 // and start.
1992 Label entry;
1993 __ b(entry);
1994
1995 // binary search loop
1996 { Label loop;
1997 __ bind(loop);
1998 // int h = (i + j) >> 1;
1999 __ srdi(Rh, Rh, 1);
2000 // if (key < array[h].fast_match()) {
2001 // j = h;
2002 // } else {
2003 // i = h;
2004 // }
2005 __ sldi(Rscratch, Rh, log_entry_size);
2006 #if defined(VM_LITTLE_ENDIAN)
2007 __ lwbrx(Rscratch, Rscratch, Rarray);
2008 #else
2009 __ lwzx(Rscratch, Rscratch, Rarray);
2010 #endif
2011
2012 // if (key < current value)
2013 // Rh = Rj
2014 // else
2015 // Rh = Ri
2016 Label Lgreater;
2017 __ cmpw(CCR0, Rkey, Rscratch);
2018 __ bge(CCR0, Lgreater);
2019 __ mr(Rj, Rh);
2020 __ b(entry);
2021 __ bind(Lgreater);
2022 __ mr(Ri, Rh);
2023
2024 // while (i+1 < j)
2025 __ bind(entry);
2026 __ addi(Rscratch, Ri, 1);
2027 __ cmpw(CCR0, Rscratch, Rj);
2028 __ add(Rh, Ri, Rj); // start h = i + j >> 1;
2029
2030 __ blt(CCR0, loop);
2031 }
2032
2033 // End of binary search, result index is i (must check again!).
2034 Label default_case;
2035 Label continue_execution;
2036 if (ProfileInterpreter) {
2037 __ mr(Rh, Ri); // Save index in i for profiling.
2038 }
2039 // Ri = value offset
2040 __ sldi(Ri, Ri, log_entry_size);
2041 __ add(Ri, Ri, Rarray);
2042 __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2043
2044 Label not_found;
2045 // Ri = offset offset
2046 __ cmpw(CCR0, Rkey, Rscratch);
2047 __ beq(CCR0, not_found);
2048 // entry not found -> j = default offset
2049 __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2050 __ b(default_case);
2051
2052 __ bind(not_found);
2053 // entry found -> j = offset
2054 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2055 __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2056
2057 if (ProfileInterpreter) {
2058 __ b(continue_execution);
2059 }
2060
2061 __ bind(default_case); // fall through (if not profiling)
2062 __ profile_switch_default(Ri, Rscratch);
2063
2064 __ bind(continue_execution);
2065
2066 __ extsw(Rj, Rj);
2067 __ add(R14_bcp, Rj, R14_bcp);
2068 __ dispatch_next(vtos);
2069 }
2070
2071 void TemplateTable::_return(TosState state) {
2072 transition(state, state);
2073 assert(_desc->calls_vm(),
2074 "inconsistent calls_vm information"); // call in remove_activation
2075
2076 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2077
2078 Register Rscratch = R11_scratch1,
2079 Rklass = R12_scratch2,
2080 Rklass_flags = Rklass;
2081 Label Lskip_register_finalizer;
2082
2083 // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2084 assert(state == vtos, "only valid state");
2085 __ ld(R17_tos, 0, R18_locals);
2086
2087 // Load klass of this obj.
2088 __ load_klass(Rklass, R17_tos);
2089 __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2090 __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2091 __ bfalse(CCR0, Lskip_register_finalizer);
2092
2093 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2094
2095 __ align(32, 12);
2096 __ bind(Lskip_register_finalizer);
2097 }
2098
2099 // Move the result value into the correct register and remove memory stack frame.
2100 __ remove_activation(state, /* throw_monitor_exception */ true);
2101 // Restoration of lr done by remove_activation.
2102 switch (state) {
2103 case ltos:
2104 case btos:
2105 case ctos:
2106 case stos:
2107 case atos:
2108 case itos: __ mr(R3_RET, R17_tos); break;
2109 case ftos:
2110 case dtos: __ fmr(F1_RET, F15_ftos); break;
2111 case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2112 // to get visible before the reference to the object gets stored anywhere.
2113 __ membar(Assembler::StoreStore); break;
2114 default : ShouldNotReachHere();
2115 }
2116 __ blr();
2117 }
2118
2119 // ============================================================================
2120 // Constant pool cache access
2121 //
2122 // Memory ordering:
2123 //
2124 // Like done in C++ interpreter, we load the fields
2125 // - _indices
2126 // - _f12_oop
2127 // acquired, because these are asked if the cache is already resolved. We don't
2128 // want to float loads above this check.
2129 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2130 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2131
2132 // Call into the VM if call site is not yet resolved
2133 //
2134 // Input regs:
2135 // - None, all passed regs are outputs.
2136 //
2137 // Returns:
2138 // - Rcache: The const pool cache entry that contains the resolved result.
2139 // - Rresult: Either noreg or output for f1/f2.
2140 //
2141 // Kills:
2142 // - Rscratch
2143 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2144
2145 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2146 Label Lresolved, Ldone;
2147
2148 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2149 // We are resolved if the indices offset contains the current bytecode.
2150 #if defined(VM_LITTLE_ENDIAN)
2151 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2152 #else
2153 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2154 #endif
2155 // Acquire by cmp-br-isync (see below).
2156 __ cmpdi(CCR0, Rscratch, (int)bytecode());
2157 __ beq(CCR0, Lresolved);
2158
2159 address entry = NULL;
2160 switch (bytecode()) {
2161 case Bytecodes::_getstatic : // fall through
2162 case Bytecodes::_putstatic : // fall through
2163 case Bytecodes::_getfield : // fall through
2164 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2165 case Bytecodes::_invokevirtual : // fall through
2166 case Bytecodes::_invokespecial : // fall through
2167 case Bytecodes::_invokestatic : // fall through
2168 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2169 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2170 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2171 default : ShouldNotReachHere(); break;
2172 }
2173 __ li(R4_ARG2, (int)bytecode());
2174 __ call_VM(noreg, entry, R4_ARG2, true);
2175
2176 // Update registers with resolved info.
2177 __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2178 __ b(Ldone);
2179
2180 __ bind(Lresolved);
2181 __ isync(); // Order load wrt. succeeding loads.
2182 __ bind(Ldone);
2183 }
2184
2185 // Load the constant pool cache entry at field accesses into registers.
2186 // The Rcache and Rindex registers must be set before call.
2187 // Input:
2188 // - Rcache, Rindex
2189 // Output:
2190 // - Robj, Roffset, Rflags
2191 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2192 Register Rcache,
2193 Register Rindex /* unused on PPC64 */,
2194 Register Roffset,
2195 Register Rflags,
2196 bool is_static = false) {
2197 assert_different_registers(Rcache, Rflags, Roffset);
2198 // assert(Rindex == noreg, "parameter not used on PPC64");
2199
2200 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2201 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2202 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2203 if (is_static) {
2204 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2205 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2206 // Acquire not needed here. Following access has an address dependency on this value.
2207 }
2208 }
2209
2210 // Load the constant pool cache entry at invokes into registers.
2211 // Resolve if necessary.
2212
2213 // Input Registers:
2214 // - None, bcp is used, though
2215 //
2216 // Return registers:
2217 // - Rmethod (f1 field or f2 if invokevirtual)
2218 // - Ritable_index (f2 field)
2219 // - Rflags (flags field)
2220 //
2221 // Kills:
2222 // - R21
2223 //
2224 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2225 Register Rmethod,
2226 Register Ritable_index,
2227 Register Rflags,
2228 bool is_invokevirtual,
2229 bool is_invokevfinal,
2230 bool is_invokedynamic) {
2231
2232 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2233 // Determine constant pool cache field offsets.
2234 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2235 const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2236 const int flags_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2237 // Access constant pool cache fields.
2238 const int index_offset = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2239
2240 Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2241
2242 if (is_invokevfinal) {
2243 assert(Ritable_index == noreg, "register not used");
2244 // Already resolved.
2245 __ get_cache_and_index_at_bcp(Rcache, 1);
2246 } else {
2247 resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2248 }
2249
2250 __ ld(Rmethod, method_offset, Rcache);
2251 __ ld(Rflags, flags_offset, Rcache);
2252
2253 if (Ritable_index != noreg) {
2254 __ ld(Ritable_index, index_offset, Rcache);
2255 }
2256 }
2257
2258 // ============================================================================
2259 // Field access
2260
2261 // Volatile variables demand their effects be made known to all CPU's
2262 // in order. Store buffers on most chips allow reads & writes to
2263 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2264 // without some kind of memory barrier (i.e., it's not sufficient that
2265 // the interpreter does not reorder volatile references, the hardware
2266 // also must not reorder them).
2267 //
2268 // According to the new Java Memory Model (JMM):
2269 // (1) All volatiles are serialized wrt to each other. ALSO reads &
2270 // writes act as aquire & release, so:
2271 // (2) A read cannot let unrelated NON-volatile memory refs that
2272 // happen after the read float up to before the read. It's OK for
2273 // non-volatile memory refs that happen before the volatile read to
2274 // float down below it.
2275 // (3) Similar a volatile write cannot let unrelated NON-volatile
2276 // memory refs that happen BEFORE the write float down to after the
2277 // write. It's OK for non-volatile memory refs that happen after the
2278 // volatile write to float up before it.
2279 //
2280 // We only put in barriers around volatile refs (they are expensive),
2281 // not _between_ memory refs (that would require us to track the
2282 // flavor of the previous memory refs). Requirements (2) and (3)
2283 // require some barriers before volatile stores and after volatile
2284 // loads. These nearly cover requirement (1) but miss the
2285 // volatile-store-volatile-load case. This final case is placed after
2286 // volatile-stores although it could just as well go before
2287 // volatile-loads.
2288
2289 // The registers cache and index expected to be set before call.
2290 // Correct values of the cache and index registers are preserved.
2291 // Kills:
2292 // Rcache (if has_tos)
2293 // Rscratch
2294 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2295
2296 assert_different_registers(Rcache, Rscratch);
2297
2298 if (JvmtiExport::can_post_field_access()) {
2299 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2300 Label Lno_field_access_post;
2301
2302 // Check if post field access in enabled.
2303 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2304 __ lwz(Rscratch, offs, Rscratch);
2305
2306 __ cmpwi(CCR0, Rscratch, 0);
2307 __ beq(CCR0, Lno_field_access_post);
2308
2309 // Post access enabled - do it!
2310 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2311 if (is_static) {
2312 __ li(R17_tos, 0);
2313 } else {
2314 if (has_tos) {
2315 // The fast bytecode versions have obj ptr in register.
2316 // Thus, save object pointer before call_VM() clobbers it
2317 // put object on tos where GC wants it.
2318 __ push_ptr(R17_tos);
2319 } else {
2320 // Load top of stack (do not pop the value off the stack).
2321 __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2322 }
2323 __ verify_oop(R17_tos);
2324 }
2325 // tos: object pointer or NULL if static
2326 // cache: cache entry pointer
2327 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2328 if (!is_static && has_tos) {
2329 // Restore object pointer.
2330 __ pop_ptr(R17_tos);
2331 __ verify_oop(R17_tos);
2332 } else {
2333 // Cache is still needed to get class or obj.
2334 __ get_cache_and_index_at_bcp(Rcache, 1);
2335 }
2336
2337 __ align(32, 12);
2338 __ bind(Lno_field_access_post);
2339 }
2340 }
2341
2342 // kills R11_scratch1
2343 void TemplateTable::pop_and_check_object(Register Roop) {
2344 Register Rtmp = R11_scratch1;
2345
2346 assert_different_registers(Rtmp, Roop);
2347 __ pop_ptr(Roop);
2348 // For field access must check obj.
2349 __ null_check_throw(Roop, -1, Rtmp);
2350 __ verify_oop(Roop);
2351 }
2352
2353 // PPC64: implement volatile loads as fence-store-acquire.
2354 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2355 transition(vtos, vtos);
2356
2357 Label Lacquire, Lisync;
2358
2359 const Register Rcache = R3_ARG1,
2360 Rclass_or_obj = R22_tmp2,
2361 Roffset = R23_tmp3,
2362 Rflags = R31,
2363 Rbtable = R5_ARG3,
2364 Rbc = R6_ARG4,
2365 Rscratch = R12_scratch2;
2366
2367 static address field_branch_table[number_of_states],
2368 static_branch_table[number_of_states];
2369
2370 address* branch_table = is_static ? static_branch_table : field_branch_table;
2371
2372 // Get field offset.
2373 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2374
2375 // JVMTI support
2376 jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2377
2378 // Load after possible GC.
2379 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2380
2381 // Load pointer to branch table.
2382 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2383
2384 // Get volatile flag.
2385 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2386 // Note: sync is needed before volatile load on PPC64.
2387
2388 // Check field type.
2389 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2390
2391 #ifdef ASSERT
2392 Label LFlagInvalid;
2393 __ cmpldi(CCR0, Rflags, number_of_states);
2394 __ bge(CCR0, LFlagInvalid);
2395 #endif
2396
2397 // Load from branch table and dispatch (volatile case: one instruction ahead).
2398 __ sldi(Rflags, Rflags, LogBytesPerWord);
2399 __ cmpwi(CCR6, Rscratch, 1); // Volatile?
2400 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2401 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2402 }
2403 __ ldx(Rbtable, Rbtable, Rflags);
2404
2405 // Get the obj from stack.
2406 if (!is_static) {
2407 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2408 } else {
2409 __ verify_oop(Rclass_or_obj);
2410 }
2411
2412 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2413 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2414 }
2415 __ mtctr(Rbtable);
2416 __ bctr();
2417
2418 #ifdef ASSERT
2419 __ bind(LFlagInvalid);
2420 __ stop("got invalid flag", 0x654);
2421
2422 // __ bind(Lvtos);
2423 address pc_before_fence = __ pc();
2424 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2425 assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2426 assert(branch_table[vtos] == 0, "can't compute twice");
2427 branch_table[vtos] = __ pc(); // non-volatile_entry point
2428 __ stop("vtos unexpected", 0x655);
2429 #endif
2430
2431 __ align(32, 28, 28); // Align load.
2432 // __ bind(Ldtos);
2433 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2434 assert(branch_table[dtos] == 0, "can't compute twice");
2435 branch_table[dtos] = __ pc(); // non-volatile_entry point
2436 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2437 __ push(dtos);
2438 if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2439 {
2440 Label acquire_double;
2441 __ beq(CCR6, acquire_double); // Volatile?
2442 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2443
2444 __ bind(acquire_double);
2445 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2446 __ beq_predict_taken(CCR0, Lisync);
2447 __ b(Lisync); // In case of NAN.
2448 }
2449
2450 __ align(32, 28, 28); // Align load.
2451 // __ bind(Lftos);
2452 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2453 assert(branch_table[ftos] == 0, "can't compute twice");
2454 branch_table[ftos] = __ pc(); // non-volatile_entry point
2455 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2456 __ push(ftos);
2457 if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
2458 {
2459 Label acquire_float;
2460 __ beq(CCR6, acquire_float); // Volatile?
2461 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2462
2463 __ bind(acquire_float);
2464 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2465 __ beq_predict_taken(CCR0, Lisync);
2466 __ b(Lisync); // In case of NAN.
2467 }
2468
2469 __ align(32, 28, 28); // Align load.
2470 // __ bind(Litos);
2471 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2472 assert(branch_table[itos] == 0, "can't compute twice");
2473 branch_table[itos] = __ pc(); // non-volatile_entry point
2474 __ lwax(R17_tos, Rclass_or_obj, Roffset);
2475 __ push(itos);
2476 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2477 __ beq(CCR6, Lacquire); // Volatile?
2478 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2479
2480 __ align(32, 28, 28); // Align load.
2481 // __ bind(Lltos);
2482 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2483 assert(branch_table[ltos] == 0, "can't compute twice");
2484 branch_table[ltos] = __ pc(); // non-volatile_entry point
2485 __ ldx(R17_tos, Rclass_or_obj, Roffset);
2486 __ push(ltos);
2487 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2488 __ beq(CCR6, Lacquire); // Volatile?
2489 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2490
2491 __ align(32, 28, 28); // Align load.
2492 // __ bind(Lbtos);
2493 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2494 assert(branch_table[btos] == 0, "can't compute twice");
2495 branch_table[btos] = __ pc(); // non-volatile_entry point
2496 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2497 __ extsb(R17_tos, R17_tos);
2498 __ push(btos);
2499 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2500 __ beq(CCR6, Lacquire); // Volatile?
2501 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2502
2503 __ align(32, 28, 28); // Align load.
2504 // __ bind(Lctos);
2505 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2506 assert(branch_table[ctos] == 0, "can't compute twice");
2507 branch_table[ctos] = __ pc(); // non-volatile_entry point
2508 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2509 __ push(ctos);
2510 if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2511 __ beq(CCR6, Lacquire); // Volatile?
2512 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2513
2514 __ align(32, 28, 28); // Align load.
2515 // __ bind(Lstos);
2516 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2517 assert(branch_table[stos] == 0, "can't compute twice");
2518 branch_table[stos] = __ pc(); // non-volatile_entry point
2519 __ lhax(R17_tos, Rclass_or_obj, Roffset);
2520 __ push(stos);
2521 if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2522 __ beq(CCR6, Lacquire); // Volatile?
2523 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2524
2525 __ align(32, 28, 28); // Align load.
2526 // __ bind(Latos);
2527 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2528 assert(branch_table[atos] == 0, "can't compute twice");
2529 branch_table[atos] = __ pc(); // non-volatile_entry point
2530 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2531 __ verify_oop(R17_tos);
2532 __ push(atos);
2533 //__ dcbt(R17_tos); // prefetch
2534 if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2535 __ beq(CCR6, Lacquire); // Volatile?
2536 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2537
2538 __ align(32, 12);
2539 __ bind(Lacquire);
2540 __ twi_0(R17_tos);
2541 __ bind(Lisync);
2542 __ isync(); // acquire
2543
2544 #ifdef ASSERT
2545 for (int i = 0; i<number_of_states; ++i) {
2546 assert(branch_table[i], "get initialization");
2547 //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2548 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2549 }
2550 #endif
2551 }
2552
2553 void TemplateTable::getfield(int byte_no) {
2554 getfield_or_static(byte_no, false);
2555 }
2556
2557 void TemplateTable::getstatic(int byte_no) {
2558 getfield_or_static(byte_no, true);
2559 }
2560
2561 // The registers cache and index expected to be set before call.
2562 // The function may destroy various registers, just not the cache and index registers.
2563 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2564
2565 assert_different_registers(Rcache, Rscratch, R6_ARG4);
2566
2567 if (JvmtiExport::can_post_field_modification()) {
2568 Label Lno_field_mod_post;
2569
2570 // Check if post field access in enabled.
2571 int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2572 __ lwz(Rscratch, offs, Rscratch);
2573
2574 __ cmpwi(CCR0, Rscratch, 0);
2575 __ beq(CCR0, Lno_field_mod_post);
2576
2577 // Do the post
2578 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2579 const Register Robj = Rscratch;
2580
2581 __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2582 if (is_static) {
2583 // Life is simple. Null out the object pointer.
2584 __ li(Robj, 0);
2585 } else {
2586 // In case of the fast versions, value lives in registers => put it back on tos.
2587 int offs = Interpreter::expr_offset_in_bytes(0);
2588 Register base = R15_esp;
2589 switch(bytecode()) {
2590 case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2591 case Bytecodes::_fast_iputfield: // Fall through
2592 case Bytecodes::_fast_bputfield: // Fall through
2593 case Bytecodes::_fast_cputfield: // Fall through
2594 case Bytecodes::_fast_sputfield: __ push_i(); offs+= Interpreter::stackElementSize; break;
2595 case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2596 case Bytecodes::_fast_fputfield: __ push_f(); offs+= Interpreter::stackElementSize; break;
2597 case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2598 default: {
2599 offs = 0;
2600 base = Robj;
2601 const Register Rflags = Robj;
2602 Label is_one_slot;
2603 // Life is harder. The stack holds the value on top, followed by the
2604 // object. We don't know the size of the value, though; it could be
2605 // one or two words depending on its type. As a result, we must find
2606 // the type to determine where the object is.
2607 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
2608 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2609
2610 __ cmpwi(CCR0, Rflags, ltos);
2611 __ cmpwi(CCR1, Rflags, dtos);
2612 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
2613 __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
2614 __ beq(CCR0, is_one_slot);
2615 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
2616 __ bind(is_one_slot);
2617 break;
2618 }
2619 }
2620 __ ld(Robj, offs, base);
2621 __ verify_oop(Robj);
2622 }
2623
2624 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
2625 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
2626 __ get_cache_and_index_at_bcp(Rcache, 1);
2627
2628 // In case of the fast versions, value lives in registers => put it back on tos.
2629 switch(bytecode()) {
2630 case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2631 case Bytecodes::_fast_iputfield: // Fall through
2632 case Bytecodes::_fast_bputfield: // Fall through
2633 case Bytecodes::_fast_cputfield: // Fall through
2634 case Bytecodes::_fast_sputfield: __ pop_i(); break;
2635 case Bytecodes::_fast_lputfield: __ pop_l(); break;
2636 case Bytecodes::_fast_fputfield: __ pop_f(); break;
2637 case Bytecodes::_fast_dputfield: __ pop_d(); break;
2638 default: break; // Nothin' to do.
2639 }
2640
2641 __ align(32, 12);
2642 __ bind(Lno_field_mod_post);
2643 }
2644 }
2645
2646 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2647 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2648 Label Lvolatile;
2649
2650 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2651 Rclass_or_obj = R31, // Needs to survive C call.
2652 Roffset = R22_tmp2, // Needs to survive C call.
2653 Rflags = R3_ARG1,
2654 Rbtable = R4_ARG2,
2655 Rscratch = R11_scratch1,
2656 Rscratch2 = R12_scratch2,
2657 Rscratch3 = R6_ARG4,
2658 Rbc = Rscratch3;
2659 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2660
2661 static address field_branch_table[number_of_states],
2662 static_branch_table[number_of_states];
2663
2664 address* branch_table = is_static ? static_branch_table : field_branch_table;
2665
2666 // Stack (grows up):
2667 // value
2668 // obj
2669
2670 // Load the field offset.
2671 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2672 jvmti_post_field_mod(Rcache, Rscratch, is_static);
2673 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2674
2675 // Load pointer to branch table.
2676 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2677
2678 // Get volatile flag.
2679 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2680
2681 // Check the field type.
2682 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2683
2684 #ifdef ASSERT
2685 Label LFlagInvalid;
2686 __ cmpldi(CCR0, Rflags, number_of_states);
2687 __ bge(CCR0, LFlagInvalid);
2688 #endif
2689
2690 // Load from branch table and dispatch (volatile case: one instruction ahead).
2691 __ sldi(Rflags, Rflags, LogBytesPerWord);
2692 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
2693 __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile? size of instruction 1 : 0.
2694 __ ldx(Rbtable, Rbtable, Rflags);
2695
2696 __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2697 __ mtctr(Rbtable);
2698 __ bctr();
2699
2700 #ifdef ASSERT
2701 __ bind(LFlagInvalid);
2702 __ stop("got invalid flag", 0x656);
2703
2704 // __ bind(Lvtos);
2705 address pc_before_release = __ pc();
2706 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2707 assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2708 assert(branch_table[vtos] == 0, "can't compute twice");
2709 branch_table[vtos] = __ pc(); // non-volatile_entry point
2710 __ stop("vtos unexpected", 0x657);
2711 #endif
2712
2713 __ align(32, 28, 28); // Align pop.
2714 // __ bind(Ldtos);
2715 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2716 assert(branch_table[dtos] == 0, "can't compute twice");
2717 branch_table[dtos] = __ pc(); // non-volatile_entry point
2718 __ pop(dtos);
2719 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2720 __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2721 if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
2722 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2723 __ beq(CR_is_vol, Lvolatile); // Volatile?
2724 }
2725 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2726
2727 __ align(32, 28, 28); // Align pop.
2728 // __ bind(Lftos);
2729 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2730 assert(branch_table[ftos] == 0, "can't compute twice");
2731 branch_table[ftos] = __ pc(); // non-volatile_entry point
2732 __ pop(ftos);
2733 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2734 __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2735 if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
2736 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2737 __ beq(CR_is_vol, Lvolatile); // Volatile?
2738 }
2739 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2740
2741 __ align(32, 28, 28); // Align pop.
2742 // __ bind(Litos);
2743 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2744 assert(branch_table[itos] == 0, "can't compute twice");
2745 branch_table[itos] = __ pc(); // non-volatile_entry point
2746 __ pop(itos);
2747 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2748 __ stwx(R17_tos, Rclass_or_obj, Roffset);
2749 if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
2750 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2751 __ beq(CR_is_vol, Lvolatile); // Volatile?
2752 }
2753 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2754
2755 __ align(32, 28, 28); // Align pop.
2756 // __ bind(Lltos);
2757 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2758 assert(branch_table[ltos] == 0, "can't compute twice");
2759 branch_table[ltos] = __ pc(); // non-volatile_entry point
2760 __ pop(ltos);
2761 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2762 __ stdx(R17_tos, Rclass_or_obj, Roffset);
2763 if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
2764 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2765 __ beq(CR_is_vol, Lvolatile); // Volatile?
2766 }
2767 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2768
2769 __ align(32, 28, 28); // Align pop.
2770 // __ bind(Lbtos);
2771 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2772 assert(branch_table[btos] == 0, "can't compute twice");
2773 branch_table[btos] = __ pc(); // non-volatile_entry point
2774 __ pop(btos);
2775 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2776 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2777 if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
2778 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2779 __ beq(CR_is_vol, Lvolatile); // Volatile?
2780 }
2781 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2782
2783 __ align(32, 28, 28); // Align pop.
2784 // __ bind(Lctos);
2785 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2786 assert(branch_table[ctos] == 0, "can't compute twice");
2787 branch_table[ctos] = __ pc(); // non-volatile_entry point
2788 __ pop(ctos);
2789 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1..
2790 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2791 if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
2792 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2793 __ beq(CR_is_vol, Lvolatile); // Volatile?
2794 }
2795 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2796
2797 __ align(32, 28, 28); // Align pop.
2798 // __ bind(Lstos);
2799 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2800 assert(branch_table[stos] == 0, "can't compute twice");
2801 branch_table[stos] = __ pc(); // non-volatile_entry point
2802 __ pop(stos);
2803 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2804 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2805 if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
2806 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2807 __ beq(CR_is_vol, Lvolatile); // Volatile?
2808 }
2809 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2810
2811 __ align(32, 28, 28); // Align pop.
2812 // __ bind(Latos);
2813 __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2814 assert(branch_table[atos] == 0, "can't compute twice");
2815 branch_table[atos] = __ pc(); // non-volatile_entry point
2816 __ pop(atos);
2817 if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2818 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2819 if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
2820 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2821 __ beq(CR_is_vol, Lvolatile); // Volatile?
2822 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2823
2824 __ align(32, 12);
2825 __ bind(Lvolatile);
2826 __ fence();
2827 }
2828 // fallthru: __ b(Lexit);
2829
2830 #ifdef ASSERT
2831 for (int i = 0; i<number_of_states; ++i) {
2832 assert(branch_table[i], "put initialization");
2833 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2834 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2835 }
2836 #endif
2837 }
2838
2839 void TemplateTable::putfield(int byte_no) {
2840 putfield_or_static(byte_no, false);
2841 }
2842
2843 void TemplateTable::putstatic(int byte_no) {
2844 putfield_or_static(byte_no, true);
2845 }
2846
2847 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
2848 void TemplateTable::jvmti_post_fast_field_mod() {
2849 __ should_not_reach_here();
2850 }
2851
2852 void TemplateTable::fast_storefield(TosState state) {
2853 transition(state, vtos);
2854
2855 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2856 Rclass_or_obj = R31, // Needs to survive C call.
2857 Roffset = R22_tmp2, // Needs to survive C call.
2858 Rflags = R3_ARG1,
2859 Rscratch = R11_scratch1,
2860 Rscratch2 = R12_scratch2,
2861 Rscratch3 = R4_ARG2;
2862 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2863
2864 // Constant pool already resolved => Load flags and offset of field.
2865 __ get_cache_and_index_at_bcp(Rcache, 1);
2866 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
2867 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2868
2869 // Get the obj and the final store addr.
2870 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2871
2872 // Get volatile flag.
2873 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2874 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
2875 {
2876 Label LnotVolatile;
2877 __ beq(CCR0, LnotVolatile);
2878 __ release();
2879 __ align(32, 12);
2880 __ bind(LnotVolatile);
2881 }
2882
2883 // Do the store and fencing.
2884 switch(bytecode()) {
2885 case Bytecodes::_fast_aputfield:
2886 // Store into the field.
2887 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2888 break;
2889
2890 case Bytecodes::_fast_iputfield:
2891 __ stwx(R17_tos, Rclass_or_obj, Roffset);
2892 break;
2893
2894 case Bytecodes::_fast_lputfield:
2895 __ stdx(R17_tos, Rclass_or_obj, Roffset);
2896 break;
2897
2898 case Bytecodes::_fast_bputfield:
2899 __ stbx(R17_tos, Rclass_or_obj, Roffset);
2900 break;
2901
2902 case Bytecodes::_fast_cputfield:
2903 case Bytecodes::_fast_sputfield:
2904 __ sthx(R17_tos, Rclass_or_obj, Roffset);
2905 break;
2906
2907 case Bytecodes::_fast_fputfield:
2908 __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2909 break;
2910
2911 case Bytecodes::_fast_dputfield:
2912 __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2913 break;
2914
2915 default: ShouldNotReachHere();
2916 }
2917
2918 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2919 Label LVolatile;
2920 __ beq(CR_is_vol, LVolatile);
2921 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2922
2923 __ align(32, 12);
2924 __ bind(LVolatile);
2925 __ fence();
2926 }
2927 }
2928
2929 void TemplateTable::fast_accessfield(TosState state) {
2930 transition(atos, state);
2931
2932 Label LisVolatile;
2933 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2934
2935 const Register Rcache = R3_ARG1,
2936 Rclass_or_obj = R17_tos,
2937 Roffset = R22_tmp2,
2938 Rflags = R23_tmp3,
2939 Rscratch = R12_scratch2;
2940
2941 // Constant pool already resolved. Get the field offset.
2942 __ get_cache_and_index_at_bcp(Rcache, 1);
2943 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2944
2945 // JVMTI support
2946 jvmti_post_field_access(Rcache, Rscratch, false, true);
2947
2948 // Get the load address.
2949 __ null_check_throw(Rclass_or_obj, -1, Rscratch);
2950
2951 // Get volatile flag.
2952 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2953 __ bne(CCR0, LisVolatile);
2954
2955 switch(bytecode()) {
2956 case Bytecodes::_fast_agetfield:
2957 {
2958 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2959 __ verify_oop(R17_tos);
2960 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2961
2962 __ bind(LisVolatile);
2963 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2964 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2965 __ verify_oop(R17_tos);
2966 __ twi_0(R17_tos);
2967 __ isync();
2968 break;
2969 }
2970 case Bytecodes::_fast_igetfield:
2971 {
2972 __ lwax(R17_tos, Rclass_or_obj, Roffset);
2973 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2974
2975 __ bind(LisVolatile);
2976 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2977 __ lwax(R17_tos, Rclass_or_obj, Roffset);
2978 __ twi_0(R17_tos);
2979 __ isync();
2980 break;
2981 }
2982 case Bytecodes::_fast_lgetfield:
2983 {
2984 __ ldx(R17_tos, Rclass_or_obj, Roffset);
2985 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2986
2987 __ bind(LisVolatile);
2988 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2989 __ ldx(R17_tos, Rclass_or_obj, Roffset);
2990 __ twi_0(R17_tos);
2991 __ isync();
2992 break;
2993 }
2994 case Bytecodes::_fast_bgetfield:
2995 {
2996 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2997 __ extsb(R17_tos, R17_tos);
2998 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2999
3000 __ bind(LisVolatile);
3001 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3002 __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3003 __ twi_0(R17_tos);
3004 __ extsb(R17_tos, R17_tos);
3005 __ isync();
3006 break;
3007 }
3008 case Bytecodes::_fast_cgetfield:
3009 {
3010 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3011 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3012
3013 __ bind(LisVolatile);
3014 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3015 __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3016 __ twi_0(R17_tos);
3017 __ isync();
3018 break;
3019 }
3020 case Bytecodes::_fast_sgetfield:
3021 {
3022 __ lhax(R17_tos, Rclass_or_obj, Roffset);
3023 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3024
3025 __ bind(LisVolatile);
3026 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3027 __ lhax(R17_tos, Rclass_or_obj, Roffset);
3028 __ twi_0(R17_tos);
3029 __ isync();
3030 break;
3031 }
3032 case Bytecodes::_fast_fgetfield:
3033 {
3034 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3035 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3036
3037 __ bind(LisVolatile);
3038 Label Ldummy;
3039 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3040 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3041 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3042 __ bne_predict_not_taken(CCR0, Ldummy);
3043 __ bind(Ldummy);
3044 __ isync();
3045 break;
3046 }
3047 case Bytecodes::_fast_dgetfield:
3048 {
3049 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3050 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3051
3052 __ bind(LisVolatile);
3053 Label Ldummy;
3054 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3055 __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3056 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3057 __ bne_predict_not_taken(CCR0, Ldummy);
3058 __ bind(Ldummy);
3059 __ isync();
3060 break;
3061 }
3062 default: ShouldNotReachHere();
3063 }
3064 }
3065
3066 void TemplateTable::fast_xaccess(TosState state) {
3067 transition(vtos, state);
3068
3069 Label LisVolatile;
3070 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3071 const Register Rcache = R3_ARG1,
3072 Rclass_or_obj = R17_tos,
3073 Roffset = R22_tmp2,
3074 Rflags = R23_tmp3,
3075 Rscratch = R12_scratch2;
3076
3077 __ ld(Rclass_or_obj, 0, R18_locals);
3078
3079 // Constant pool already resolved. Get the field offset.
3080 __ get_cache_and_index_at_bcp(Rcache, 2);
3081 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3082
3083 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches.
3084
3085 // Needed to report exception at the correct bcp.
3086 __ addi(R14_bcp, R14_bcp, 1);
3087
3088 // Get the load address.
3089 __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3090
3091 // Get volatile flag.
3092 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3093 __ bne(CCR0, LisVolatile);
3094
3095 switch(state) {
3096 case atos:
3097 {
3098 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3099 __ verify_oop(R17_tos);
3100 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3101
3102 __ bind(LisVolatile);
3103 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3104 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3105 __ verify_oop(R17_tos);
3106 __ twi_0(R17_tos);
3107 __ isync();
3108 break;
3109 }
3110 case itos:
3111 {
3112 __ lwax(R17_tos, Rclass_or_obj, Roffset);
3113 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3114
3115 __ bind(LisVolatile);
3116 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3117 __ lwax(R17_tos, Rclass_or_obj, Roffset);
3118 __ twi_0(R17_tos);
3119 __ isync();
3120 break;
3121 }
3122 case ftos:
3123 {
3124 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3125 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3126
3127 __ bind(LisVolatile);
3128 Label Ldummy;
3129 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3130 __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3131 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3132 __ bne_predict_not_taken(CCR0, Ldummy);
3133 __ bind(Ldummy);
3134 __ isync();
3135 break;
3136 }
3137 default: ShouldNotReachHere();
3138 }
3139 __ addi(R14_bcp, R14_bcp, -1);
3140 }
3141
3142 // ============================================================================
3143 // Calls
3144
3145 // Common code for invoke
3146 //
3147 // Input:
3148 // - byte_no
3149 //
3150 // Output:
3151 // - Rmethod: The method to invoke next.
3152 // - Rret_addr: The return address to return to.
3153 // - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic)
3154 // - Rrecv: Cache for "this" pointer, might be noreg if static call.
3155 // - Rflags: Method flags from const pool cache.
3156 //
3157 // Kills:
3158 // - Rscratch1
3159 //
3160 void TemplateTable::prepare_invoke(int byte_no,
3161 Register Rmethod, // linked method (or i-klass)
3162 Register Rret_addr,// return address
3163 Register Rindex, // itable index, MethodType, etc.
3164 Register Rrecv, // If caller wants to see it.
3165 Register Rflags, // If caller wants to test it.
3166 Register Rscratch
3167 ) {
3168 // Determine flags.
3169 const Bytecodes::Code code = bytecode();
3170 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
3171 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
3172 const bool is_invokehandle = code == Bytecodes::_invokehandle;
3173 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
3174 const bool is_invokespecial = code == Bytecodes::_invokespecial;
3175 const bool load_receiver = (Rrecv != noreg);
3176 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3177
3178 assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3179 assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3180 assert_different_registers(Rret_addr, Rscratch);
3181
3182 load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3183
3184 // Saving of SP done in call_from_interpreter.
3185
3186 // Maybe push "appendix" to arguments.
3187 if (is_invokedynamic || is_invokehandle) {
3188 Label Ldone;
3189 __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3190 __ beq(CCR0, Ldone);
3191 // Push "appendix" (MethodType, CallSite, etc.).
3192 // This must be done before we get the receiver,
3193 // since the parameter_size includes it.
3194 __ load_resolved_reference_at_index(Rscratch, Rindex);
3195 __ verify_oop(Rscratch);
3196 __ push_ptr(Rscratch);
3197 __ bind(Ldone);
3198 }
3199
3200 // Load receiver if needed (after appendix is pushed so parameter size is correct).
3201 if (load_receiver) {
3202 const Register Rparam_count = Rscratch;
3203 __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3204 __ load_receiver(Rparam_count, Rrecv);
3205 __ verify_oop(Rrecv);
3206 }
3207
3208 // Get return address.
3209 {
3210 Register Rtable_addr = Rscratch;
3211 Register Rret_type = Rret_addr;
3212 address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3213
3214 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3215 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3216 __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3217 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3218 // Get return address.
3219 __ ldx(Rret_addr, Rtable_addr, Rret_type);
3220 }
3221 }
3222
3223 // Helper for virtual calls. Load target out of vtable and jump off!
3224 // Kills all passed registers.
3225 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3226
3227 assert_different_registers(Rrecv_klass, Rtemp, Rret);
3228 const Register Rtarget_method = Rindex;
3229
3230 // Get target method & entry point.
3231 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3232 // Calc vtable addr scale the vtable index by 8.
3233 __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
3234 // Load target.
3235 __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3236 __ ldx(Rtarget_method, Rindex, Rrecv_klass);
3237 // Argument and return type profiling.
3238 __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
3239 __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3240 }
3241
3242 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_finalcall" next time.
3243 void TemplateTable::invokevirtual(int byte_no) {
3244 transition(vtos, vtos);
3245
3246 Register Rtable_addr = R11_scratch1,
3247 Rret_type = R12_scratch2,
3248 Rret_addr = R5_ARG3,
3249 Rflags = R22_tmp2, // Should survive C call.
3250 Rrecv = R3_ARG1,
3251 Rrecv_klass = Rrecv,
3252 Rvtableindex_or_method = R31, // Should survive C call.
3253 Rnum_params = R4_ARG2,
3254 Rnew_bc = R6_ARG4;
3255
3256 Label LnotFinal;
3257
3258 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3259
3260 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3261 __ bfalse(CCR0, LnotFinal);
3262
3263 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3264 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3265
3266 __ align(32, 12);
3267 __ bind(LnotFinal);
3268 // Load "this" pointer (receiver).
3269 __ rldicl(Rnum_params, Rflags, 64, 48);
3270 __ load_receiver(Rnum_params, Rrecv);
3271 __ verify_oop(Rrecv);
3272
3273 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3274 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3275 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3276 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3277 __ ldx(Rret_addr, Rret_type, Rtable_addr);
3278 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3279 __ load_klass(Rrecv_klass, Rrecv);
3280 __ verify_klass_ptr(Rrecv_klass);
3281 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3282
3283 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3284 }
3285
3286 void TemplateTable::fast_invokevfinal(int byte_no) {
3287 transition(vtos, vtos);
3288
3289 assert(byte_no == f2_byte, "use this argument");
3290 Register Rflags = R22_tmp2,
3291 Rmethod = R31;
3292 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3293 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3294 }
3295
3296 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3297
3298 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3299
3300 // Load receiver from stack slot.
3301 Register Rrecv = Rscratch2;
3302 Register Rnum_params = Rrecv;
3303
3304 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3305 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
3306
3307 // Get return address.
3308 Register Rtable_addr = Rscratch1,
3309 Rret_addr = Rflags,
3310 Rret_type = Rret_addr;
3311 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3312 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3313 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3314 __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3315 __ ldx(Rret_addr, Rret_type, Rtable_addr);
3316
3317 // Load receiver and receiver NULL check.
3318 __ load_receiver(Rnum_params, Rrecv);
3319 __ null_check_throw(Rrecv, -1, Rscratch1);
3320
3321 __ profile_final_call(Rrecv, Rscratch1);
3322 // Argument and return type profiling.
3323 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3324
3325 // Do the call.
3326 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3327 }
3328
3329 void TemplateTable::invokespecial(int byte_no) {
3330 assert(byte_no == f1_byte, "use this argument");
3331 transition(vtos, vtos);
3332
3333 Register Rtable_addr = R3_ARG1,
3334 Rret_addr = R4_ARG2,
3335 Rflags = R5_ARG3,
3336 Rreceiver = R6_ARG4,
3337 Rmethod = R31;
3338
3339 prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3340
3341 // Receiver NULL check.
3342 __ null_check_throw(Rreceiver, -1, R11_scratch1);
3343
3344 __ profile_call(R11_scratch1, R12_scratch2);
3345 // Argument and return type profiling.
3346 __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
3347 __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3348 }
3349
3350 void TemplateTable::invokestatic(int byte_no) {
3351 assert(byte_no == f1_byte, "use this argument");
3352 transition(vtos, vtos);
3353
3354 Register Rtable_addr = R3_ARG1,
3355 Rret_addr = R4_ARG2,
3356 Rflags = R5_ARG3;
3357
3358 prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3359
3360 __ profile_call(R11_scratch1, R12_scratch2);
3361 // Argument and return type profiling.
3362 __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
3363 __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3364 }
3365
3366 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3367 Register Rret,
3368 Register Rflags,
3369 Register Rindex,
3370 Register Rtemp1,
3371 Register Rtemp2) {
3372
3373 assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3374 Label LnotFinal;
3375
3376 // Check for vfinal.
3377 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3378 __ bfalse(CCR0, LnotFinal);
3379
3380 Register Rscratch = Rflags; // Rflags is dead now.
3381
3382 // Final call case.
3383 __ profile_final_call(Rtemp1, Rscratch);
3384 // Argument and return type profiling.
3385 __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
3386 // Do the final call - the index (f2) contains the method.
3387 __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
3388
3389 // Non-final callc case.
3390 __ bind(LnotFinal);
3391 __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3392 generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
3393 }
3394
3395 void TemplateTable::invokeinterface(int byte_no) {
3396 assert(byte_no == f1_byte, "use this argument");
3397 transition(vtos, vtos);
3398
3399 const Register Rscratch1 = R11_scratch1,
3400 Rscratch2 = R12_scratch2,
3401 Rscratch3 = R9_ARG7,
3402 Rscratch4 = R10_ARG8,
3403 Rtable_addr = Rscratch2,
3404 Rinterface_klass = R5_ARG3,
3405 Rret_type = R8_ARG6,
3406 Rret_addr = Rret_type,
3407 Rindex = R6_ARG4,
3408 Rreceiver = R4_ARG2,
3409 Rrecv_klass = Rreceiver,
3410 Rflags = R7_ARG5;
3411
3412 prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
3413
3414 // Get receiver klass.
3415 __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
3416 __ load_klass(Rrecv_klass, Rreceiver);
3417
3418 // Check corner case object method.
3419 Label LobjectMethod;
3420
3421 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3422 __ btrue(CCR0, LobjectMethod);
3423
3424 // Fallthrough: The normal invokeinterface case.
3425 __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3426
3427 // Find entry point to call.
3428 Label Lthrow_icc, Lthrow_ame;
3429 // Result will be returned in Rindex.
3430 __ mr(Rscratch4, Rrecv_klass);
3431 __ mr(Rscratch3, Rindex);
3432 __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
3433
3434 __ cmpdi(CCR0, Rindex, 0);
3435 __ beq(CCR0, Lthrow_ame);
3436 // Found entry. Jump off!
3437 // Argument and return type profiling.
3438 __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
3439 __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
3440
3441 // Vtable entry was NULL => Throw abstract method error.
3442 __ bind(Lthrow_ame);
3443 __ mr(Rrecv_klass, Rscratch4);
3444 __ mr(Rindex, Rscratch3);
3445 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3446
3447 // Interface was not found => Throw incompatible class change error.
3448 __ bind(Lthrow_icc);
3449 __ mr(Rrecv_klass, Rscratch4);
3450 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3451
3452 __ should_not_reach_here();
3453
3454 // Special case of invokeinterface called for virtual method of
3455 // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3456 // The invokeinterface was rewritten to a invokevirtual, hence we have
3457 // to handle this corner case. This code isn't produced by javac, but could
3458 // be produced by another compliant java compiler.
3459 __ bind(LobjectMethod);
3460 invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
3461 }
3462
3463 void TemplateTable::invokedynamic(int byte_no) {
3464 transition(vtos, vtos);
3465
3466 const Register Rret_addr = R3_ARG1,
3467 Rflags = R4_ARG2,
3468 Rmethod = R22_tmp2,
3469 Rscratch1 = R11_scratch1,
3470 Rscratch2 = R12_scratch2;
3471
3472 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3473
3474 // Profile this call.
3475 __ profile_call(Rscratch1, Rscratch2);
3476
3477 // Off we go. With the new method handles, we don't jump to a method handle
3478 // entry any more. Instead, we pushed an "appendix" in prepare invoke, which happens
3479 // to be the callsite object the bootstrap method returned. This is passed to a
3480 // "link" method which does the dispatch (Most likely just grabs the MH stored
3481 // inside the callsite and does an invokehandle).
3482 // Argument and return type profiling.
3483 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
3484 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3485 }
3486
3487 void TemplateTable::invokehandle(int byte_no) {
3488 transition(vtos, vtos);
3489
3490 const Register Rret_addr = R3_ARG1,
3491 Rflags = R4_ARG2,
3492 Rrecv = R5_ARG3,
3493 Rmethod = R22_tmp2,
3494 Rscratch1 = R11_scratch1,
3495 Rscratch2 = R12_scratch2;
3496
3497 prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3498 __ verify_method_ptr(Rmethod);
3499 __ null_check_throw(Rrecv, -1, Rscratch2);
3500
3501 __ profile_final_call(Rrecv, Rscratch1);
3502
3503 // Still no call from handle => We call the method handle interpreter here.
3504 // Argument and return type profiling.
3505 __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3506 __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3507 }
3508
3509 // =============================================================================
3510 // Allocation
3511
3512 // Puts allocated obj ref onto the expression stack.
3513 void TemplateTable::_new() {
3514 transition(vtos, atos);
3515
3516 Label Lslow_case,
3517 Ldone,
3518 Linitialize_header,
3519 Lallocate_shared,
3520 Linitialize_object; // Including clearing the fields.
3521
3522 const Register RallocatedObject = R17_tos,
3523 RinstanceKlass = R9_ARG7,
3524 Rscratch = R11_scratch1,
3525 Roffset = R8_ARG6,
3526 Rinstance_size = Roffset,
3527 Rcpool = R4_ARG2,
3528 Rtags = R3_ARG1,
3529 Rindex = R5_ARG3;
3530
3531 const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
3532
3533 // --------------------------------------------------------------------------
3534 // Check if fast case is possible.
3535
3536 // Load pointers to const pool and const pool's tags array.
3537 __ get_cpool_and_tags(Rcpool, Rtags);
3538 // Load index of constant pool entry.
3539 __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3540
3541 if (UseTLAB) {
3542 // Make sure the class we're about to instantiate has been resolved
3543 // This is done before loading instanceKlass to be consistent with the order
3544 // how Constant Pool is updated (see ConstantPoolCache::klass_at_put).
3545 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3546 __ lbzx(Rtags, Rindex, Rtags);
3547
3548 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3549 __ bne(CCR0, Lslow_case);
3550
3551 // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
3552 __ sldi(Roffset, Rindex, LogBytesPerWord);
3553 __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
3554 __ isync(); // Order load of instance Klass wrt. tags.
3555 __ ldx(RinstanceKlass, Roffset, Rscratch);
3556
3557 // Make sure klass is fully initialized and get instance_size.
3558 __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3559 __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3560
3561 __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3562 // Make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class.
3563 __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3564
3565 __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
3566 __ beq(CCR0, Lslow_case);
3567
3568 // --------------------------------------------------------------------------
3569 // Fast case:
3570 // Allocate the instance.
3571 // 1) Try to allocate in the TLAB.
3572 // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
3573 // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
3574
3575 Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3576 Register RnewTopValue = R6_ARG4;
3577 Register RendValue = R7_ARG5;
3578
3579 // Check if we can allocate in the TLAB.
3580 __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3581 __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3582
3583 __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3584
3585 // If there is enough space, we do not CAS and do not clear.
3586 __ cmpld(CCR0, RnewTopValue, RendValue);
3587 __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
3588
3589 __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3590
3591 if (ZeroTLAB) {
3592 // The fields have already been cleared.
3593 __ b(Linitialize_header);
3594 } else {
3595 // Initialize both the header and fields.
3596 __ b(Linitialize_object);
3597 }
3598
3599 // Fall through: TLAB was too small.
3600 if (allow_shared_alloc) {
3601 Register RtlabWasteLimitValue = R10_ARG8;
3602 Register RfreeValue = RnewTopValue;
3603
3604 __ bind(Lallocate_shared);
3605 // Check if tlab should be discarded (refill_waste_limit >= free).
3606 __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3607 __ subf(RfreeValue, RoldTopValue, RendValue);
3608 __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
3609 __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
3610 __ bge(CCR0, Lslow_case);
3611
3612 // Increment waste limit to prevent getting stuck on this slow path.
3613 __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
3614 __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3615 }
3616 // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
3617 }
3618 // else: Always go the slow path.
3619
3620 // --------------------------------------------------------------------------
3621 // slow case
3622 __ bind(Lslow_case);
3623 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3624
3625 if (UseTLAB) {
3626 __ b(Ldone);
3627 // --------------------------------------------------------------------------
3628 // Init1: Zero out newly allocated memory.
3629
3630 if (!ZeroTLAB || allow_shared_alloc) {
3631 // Clear object fields.
3632 __ bind(Linitialize_object);
3633
3634 // Initialize remaining object fields.
3635 Register Rbase = Rtags;
3636 __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3637 __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3638 __ srdi(Rinstance_size, Rinstance_size, 3);
3639
3640 // Clear out object skipping header. Takes also care of the zero length case.
3641 __ clear_memory_doubleword(Rbase, Rinstance_size);
3642 // fallthru: __ b(Linitialize_header);
3643 }
3644
3645 // --------------------------------------------------------------------------
3646 // Init2: Initialize the header: mark, klass
3647 __ bind(Linitialize_header);
3648
3649 // Init mark.
3650 if (UseBiasedLocking) {
3651 __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3652 } else {
3653 __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
3654 }
3655 __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3656
3657 // Init klass.
3658 __ store_klass_gap(RallocatedObject);
3659 __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3660
3661 // Check and trigger dtrace event.
3662 {
3663 SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
3664 __ push(atos);
3665 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3666 __ pop(atos);
3667 }
3668 }
3669
3670 // continue
3671 __ bind(Ldone);
3672
3673 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3674 __ membar(Assembler::StoreStore);
3675 }
3676
3677 void TemplateTable::newarray() {
3678 transition(itos, atos);
3679
3680 __ lbz(R4, 1, R14_bcp);
3681 __ extsw(R5, R17_tos);
3682 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3683
3684 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3685 __ membar(Assembler::StoreStore);
3686 }
3687
3688 void TemplateTable::anewarray() {
3689 transition(itos, atos);
3690
3691 __ get_constant_pool(R4);
3692 __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3693 __ extsw(R6, R17_tos); // size
3694 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3695
3696 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3697 __ membar(Assembler::StoreStore);
3698 }
3699
3700 // Allocate a multi dimensional array
3701 void TemplateTable::multianewarray() {
3702 transition(vtos, atos);
3703
3704 Register Rptr = R31; // Needs to survive C call.
3705
3706 // Put ndims * wordSize into frame temp slot
3707 __ lbz(Rptr, 3, R14_bcp);
3708 __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3709 // Esp points past last_dim, so set to R4 to first_dim address.
3710 __ add(R4, Rptr, R15_esp);
3711 call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3712 // Pop all dimensions off the stack.
3713 __ add(R15_esp, Rptr, R15_esp);
3714
3715 // Must prevent reordering of stores for object initialization with stores that publish the new object.
3716 __ membar(Assembler::StoreStore);
3717 }
3718
3719 void TemplateTable::arraylength() {
3720 transition(atos, itos);
3721
3722 Label LnoException;
3723 __ verify_oop(R17_tos);
3724 __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3725 __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3726 }
3727
3728 // ============================================================================
3729 // Typechecks
3730
3731 void TemplateTable::checkcast() {
3732 transition(atos, atos);
3733
3734 Label Ldone, Lis_null, Lquicked, Lresolved;
3735 Register Roffset = R6_ARG4,
3736 RobjKlass = R4_ARG2,
3737 RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3738 Rcpool = R11_scratch1,
3739 Rtags = R12_scratch2;
3740
3741 // Null does not pass.
3742 __ cmpdi(CCR0, R17_tos, 0);
3743 __ beq(CCR0, Lis_null);
3744
3745 // Get constant pool tag to find out if the bytecode has already been "quickened".
3746 __ get_cpool_and_tags(Rcpool, Rtags);
3747
3748 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3749
3750 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3751 __ lbzx(Rtags, Rtags, Roffset);
3752
3753 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3754 __ beq(CCR0, Lquicked);
3755
3756 // Call into the VM to "quicken" instanceof.
3757 __ push_ptr(); // for GC
3758 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3759 __ get_vm_result_2(RspecifiedKlass);
3760 __ pop_ptr(); // Restore receiver.
3761 __ b(Lresolved);
3762
3763 // Extract target class from constant pool.
3764 __ bind(Lquicked);
3765 __ sldi(Roffset, Roffset, LogBytesPerWord);
3766 __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3767 __ isync(); // Order load of specified Klass wrt. tags.
3768 __ ldx(RspecifiedKlass, Rcpool, Roffset);
3769
3770 // Do the checkcast.
3771 __ bind(Lresolved);
3772 // Get value klass in RobjKlass.
3773 __ load_klass(RobjKlass, R17_tos);
3774 // Generate a fast subtype check. Branch to cast_ok if no failure. Return 0 if failure.
3775 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3776
3777 // Not a subtype; so must throw exception
3778 // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3779 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3780 __ mtctr(R11_scratch1);
3781 __ bctr();
3782
3783 // Profile the null case.
3784 __ align(32, 12);
3785 __ bind(Lis_null);
3786 __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3787
3788 __ align(32, 12);
3789 __ bind(Ldone);
3790 }
3791
3792 // Output:
3793 // - tos == 0: Obj was null or not an instance of class.
3794 // - tos == 1: Obj was an instance of class.
3795 void TemplateTable::instanceof() {
3796 transition(atos, itos);
3797
3798 Label Ldone, Lis_null, Lquicked, Lresolved;
3799 Register Roffset = R5_ARG3,
3800 RobjKlass = R4_ARG2,
3801 RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
3802 Rcpool = R11_scratch1,
3803 Rtags = R12_scratch2;
3804
3805 // Null does not pass.
3806 __ cmpdi(CCR0, R17_tos, 0);
3807 __ beq(CCR0, Lis_null);
3808
3809 // Get constant pool tag to find out if the bytecode has already been "quickened".
3810 __ get_cpool_and_tags(Rcpool, Rtags);
3811
3812 __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3813
3814 __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3815 __ lbzx(Rtags, Rtags, Roffset);
3816
3817 __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3818 __ beq(CCR0, Lquicked);
3819
3820 // Call into the VM to "quicken" instanceof.
3821 __ push_ptr(); // for GC
3822 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3823 __ get_vm_result_2(RspecifiedKlass);
3824 __ pop_ptr(); // Restore receiver.
3825 __ b(Lresolved);
3826
3827 // Extract target class from constant pool.
3828 __ bind(Lquicked);
3829 __ sldi(Roffset, Roffset, LogBytesPerWord);
3830 __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3831 __ isync(); // Order load of specified Klass wrt. tags.
3832 __ ldx(RspecifiedKlass, Rcpool, Roffset);
3833
3834 // Do the checkcast.
3835 __ bind(Lresolved);
3836 // Get value klass in RobjKlass.
3837 __ load_klass(RobjKlass, R17_tos);
3838 // Generate a fast subtype check. Branch to cast_ok if no failure. Return 0 if failure.
3839 __ li(R17_tos, 1);
3840 __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3841 __ li(R17_tos, 0);
3842
3843 if (ProfileInterpreter) {
3844 __ b(Ldone);
3845 }
3846
3847 // Profile the null case.
3848 __ align(32, 12);
3849 __ bind(Lis_null);
3850 __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
3851
3852 __ align(32, 12);
3853 __ bind(Ldone);
3854 }
3855
3856 // =============================================================================
3857 // Breakpoints
3858
3859 void TemplateTable::_breakpoint() {
3860 transition(vtos, vtos);
3861
3862 // Get the unpatched byte code.
3863 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
3864 __ mr(R31, R3_RET);
3865
3866 // Post the breakpoint event.
3867 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
3868
3869 // Complete the execution of original bytecode.
3870 __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
3871 }
3872
3873 // =============================================================================
3874 // Exceptions
3875
3876 void TemplateTable::athrow() {
3877 transition(atos, vtos);
3878
3879 // Exception oop is in tos
3880 __ verify_oop(R17_tos);
3881
3882 __ null_check_throw(R17_tos, -1, R11_scratch1);
3883
3884 // Throw exception interpreter entry expects exception oop to be in R3.
3885 __ mr(R3_RET, R17_tos);
3886 __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
3887 __ mtctr(R11_scratch1);
3888 __ bctr();
3889 }
3890
3891 // =============================================================================
3892 // Synchronization
3893 // Searches the basic object lock list on the stack for a free slot
3894 // and uses it to lock the obect in tos.
3895 //
3896 // Recursive locking is enabled by exiting the search if the same
3897 // object is already found in the list. Thus, a new basic lock obj lock
3898 // is allocated "higher up" in the stack and thus is found first
3899 // at next monitor exit.
3900 void TemplateTable::monitorenter() {
3901 transition(atos, vtos);
3902
3903 __ verify_oop(R17_tos);
3904
3905 Register Rcurrent_monitor = R11_scratch1,
3906 Rcurrent_obj = R12_scratch2,
3907 Robj_to_lock = R17_tos,
3908 Rscratch1 = R3_ARG1,
3909 Rscratch2 = R4_ARG2,
3910 Rscratch3 = R5_ARG3,
3911 Rcurrent_obj_addr = R6_ARG4;
3912
3913 // ------------------------------------------------------------------------------
3914 // Null pointer exception.
3915 __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
3916
3917 // Try to acquire a lock on the object.
3918 // Repeat until succeeded (i.e., until monitorenter returns true).
3919
3920 // ------------------------------------------------------------------------------
3921 // Find a free slot in the monitor block.
3922 Label Lfound, Lexit, Lallocate_new;
3923 ConditionRegister found_free_slot = CCR0,
3924 found_same_obj = CCR1,
3925 reached_limit = CCR6;
3926 {
3927 Label Lloop, Lentry;
3928 Register Rlimit = Rcurrent_monitor;
3929
3930 // Set up search loop - start with topmost monitor.
3931 __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
3932
3933 __ ld(Rlimit, 0, R1_SP);
3934 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
3935
3936 // Check if any slot is present => short cut to allocation if not.
3937 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3938 __ bgt(reached_limit, Lallocate_new);
3939
3940 // Pre-load topmost slot.
3941 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3942 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3943 // The search loop.
3944 __ bind(Lloop);
3945 // Found free slot?
3946 __ cmpdi(found_free_slot, Rcurrent_obj, 0);
3947 // Is this entry for same obj? If so, stop the search and take the found
3948 // free slot or allocate a new one to enable recursive locking.
3949 __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
3950 __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3951 __ beq(found_free_slot, Lexit);
3952 __ beq(found_same_obj, Lallocate_new);
3953 __ bgt(reached_limit, Lallocate_new);
3954 // Check if last allocated BasicLockObj reached.
3955 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3956 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3957 // Next iteration if unchecked BasicObjectLocks exist on the stack.
3958 __ b(Lloop);
3959 }
3960
3961 // ------------------------------------------------------------------------------
3962 // Check if we found a free slot.
3963 __ bind(Lexit);
3964
3965 __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
3966 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
3967 __ b(Lfound);
3968
3969 // We didn't find a free BasicObjLock => allocate one.
3970 __ align(32, 12);
3971 __ bind(Lallocate_new);
3972 __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
3973 __ mr(Rcurrent_monitor, R26_monitor);
3974 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
3975
3976 // ------------------------------------------------------------------------------
3977 // We now have a slot to lock.
3978 __ bind(Lfound);
3979
3980 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly.
3981 // The object has already been poped from the stack, so the expression stack looks correct.
3982 __ addi(R14_bcp, R14_bcp, 1);
3983
3984 __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
3985 __ lock_object(Rcurrent_monitor, Robj_to_lock);
3986
3987 // Check if there's enough space on the stack for the monitors after locking.
3988 Label Lskip_stack_check;
3989 // Optimization: If the monitors stack section is less then a std page size (4K) don't run
3990 // the stack check. There should be enough shadow pages to fit that in.
3991 __ ld(Rscratch3, 0, R1_SP);
3992 __ sub(Rscratch3, Rscratch3, R26_monitor);
3993 __ cmpdi(CCR0, Rscratch3, 4*K);
3994 __ blt(CCR0, Lskip_stack_check);
3995
3996 DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
3997 __ li(Rscratch1, 0);
3998 __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
3999
4000 __ align(32, 12);
4001 __ bind(Lskip_stack_check);
4002
4003 // The bcp has already been incremented. Just need to dispatch to next instruction.
4004 __ dispatch_next(vtos);
4005 }
4006
4007 void TemplateTable::monitorexit() {
4008 transition(atos, vtos);
4009 __ verify_oop(R17_tos);
4010
4011 Register Rcurrent_monitor = R11_scratch1,
4012 Rcurrent_obj = R12_scratch2,
4013 Robj_to_lock = R17_tos,
4014 Rcurrent_obj_addr = R3_ARG1,
4015 Rlimit = R4_ARG2;
4016 Label Lfound, Lillegal_monitor_state;
4017
4018 // Check corner case: unbalanced monitorEnter / Exit.
4019 __ ld(Rlimit, 0, R1_SP);
4020 __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4021
4022 // Null pointer check.
4023 __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4024
4025 __ cmpld(CCR0, R26_monitor, Rlimit);
4026 __ bgt(CCR0, Lillegal_monitor_state);
4027
4028 // Find the corresponding slot in the monitors stack section.
4029 {
4030 Label Lloop;
4031
4032 // Start with topmost monitor.
4033 __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4034 __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4035 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4036 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4037
4038 __ bind(Lloop);
4039 // Is this entry for same obj?
4040 __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4041 __ beq(CCR0, Lfound);
4042
4043 // Check if last allocated BasicLockObj reached.
4044
4045 __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4046 __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4047 __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4048
4049 // Next iteration if unchecked BasicObjectLocks exist on the stack.
4050 __ ble(CCR0, Lloop);
4051 }
4052
4053 // Fell through without finding the basic obj lock => throw up!
4054 __ bind(Lillegal_monitor_state);
4055 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4056 __ should_not_reach_here();
4057
4058 __ align(32, 12);
4059 __ bind(Lfound);
4060 __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4061 -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4062 __ unlock_object(Rcurrent_monitor);
4063 }
4064
4065 // ============================================================================
4066 // Wide bytecodes
4067
4068 // Wide instructions. Simply redirects to the wide entry point for that instruction.
4069 void TemplateTable::wide() {
4070 transition(vtos, vtos);
4071
4072 const Register Rtable = R11_scratch1,
4073 Rindex = R12_scratch2,
4074 Rtmp = R0;
4075
4076 __ lbz(Rindex, 1, R14_bcp);
4077
4078 __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4079
4080 __ slwi(Rindex, Rindex, LogBytesPerWord);
4081 __ ldx(Rtmp, Rtable, Rindex);
4082 __ mtctr(Rtmp);
4083 __ bctr();
4084 // Note: the bcp increment step is part of the individual wide bytecode implementations.
4085 }
4086 #endif // !CC_INTERP