1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/interp_masm.hpp"
29 #include "interpreter/templateTable.hpp"
30 #include "memory/universe.inline.hpp"
31 #include "oops/methodData.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/synchronizer.hpp"
38 #include "utilities/macros.hpp"
39
40 #ifndef CC_INTERP
41 #define __ _masm->
42
43 // Misc helpers
44
45 // Do an oop store like *(base + index + offset) = val
46 // index can be noreg,
47 static void do_oop_store(InterpreterMacroAssembler* _masm,
48 Register base,
49 Register index,
50 int offset,
51 Register val,
52 Register tmp,
53 BarrierSet::Name barrier,
54 bool precise) {
55 assert(tmp != val && tmp != base && tmp != index, "register collision");
56 assert(index == noreg || offset == 0, "only one offset");
57 switch (barrier) {
58 #if INCLUDE_ALL_GCS
59 case BarrierSet::G1SATBCT:
60 case BarrierSet::G1SATBCTLogging:
61 {
62 // Load and record the previous value.
63 __ g1_write_barrier_pre(base, index, offset,
64 noreg /* pre_val */,
65 tmp, true /*preserve_o_regs*/);
66
67 // G1 barrier needs uncompressed oop for region cross check.
68 Register new_val = val;
69 if (UseCompressedOops && val != G0) {
70 new_val = tmp;
71 __ mov(val, new_val);
72 }
73
74 if (index == noreg ) {
75 assert(Assembler::is_simm13(offset), "fix this code");
76 __ store_heap_oop(val, base, offset);
77 } else {
78 __ store_heap_oop(val, base, index);
79 }
80
81 // No need for post barrier if storing NULL
82 if (val != G0) {
83 if (precise) {
84 if (index == noreg) {
85 __ add(base, offset, base);
86 } else {
87 __ add(base, index, base);
88 }
89 }
90 __ g1_write_barrier_post(base, new_val, tmp);
91 }
92 }
93 break;
94 #endif // INCLUDE_ALL_GCS
95 case BarrierSet::CardTableModRef:
96 case BarrierSet::CardTableExtension:
97 {
98 if (index == noreg ) {
99 assert(Assembler::is_simm13(offset), "fix this code");
100 __ store_heap_oop(val, base, offset);
101 } else {
102 __ store_heap_oop(val, base, index);
103 }
104 // No need for post barrier if storing NULL
105 if (val != G0) {
106 if (precise) {
107 if (index == noreg) {
108 __ add(base, offset, base);
109 } else {
110 __ add(base, index, base);
111 }
112 }
113 __ card_write_barrier_post(base, val, tmp);
114 }
115 }
116 break;
117 case BarrierSet::ModRef:
118 case BarrierSet::Other:
119 ShouldNotReachHere();
120 break;
121 default :
122 ShouldNotReachHere();
123
124 }
125 }
126
127
128 //----------------------------------------------------------------------------------------------------
129 // Platform-dependent initialization
130
131 void TemplateTable::pd_initialize() {
132 // (none)
133 }
134
135
136 //----------------------------------------------------------------------------------------------------
137 // Condition conversion
138 Assembler::Condition ccNot(TemplateTable::Condition cc) {
139 switch (cc) {
140 case TemplateTable::equal : return Assembler::notEqual;
141 case TemplateTable::not_equal : return Assembler::equal;
142 case TemplateTable::less : return Assembler::greaterEqual;
143 case TemplateTable::less_equal : return Assembler::greater;
144 case TemplateTable::greater : return Assembler::lessEqual;
145 case TemplateTable::greater_equal: return Assembler::less;
146 }
147 ShouldNotReachHere();
148 return Assembler::zero;
149 }
150
151 //----------------------------------------------------------------------------------------------------
152 // Miscelaneous helper routines
153
154
155 Address TemplateTable::at_bcp(int offset) {
156 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
157 return Address(Lbcp, offset);
158 }
159
160
161 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
162 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
163 int byte_no) {
164 // With sharing on, may need to test Method* flag.
165 if (!RewriteBytecodes) return;
166 Label L_patch_done;
167
168 switch (bc) {
169 case Bytecodes::_fast_aputfield:
170 case Bytecodes::_fast_bputfield:
171 case Bytecodes::_fast_cputfield:
172 case Bytecodes::_fast_dputfield:
173 case Bytecodes::_fast_fputfield:
174 case Bytecodes::_fast_iputfield:
175 case Bytecodes::_fast_lputfield:
176 case Bytecodes::_fast_sputfield:
177 {
178 // We skip bytecode quickening for putfield instructions when
179 // the put_code written to the constant pool cache is zero.
180 // This is required so that every execution of this instruction
181 // calls out to InterpreterRuntime::resolve_get_put to do
182 // additional, required work.
183 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
184 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
185 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
186 __ set(bc, bc_reg);
187 __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done); // don't patch
188 }
189 break;
190 default:
191 assert(byte_no == -1, "sanity");
192 if (load_bc_into_bc_reg) {
193 __ set(bc, bc_reg);
194 }
195 }
196
197 if (JvmtiExport::can_post_breakpoint()) {
198 Label L_fast_patch;
199 __ ldub(at_bcp(0), temp_reg);
200 __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
201 // perform the quickening, slowly, in the bowels of the breakpoint table
202 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
203 __ ba_short(L_patch_done);
204 __ bind(L_fast_patch);
205 }
206
207 #ifdef ASSERT
208 Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
209 Label L_okay;
210 __ ldub(at_bcp(0), temp_reg);
211 __ cmp(temp_reg, orig_bytecode);
212 __ br(Assembler::equal, false, Assembler::pt, L_okay);
213 __ delayed()->cmp(temp_reg, bc_reg);
214 __ br(Assembler::equal, false, Assembler::pt, L_okay);
215 __ delayed()->nop();
216 __ stop("patching the wrong bytecode");
217 __ bind(L_okay);
218 #endif
219
220 // patch bytecode
221 __ stb(bc_reg, at_bcp(0));
222 __ bind(L_patch_done);
223 }
224
225 //----------------------------------------------------------------------------------------------------
226 // Individual instructions
227
228 void TemplateTable::nop() {
229 transition(vtos, vtos);
230 // nothing to do
231 }
232
233 void TemplateTable::shouldnotreachhere() {
234 transition(vtos, vtos);
235 __ stop("shouldnotreachhere bytecode");
236 }
237
238 void TemplateTable::aconst_null() {
239 transition(vtos, atos);
240 __ clr(Otos_i);
241 }
242
243
244 void TemplateTable::iconst(int value) {
245 transition(vtos, itos);
246 __ set(value, Otos_i);
247 }
248
249
250 void TemplateTable::lconst(int value) {
251 transition(vtos, ltos);
252 assert(value >= 0, "check this code");
253 #ifdef _LP64
254 __ set(value, Otos_l);
255 #else
256 __ set(value, Otos_l2);
257 __ clr( Otos_l1);
258 #endif
259 }
260
261
262 void TemplateTable::fconst(int value) {
263 transition(vtos, ftos);
264 static float zero = 0.0, one = 1.0, two = 2.0;
265 float* p;
266 switch( value ) {
267 default: ShouldNotReachHere();
268 case 0: p = &zero; break;
269 case 1: p = &one; break;
270 case 2: p = &two; break;
271 }
272 AddressLiteral a(p);
273 __ sethi(a, G3_scratch);
274 __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
275 }
276
277
278 void TemplateTable::dconst(int value) {
279 transition(vtos, dtos);
280 static double zero = 0.0, one = 1.0;
281 double* p;
282 switch( value ) {
283 default: ShouldNotReachHere();
284 case 0: p = &zero; break;
285 case 1: p = &one; break;
286 }
287 AddressLiteral a(p);
288 __ sethi(a, G3_scratch);
289 __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
290 }
291
292
293 // %%%%% Should factore most snippet templates across platforms
294
295 void TemplateTable::bipush() {
296 transition(vtos, itos);
297 __ ldsb( at_bcp(1), Otos_i );
298 }
299
300 void TemplateTable::sipush() {
301 transition(vtos, itos);
302 __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
303 }
304
305 void TemplateTable::ldc(bool wide) {
306 transition(vtos, vtos);
307 Label call_ldc, notInt, isString, notString, notClass, exit;
308
309 if (wide) {
310 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
311 } else {
312 __ ldub(Lbcp, 1, O1);
313 }
314 __ get_cpool_and_tags(O0, O2);
315
316 const int base_offset = ConstantPool::header_size() * wordSize;
317 const int tags_offset = Array<u1>::base_offset_in_bytes();
318
319 // get type from tags
320 __ add(O2, tags_offset, O2);
321 __ ldub(O2, O1, O2);
322
323 // unresolved class? If so, must resolve
324 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);
325
326 // unresolved class in error state
327 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
328
329 __ cmp(O2, JVM_CONSTANT_Class); // need to call vm to get java mirror of the class
330 __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
331 __ delayed()->add(O0, base_offset, O0);
332
333 __ bind(call_ldc);
334 __ set(wide, O1);
335 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
336 __ push(atos);
337 __ ba_short(exit);
338
339 __ bind(notClass);
340 // __ add(O0, base_offset, O0);
341 __ sll(O1, LogBytesPerWord, O1);
342 __ cmp(O2, JVM_CONSTANT_Integer);
343 __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
344 __ delayed()->cmp(O2, JVM_CONSTANT_String);
345 __ ld(O0, O1, Otos_i);
346 __ push(itos);
347 __ ba_short(exit);
348
349 __ bind(notInt);
350 // __ cmp(O2, JVM_CONSTANT_String);
351 __ brx(Assembler::notEqual, true, Assembler::pt, notString);
352 __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
353 __ bind(isString);
354 __ stop("string should be rewritten to fast_aldc");
355 __ ba_short(exit);
356
357 __ bind(notString);
358 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
359 __ push(ftos);
360
361 __ bind(exit);
362 }
363
364 // Fast path for caching oop constants.
365 // %%% We should use this to handle Class and String constants also.
366 // %%% It will simplify the ldc/primitive path considerably.
367 void TemplateTable::fast_aldc(bool wide) {
368 transition(vtos, atos);
369
370 int index_size = wide ? sizeof(u2) : sizeof(u1);
371 Label resolved;
372
373 // We are resolved if the resolved reference cache entry contains a
374 // non-null object (CallSite, etc.)
375 assert_different_registers(Otos_i, G3_scratch);
376 __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size); // load index => G3_scratch
377 __ load_resolved_reference_at_index(Otos_i, G3_scratch);
378 __ tst(Otos_i);
379 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
380 __ delayed()->set((int)bytecode(), O1);
381
382 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
383
384 // first time invocation - must resolve first
385 __ call_VM(Otos_i, entry, O1);
386 __ bind(resolved);
387 __ verify_oop(Otos_i);
388 }
389
390
391 void TemplateTable::ldc2_w() {
392 transition(vtos, vtos);
393 Label Long, exit;
394
395 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
396 __ get_cpool_and_tags(O0, O2);
397
398 const int base_offset = ConstantPool::header_size() * wordSize;
399 const int tags_offset = Array<u1>::base_offset_in_bytes();
400 // get type from tags
401 __ add(O2, tags_offset, O2);
402 __ ldub(O2, O1, O2);
403
404 __ sll(O1, LogBytesPerWord, O1);
405 __ add(O0, O1, G3_scratch);
406
407 __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
408 // A double can be placed at word-aligned locations in the constant pool.
409 // Check out Conversions.java for an example.
410 // Also ConstantPool::header_size() is 20, which makes it very difficult
411 // to double-align double on the constant pool. SG, 11/7/97
412 #ifdef _LP64
413 __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
414 #else
415 FloatRegister f = Ftos_d;
416 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
417 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
418 f->successor());
419 #endif
420 __ push(dtos);
421 __ ba_short(exit);
422
423 __ bind(Long);
424 #ifdef _LP64
425 __ ldx(G3_scratch, base_offset, Otos_l);
426 #else
427 __ ld(G3_scratch, base_offset, Otos_l);
428 __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
429 #endif
430 __ push(ltos);
431
432 __ bind(exit);
433 }
434
435
436 void TemplateTable::locals_index(Register reg, int offset) {
437 __ ldub( at_bcp(offset), reg );
438 }
439
440
441 void TemplateTable::locals_index_wide(Register reg) {
442 // offset is 2, not 1, because Lbcp points to wide prefix code
443 __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
444 }
445
446 void TemplateTable::iload() {
447 transition(vtos, itos);
448 // Rewrite iload,iload pair into fast_iload2
449 // iload,caload pair into fast_icaload
450 if (RewriteFrequentPairs) {
451 Label rewrite, done;
452
453 // get next byte
454 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);
455
456 // if _iload, wait to rewrite to iload2. We only want to rewrite the
457 // last two iloads in a pair. Comparing against fast_iload means that
458 // the next bytecode is neither an iload or a caload, and therefore
459 // an iload pair.
460 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);
461
462 __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
463 __ br(Assembler::equal, false, Assembler::pn, rewrite);
464 __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);
465
466 __ cmp(G3_scratch, (int)Bytecodes::_caload);
467 __ br(Assembler::equal, false, Assembler::pn, rewrite);
468 __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);
469
470 __ set(Bytecodes::_fast_iload, G4_scratch); // don't check again
471 // rewrite
472 // G4_scratch: fast bytecode
473 __ bind(rewrite);
474 patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
475 __ bind(done);
476 }
477
478 // Get the local value into tos
479 locals_index(G3_scratch);
480 __ access_local_int( G3_scratch, Otos_i );
481 }
482
483 void TemplateTable::fast_iload2() {
484 transition(vtos, itos);
485 locals_index(G3_scratch);
486 __ access_local_int( G3_scratch, Otos_i );
487 __ push_i();
488 locals_index(G3_scratch, 3); // get next bytecode's local index.
489 __ access_local_int( G3_scratch, Otos_i );
490 }
491
492 void TemplateTable::fast_iload() {
493 transition(vtos, itos);
494 locals_index(G3_scratch);
495 __ access_local_int( G3_scratch, Otos_i );
496 }
497
498 void TemplateTable::lload() {
499 transition(vtos, ltos);
500 locals_index(G3_scratch);
501 __ access_local_long( G3_scratch, Otos_l );
502 }
503
504
505 void TemplateTable::fload() {
506 transition(vtos, ftos);
507 locals_index(G3_scratch);
508 __ access_local_float( G3_scratch, Ftos_f );
509 }
510
511
512 void TemplateTable::dload() {
513 transition(vtos, dtos);
514 locals_index(G3_scratch);
515 __ access_local_double( G3_scratch, Ftos_d );
516 }
517
518
519 void TemplateTable::aload() {
520 transition(vtos, atos);
521 locals_index(G3_scratch);
522 __ access_local_ptr( G3_scratch, Otos_i);
523 }
524
525
526 void TemplateTable::wide_iload() {
527 transition(vtos, itos);
528 locals_index_wide(G3_scratch);
529 __ access_local_int( G3_scratch, Otos_i );
530 }
531
532
533 void TemplateTable::wide_lload() {
534 transition(vtos, ltos);
535 locals_index_wide(G3_scratch);
536 __ access_local_long( G3_scratch, Otos_l );
537 }
538
539
540 void TemplateTable::wide_fload() {
541 transition(vtos, ftos);
542 locals_index_wide(G3_scratch);
543 __ access_local_float( G3_scratch, Ftos_f );
544 }
545
546
547 void TemplateTable::wide_dload() {
548 transition(vtos, dtos);
549 locals_index_wide(G3_scratch);
550 __ access_local_double( G3_scratch, Ftos_d );
551 }
552
553
554 void TemplateTable::wide_aload() {
555 transition(vtos, atos);
556 locals_index_wide(G3_scratch);
557 __ access_local_ptr( G3_scratch, Otos_i );
558 __ verify_oop(Otos_i);
559 }
560
561
562 void TemplateTable::iaload() {
563 transition(itos, itos);
564 // Otos_i: index
565 // tos: array
566 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
567 __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
568 }
569
570
571 void TemplateTable::laload() {
572 transition(itos, ltos);
573 // Otos_i: index
574 // O2: array
575 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
576 __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
577 }
578
579
580 void TemplateTable::faload() {
581 transition(itos, ftos);
582 // Otos_i: index
583 // O2: array
584 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
585 __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
586 }
587
588
589 void TemplateTable::daload() {
590 transition(itos, dtos);
591 // Otos_i: index
592 // O2: array
593 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
594 __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
595 }
596
597
598 void TemplateTable::aaload() {
599 transition(itos, atos);
600 // Otos_i: index
601 // tos: array
602 __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
603 __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
604 __ verify_oop(Otos_i);
605 }
606
607
608 void TemplateTable::baload() {
609 transition(itos, itos);
610 // Otos_i: index
611 // tos: array
612 __ index_check(O2, Otos_i, 0, G3_scratch, O3);
613 __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
614 }
615
616
617 void TemplateTable::caload() {
618 transition(itos, itos);
619 // Otos_i: index
620 // tos: array
621 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
622 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
623 }
624
625 void TemplateTable::fast_icaload() {
626 transition(vtos, itos);
627 // Otos_i: index
628 // tos: array
629 locals_index(G3_scratch);
630 __ access_local_int( G3_scratch, Otos_i );
631 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
632 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
633 }
634
635
636 void TemplateTable::saload() {
637 transition(itos, itos);
638 // Otos_i: index
639 // tos: array
640 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
641 __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
642 }
643
644
645 void TemplateTable::iload(int n) {
646 transition(vtos, itos);
647 __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
648 }
649
650
651 void TemplateTable::lload(int n) {
652 transition(vtos, ltos);
653 assert(n+1 < Argument::n_register_parameters, "would need more code");
654 __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
655 }
656
657
658 void TemplateTable::fload(int n) {
659 transition(vtos, ftos);
660 assert(n < Argument::n_register_parameters, "would need more code");
661 __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
662 }
663
664
665 void TemplateTable::dload(int n) {
666 transition(vtos, dtos);
667 FloatRegister dst = Ftos_d;
668 __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
669 }
670
671
672 void TemplateTable::aload(int n) {
673 transition(vtos, atos);
674 __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
675 }
676
677
678 void TemplateTable::aload_0() {
679 transition(vtos, atos);
680
681 // According to bytecode histograms, the pairs:
682 //
683 // _aload_0, _fast_igetfield (itos)
684 // _aload_0, _fast_agetfield (atos)
685 // _aload_0, _fast_fgetfield (ftos)
686 //
687 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
688 // bytecode checks the next bytecode and then rewrites the current
689 // bytecode into a pair bytecode; otherwise it rewrites the current
690 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
691 //
692 if (RewriteFrequentPairs) {
693 Label rewrite, done;
694
695 // get next byte
696 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);
697
698 // do actual aload_0
699 aload(0);
700
701 // if _getfield then wait with rewrite
702 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
703
704 // if _igetfield then rewrite to _fast_iaccess_0
705 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
706 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
707 __ br(Assembler::equal, false, Assembler::pn, rewrite);
708 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);
709
710 // if _agetfield then rewrite to _fast_aaccess_0
711 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
712 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
713 __ br(Assembler::equal, false, Assembler::pn, rewrite);
714 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);
715
716 // if _fgetfield then rewrite to _fast_faccess_0
717 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
718 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
719 __ br(Assembler::equal, false, Assembler::pn, rewrite);
720 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);
721
722 // else rewrite to _fast_aload0
723 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
724 __ set(Bytecodes::_fast_aload_0, G4_scratch);
725
726 // rewrite
727 // G4_scratch: fast bytecode
728 __ bind(rewrite);
729 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
730 __ bind(done);
731 } else {
732 aload(0);
733 }
734 }
735
736
737 void TemplateTable::istore() {
738 transition(itos, vtos);
739 locals_index(G3_scratch);
740 __ store_local_int( G3_scratch, Otos_i );
741 }
742
743
744 void TemplateTable::lstore() {
745 transition(ltos, vtos);
746 locals_index(G3_scratch);
747 __ store_local_long( G3_scratch, Otos_l );
748 }
749
750
751 void TemplateTable::fstore() {
752 transition(ftos, vtos);
753 locals_index(G3_scratch);
754 __ store_local_float( G3_scratch, Ftos_f );
755 }
756
757
758 void TemplateTable::dstore() {
759 transition(dtos, vtos);
760 locals_index(G3_scratch);
761 __ store_local_double( G3_scratch, Ftos_d );
762 }
763
764
765 void TemplateTable::astore() {
766 transition(vtos, vtos);
767 __ load_ptr(0, Otos_i);
768 __ inc(Lesp, Interpreter::stackElementSize);
769 __ verify_oop_or_return_address(Otos_i, G3_scratch);
770 locals_index(G3_scratch);
771 __ store_local_ptr(G3_scratch, Otos_i);
772 }
773
774
775 void TemplateTable::wide_istore() {
776 transition(vtos, vtos);
777 __ pop_i();
778 locals_index_wide(G3_scratch);
779 __ store_local_int( G3_scratch, Otos_i );
780 }
781
782
783 void TemplateTable::wide_lstore() {
784 transition(vtos, vtos);
785 __ pop_l();
786 locals_index_wide(G3_scratch);
787 __ store_local_long( G3_scratch, Otos_l );
788 }
789
790
791 void TemplateTable::wide_fstore() {
792 transition(vtos, vtos);
793 __ pop_f();
794 locals_index_wide(G3_scratch);
795 __ store_local_float( G3_scratch, Ftos_f );
796 }
797
798
799 void TemplateTable::wide_dstore() {
800 transition(vtos, vtos);
801 __ pop_d();
802 locals_index_wide(G3_scratch);
803 __ store_local_double( G3_scratch, Ftos_d );
804 }
805
806
807 void TemplateTable::wide_astore() {
808 transition(vtos, vtos);
809 __ load_ptr(0, Otos_i);
810 __ inc(Lesp, Interpreter::stackElementSize);
811 __ verify_oop_or_return_address(Otos_i, G3_scratch);
812 locals_index_wide(G3_scratch);
813 __ store_local_ptr(G3_scratch, Otos_i);
814 }
815
816
817 void TemplateTable::iastore() {
818 transition(itos, vtos);
819 __ pop_i(O2); // index
820 // Otos_i: val
821 // O3: array
822 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
823 __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
824 }
825
826
827 void TemplateTable::lastore() {
828 transition(ltos, vtos);
829 __ pop_i(O2); // index
830 // Otos_l: val
831 // O3: array
832 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
833 __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
834 }
835
836
837 void TemplateTable::fastore() {
838 transition(ftos, vtos);
839 __ pop_i(O2); // index
840 // Ftos_f: val
841 // O3: array
842 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
843 __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
844 }
845
846
847 void TemplateTable::dastore() {
848 transition(dtos, vtos);
849 __ pop_i(O2); // index
850 // Fos_d: val
851 // O3: array
852 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
853 __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
854 }
855
856
857 void TemplateTable::aastore() {
858 Label store_ok, is_null, done;
859 transition(vtos, vtos);
860 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
861 __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index
862 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array
863 // Otos_i: val
864 // O2: index
865 // O3: array
866 __ verify_oop(Otos_i);
867 __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
868
869 // do array store check - check for NULL value first
870 __ br_null_short( Otos_i, Assembler::pn, is_null );
871
872 __ load_klass(O3, O4); // get array klass
873 __ load_klass(Otos_i, O5); // get value klass
874
875 // do fast instanceof cache test
876
877 __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);
878
879 assert(Otos_i == O0, "just checking");
880
881 // Otos_i: value
882 // O1: addr - offset
883 // O2: index
884 // O3: array
885 // O4: array element klass
886 // O5: value klass
887
888 // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
889
890 // Generate a fast subtype check. Branch to store_ok if no
891 // failure. Throw if failure.
892 __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
893
894 // Not a subtype; so must throw exception
895 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );
896
897 // Store is OK.
898 __ bind(store_ok);
899 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
900
901 __ ba(done);
902 __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
903
904 __ bind(is_null);
905 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
906
907 __ profile_null_seen(G3_scratch);
908 __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
909 __ bind(done);
910 }
911
912
913 void TemplateTable::bastore() {
914 transition(itos, vtos);
915 __ pop_i(O2); // index
916 // Otos_i: val
917 // O3: array
918 __ index_check(O3, O2, 0, G3_scratch, O2);
919 __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
920 }
921
922
923 void TemplateTable::castore() {
924 transition(itos, vtos);
925 __ pop_i(O2); // index
926 // Otos_i: val
927 // O3: array
928 __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
929 __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
930 }
931
932
933 void TemplateTable::sastore() {
934 // %%%%% Factor across platform
935 castore();
936 }
937
938
939 void TemplateTable::istore(int n) {
940 transition(itos, vtos);
941 __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
942 }
943
944
945 void TemplateTable::lstore(int n) {
946 transition(ltos, vtos);
947 assert(n+1 < Argument::n_register_parameters, "only handle register cases");
948 __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
949
950 }
951
952
953 void TemplateTable::fstore(int n) {
954 transition(ftos, vtos);
955 assert(n < Argument::n_register_parameters, "only handle register cases");
956 __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
957 }
958
959
960 void TemplateTable::dstore(int n) {
961 transition(dtos, vtos);
962 FloatRegister src = Ftos_d;
963 __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
964 }
965
966
967 void TemplateTable::astore(int n) {
968 transition(vtos, vtos);
969 __ load_ptr(0, Otos_i);
970 __ inc(Lesp, Interpreter::stackElementSize);
971 __ verify_oop_or_return_address(Otos_i, G3_scratch);
972 __ store_local_ptr(n, Otos_i);
973 }
974
975
976 void TemplateTable::pop() {
977 transition(vtos, vtos);
978 __ inc(Lesp, Interpreter::stackElementSize);
979 }
980
981
982 void TemplateTable::pop2() {
983 transition(vtos, vtos);
984 __ inc(Lesp, 2 * Interpreter::stackElementSize);
985 }
986
987
988 void TemplateTable::dup() {
989 transition(vtos, vtos);
990 // stack: ..., a
991 // load a and tag
992 __ load_ptr(0, Otos_i);
993 __ push_ptr(Otos_i);
994 // stack: ..., a, a
995 }
996
997
998 void TemplateTable::dup_x1() {
999 transition(vtos, vtos);
1000 // stack: ..., a, b
1001 __ load_ptr( 1, G3_scratch); // get a
1002 __ load_ptr( 0, Otos_l1); // get b
1003 __ store_ptr(1, Otos_l1); // put b
1004 __ store_ptr(0, G3_scratch); // put a - like swap
1005 __ push_ptr(Otos_l1); // push b
1006 // stack: ..., b, a, b
1007 }
1008
1009
1010 void TemplateTable::dup_x2() {
1011 transition(vtos, vtos);
1012 // stack: ..., a, b, c
1013 // get c and push on stack, reuse registers
1014 __ load_ptr( 0, G3_scratch); // get c
1015 __ push_ptr(G3_scratch); // push c with tag
1016 // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
1017 // (stack offsets n+1 now)
1018 __ load_ptr( 3, Otos_l1); // get a
1019 __ store_ptr(3, G3_scratch); // put c at 3
1020 // stack: ..., c, b, c, c (a in reg)
1021 __ load_ptr( 2, G3_scratch); // get b
1022 __ store_ptr(2, Otos_l1); // put a at 2
1023 // stack: ..., c, a, c, c (b in reg)
1024 __ store_ptr(1, G3_scratch); // put b at 1
1025 // stack: ..., c, a, b, c
1026 }
1027
1028
1029 void TemplateTable::dup2() {
1030 transition(vtos, vtos);
1031 __ load_ptr(1, G3_scratch); // get a
1032 __ load_ptr(0, Otos_l1); // get b
1033 __ push_ptr(G3_scratch); // push a
1034 __ push_ptr(Otos_l1); // push b
1035 // stack: ..., a, b, a, b
1036 }
1037
1038
1039 void TemplateTable::dup2_x1() {
1040 transition(vtos, vtos);
1041 // stack: ..., a, b, c
1042 __ load_ptr( 1, Lscratch); // get b
1043 __ load_ptr( 2, Otos_l1); // get a
1044 __ store_ptr(2, Lscratch); // put b at a
1045 // stack: ..., b, b, c
1046 __ load_ptr( 0, G3_scratch); // get c
1047 __ store_ptr(1, G3_scratch); // put c at b
1048 // stack: ..., b, c, c
1049 __ store_ptr(0, Otos_l1); // put a at c
1050 // stack: ..., b, c, a
1051 __ push_ptr(Lscratch); // push b
1052 __ push_ptr(G3_scratch); // push c
1053 // stack: ..., b, c, a, b, c
1054 }
1055
1056
1057 // The spec says that these types can be a mixture of category 1 (1 word)
1058 // types and/or category 2 types (long and doubles)
1059 void TemplateTable::dup2_x2() {
1060 transition(vtos, vtos);
1061 // stack: ..., a, b, c, d
1062 __ load_ptr( 1, Lscratch); // get c
1063 __ load_ptr( 3, Otos_l1); // get a
1064 __ store_ptr(3, Lscratch); // put c at 3
1065 __ store_ptr(1, Otos_l1); // put a at 1
1066 // stack: ..., c, b, a, d
1067 __ load_ptr( 2, G3_scratch); // get b
1068 __ load_ptr( 0, Otos_l1); // get d
1069 __ store_ptr(0, G3_scratch); // put b at 0
1070 __ store_ptr(2, Otos_l1); // put d at 2
1071 // stack: ..., c, d, a, b
1072 __ push_ptr(Lscratch); // push c
1073 __ push_ptr(Otos_l1); // push d
1074 // stack: ..., c, d, a, b, c, d
1075 }
1076
1077
1078 void TemplateTable::swap() {
1079 transition(vtos, vtos);
1080 // stack: ..., a, b
1081 __ load_ptr( 1, G3_scratch); // get a
1082 __ load_ptr( 0, Otos_l1); // get b
1083 __ store_ptr(0, G3_scratch); // put b
1084 __ store_ptr(1, Otos_l1); // put a
1085 // stack: ..., b, a
1086 }
1087
1088
1089 void TemplateTable::iop2(Operation op) {
1090 transition(itos, itos);
1091 __ pop_i(O1);
1092 switch (op) {
1093 case add: __ add(O1, Otos_i, Otos_i); break;
1094 case sub: __ sub(O1, Otos_i, Otos_i); break;
1095 // %%%%% Mul may not exist: better to call .mul?
1096 case mul: __ smul(O1, Otos_i, Otos_i); break;
1097 case _and: __ and3(O1, Otos_i, Otos_i); break;
1098 case _or: __ or3(O1, Otos_i, Otos_i); break;
1099 case _xor: __ xor3(O1, Otos_i, Otos_i); break;
1100 case shl: __ sll(O1, Otos_i, Otos_i); break;
1101 case shr: __ sra(O1, Otos_i, Otos_i); break;
1102 case ushr: __ srl(O1, Otos_i, Otos_i); break;
1103 default: ShouldNotReachHere();
1104 }
1105 }
1106
1107
1108 void TemplateTable::lop2(Operation op) {
1109 transition(ltos, ltos);
1110 __ pop_l(O2);
1111 switch (op) {
1112 #ifdef _LP64
1113 case add: __ add(O2, Otos_l, Otos_l); break;
1114 case sub: __ sub(O2, Otos_l, Otos_l); break;
1115 case _and: __ and3(O2, Otos_l, Otos_l); break;
1116 case _or: __ or3(O2, Otos_l, Otos_l); break;
1117 case _xor: __ xor3(O2, Otos_l, Otos_l); break;
1118 #else
1119 case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
1120 case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
1121 case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
1122 case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
1123 case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
1124 #endif
1125 default: ShouldNotReachHere();
1126 }
1127 }
1128
1129
1130 void TemplateTable::idiv() {
1131 // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
1132 // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1133
1134 transition(itos, itos);
1135 __ pop_i(O1); // get 1st op
1136
1137 // Y contains upper 32 bits of result, set it to 0 or all ones
1138 __ wry(G0);
1139 __ mov(~0, G3_scratch);
1140
1141 __ tst(O1);
1142 Label neg;
1143 __ br(Assembler::negative, true, Assembler::pn, neg);
1144 __ delayed()->wry(G3_scratch);
1145 __ bind(neg);
1146
1147 Label ok;
1148 __ tst(Otos_i);
1149 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1150
1151 const int min_int = 0x80000000;
1152 Label regular;
1153 __ cmp(Otos_i, -1);
1154 __ br(Assembler::notEqual, false, Assembler::pt, regular);
1155 #ifdef _LP64
1156 // Don't put set in delay slot
1157 // Set will turn into multiple instructions in 64 bit mode
1158 __ delayed()->nop();
1159 __ set(min_int, G4_scratch);
1160 #else
1161 __ delayed()->set(min_int, G4_scratch);
1162 #endif
1163 Label done;
1164 __ cmp(O1, G4_scratch);
1165 __ br(Assembler::equal, true, Assembler::pt, done);
1166 __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)
1167
1168 __ bind(regular);
1169 __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1170 __ bind(done);
1171 }
1172
1173
1174 void TemplateTable::irem() {
1175 transition(itos, itos);
1176 __ mov(Otos_i, O2); // save divisor
1177 idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
1178 __ smul(Otos_i, O2, Otos_i);
1179 __ sub(O1, Otos_i, Otos_i);
1180 }
1181
1182
1183 void TemplateTable::lmul() {
1184 transition(ltos, ltos);
1185 __ pop_l(O2);
1186 #ifdef _LP64
1187 __ mulx(Otos_l, O2, Otos_l);
1188 #else
1189 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
1190 #endif
1191
1192 }
1193
1194
1195 void TemplateTable::ldiv() {
1196 transition(ltos, ltos);
1197
1198 // check for zero
1199 __ pop_l(O2);
1200 #ifdef _LP64
1201 __ tst(Otos_l);
1202 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1203 __ sdivx(O2, Otos_l, Otos_l);
1204 #else
1205 __ orcc(Otos_l1, Otos_l2, G0);
1206 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1207 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1208 #endif
1209 }
1210
1211
1212 void TemplateTable::lrem() {
1213 transition(ltos, ltos);
1214
1215 // check for zero
1216 __ pop_l(O2);
1217 #ifdef _LP64
1218 __ tst(Otos_l);
1219 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1220 __ sdivx(O2, Otos_l, Otos_l2);
1221 __ mulx (Otos_l2, Otos_l, Otos_l2);
1222 __ sub (O2, Otos_l2, Otos_l);
1223 #else
1224 __ orcc(Otos_l1, Otos_l2, G0);
1225 __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1226 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1227 #endif
1228 }
1229
1230
1231 void TemplateTable::lshl() {
1232 transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1233
1234 __ pop_l(O2); // shift value in O2, O3
1235 #ifdef _LP64
1236 __ sllx(O2, Otos_i, Otos_l);
1237 #else
1238 __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1239 #endif
1240 }
1241
1242
1243 void TemplateTable::lshr() {
1244 transition(itos, ltos); // %%%% see lshl comment
1245
1246 __ pop_l(O2); // shift value in O2, O3
1247 #ifdef _LP64
1248 __ srax(O2, Otos_i, Otos_l);
1249 #else
1250 __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1251 #endif
1252 }
1253
1254
1255
1256 void TemplateTable::lushr() {
1257 transition(itos, ltos); // %%%% see lshl comment
1258
1259 __ pop_l(O2); // shift value in O2, O3
1260 #ifdef _LP64
1261 __ srlx(O2, Otos_i, Otos_l);
1262 #else
1263 __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1264 #endif
1265 }
1266
1267
1268 void TemplateTable::fop2(Operation op) {
1269 transition(ftos, ftos);
1270 switch (op) {
1271 case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1272 case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1273 case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1274 case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1275 case rem:
1276 assert(Ftos_f == F0, "just checking");
1277 #ifdef _LP64
1278 // LP64 calling conventions use F1, F3 for passing 2 floats
1279 __ pop_f(F1);
1280 __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
1281 #else
1282 __ pop_i(O0);
1283 __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
1284 __ ld( __ d_tmp, O1 );
1285 #endif
1286 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1287 assert( Ftos_f == F0, "fix this code" );
1288 break;
1289
1290 default: ShouldNotReachHere();
1291 }
1292 }
1293
1294
1295 void TemplateTable::dop2(Operation op) {
1296 transition(dtos, dtos);
1297 switch (op) {
1298 case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1299 case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1300 case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1301 case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1302 case rem:
1303 #ifdef _LP64
1304 // Pass arguments in D0, D2
1305 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1306 __ pop_d( F0 );
1307 #else
1308 // Pass arguments in O0O1, O2O3
1309 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1310 __ ldd( __ d_tmp, O2 );
1311 __ pop_d(Ftos_f);
1312 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1313 __ ldd( __ d_tmp, O0 );
1314 #endif
1315 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1316 assert( Ftos_d == F0, "fix this code" );
1317 break;
1318
1319 default: ShouldNotReachHere();
1320 }
1321 }
1322
1323
1324 void TemplateTable::ineg() {
1325 transition(itos, itos);
1326 __ neg(Otos_i);
1327 }
1328
1329
1330 void TemplateTable::lneg() {
1331 transition(ltos, ltos);
1332 #ifdef _LP64
1333 __ sub(G0, Otos_l, Otos_l);
1334 #else
1335 __ lneg(Otos_l1, Otos_l2);
1336 #endif
1337 }
1338
1339
1340 void TemplateTable::fneg() {
1341 transition(ftos, ftos);
1342 __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
1343 }
1344
1345
1346 void TemplateTable::dneg() {
1347 transition(dtos, dtos);
1348 __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
1349 }
1350
1351
1352 void TemplateTable::iinc() {
1353 transition(vtos, vtos);
1354 locals_index(G3_scratch);
1355 __ ldsb(Lbcp, 2, O2); // load constant
1356 __ access_local_int(G3_scratch, Otos_i);
1357 __ add(Otos_i, O2, Otos_i);
1358 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1359 }
1360
1361
1362 void TemplateTable::wide_iinc() {
1363 transition(vtos, vtos);
1364 locals_index_wide(G3_scratch);
1365 __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
1366 __ access_local_int(G3_scratch, Otos_i);
1367 __ add(Otos_i, O3, Otos_i);
1368 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1369 }
1370
1371
1372 void TemplateTable::convert() {
1373 // %%%%% Factor this first part accross platforms
1374 #ifdef ASSERT
1375 TosState tos_in = ilgl;
1376 TosState tos_out = ilgl;
1377 switch (bytecode()) {
1378 case Bytecodes::_i2l: // fall through
1379 case Bytecodes::_i2f: // fall through
1380 case Bytecodes::_i2d: // fall through
1381 case Bytecodes::_i2b: // fall through
1382 case Bytecodes::_i2c: // fall through
1383 case Bytecodes::_i2s: tos_in = itos; break;
1384 case Bytecodes::_l2i: // fall through
1385 case Bytecodes::_l2f: // fall through
1386 case Bytecodes::_l2d: tos_in = ltos; break;
1387 case Bytecodes::_f2i: // fall through
1388 case Bytecodes::_f2l: // fall through
1389 case Bytecodes::_f2d: tos_in = ftos; break;
1390 case Bytecodes::_d2i: // fall through
1391 case Bytecodes::_d2l: // fall through
1392 case Bytecodes::_d2f: tos_in = dtos; break;
1393 default : ShouldNotReachHere();
1394 }
1395 switch (bytecode()) {
1396 case Bytecodes::_l2i: // fall through
1397 case Bytecodes::_f2i: // fall through
1398 case Bytecodes::_d2i: // fall through
1399 case Bytecodes::_i2b: // fall through
1400 case Bytecodes::_i2c: // fall through
1401 case Bytecodes::_i2s: tos_out = itos; break;
1402 case Bytecodes::_i2l: // fall through
1403 case Bytecodes::_f2l: // fall through
1404 case Bytecodes::_d2l: tos_out = ltos; break;
1405 case Bytecodes::_i2f: // fall through
1406 case Bytecodes::_l2f: // fall through
1407 case Bytecodes::_d2f: tos_out = ftos; break;
1408 case Bytecodes::_i2d: // fall through
1409 case Bytecodes::_l2d: // fall through
1410 case Bytecodes::_f2d: tos_out = dtos; break;
1411 default : ShouldNotReachHere();
1412 }
1413 transition(tos_in, tos_out);
1414 #endif
1415
1416
1417 // Conversion
1418 Label done;
1419 switch (bytecode()) {
1420 case Bytecodes::_i2l:
1421 #ifdef _LP64
1422 // Sign extend the 32 bits
1423 __ sra ( Otos_i, 0, Otos_l );
1424 #else
1425 __ addcc(Otos_i, 0, Otos_l2);
1426 __ br(Assembler::greaterEqual, true, Assembler::pt, done);
1427 __ delayed()->clr(Otos_l1);
1428 __ set(~0, Otos_l1);
1429 #endif
1430 break;
1431
1432 case Bytecodes::_i2f:
1433 __ st(Otos_i, __ d_tmp );
1434 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1435 __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1436 break;
1437
1438 case Bytecodes::_i2d:
1439 __ st(Otos_i, __ d_tmp);
1440 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1441 __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1442 break;
1443
1444 case Bytecodes::_i2b:
1445 __ sll(Otos_i, 24, Otos_i);
1446 __ sra(Otos_i, 24, Otos_i);
1447 break;
1448
1449 case Bytecodes::_i2c:
1450 __ sll(Otos_i, 16, Otos_i);
1451 __ srl(Otos_i, 16, Otos_i);
1452 break;
1453
1454 case Bytecodes::_i2s:
1455 __ sll(Otos_i, 16, Otos_i);
1456 __ sra(Otos_i, 16, Otos_i);
1457 break;
1458
1459 case Bytecodes::_l2i:
1460 #ifndef _LP64
1461 __ mov(Otos_l2, Otos_i);
1462 #else
1463 // Sign-extend into the high 32 bits
1464 __ sra(Otos_l, 0, Otos_i);
1465 #endif
1466 break;
1467
1468 case Bytecodes::_l2f:
1469 case Bytecodes::_l2d:
1470 __ st_long(Otos_l, __ d_tmp);
1471 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1472
1473 if (bytecode() == Bytecodes::_l2f) {
1474 __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1475 } else {
1476 __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1477 }
1478 break;
1479
1480 case Bytecodes::_f2i: {
1481 Label isNaN;
1482 // result must be 0 if value is NaN; test by comparing value to itself
1483 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1484 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1485 __ delayed()->clr(Otos_i); // NaN
1486 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1487 __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1488 __ ld(__ d_tmp, Otos_i);
1489 __ bind(isNaN);
1490 }
1491 break;
1492
1493 case Bytecodes::_f2l:
1494 // must uncache tos
1495 __ push_f();
1496 #ifdef _LP64
1497 __ pop_f(F1);
1498 #else
1499 __ pop_i(O0);
1500 #endif
1501 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1502 break;
1503
1504 case Bytecodes::_f2d:
1505 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1506 break;
1507
1508 case Bytecodes::_d2i:
1509 case Bytecodes::_d2l:
1510 // must uncache tos
1511 __ push_d();
1512 #ifdef _LP64
1513 // LP64 calling conventions pass first double arg in D0
1514 __ pop_d( Ftos_d );
1515 #else
1516 __ pop_i( O0 );
1517 __ pop_i( O1 );
1518 #endif
1519 __ call_VM_leaf(Lscratch,
1520 bytecode() == Bytecodes::_d2i
1521 ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1522 : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1523 break;
1524
1525 case Bytecodes::_d2f:
1526 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1527 break;
1528
1529 default: ShouldNotReachHere();
1530 }
1531 __ bind(done);
1532 }
1533
1534
1535 void TemplateTable::lcmp() {
1536 transition(ltos, itos);
1537
1538 #ifdef _LP64
1539 __ pop_l(O1); // pop off value 1, value 2 is in O0
1540 __ lcmp( O1, Otos_l, Otos_i );
1541 #else
1542 __ pop_l(O2); // cmp O2,3 to O0,1
1543 __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
1544 #endif
1545 }
1546
1547
1548 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1549
1550 if (is_float) __ pop_f(F2);
1551 else __ pop_d(F2);
1552
1553 assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");
1554
1555 __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1556 }
1557
1558 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1559 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1560 __ verify_thread();
1561
1562 const Register O2_bumped_count = O2;
1563 __ profile_taken_branch(G3_scratch, O2_bumped_count);
1564
1565 // get (wide) offset to O1_disp
1566 const Register O1_disp = O1;
1567 if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
1568 else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
1569
1570 // Handle all the JSR stuff here, then exit.
1571 // It's much shorter and cleaner than intermingling with the
1572 // non-JSR normal-branch stuff occurring below.
1573 if( is_jsr ) {
1574 // compute return address as bci in Otos_i
1575 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1576 __ sub(Lbcp, G3_scratch, G3_scratch);
1577 __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
1578
1579 // Bump Lbcp to target of JSR
1580 __ add(Lbcp, O1_disp, Lbcp);
1581 // Push returnAddress for "ret" on stack
1582 __ push_ptr(Otos_i);
1583 // And away we go!
1584 __ dispatch_next(vtos);
1585 return;
1586 }
1587
1588 // Normal (non-jsr) branch handling
1589
1590 // Save the current Lbcp
1591 const Register l_cur_bcp = Lscratch;
1592 __ mov( Lbcp, l_cur_bcp );
1593
1594 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1595 if ( increment_invocation_counter_for_backward_branches ) {
1596 Label Lforward;
1597 // check branch direction
1598 __ br( Assembler::positive, false, Assembler::pn, Lforward );
1599 // Bump bytecode pointer by displacement (take the branch)
1600 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
1601
1602 const Register G3_method_counters = G3_scratch;
1603 __ get_method_counters(Lmethod, G3_method_counters, Lforward);
1604
1605 if (TieredCompilation) {
1606 Label Lno_mdo, Loverflow;
1607 int increment = InvocationCounter::count_increment;
1608 if (ProfileInterpreter) {
1609 // If no method data exists, go to profile_continue.
1610 __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
1611 __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
1612
1613 // Increment backedge counter in the MDO
1614 Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
1615 in_bytes(InvocationCounter::counter_offset()));
1616 Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
1617 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
1618 Assembler::notZero, &Lforward);
1619 __ ba_short(Loverflow);
1620 }
1621
1622 // If there's no MDO, increment counter in MethodCounters*
1623 __ bind(Lno_mdo);
1624 Address backedge_counter(G3_method_counters,
1625 in_bytes(MethodCounters::backedge_counter_offset()) +
1626 in_bytes(InvocationCounter::counter_offset()));
1627 Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
1628 __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
1629 Assembler::notZero, &Lforward);
1630 __ bind(Loverflow);
1631
1632 // notify point for loop, pass branch bytecode
1633 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);
1634
1635 // Was an OSR adapter generated?
1636 // O0 = osr nmethod
1637 __ br_null_short(O0, Assembler::pn, Lforward);
1638
1639 // Has the nmethod been invalidated already?
1640 __ ldub(O0, nmethod::state_offset(), O2);
1641 __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);
1642
1643 // migrate the interpreter frame off of the stack
1644
1645 __ mov(G2_thread, L7);
1646 // save nmethod
1647 __ mov(O0, L6);
1648 __ set_last_Java_frame(SP, noreg);
1649 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
1650 __ reset_last_Java_frame();
1651 __ mov(L7, G2_thread);
1652
1653 // move OSR nmethod to I1
1654 __ mov(L6, I1);
1655
1656 // OSR buffer to I0
1657 __ mov(O0, I0);
1658
1659 // remove the interpreter frame
1660 __ restore(I5_savedSP, 0, SP);
1661
1662 // Jump to the osr code.
1663 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1664 __ jmp(O2, G0);
1665 __ delayed()->nop();
1666
1667 } else { // not TieredCompilation
1668 // Update Backedge branch separately from invocations
1669 const Register G4_invoke_ctr = G4;
1670 __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
1671 if (ProfileInterpreter) {
1672 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
1673 if (UseOnStackReplacement) {
1674
1675 __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
1676 }
1677 } else {
1678 if (UseOnStackReplacement) {
1679 __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
1680 }
1681 }
1682 }
1683
1684 __ bind(Lforward);
1685 } else
1686 // Bump bytecode pointer by displacement (take the branch)
1687 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
1688
1689 // continue with bytecode @ target
1690 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1691 // %%%%% and changing dispatch_next to dispatch_only
1692 __ dispatch_next(vtos);
1693 }
1694
1695
1696 // Note Condition in argument is TemplateTable::Condition
1697 // arg scope is within class scope
1698
1699 void TemplateTable::if_0cmp(Condition cc) {
1700 // no pointers, integer only!
1701 transition(itos, vtos);
1702 // assume branch is more often taken than not (loops use backward branches)
1703 __ cmp( Otos_i, 0);
1704 __ if_cmp(ccNot(cc), false);
1705 }
1706
1707
1708 void TemplateTable::if_icmp(Condition cc) {
1709 transition(itos, vtos);
1710 __ pop_i(O1);
1711 __ cmp(O1, Otos_i);
1712 __ if_cmp(ccNot(cc), false);
1713 }
1714
1715
1716 void TemplateTable::if_nullcmp(Condition cc) {
1717 transition(atos, vtos);
1718 __ tst(Otos_i);
1719 __ if_cmp(ccNot(cc), true);
1720 }
1721
1722
1723 void TemplateTable::if_acmp(Condition cc) {
1724 transition(atos, vtos);
1725 __ pop_ptr(O1);
1726 __ verify_oop(O1);
1727 __ verify_oop(Otos_i);
1728 __ cmp(O1, Otos_i);
1729 __ if_cmp(ccNot(cc), true);
1730 }
1731
1732
1733
1734 void TemplateTable::ret() {
1735 transition(vtos, vtos);
1736 locals_index(G3_scratch);
1737 __ access_local_returnAddress(G3_scratch, Otos_i);
1738 // Otos_i contains the bci, compute the bcp from that
1739
1740 #ifdef _LP64
1741 #ifdef ASSERT
1742 // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1743 // the result. The return address (really a BCI) was stored with an
1744 // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
1745 // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1746 // loaded value.
1747 { Label zzz ;
1748 __ set (65536, G3_scratch) ;
1749 __ cmp (Otos_i, G3_scratch) ;
1750 __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1751 __ delayed()->nop();
1752 __ stop("BCI is in the wrong register half?");
1753 __ bind (zzz) ;
1754 }
1755 #endif
1756 #endif
1757
1758 __ profile_ret(vtos, Otos_i, G4_scratch);
1759
1760 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1761 __ add(G3_scratch, Otos_i, G3_scratch);
1762 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1763 __ dispatch_next(vtos);
1764 }
1765
1766
1767 void TemplateTable::wide_ret() {
1768 transition(vtos, vtos);
1769 locals_index_wide(G3_scratch);
1770 __ access_local_returnAddress(G3_scratch, Otos_i);
1771 // Otos_i contains the bci, compute the bcp from that
1772
1773 __ profile_ret(vtos, Otos_i, G4_scratch);
1774
1775 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1776 __ add(G3_scratch, Otos_i, G3_scratch);
1777 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1778 __ dispatch_next(vtos);
1779 }
1780
1781
1782 void TemplateTable::tableswitch() {
1783 transition(itos, vtos);
1784 Label default_case, continue_execution;
1785
1786 // align bcp
1787 __ add(Lbcp, BytesPerInt, O1);
1788 __ and3(O1, -BytesPerInt, O1);
1789 // load lo, hi
1790 __ ld(O1, 1 * BytesPerInt, O2); // Low Byte
1791 __ ld(O1, 2 * BytesPerInt, O3); // High Byte
1792 #ifdef _LP64
1793 // Sign extend the 32 bits
1794 __ sra ( Otos_i, 0, Otos_i );
1795 #endif /* _LP64 */
1796
1797 // check against lo & hi
1798 __ cmp( Otos_i, O2);
1799 __ br( Assembler::less, false, Assembler::pn, default_case);
1800 __ delayed()->cmp( Otos_i, O3 );
1801 __ br( Assembler::greater, false, Assembler::pn, default_case);
1802 // lookup dispatch offset
1803 __ delayed()->sub(Otos_i, O2, O2);
1804 __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1805 __ sll(O2, LogBytesPerInt, O2);
1806 __ add(O2, 3 * BytesPerInt, O2);
1807 __ ba(continue_execution);
1808 __ delayed()->ld(O1, O2, O2);
1809 // handle default
1810 __ bind(default_case);
1811 __ profile_switch_default(O3);
1812 __ ld(O1, 0, O2); // get default offset
1813 // continue execution
1814 __ bind(continue_execution);
1815 __ add(Lbcp, O2, Lbcp);
1816 __ dispatch_next(vtos);
1817 }
1818
1819
1820 void TemplateTable::lookupswitch() {
1821 transition(itos, itos);
1822 __ stop("lookupswitch bytecode should have been rewritten");
1823 }
1824
1825 void TemplateTable::fast_linearswitch() {
1826 transition(itos, vtos);
1827 Label loop_entry, loop, found, continue_execution;
1828 // align bcp
1829 __ add(Lbcp, BytesPerInt, O1);
1830 __ and3(O1, -BytesPerInt, O1);
1831 // set counter
1832 __ ld(O1, BytesPerInt, O2);
1833 __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
1834 __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
1835 __ ba(loop_entry);
1836 __ delayed()->add(O3, O2, O2); // counter now points past last pair
1837
1838 // table search
1839 __ bind(loop);
1840 __ cmp(O4, Otos_i);
1841 __ br(Assembler::equal, true, Assembler::pn, found);
1842 __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
1843 __ inc(O3, 2 * BytesPerInt);
1844
1845 __ bind(loop_entry);
1846 __ cmp(O2, O3);
1847 __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
1848 __ delayed()->ld(O3, 0, O4);
1849
1850 // default case
1851 __ ld(O1, 0, O4); // get default offset
1852 if (ProfileInterpreter) {
1853 __ profile_switch_default(O3);
1854 __ ba_short(continue_execution);
1855 }
1856
1857 // entry found -> get offset
1858 __ bind(found);
1859 if (ProfileInterpreter) {
1860 __ sub(O3, O1, O3);
1861 __ sub(O3, 2*BytesPerInt, O3);
1862 __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
1863 __ profile_switch_case(O3, O1, O2, G3_scratch);
1864
1865 __ bind(continue_execution);
1866 }
1867 __ add(Lbcp, O4, Lbcp);
1868 __ dispatch_next(vtos);
1869 }
1870
1871
1872 void TemplateTable::fast_binaryswitch() {
1873 transition(itos, vtos);
1874 // Implementation using the following core algorithm: (copied from Intel)
1875 //
1876 // int binary_search(int key, LookupswitchPair* array, int n) {
1877 // // Binary search according to "Methodik des Programmierens" by
1878 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1879 // int i = 0;
1880 // int j = n;
1881 // while (i+1 < j) {
1882 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1883 // // with Q: for all i: 0 <= i < n: key < a[i]
1884 // // where a stands for the array and assuming that the (inexisting)
1885 // // element a[n] is infinitely big.
1886 // int h = (i + j) >> 1;
1887 // // i < h < j
1888 // if (key < array[h].fast_match()) {
1889 // j = h;
1890 // } else {
1891 // i = h;
1892 // }
1893 // }
1894 // // R: a[i] <= key < a[i+1] or Q
1895 // // (i.e., if key is within array, i is the correct index)
1896 // return i;
1897 // }
1898
1899 // register allocation
1900 assert(Otos_i == O0, "alias checking");
1901 const Register Rkey = Otos_i; // already set (tosca)
1902 const Register Rarray = O1;
1903 const Register Ri = O2;
1904 const Register Rj = O3;
1905 const Register Rh = O4;
1906 const Register Rscratch = O5;
1907
1908 const int log_entry_size = 3;
1909 const int entry_size = 1 << log_entry_size;
1910
1911 Label found;
1912 // Find Array start
1913 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1914 __ and3(Rarray, -BytesPerInt, Rarray);
1915 // initialize i & j (in delay slot)
1916 __ clr( Ri );
1917
1918 // and start
1919 Label entry;
1920 __ ba(entry);
1921 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1922 // (Rj is already in the native byte-ordering.)
1923
1924 // binary search loop
1925 { Label loop;
1926 __ bind( loop );
1927 // int h = (i + j) >> 1;
1928 __ sra( Rh, 1, Rh );
1929 // if (key < array[h].fast_match()) {
1930 // j = h;
1931 // } else {
1932 // i = h;
1933 // }
1934 __ sll( Rh, log_entry_size, Rscratch );
1935 __ ld( Rarray, Rscratch, Rscratch );
1936 // (Rscratch is already in the native byte-ordering.)
1937 __ cmp( Rkey, Rscratch );
1938 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1939 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1940
1941 // while (i+1 < j)
1942 __ bind( entry );
1943 __ add( Ri, 1, Rscratch );
1944 __ cmp(Rscratch, Rj);
1945 __ br( Assembler::less, true, Assembler::pt, loop );
1946 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1;
1947 }
1948
1949 // end of binary search, result index is i (must check again!)
1950 Label default_case;
1951 Label continue_execution;
1952 if (ProfileInterpreter) {
1953 __ mov( Ri, Rh ); // Save index in i for profiling
1954 }
1955 __ sll( Ri, log_entry_size, Ri );
1956 __ ld( Rarray, Ri, Rscratch );
1957 // (Rscratch is already in the native byte-ordering.)
1958 __ cmp( Rkey, Rscratch );
1959 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1960 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1961
1962 // entry found -> j = offset
1963 __ inc( Ri, BytesPerInt );
1964 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1965 __ ld( Rarray, Ri, Rj );
1966 // (Rj is already in the native byte-ordering.)
1967
1968 if (ProfileInterpreter) {
1969 __ ba_short(continue_execution);
1970 }
1971
1972 __ bind(default_case); // fall through (if not profiling)
1973 __ profile_switch_default(Ri);
1974
1975 __ bind(continue_execution);
1976 __ add( Lbcp, Rj, Lbcp );
1977 __ dispatch_next( vtos );
1978 }
1979
1980
1981 void TemplateTable::_return(TosState state) {
1982 transition(state, state);
1983 assert(_desc->calls_vm(), "inconsistent calls_vm information");
1984
1985 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1986 assert(state == vtos, "only valid state");
1987 __ mov(G0, G3_scratch);
1988 __ access_local_ptr(G3_scratch, Otos_i);
1989 __ load_klass(Otos_i, O2);
1990 __ set(JVM_ACC_HAS_FINALIZER, G3);
1991 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
1992 __ andcc(G3, O2, G0);
1993 Label skip_register_finalizer;
1994 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
1995 __ delayed()->nop();
1996
1997 // Call out to do finalizer registration
1998 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
1999
2000 __ bind(skip_register_finalizer);
2001 }
2002
2003 __ remove_activation(state, /* throw_monitor_exception */ true);
2004
2005 // The caller's SP was adjusted upon method entry to accomodate
2006 // the callee's non-argument locals. Undo that adjustment.
2007 __ ret(); // return to caller
2008 __ delayed()->restore(I5_savedSP, G0, SP);
2009 }
2010
2011
2012 // ----------------------------------------------------------------------------
2013 // Volatile variables demand their effects be made known to all CPU's in
2014 // order. Store buffers on most chips allow reads & writes to reorder; the
2015 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2016 // memory barrier (i.e., it's not sufficient that the interpreter does not
2017 // reorder volatile references, the hardware also must not reorder them).
2018 //
2019 // According to the new Java Memory Model (JMM):
2020 // (1) All volatiles are serialized wrt to each other.
2021 // ALSO reads & writes act as aquire & release, so:
2022 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2023 // the read float up to before the read. It's OK for non-volatile memory refs
2024 // that happen before the volatile read to float down below it.
2025 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
2026 // that happen BEFORE the write float down to after the write. It's OK for
2027 // non-volatile memory refs that happen after the volatile write to float up
2028 // before it.
2029 //
2030 // We only put in barriers around volatile refs (they are expensive), not
2031 // _between_ memory refs (that would require us to track the flavor of the
2032 // previous memory refs). Requirements (2) and (3) require some barriers
2033 // before volatile stores and after volatile loads. These nearly cover
2034 // requirement (1) but miss the volatile-store-volatile-load case. This final
2035 // case is placed after volatile-stores although it could just as well go
2036 // before volatile-loads.
2037 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2038 // Helper function to insert a is-volatile test and memory barrier
2039 // All current sparc implementations run in TSO, needing only StoreLoad
2040 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2041 __ membar( order_constraint );
2042 }
2043
2044 // ----------------------------------------------------------------------------
2045 void TemplateTable::resolve_cache_and_index(int byte_no,
2046 Register Rcache,
2047 Register index,
2048 size_t index_size) {
2049 // Depends on cpCacheOop layout!
2050 Label resolved;
2051
2052 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2053 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2054 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2055 __ br(Assembler::equal, false, Assembler::pt, resolved);
2056 __ delayed()->set((int)bytecode(), O1);
2057
2058 address entry;
2059 switch (bytecode()) {
2060 case Bytecodes::_getstatic : // fall through
2061 case Bytecodes::_putstatic : // fall through
2062 case Bytecodes::_getfield : // fall through
2063 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2064 case Bytecodes::_invokevirtual : // fall through
2065 case Bytecodes::_invokespecial : // fall through
2066 case Bytecodes::_invokestatic : // fall through
2067 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2068 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2069 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2070 default:
2071 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2072 break;
2073 }
2074 // first time invocation - must resolve first
2075 __ call_VM(noreg, entry, O1);
2076 // Update registers with resolved info
2077 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2078 __ bind(resolved);
2079 }
2080
2081 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2082 Register method,
2083 Register itable_index,
2084 Register flags,
2085 bool is_invokevirtual,
2086 bool is_invokevfinal,
2087 bool is_invokedynamic) {
2088 // Uses both G3_scratch and G4_scratch
2089 Register cache = G3_scratch;
2090 Register index = G4_scratch;
2091 assert_different_registers(cache, method, itable_index);
2092
2093 // determine constant pool cache field offsets
2094 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2095 const int method_offset = in_bytes(
2096 ConstantPoolCache::base_offset() +
2097 ((byte_no == f2_byte)
2098 ? ConstantPoolCacheEntry::f2_offset()
2099 : ConstantPoolCacheEntry::f1_offset()
2100 )
2101 );
2102 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2103 ConstantPoolCacheEntry::flags_offset());
2104 // access constant pool cache fields
2105 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2106 ConstantPoolCacheEntry::f2_offset());
2107
2108 if (is_invokevfinal) {
2109 __ get_cache_and_index_at_bcp(cache, index, 1);
2110 __ ld_ptr(Address(cache, method_offset), method);
2111 } else {
2112 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2113 resolve_cache_and_index(byte_no, cache, index, index_size);
2114 __ ld_ptr(Address(cache, method_offset), method);
2115 }
2116
2117 if (itable_index != noreg) {
2118 // pick up itable or appendix index from f2 also:
2119 __ ld_ptr(Address(cache, index_offset), itable_index);
2120 }
2121 __ ld_ptr(Address(cache, flags_offset), flags);
2122 }
2123
2124 // The Rcache register must be set before call
2125 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2126 Register Rcache,
2127 Register index,
2128 Register Roffset,
2129 Register Rflags,
2130 bool is_static) {
2131 assert_different_registers(Rcache, Rflags, Roffset);
2132
2133 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2134
2135 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2136 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2137 if (is_static) {
2138 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2139 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2140 __ ld_ptr( Robj, mirror_offset, Robj);
2141 }
2142 }
2143
2144 // The registers Rcache and index expected to be set before call.
2145 // Correct values of the Rcache and index registers are preserved.
2146 void TemplateTable::jvmti_post_field_access(Register Rcache,
2147 Register index,
2148 bool is_static,
2149 bool has_tos) {
2150 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2151
2152 if (JvmtiExport::can_post_field_access()) {
2153 // Check to see if a field access watch has been set before we take
2154 // the time to call into the VM.
2155 Label Label1;
2156 assert_different_registers(Rcache, index, G1_scratch);
2157 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2158 __ load_contents(get_field_access_count_addr, G1_scratch);
2159 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2160
2161 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2162
2163 if (is_static) {
2164 __ clr(Otos_i);
2165 } else {
2166 if (has_tos) {
2167 // save object pointer before call_VM() clobbers it
2168 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2169 } else {
2170 // Load top of stack (do not pop the value off the stack);
2171 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2172 }
2173 __ verify_oop(Otos_i);
2174 }
2175 // Otos_i: object pointer or NULL if static
2176 // Rcache: cache entry pointer
2177 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2178 Otos_i, Rcache);
2179 if (!is_static && has_tos) {
2180 __ pop_ptr(Otos_i); // restore object pointer
2181 __ verify_oop(Otos_i);
2182 }
2183 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2184 __ bind(Label1);
2185 }
2186 }
2187
2188 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2189 transition(vtos, vtos);
2190
2191 Register Rcache = G3_scratch;
2192 Register index = G4_scratch;
2193 Register Rclass = Rcache;
2194 Register Roffset= G4_scratch;
2195 Register Rflags = G1_scratch;
2196 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2197
2198 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2199 jvmti_post_field_access(Rcache, index, is_static, false);
2200 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2201
2202 if (!is_static) {
2203 pop_and_check_object(Rclass);
2204 } else {
2205 __ verify_oop(Rclass);
2206 }
2207
2208 Label exit;
2209
2210 Assembler::Membar_mask_bits membar_bits =
2211 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2212
2213 if (__ membar_has_effect(membar_bits)) {
2214 // Get volatile flag
2215 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2216 __ and3(Rflags, Lscratch, Lscratch);
2217 }
2218
2219 Label checkVolatile;
2220
2221 // compute field type
2222 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2223 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2224 // Make sure we don't need to mask Rflags after the above shift
2225 ConstantPoolCacheEntry::verify_tos_state_shift();
2226
2227 // Check atos before itos for getstatic, more likely (in Queens at least)
2228 __ cmp(Rflags, atos);
2229 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2230 __ delayed() ->cmp(Rflags, itos);
2231
2232 // atos
2233 __ load_heap_oop(Rclass, Roffset, Otos_i);
2234 __ verify_oop(Otos_i);
2235 __ push(atos);
2236 if (!is_static) {
2237 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2238 }
2239 __ ba(checkVolatile);
2240 __ delayed()->tst(Lscratch);
2241
2242 __ bind(notObj);
2243
2244 // cmp(Rflags, itos);
2245 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2246 __ delayed() ->cmp(Rflags, ltos);
2247
2248 // itos
2249 __ ld(Rclass, Roffset, Otos_i);
2250 __ push(itos);
2251 if (!is_static) {
2252 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2253 }
2254 __ ba(checkVolatile);
2255 __ delayed()->tst(Lscratch);
2256
2257 __ bind(notInt);
2258
2259 // cmp(Rflags, ltos);
2260 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2261 __ delayed() ->cmp(Rflags, btos);
2262
2263 // ltos
2264 // load must be atomic
2265 __ ld_long(Rclass, Roffset, Otos_l);
2266 __ push(ltos);
2267 if (!is_static) {
2268 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2269 }
2270 __ ba(checkVolatile);
2271 __ delayed()->tst(Lscratch);
2272
2273 __ bind(notLong);
2274
2275 // cmp(Rflags, btos);
2276 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2277 __ delayed() ->cmp(Rflags, ctos);
2278
2279 // btos
2280 __ ldsb(Rclass, Roffset, Otos_i);
2281 __ push(itos);
2282 if (!is_static) {
2283 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2284 }
2285 __ ba(checkVolatile);
2286 __ delayed()->tst(Lscratch);
2287
2288 __ bind(notByte);
2289
2290 // cmp(Rflags, ctos);
2291 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2292 __ delayed() ->cmp(Rflags, stos);
2293
2294 // ctos
2295 __ lduh(Rclass, Roffset, Otos_i);
2296 __ push(itos);
2297 if (!is_static) {
2298 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2299 }
2300 __ ba(checkVolatile);
2301 __ delayed()->tst(Lscratch);
2302
2303 __ bind(notChar);
2304
2305 // cmp(Rflags, stos);
2306 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2307 __ delayed() ->cmp(Rflags, ftos);
2308
2309 // stos
2310 __ ldsh(Rclass, Roffset, Otos_i);
2311 __ push(itos);
2312 if (!is_static) {
2313 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2314 }
2315 __ ba(checkVolatile);
2316 __ delayed()->tst(Lscratch);
2317
2318 __ bind(notShort);
2319
2320
2321 // cmp(Rflags, ftos);
2322 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2323 __ delayed() ->tst(Lscratch);
2324
2325 // ftos
2326 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2327 __ push(ftos);
2328 if (!is_static) {
2329 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2330 }
2331 __ ba(checkVolatile);
2332 __ delayed()->tst(Lscratch);
2333
2334 __ bind(notFloat);
2335
2336
2337 // dtos
2338 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2339 __ push(dtos);
2340 if (!is_static) {
2341 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2342 }
2343
2344 __ bind(checkVolatile);
2345 if (__ membar_has_effect(membar_bits)) {
2346 // __ tst(Lscratch); executed in delay slot
2347 __ br(Assembler::zero, false, Assembler::pt, exit);
2348 __ delayed()->nop();
2349 volatile_barrier(membar_bits);
2350 }
2351
2352 __ bind(exit);
2353 }
2354
2355
2356 void TemplateTable::getfield(int byte_no) {
2357 getfield_or_static(byte_no, false);
2358 }
2359
2360 void TemplateTable::getstatic(int byte_no) {
2361 getfield_or_static(byte_no, true);
2362 }
2363
2364
2365 void TemplateTable::fast_accessfield(TosState state) {
2366 transition(atos, state);
2367 Register Rcache = G3_scratch;
2368 Register index = G4_scratch;
2369 Register Roffset = G4_scratch;
2370 Register Rflags = Rcache;
2371 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2372
2373 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2374 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2375
2376 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2377
2378 __ null_check(Otos_i);
2379 __ verify_oop(Otos_i);
2380
2381 Label exit;
2382
2383 Assembler::Membar_mask_bits membar_bits =
2384 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2385 if (__ membar_has_effect(membar_bits)) {
2386 // Get volatile flag
2387 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2388 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2389 }
2390
2391 switch (bytecode()) {
2392 case Bytecodes::_fast_bgetfield:
2393 __ ldsb(Otos_i, Roffset, Otos_i);
2394 break;
2395 case Bytecodes::_fast_cgetfield:
2396 __ lduh(Otos_i, Roffset, Otos_i);
2397 break;
2398 case Bytecodes::_fast_sgetfield:
2399 __ ldsh(Otos_i, Roffset, Otos_i);
2400 break;
2401 case Bytecodes::_fast_igetfield:
2402 __ ld(Otos_i, Roffset, Otos_i);
2403 break;
2404 case Bytecodes::_fast_lgetfield:
2405 __ ld_long(Otos_i, Roffset, Otos_l);
2406 break;
2407 case Bytecodes::_fast_fgetfield:
2408 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2409 break;
2410 case Bytecodes::_fast_dgetfield:
2411 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2412 break;
2413 case Bytecodes::_fast_agetfield:
2414 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2415 break;
2416 default:
2417 ShouldNotReachHere();
2418 }
2419
2420 if (__ membar_has_effect(membar_bits)) {
2421 __ btst(Lscratch, Rflags);
2422 __ br(Assembler::zero, false, Assembler::pt, exit);
2423 __ delayed()->nop();
2424 volatile_barrier(membar_bits);
2425 __ bind(exit);
2426 }
2427
2428 if (state == atos) {
2429 __ verify_oop(Otos_i); // does not blow flags!
2430 }
2431 }
2432
2433 void TemplateTable::jvmti_post_fast_field_mod() {
2434 if (JvmtiExport::can_post_field_modification()) {
2435 // Check to see if a field modification watch has been set before we take
2436 // the time to call into the VM.
2437 Label done;
2438 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2439 __ load_contents(get_field_modification_count_addr, G4_scratch);
2440 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2441 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2442 __ verify_oop(G4_scratch);
2443 __ push_ptr(G4_scratch); // put the object pointer back on tos
2444 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2445 // Save tos values before call_VM() clobbers them. Since we have
2446 // to do it for every data type, we use the saved values as the
2447 // jvalue object.
2448 switch (bytecode()) { // save tos values before call_VM() clobbers them
2449 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2450 case Bytecodes::_fast_bputfield: // fall through
2451 case Bytecodes::_fast_sputfield: // fall through
2452 case Bytecodes::_fast_cputfield: // fall through
2453 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2454 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2455 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2456 // get words in right order for use as jvalue object
2457 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2458 }
2459 // setup pointer to jvalue object
2460 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2461 // G4_scratch: object pointer
2462 // G1_scratch: cache entry pointer
2463 // G3_scratch: jvalue object on the stack
2464 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2465 switch (bytecode()) { // restore tos values
2466 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2467 case Bytecodes::_fast_bputfield: // fall through
2468 case Bytecodes::_fast_sputfield: // fall through
2469 case Bytecodes::_fast_cputfield: // fall through
2470 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2471 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2472 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2473 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2474 }
2475 __ bind(done);
2476 }
2477 }
2478
2479 // The registers Rcache and index expected to be set before call.
2480 // The function may destroy various registers, just not the Rcache and index registers.
2481 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2482 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2483
2484 if (JvmtiExport::can_post_field_modification()) {
2485 // Check to see if a field modification watch has been set before we take
2486 // the time to call into the VM.
2487 Label Label1;
2488 assert_different_registers(Rcache, index, G1_scratch);
2489 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2490 __ load_contents(get_field_modification_count_addr, G1_scratch);
2491 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2492
2493 // The Rcache and index registers have been already set.
2494 // This allows to eliminate this call but the Rcache and index
2495 // registers must be correspondingly used after this line.
2496 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2497
2498 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2499 if (is_static) {
2500 // Life is simple. Null out the object pointer.
2501 __ clr(G4_scratch);
2502 } else {
2503 Register Rflags = G1_scratch;
2504 // Life is harder. The stack holds the value on top, followed by the
2505 // object. We don't know the size of the value, though; it could be
2506 // one or two words depending on its type. As a result, we must find
2507 // the type to determine where the object is.
2508
2509 Label two_word, valsizeknown;
2510 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2511 __ mov(Lesp, G4_scratch);
2512 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2513 // Make sure we don't need to mask Rflags after the above shift
2514 ConstantPoolCacheEntry::verify_tos_state_shift();
2515 __ cmp(Rflags, ltos);
2516 __ br(Assembler::equal, false, Assembler::pt, two_word);
2517 __ delayed()->cmp(Rflags, dtos);
2518 __ br(Assembler::equal, false, Assembler::pt, two_word);
2519 __ delayed()->nop();
2520 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2521 __ ba_short(valsizeknown);
2522 __ bind(two_word);
2523
2524 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2525
2526 __ bind(valsizeknown);
2527 // setup object pointer
2528 __ ld_ptr(G4_scratch, 0, G4_scratch);
2529 __ verify_oop(G4_scratch);
2530 }
2531 // setup pointer to jvalue object
2532 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2533 // G4_scratch: object pointer or NULL if static
2534 // G3_scratch: cache entry pointer
2535 // G1_scratch: jvalue object on the stack
2536 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2537 G4_scratch, G3_scratch, G1_scratch);
2538 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2539 __ bind(Label1);
2540 }
2541 }
2542
2543 void TemplateTable::pop_and_check_object(Register r) {
2544 __ pop_ptr(r);
2545 __ null_check(r); // for field access must check obj.
2546 __ verify_oop(r);
2547 }
2548
2549 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2550 transition(vtos, vtos);
2551 Register Rcache = G3_scratch;
2552 Register index = G4_scratch;
2553 Register Rclass = Rcache;
2554 Register Roffset= G4_scratch;
2555 Register Rflags = G1_scratch;
2556 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2557
2558 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2559 jvmti_post_field_mod(Rcache, index, is_static);
2560 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2561
2562 Assembler::Membar_mask_bits read_bits =
2563 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2564 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2565
2566 Label notVolatile, checkVolatile, exit;
2567 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2568 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2569 __ and3(Rflags, Lscratch, Lscratch);
2570
2571 if (__ membar_has_effect(read_bits)) {
2572 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2573 volatile_barrier(read_bits);
2574 __ bind(notVolatile);
2575 }
2576 }
2577
2578 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2579 // Make sure we don't need to mask Rflags after the above shift
2580 ConstantPoolCacheEntry::verify_tos_state_shift();
2581
2582 // compute field type
2583 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2584
2585 if (is_static) {
2586 // putstatic with object type most likely, check that first
2587 __ cmp(Rflags, atos);
2588 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2589 __ delayed()->cmp(Rflags, itos);
2590
2591 // atos
2592 {
2593 __ pop_ptr();
2594 __ verify_oop(Otos_i);
2595 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2596 __ ba(checkVolatile);
2597 __ delayed()->tst(Lscratch);
2598 }
2599
2600 __ bind(notObj);
2601 // cmp(Rflags, itos);
2602 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2603 __ delayed()->cmp(Rflags, btos);
2604
2605 // itos
2606 {
2607 __ pop_i();
2608 __ st(Otos_i, Rclass, Roffset);
2609 __ ba(checkVolatile);
2610 __ delayed()->tst(Lscratch);
2611 }
2612
2613 __ bind(notInt);
2614 } else {
2615 // putfield with int type most likely, check that first
2616 __ cmp(Rflags, itos);
2617 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2618 __ delayed()->cmp(Rflags, atos);
2619
2620 // itos
2621 {
2622 __ pop_i();
2623 pop_and_check_object(Rclass);
2624 __ st(Otos_i, Rclass, Roffset);
2625 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2626 __ ba(checkVolatile);
2627 __ delayed()->tst(Lscratch);
2628 }
2629
2630 __ bind(notInt);
2631 // cmp(Rflags, atos);
2632 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2633 __ delayed()->cmp(Rflags, btos);
2634
2635 // atos
2636 {
2637 __ pop_ptr();
2638 pop_and_check_object(Rclass);
2639 __ verify_oop(Otos_i);
2640 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2641 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2642 __ ba(checkVolatile);
2643 __ delayed()->tst(Lscratch);
2644 }
2645
2646 __ bind(notObj);
2647 }
2648
2649 // cmp(Rflags, btos);
2650 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2651 __ delayed()->cmp(Rflags, ltos);
2652
2653 // btos
2654 {
2655 __ pop_i();
2656 if (!is_static) pop_and_check_object(Rclass);
2657 __ stb(Otos_i, Rclass, Roffset);
2658 if (!is_static) {
2659 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2660 }
2661 __ ba(checkVolatile);
2662 __ delayed()->tst(Lscratch);
2663 }
2664
2665 __ bind(notByte);
2666 // cmp(Rflags, ltos);
2667 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2668 __ delayed()->cmp(Rflags, ctos);
2669
2670 // ltos
2671 {
2672 __ pop_l();
2673 if (!is_static) pop_and_check_object(Rclass);
2674 __ st_long(Otos_l, Rclass, Roffset);
2675 if (!is_static) {
2676 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2677 }
2678 __ ba(checkVolatile);
2679 __ delayed()->tst(Lscratch);
2680 }
2681
2682 __ bind(notLong);
2683 // cmp(Rflags, ctos);
2684 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2685 __ delayed()->cmp(Rflags, stos);
2686
2687 // ctos (char)
2688 {
2689 __ pop_i();
2690 if (!is_static) pop_and_check_object(Rclass);
2691 __ sth(Otos_i, Rclass, Roffset);
2692 if (!is_static) {
2693 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2694 }
2695 __ ba(checkVolatile);
2696 __ delayed()->tst(Lscratch);
2697 }
2698
2699 __ bind(notChar);
2700 // cmp(Rflags, stos);
2701 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2702 __ delayed()->cmp(Rflags, ftos);
2703
2704 // stos (short)
2705 {
2706 __ pop_i();
2707 if (!is_static) pop_and_check_object(Rclass);
2708 __ sth(Otos_i, Rclass, Roffset);
2709 if (!is_static) {
2710 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2711 }
2712 __ ba(checkVolatile);
2713 __ delayed()->tst(Lscratch);
2714 }
2715
2716 __ bind(notShort);
2717 // cmp(Rflags, ftos);
2718 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2719 __ delayed()->nop();
2720
2721 // ftos
2722 {
2723 __ pop_f();
2724 if (!is_static) pop_and_check_object(Rclass);
2725 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2726 if (!is_static) {
2727 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2728 }
2729 __ ba(checkVolatile);
2730 __ delayed()->tst(Lscratch);
2731 }
2732
2733 __ bind(notFloat);
2734
2735 // dtos
2736 {
2737 __ pop_d();
2738 if (!is_static) pop_and_check_object(Rclass);
2739 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2740 if (!is_static) {
2741 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2742 }
2743 }
2744
2745 __ bind(checkVolatile);
2746 __ tst(Lscratch);
2747
2748 if (__ membar_has_effect(write_bits)) {
2749 // __ tst(Lscratch); in delay slot
2750 __ br(Assembler::zero, false, Assembler::pt, exit);
2751 __ delayed()->nop();
2752 volatile_barrier(Assembler::StoreLoad);
2753 __ bind(exit);
2754 }
2755 }
2756
2757 void TemplateTable::fast_storefield(TosState state) {
2758 transition(state, vtos);
2759 Register Rcache = G3_scratch;
2760 Register Rclass = Rcache;
2761 Register Roffset= G4_scratch;
2762 Register Rflags = G1_scratch;
2763 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2764
2765 jvmti_post_fast_field_mod();
2766
2767 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2768
2769 Assembler::Membar_mask_bits read_bits =
2770 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2771 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2772
2773 Label notVolatile, checkVolatile, exit;
2774 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2775 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2776 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2777 __ and3(Rflags, Lscratch, Lscratch);
2778 if (__ membar_has_effect(read_bits)) {
2779 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2780 volatile_barrier(read_bits);
2781 __ bind(notVolatile);
2782 }
2783 }
2784
2785 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2786 pop_and_check_object(Rclass);
2787
2788 switch (bytecode()) {
2789 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2790 case Bytecodes::_fast_cputfield: /* fall through */
2791 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2792 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2793 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2794 case Bytecodes::_fast_fputfield:
2795 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2796 break;
2797 case Bytecodes::_fast_dputfield:
2798 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2799 break;
2800 case Bytecodes::_fast_aputfield:
2801 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2802 break;
2803 default:
2804 ShouldNotReachHere();
2805 }
2806
2807 if (__ membar_has_effect(write_bits)) {
2808 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2809 volatile_barrier(Assembler::StoreLoad);
2810 __ bind(exit);
2811 }
2812 }
2813
2814
2815 void TemplateTable::putfield(int byte_no) {
2816 putfield_or_static(byte_no, false);
2817 }
2818
2819 void TemplateTable::putstatic(int byte_no) {
2820 putfield_or_static(byte_no, true);
2821 }
2822
2823
2824 void TemplateTable::fast_xaccess(TosState state) {
2825 transition(vtos, state);
2826 Register Rcache = G3_scratch;
2827 Register Roffset = G4_scratch;
2828 Register Rflags = G4_scratch;
2829 Register Rreceiver = Lscratch;
2830
2831 __ ld_ptr(Llocals, 0, Rreceiver);
2832
2833 // access constant pool cache (is resolved)
2834 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2835 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2836 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2837
2838 __ verify_oop(Rreceiver);
2839 __ null_check(Rreceiver);
2840 if (state == atos) {
2841 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2842 } else if (state == itos) {
2843 __ ld (Rreceiver, Roffset, Otos_i) ;
2844 } else if (state == ftos) {
2845 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2846 } else {
2847 ShouldNotReachHere();
2848 }
2849
2850 Assembler::Membar_mask_bits membar_bits =
2851 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2852 if (__ membar_has_effect(membar_bits)) {
2853
2854 // Get is_volatile value in Rflags and check if membar is needed
2855 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2856
2857 // Test volatile
2858 Label notVolatile;
2859 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2860 __ btst(Rflags, Lscratch);
2861 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2862 __ delayed()->nop();
2863 volatile_barrier(membar_bits);
2864 __ bind(notVolatile);
2865 }
2866
2867 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2868 __ sub(Lbcp, 1, Lbcp);
2869 }
2870
2871 //----------------------------------------------------------------------------------------------------
2872 // Calls
2873
2874 void TemplateTable::count_calls(Register method, Register temp) {
2875 // implemented elsewhere
2876 ShouldNotReachHere();
2877 }
2878
2879 void TemplateTable::prepare_invoke(int byte_no,
2880 Register method, // linked method (or i-klass)
2881 Register ra, // return address
2882 Register index, // itable index, MethodType, etc.
2883 Register recv, // if caller wants to see it
2884 Register flags // if caller wants to test it
2885 ) {
2886 // determine flags
2887 const Bytecodes::Code code = bytecode();
2888 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2889 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2890 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2891 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2892 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2893 const bool load_receiver = (recv != noreg);
2894 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2895 assert(recv == noreg || recv == O0, "");
2896 assert(flags == noreg || flags == O1, "");
2897
2898 // setup registers & access constant pool cache
2899 if (recv == noreg) recv = O0;
2900 if (flags == noreg) flags = O1;
2901 const Register temp = O2;
2902 assert_different_registers(method, ra, index, recv, flags, temp);
2903
2904 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2905
2906 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2907
2908 // maybe push appendix to arguments
2909 if (is_invokedynamic || is_invokehandle) {
2910 Label L_no_push;
2911 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
2912 __ btst(flags, temp);
2913 __ br(Assembler::zero, false, Assembler::pt, L_no_push);
2914 __ delayed()->nop();
2915 // Push the appendix as a trailing parameter.
2916 // This must be done before we get the receiver,
2917 // since the parameter_size includes it.
2918 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2919 __ load_resolved_reference_at_index(temp, index);
2920 __ verify_oop(temp);
2921 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
2922 __ bind(L_no_push);
2923 }
2924
2925 // load receiver if needed (after appendix is pushed so parameter size is correct)
2926 if (load_receiver) {
2927 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
2928 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
2929 __ verify_oop(recv);
2930 }
2931
2932 // compute return type
2933 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
2934 // Make sure we don't need to mask flags after the above shift
2935 ConstantPoolCacheEntry::verify_tos_state_shift();
2936 // load return address
2937 {
2938 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2939 AddressLiteral table(table_addr);
2940 __ set(table, temp);
2941 __ sll(ra, LogBytesPerWord, ra);
2942 __ ld_ptr(Address(temp, ra), ra);
2943 }
2944 }
2945
2946
2947 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2948 Register Rcall = Rindex;
2949 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2950
2951 // get target Method* & entry point
2952 __ lookup_virtual_method(Rrecv, Rindex, G5_method);
2953 __ profile_arguments_type(G5_method, Rcall, Gargs, true);
2954 __ call_from_interpreter(Rcall, Gargs, Rret);
2955 }
2956
2957 void TemplateTable::invokevirtual(int byte_no) {
2958 transition(vtos, vtos);
2959 assert(byte_no == f2_byte, "use this argument");
2960
2961 Register Rscratch = G3_scratch;
2962 Register Rtemp = G4_scratch;
2963 Register Rret = Lscratch;
2964 Register O0_recv = O0;
2965 Label notFinal;
2966
2967 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2968 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2969
2970 // Check for vfinal
2971 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
2972 __ btst(Rret, G4_scratch);
2973 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2974 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2975
2976 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2977
2978 invokevfinal_helper(Rscratch, Rret);
2979
2980 __ bind(notFinal);
2981
2982 __ mov(G5_method, Rscratch); // better scratch register
2983 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop
2984 // receiver is in O0_recv
2985 __ verify_oop(O0_recv);
2986
2987 // get return address
2988 AddressLiteral table(Interpreter::invoke_return_entry_table());
2989 __ set(table, Rtemp);
2990 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
2991 // Make sure we don't need to mask Rret after the above shift
2992 ConstantPoolCacheEntry::verify_tos_state_shift();
2993 __ sll(Rret, LogBytesPerWord, Rret);
2994 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2995
2996 // get receiver klass
2997 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
2998 __ load_klass(O0_recv, O0_recv);
2999 __ verify_klass_ptr(O0_recv);
3000
3001 __ profile_virtual_call(O0_recv, O4);
3002
3003 generate_vtable_call(O0_recv, Rscratch, Rret);
3004 }
3005
3006 void TemplateTable::fast_invokevfinal(int byte_no) {
3007 transition(vtos, vtos);
3008 assert(byte_no == f2_byte, "use this argument");
3009
3010 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3011 /*is_invokevfinal*/true, false);
3012 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3013 invokevfinal_helper(G3_scratch, Lscratch);
3014 }
3015
3016 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3017 Register Rtemp = G4_scratch;
3018
3019 // Load receiver from stack slot
3020 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
3021 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
3022 __ load_receiver(G4_scratch, O0);
3023
3024 // receiver NULL check
3025 __ null_check(O0);
3026
3027 __ profile_final_call(O4);
3028 __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
3029
3030 // get return address
3031 AddressLiteral table(Interpreter::invoke_return_entry_table());
3032 __ set(table, Rtemp);
3033 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3034 // Make sure we don't need to mask Rret after the above shift
3035 ConstantPoolCacheEntry::verify_tos_state_shift();
3036 __ sll(Rret, LogBytesPerWord, Rret);
3037 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3038
3039
3040 // do the call
3041 __ call_from_interpreter(Rscratch, Gargs, Rret);
3042 }
3043
3044
3045 void TemplateTable::invokespecial(int byte_no) {
3046 transition(vtos, vtos);
3047 assert(byte_no == f1_byte, "use this argument");
3048
3049 const Register Rret = Lscratch;
3050 const Register O0_recv = O0;
3051 const Register Rscratch = G3_scratch;
3052
3053 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check
3054 __ null_check(O0_recv);
3055
3056 // do the call
3057 __ profile_call(O4);
3058 __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3059 __ call_from_interpreter(Rscratch, Gargs, Rret);
3060 }
3061
3062
3063 void TemplateTable::invokestatic(int byte_no) {
3064 transition(vtos, vtos);
3065 assert(byte_no == f1_byte, "use this argument");
3066
3067 const Register Rret = Lscratch;
3068 const Register Rscratch = G3_scratch;
3069
3070 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method*
3071
3072 // do the call
3073 __ profile_call(O4);
3074 __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3075 __ call_from_interpreter(Rscratch, Gargs, Rret);
3076 }
3077
3078 void TemplateTable::invokeinterface_object_method(Register RKlass,
3079 Register Rcall,
3080 Register Rret,
3081 Register Rflags) {
3082 Register Rscratch = G4_scratch;
3083 Register Rindex = Lscratch;
3084
3085 assert_different_registers(Rscratch, Rindex, Rret);
3086
3087 Label notFinal;
3088
3089 // Check for vfinal
3090 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
3091 __ btst(Rflags, Rscratch);
3092 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3093 __ delayed()->nop();
3094
3095 __ profile_final_call(O4);
3096
3097 // do the call - the index (f2) contains the Method*
3098 assert_different_registers(G5_method, Gargs, Rcall);
3099 __ mov(Rindex, G5_method);
3100 __ profile_arguments_type(G5_method, Rcall, Gargs, true);
3101 __ call_from_interpreter(Rcall, Gargs, Rret);
3102 __ bind(notFinal);
3103
3104 __ profile_virtual_call(RKlass, O4);
3105 generate_vtable_call(RKlass, Rindex, Rret);
3106 }
3107
3108
3109 void TemplateTable::invokeinterface(int byte_no) {
3110 transition(vtos, vtos);
3111 assert(byte_no == f1_byte, "use this argument");
3112
3113 const Register Rinterface = G1_scratch;
3114 const Register Rret = G3_scratch;
3115 const Register Rindex = Lscratch;
3116 const Register O0_recv = O0;
3117 const Register O1_flags = O1;
3118 const Register O2_Klass = O2;
3119 const Register Rscratch = G4_scratch;
3120 assert_different_registers(Rscratch, G5_method);
3121
3122 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3123
3124 // get receiver klass
3125 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3126 __ load_klass(O0_recv, O2_Klass);
3127
3128 // Special case of invokeinterface called for virtual method of
3129 // java.lang.Object. See cpCacheOop.cpp for details.
3130 // This code isn't produced by javac, but could be produced by
3131 // another compliant java compiler.
3132 Label notMethod;
3133 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3134 __ btst(O1_flags, Rscratch);
3135 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3136 __ delayed()->nop();
3137
3138 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3139
3140 __ bind(notMethod);
3141
3142 __ profile_virtual_call(O2_Klass, O4);
3143
3144 //
3145 // find entry point to call
3146 //
3147
3148 // compute start of first itableOffsetEntry (which is at end of vtable)
3149 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3150 Label search;
3151 Register Rtemp = O1_flags;
3152
3153 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3154 if (align_object_offset(1) > 1) {
3155 __ round_to(Rtemp, align_object_offset(1));
3156 }
3157 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rscratch *= 4;
3158 if (Assembler::is_simm13(base)) {
3159 __ add(Rtemp, base, Rtemp);
3160 } else {
3161 __ set(base, Rscratch);
3162 __ add(Rscratch, Rtemp, Rtemp);
3163 }
3164 __ add(O2_Klass, Rtemp, Rscratch);
3165
3166 __ bind(search);
3167
3168 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3169 {
3170 Label ok;
3171
3172 // Check that entry is non-null. Null entries are probably a bytecode
3173 // problem. If the interface isn't implemented by the receiver class,
3174 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3175 // this too but that's only if the entry isn't already resolved, so we
3176 // need to check again.
3177 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3178 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3179 __ should_not_reach_here();
3180 __ bind(ok);
3181 }
3182
3183 __ cmp(Rinterface, Rtemp);
3184 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3185 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3186
3187 // entry found and Rscratch points to it
3188 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3189
3190 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3191 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= 8;
3192 __ add(Rscratch, Rindex, Rscratch);
3193 __ ld_ptr(O2_Klass, Rscratch, G5_method);
3194
3195 // Check for abstract method error.
3196 {
3197 Label ok;
3198 __ br_notnull_short(G5_method, Assembler::pt, ok);
3199 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3200 __ should_not_reach_here();
3201 __ bind(ok);
3202 }
3203
3204 Register Rcall = Rinterface;
3205 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3206
3207 __ profile_arguments_type(G5_method, Rcall, Gargs, true);
3208 __ call_from_interpreter(Rcall, Gargs, Rret);
3209 }
3210
3211 void TemplateTable::invokehandle(int byte_no) {
3212 transition(vtos, vtos);
3213 assert(byte_no == f1_byte, "use this argument");
3214
3215 const Register Rret = Lscratch;
3216 const Register G4_mtype = G4_scratch;
3217 const Register O0_recv = O0;
3218 const Register Rscratch = G3_scratch;
3219
3220 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3221 __ null_check(O0_recv);
3222
3223 // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
3224 // G5: MH.invokeExact_MT method (from f2)
3225
3226 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3227
3228 // do the call
3229 __ verify_oop(G4_mtype);
3230 __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3231 __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
3232 __ call_from_interpreter(Rscratch, Gargs, Rret);
3233 }
3234
3235
3236 void TemplateTable::invokedynamic(int byte_no) {
3237 transition(vtos, vtos);
3238 assert(byte_no == f1_byte, "use this argument");
3239
3240 const Register Rret = Lscratch;
3241 const Register G4_callsite = G4_scratch;
3242 const Register Rscratch = G3_scratch;
3243
3244 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3245
3246 // G4: CallSite object (from cpool->resolved_references[f1])
3247 // G5: MH.linkToCallSite method (from f2)
3248
3249 // Note: G4_callsite is already pushed by prepare_invoke
3250
3251 // %%% should make a type profile for any invokedynamic that takes a ref argument
3252 // profile this call
3253 __ profile_call(O4);
3254
3255 // do the call
3256 __ verify_oop(G4_callsite);
3257 __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3258 __ call_from_interpreter(Rscratch, Gargs, Rret);
3259 }
3260
3261
3262 //----------------------------------------------------------------------------------------------------
3263 // Allocation
3264
3265 void TemplateTable::_new() {
3266 transition(vtos, atos);
3267
3268 Label slow_case;
3269 Label done;
3270 Label initialize_header;
3271 Label initialize_object; // including clearing the fields
3272
3273 Register RallocatedObject = Otos_i;
3274 Register RinstanceKlass = O1;
3275 Register Roffset = O3;
3276 Register Rscratch = O4;
3277
3278 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3279 __ get_cpool_and_tags(Rscratch, G3_scratch);
3280 // make sure the class we're about to instantiate has been resolved
3281 // This is done before loading InstanceKlass to be consistent with the order
3282 // how Constant Pool is updated (see ConstantPool::klass_at_put)
3283 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3284 __ ldub(G3_scratch, Roffset, G3_scratch);
3285 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3286 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3287 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3288 // get InstanceKlass
3289 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3290 __ add(Roffset, sizeof(ConstantPool), Roffset);
3291 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3292
3293 // make sure klass is fully initialized:
3294 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3295 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3296 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3297 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3298
3299 // get instance_size in InstanceKlass (already aligned)
3300 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3301
3302 // make sure klass does not have has_finalizer, or is abstract, or interface or java/lang/Class
3303 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3304 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3305 __ delayed()->nop();
3306
3307 // allocate the instance
3308 // 1) Try to allocate in the TLAB
3309 // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
3310 // 3) if the above fails (or is not applicable), go to a slow case
3311 // (creates a new TLAB, etc.)
3312
3313 const bool allow_shared_alloc =
3314 Universe::heap()->supports_inline_contig_alloc();
3315
3316 if(UseTLAB) {
3317 Register RoldTopValue = RallocatedObject;
3318 Register RtlabWasteLimitValue = G3_scratch;
3319 Register RnewTopValue = G1_scratch;
3320 Register RendValue = Rscratch;
3321 Register RfreeValue = RnewTopValue;
3322
3323 // check if we can allocate in the TLAB
3324 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject
3325 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3326 __ add(RoldTopValue, Roffset, RnewTopValue);
3327
3328 // if there is enough space, we do not CAS and do not clear
3329 __ cmp(RnewTopValue, RendValue);
3330 if(ZeroTLAB) {
3331 // the fields have already been cleared
3332 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3333 } else {
3334 // initialize both the header and fields
3335 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3336 }
3337 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3338
3339 if (allow_shared_alloc) {
3340 // Check if tlab should be discarded (refill_waste_limit >= free)
3341 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3342 __ sub(RendValue, RoldTopValue, RfreeValue);
3343 #ifdef _LP64
3344 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3345 #else
3346 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3347 #endif
3348 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3349
3350 // increment waste limit to prevent getting stuck on this slow path
3351 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3352 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3353 } else {
3354 // No allocation in the shared eden.
3355 __ ba_short(slow_case);
3356 }
3357 }
3358
3359 // Allocation in the shared Eden
3360 if (allow_shared_alloc) {
3361 Register RoldTopValue = G1_scratch;
3362 Register RtopAddr = G3_scratch;
3363 Register RnewTopValue = RallocatedObject;
3364 Register RendValue = Rscratch;
3365
3366 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3367
3368 Label retry;
3369 __ bind(retry);
3370 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3371 __ ld_ptr(RendValue, 0, RendValue);
3372 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3373 __ add(RoldTopValue, Roffset, RnewTopValue);
3374
3375 // RnewTopValue contains the top address after the new object
3376 // has been allocated.
3377 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3378
3379 __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
3380
3381 // if someone beat us on the allocation, try again, otherwise continue
3382 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3383
3384 // bump total bytes allocated by this thread
3385 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3386 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3387 }
3388
3389 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3390 // clear object fields
3391 __ bind(initialize_object);
3392 __ deccc(Roffset, sizeof(oopDesc));
3393 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3394 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3395
3396 // initialize remaining object fields
3397 if (UseBlockZeroing) {
3398 // Use BIS for zeroing
3399 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3400 } else {
3401 Label loop;
3402 __ subcc(Roffset, wordSize, Roffset);
3403 __ bind(loop);
3404 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3405 __ st_ptr(G0, G3_scratch, Roffset);
3406 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3407 __ delayed()->subcc(Roffset, wordSize, Roffset);
3408 }
3409 __ ba_short(initialize_header);
3410 }
3411
3412 // slow case
3413 __ bind(slow_case);
3414 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3415 __ get_constant_pool(O1);
3416
3417 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3418
3419 __ ba_short(done);
3420
3421 // Initialize the header: mark, klass
3422 __ bind(initialize_header);
3423
3424 if (UseBiasedLocking) {
3425 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
3426 } else {
3427 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3428 }
3429 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3430 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3431 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
3432
3433 {
3434 SkipIfEqual skip_if(
3435 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3436 // Trigger dtrace event
3437 __ push(atos);
3438 __ call_VM_leaf(noreg,
3439 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3440 __ pop(atos);
3441 }
3442
3443 // continue
3444 __ bind(done);
3445 }
3446
3447
3448
3449 void TemplateTable::newarray() {
3450 transition(itos, atos);
3451 __ ldub(Lbcp, 1, O1);
3452 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3453 }
3454
3455
3456 void TemplateTable::anewarray() {
3457 transition(itos, atos);
3458 __ get_constant_pool(O1);
3459 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3460 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3461 }
3462
3463
3464 void TemplateTable::arraylength() {
3465 transition(atos, itos);
3466 Label ok;
3467 __ verify_oop(Otos_i);
3468 __ tst(Otos_i);
3469 __ throw_if_not_1_x( Assembler::notZero, ok );
3470 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3471 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3472 }
3473
3474
3475 void TemplateTable::checkcast() {
3476 transition(atos, atos);
3477 Label done, is_null, quicked, cast_ok, resolved;
3478 Register Roffset = G1_scratch;
3479 Register RobjKlass = O5;
3480 Register RspecifiedKlass = O4;
3481
3482 // Check for casting a NULL
3483 __ br_null_short(Otos_i, Assembler::pn, is_null);
3484
3485 // Get value klass in RobjKlass
3486 __ load_klass(Otos_i, RobjKlass); // get value klass
3487
3488 // Get constant pool tag
3489 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3490
3491 // See if the checkcast has been quickened
3492 __ get_cpool_and_tags(Lscratch, G3_scratch);
3493 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3494 __ ldub(G3_scratch, Roffset, G3_scratch);
3495 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3496 __ br(Assembler::equal, true, Assembler::pt, quicked);
3497 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3498
3499 __ push_ptr(); // save receiver for result, and for GC
3500 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3501 __ get_vm_result_2(RspecifiedKlass);
3502 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3503
3504 __ ba_short(resolved);
3505
3506 // Extract target class from constant pool
3507 __ bind(quicked);
3508 __ add(Roffset, sizeof(ConstantPool), Roffset);
3509 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3510 __ bind(resolved);
3511 __ load_klass(Otos_i, RobjKlass); // get value klass
3512
3513 // Generate a fast subtype check. Branch to cast_ok if no
3514 // failure. Throw exception if failure.
3515 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3516
3517 // Not a subtype; so must throw exception
3518 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3519
3520 __ bind(cast_ok);
3521
3522 if (ProfileInterpreter) {
3523 __ ba_short(done);
3524 }
3525 __ bind(is_null);
3526 __ profile_null_seen(G3_scratch);
3527 __ bind(done);
3528 }
3529
3530
3531 void TemplateTable::instanceof() {
3532 Label done, is_null, quicked, resolved;
3533 transition(atos, itos);
3534 Register Roffset = G1_scratch;
3535 Register RobjKlass = O5;
3536 Register RspecifiedKlass = O4;
3537
3538 // Check for casting a NULL
3539 __ br_null_short(Otos_i, Assembler::pt, is_null);
3540
3541 // Get value klass in RobjKlass
3542 __ load_klass(Otos_i, RobjKlass); // get value klass
3543
3544 // Get constant pool tag
3545 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3546
3547 // See if the checkcast has been quickened
3548 __ get_cpool_and_tags(Lscratch, G3_scratch);
3549 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3550 __ ldub(G3_scratch, Roffset, G3_scratch);
3551 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3552 __ br(Assembler::equal, true, Assembler::pt, quicked);
3553 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3554
3555 __ push_ptr(); // save receiver for result, and for GC
3556 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3557 __ get_vm_result_2(RspecifiedKlass);
3558 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3559
3560 __ ba_short(resolved);
3561
3562 // Extract target class from constant pool
3563 __ bind(quicked);
3564 __ add(Roffset, sizeof(ConstantPool), Roffset);
3565 __ get_constant_pool(Lscratch);
3566 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3567 __ bind(resolved);
3568 __ load_klass(Otos_i, RobjKlass); // get value klass
3569
3570 // Generate a fast subtype check. Branch to cast_ok if no
3571 // failure. Return 0 if failure.
3572 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3573 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3574 // Not a subtype; return 0;
3575 __ clr( Otos_i );
3576
3577 if (ProfileInterpreter) {
3578 __ ba_short(done);
3579 }
3580 __ bind(is_null);
3581 __ profile_null_seen(G3_scratch);
3582 __ bind(done);
3583 }
3584
3585 void TemplateTable::_breakpoint() {
3586
3587 // Note: We get here even if we are single stepping..
3588 // jbug inists on setting breakpoints at every bytecode
3589 // even if we are in single step mode.
3590
3591 transition(vtos, vtos);
3592 // get the unpatched byte code
3593 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3594 __ mov(O0, Lbyte_code);
3595
3596 // post the breakpoint event
3597 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3598
3599 // complete the execution of original bytecode
3600 __ dispatch_normal(vtos);
3601 }
3602
3603
3604 //----------------------------------------------------------------------------------------------------
3605 // Exceptions
3606
3607 void TemplateTable::athrow() {
3608 transition(atos, vtos);
3609
3610 // This works because exception is cached in Otos_i which is same as O0,
3611 // which is same as what throw_exception_entry_expects
3612 assert(Otos_i == Oexception, "see explanation above");
3613
3614 __ verify_oop(Otos_i);
3615 __ null_check(Otos_i);
3616 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3617 }
3618
3619
3620 //----------------------------------------------------------------------------------------------------
3621 // Synchronization
3622
3623
3624 // See frame_sparc.hpp for monitor block layout.
3625 // Monitor elements are dynamically allocated by growing stack as needed.
3626
3627 void TemplateTable::monitorenter() {
3628 transition(atos, vtos);
3629 __ verify_oop(Otos_i);
3630 // Try to acquire a lock on the object
3631 // Repeat until succeeded (i.e., until
3632 // monitorenter returns true).
3633
3634 { Label ok;
3635 __ tst(Otos_i);
3636 __ throw_if_not_1_x( Assembler::notZero, ok);
3637 __ delayed()->mov(Otos_i, Lscratch); // save obj
3638 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3639 }
3640
3641 assert(O0 == Otos_i, "Be sure where the object to lock is");
3642
3643 // find a free slot in the monitor block
3644
3645
3646 // initialize entry pointer
3647 __ clr(O1); // points to free slot or NULL
3648
3649 {
3650 Label entry, loop, exit;
3651 __ add( __ top_most_monitor(), O2 ); // last one to check
3652 __ ba( entry );
3653 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3654
3655
3656 __ bind( loop );
3657
3658 __ verify_oop(O4); // verify each monitor's oop
3659 __ tst(O4); // is this entry unused?
3660 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3661
3662 __ cmp(O4, O0); // check if current entry is for same object
3663 __ brx( Assembler::equal, false, Assembler::pn, exit );
3664 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3665
3666 __ bind( entry );
3667
3668 __ cmp( O3, O2 );
3669 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3670 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3671
3672 __ bind( exit );
3673 }
3674
3675 { Label allocated;
3676
3677 // found free slot?
3678 __ br_notnull_short(O1, Assembler::pn, allocated);
3679
3680 __ add_monitor_to_stack( false, O2, O3 );
3681 __ mov(Lmonitors, O1);
3682
3683 __ bind(allocated);
3684 }
3685
3686 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly.
3687 // The object has already been poped from the stack, so the expression stack looks correct.
3688 __ inc(Lbcp);
3689
3690 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3691 __ lock_object(O1, O0);
3692
3693 // check if there's enough space on the stack for the monitors after locking
3694 __ generate_stack_overflow_check(0);
3695
3696 // The bcp has already been incremented. Just need to dispatch to next instruction.
3697 __ dispatch_next(vtos);
3698 }
3699
3700
3701 void TemplateTable::monitorexit() {
3702 transition(atos, vtos);
3703 __ verify_oop(Otos_i);
3704 __ tst(Otos_i);
3705 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3706
3707 assert(O0 == Otos_i, "just checking");
3708
3709 { Label entry, loop, found;
3710 __ add( __ top_most_monitor(), O2 ); // last one to check
3711 __ ba(entry);
3712 // use Lscratch to hold monitor elem to check, start with most recent monitor,
3713 // By using a local it survives the call to the C routine.
3714 __ delayed()->mov( Lmonitors, Lscratch );
3715
3716 __ bind( loop );
3717
3718 __ verify_oop(O4); // verify each monitor's oop
3719 __ cmp(O4, O0); // check if current entry is for desired object
3720 __ brx( Assembler::equal, true, Assembler::pt, found );
3721 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3722
3723 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3724
3725 __ bind( entry );
3726
3727 __ cmp( Lscratch, O2 );
3728 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3729 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3730
3731 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3732 __ should_not_reach_here();
3733
3734 __ bind(found);
3735 }
3736 __ unlock_object(O1);
3737 }
3738
3739
3740 //----------------------------------------------------------------------------------------------------
3741 // Wide instructions
3742
3743 void TemplateTable::wide() {
3744 transition(vtos, vtos);
3745 __ ldub(Lbcp, 1, G3_scratch);// get next bc
3746 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3747 AddressLiteral ep(Interpreter::_wentry_point);
3748 __ set(ep, G4_scratch);
3749 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3750 __ jmp(G3_scratch, G0);
3751 __ delayed()->nop();
3752 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3753 }
3754
3755
3756 //----------------------------------------------------------------------------------------------------
3757 // Multi arrays
3758
3759 void TemplateTable::multianewarray() {
3760 transition(vtos, atos);
3761 // put ndims * wordSize into Lscratch
3762 __ ldub( Lbcp, 3, Lscratch);
3763 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3764 // Lesp points past last_dim, so set to O1 to first_dim address
3765 __ add( Lesp, Lscratch, O1);
3766 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3767 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3768 }
3769 #endif /* !CC_INTERP */