1 /*
2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/interp_masm.hpp"
29 #include "interpreter/templateTable.hpp"
30 #include "memory/universe.inline.hpp"
31 #include "oops/methodData.hpp"
32 #include "oops/objArrayKlass.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/sharedRuntime.hpp"
36 #include "runtime/stubRoutines.hpp"
37 #include "runtime/synchronizer.hpp"
38 #include "utilities/macros.hpp"
39
40 #ifndef CC_INTERP
41 #define __ _masm->
42
43 // Misc helpers
44
45 // Do an oop store like *(base + index + offset) = val.
46 // index can be noreg; when an index register is used the offset must be zero (see assert below).
47 static void do_oop_store(InterpreterMacroAssembler* _masm,
48 Register base,
49 Register index,
50 int offset,
51 Register val,
52 Register tmp,
53 BarrierSet::Name barrier,
54 bool precise) {
55 assert(tmp != val && tmp != base && tmp != index, "register collision");
56 assert(index == noreg || offset == 0, "only one offset");
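// Dispatch on the collector's barrier kind: G1 needs a SATB pre-barrier plus a post-barrier
// for cross-region stores, while card-table collectors only dirty a card after the store.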
57 switch (barrier) {
58 #if INCLUDE_ALL_GCS
59 case BarrierSet::G1SATBCT:
60 case BarrierSet::G1SATBCTLogging:
61 {
62 // Load and record the previous value.
63 __ g1_write_barrier_pre(base, index, offset,
64 noreg /* pre_val */,
65 tmp, true /*preserve_o_regs*/);
66
67 // G1 barrier needs uncompressed oop for region cross check.
68 Register new_val = val;
69 if (UseCompressedOops && val != G0) {
70 new_val = tmp;
71 __ mov(val, new_val);
72 }
73
74 if (index == noreg ) {
75 assert(Assembler::is_simm13(offset), "fix this code");
76 __ store_heap_oop(val, base, offset);
77 } else {
78 __ store_heap_oop(val, base, index);
79 }
80
81 // No need for post barrier if storing NULL
82 if (val != G0) {
83 if (precise) {
84 if (index == noreg) {
85 __ add(base, offset, base);
86 } else {
87 __ add(base, index, base);
88 }
89 }
90 __ g1_write_barrier_post(base, new_val, tmp);
91 }
92 }
93 break;
94 #endif // INCLUDE_ALL_GCS
95 case BarrierSet::CardTableModRef:
96 case BarrierSet::CardTableExtension:
97 {
98 if (index == noreg ) {
99 assert(Assembler::is_simm13(offset), "fix this code");
100 __ store_heap_oop(val, base, offset);
101 } else {
102 __ store_heap_oop(val, base, index);
103 }
104 // No need for post barrier if storing NULL
105 if (val != G0) {
106 if (precise) {
107 if (index == noreg) {
108 __ add(base, offset, base);
109 } else {
110 __ add(base, index, base);
111 }
112 }
113 __ card_write_barrier_post(base, val, tmp);
114 }
115 }
116 break;
117 case BarrierSet::ModRef:
118 ShouldNotReachHere();
119 break;
120 default :
121 ShouldNotReachHere();
122
123 }
124 }
125
126
127 //----------------------------------------------------------------------------------------------------
128 // Platform-dependent initialization
129
130 void TemplateTable::pd_initialize() {
131 // (none)
132 }
133
134
135 //----------------------------------------------------------------------------------------------------
136 // Condition conversion
137 Assembler::Condition ccNot(TemplateTable::Condition cc) {
138 switch (cc) {
139 case TemplateTable::equal : return Assembler::notEqual;
140 case TemplateTable::not_equal : return Assembler::equal;
141 case TemplateTable::less : return Assembler::greaterEqual;
142 case TemplateTable::less_equal : return Assembler::greater;
143 case TemplateTable::greater : return Assembler::lessEqual;
144 case TemplateTable::greater_equal: return Assembler::less;
145 }
146 ShouldNotReachHere();
147 return Assembler::zero;
148 }
149
150 //----------------------------------------------------------------------------------------------------
151 // Miscellaneous helper routines
152
153
154 Address TemplateTable::at_bcp(int offset) {
155 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
156 return Address(Lbcp, offset);
157 }
158
159
160 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
161 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
162 int byte_no) {
163 // With sharing on, may need to test Method* flag.
164 if (!RewriteBytecodes) return;
165 Label L_patch_done;
166
167 switch (bc) {
168 case Bytecodes::_fast_aputfield:
169 case Bytecodes::_fast_bputfield:
170 case Bytecodes::_fast_cputfield:
171 case Bytecodes::_fast_dputfield:
172 case Bytecodes::_fast_fputfield:
173 case Bytecodes::_fast_iputfield:
174 case Bytecodes::_fast_lputfield:
175 case Bytecodes::_fast_sputfield:
176 {
177 // We skip bytecode quickening for putfield instructions when
178 // the put_code written to the constant pool cache is zero.
179 // This is required so that every execution of this instruction
180 // calls out to InterpreterRuntime::resolve_get_put to do
181 // additional, required work.
182 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
183 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
184 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
185 __ set(bc, bc_reg);
186 __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done); // don't patch
187 }
188 break;
189 default:
190 assert(byte_no == -1, "sanity");
191 if (load_bc_into_bc_reg) {
192 __ set(bc, bc_reg);
193 }
194 }
195
196 if (JvmtiExport::can_post_breakpoint()) {
197 Label L_fast_patch;
198 __ ldub(at_bcp(0), temp_reg);
199 __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
200 // perform the quickening, slowly, in the bowels of the breakpoint table
201 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
202 __ ba_short(L_patch_done);
203 __ bind(L_fast_patch);
204 }
205
206 #ifdef ASSERT
207 Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
208 Label L_okay;
209 __ ldub(at_bcp(0), temp_reg);
210 __ cmp(temp_reg, orig_bytecode);
211 __ br(Assembler::equal, false, Assembler::pt, L_okay);
212 __ delayed()->cmp(temp_reg, bc_reg);
213 __ br(Assembler::equal, false, Assembler::pt, L_okay);
214 __ delayed()->nop();
215 __ stop("patching the wrong bytecode");
216 __ bind(L_okay);
217 #endif
218
219 // patch bytecode
220 __ stb(bc_reg, at_bcp(0));
221 __ bind(L_patch_done);
222 }
223
224 //----------------------------------------------------------------------------------------------------
225 // Individual instructions
226
227 void TemplateTable::nop() {
228 transition(vtos, vtos);
229 // nothing to do
230 }
231
232 void TemplateTable::shouldnotreachhere() {
233 transition(vtos, vtos);
234 __ stop("shouldnotreachhere bytecode");
235 }
236
237 void TemplateTable::aconst_null() {
238 transition(vtos, atos);
239 __ clr(Otos_i);
240 }
241
242
243 void TemplateTable::iconst(int value) {
244 transition(vtos, itos);
245 __ set(value, Otos_i);
246 }
247
248
249 void TemplateTable::lconst(int value) {
250 transition(vtos, ltos);
251 assert(value >= 0, "check this code");
252 #ifdef _LP64
253 __ set(value, Otos_l);
254 #else
255 __ set(value, Otos_l2);
256 __ clr( Otos_l1);
257 #endif
258 }
259
260
261 void TemplateTable::fconst(int value) {
262 transition(vtos, ftos);
263 static float zero = 0.0, one = 1.0, two = 2.0;
264 float* p;
265 switch( value ) {
266 default: ShouldNotReachHere();
267 case 0: p = &zero; break;
268 case 1: p = &one; break;
269 case 2: p = &two; break;
270 }
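// Materialize the address of the in-memory constant: sethi sets the high 22 bits of the
// address, and low10() supplies the remaining low bits as the load's immediate offset.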
271 AddressLiteral a(p);
272 __ sethi(a, G3_scratch);
273 __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
274 }
275
276
277 void TemplateTable::dconst(int value) {
278 transition(vtos, dtos);
279 static double zero = 0.0, one = 1.0;
280 double* p;
281 switch( value ) {
282 default: ShouldNotReachHere();
283 case 0: p = &zero; break;
284 case 1: p = &one; break;
285 }
286 AddressLiteral a(p);
287 __ sethi(a, G3_scratch);
288 __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
289 }
290
291
292 // %%%%% Should factor most snippet templates across platforms
293
294 void TemplateTable::bipush() {
295 transition(vtos, itos);
296 __ ldsb( at_bcp(1), Otos_i );
297 }
298
299 void TemplateTable::sipush() {
300 transition(vtos, itos);
301 __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
302 }
303
304 void TemplateTable::ldc(bool wide) {
305 transition(vtos, vtos);
306 Label call_ldc, notInt, isString, notString, notClass, exit;
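// Dispatch on the constant-pool tag: class entries (resolved or not) go to the VM via call_ldc,
// ints and floats are loaded directly from the constant pool, and String entries should already
// have been rewritten to _fast_aldc.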
307
308 if (wide) {
309 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
310 } else {
311 __ ldub(Lbcp, 1, O1);
312 }
313 __ get_cpool_and_tags(O0, O2);
314
315 const int base_offset = ConstantPool::header_size() * wordSize;
316 const int tags_offset = Array<u1>::base_offset_in_bytes();
317
318 // get type from tags
319 __ add(O2, tags_offset, O2);
320 __ ldub(O2, O1, O2);
321
322 // unresolved class? If so, must resolve
323 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);
324
325 // unresolved class in error state
326 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
327
328 __ cmp(O2, JVM_CONSTANT_Class); // need to call vm to get java mirror of the class
329 __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
330 __ delayed()->add(O0, base_offset, O0);
331
332 __ bind(call_ldc);
333 __ set(wide, O1);
334 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
335 __ push(atos);
336 __ ba_short(exit);
337
338 __ bind(notClass);
339 // __ add(O0, base_offset, O0);
340 __ sll(O1, LogBytesPerWord, O1);
341 __ cmp(O2, JVM_CONSTANT_Integer);
342 __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
343 __ delayed()->cmp(O2, JVM_CONSTANT_String);
344 __ ld(O0, O1, Otos_i);
345 __ push(itos);
346 __ ba_short(exit);
347
348 __ bind(notInt);
349 // __ cmp(O2, JVM_CONSTANT_String);
350 __ brx(Assembler::notEqual, true, Assembler::pt, notString);
351 __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
352 __ bind(isString);
353 __ stop("string should be rewritten to fast_aldc");
354 __ ba_short(exit);
355
356 __ bind(notString);
357 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
358 __ push(ftos);
359
360 __ bind(exit);
361 }
362
363 // Fast path for caching oop constants.
364 // %%% We should use this to handle Class and String constants also.
365 // %%% It will simplify the ldc/primitive path considerably.
366 void TemplateTable::fast_aldc(bool wide) {
367 transition(vtos, atos);
368
369 int index_size = wide ? sizeof(u2) : sizeof(u1);
370 Label resolved;
371
372 // We are resolved if the resolved reference cache entry contains a
373 // non-null object (CallSite, etc.)
374 assert_different_registers(Otos_i, G3_scratch);
375 __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size); // load index => G3_scratch
376 __ load_resolved_reference_at_index(Otos_i, G3_scratch);
377 __ tst(Otos_i);
378 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
379 __ delayed()->set((int)bytecode(), O1);
380
381 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
382
383 // first time invocation - must resolve first
384 __ call_VM(Otos_i, entry, O1);
385 __ bind(resolved);
386 __ verify_oop(Otos_i);
387 }
388
389
390 void TemplateTable::ldc2_w() {
391 transition(vtos, vtos);
392 Label Long, exit;
393
394 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
395 __ get_cpool_and_tags(O0, O2);
396
397 const int base_offset = ConstantPool::header_size() * wordSize;
398 const int tags_offset = Array<u1>::base_offset_in_bytes();
399 // get type from tags
400 __ add(O2, tags_offset, O2);
401 __ ldub(O2, O1, O2);
402
403 __ sll(O1, LogBytesPerWord, O1);
404 __ add(O0, O1, G3_scratch);
405
406 __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
407 // A double can be placed at word-aligned (rather than doubleword-aligned) locations in the constant pool.
408 // Check out Conversions.java for an example.
409 // Also ConstantPool::header_size() is 20, which makes it very difficult
410 // to double-align doubles in the constant pool. SG, 11/7/97
411 #ifdef _LP64
412 __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
413 #else
414 FloatRegister f = Ftos_d;
415 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
416 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
417 f->successor());
418 #endif
419 __ push(dtos);
420 __ ba_short(exit);
421
422 __ bind(Long);
423 #ifdef _LP64
424 __ ldx(G3_scratch, base_offset, Otos_l);
425 #else
426 __ ld(G3_scratch, base_offset, Otos_l);
427 __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
428 #endif
429 __ push(ltos);
430
431 __ bind(exit);
432 }
433
434
435 void TemplateTable::locals_index(Register reg, int offset) {
436 __ ldub( at_bcp(offset), reg );
437 }
438
439
440 void TemplateTable::locals_index_wide(Register reg) {
441 // offset is 2, not 1, because Lbcp points to wide prefix code
442 __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
443 }
444
445 void TemplateTable::iload() {
446 transition(vtos, itos);
447 // Rewrite iload,iload pair into fast_iload2
448 // iload,caload pair into fast_icaload
449 if (RewriteFrequentPairs) {
450 Label rewrite, done;
451
452 // get next byte
453 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);
454
455 // if _iload, wait to rewrite to iload2. We only want to rewrite the
456 // last two iloads in a pair. Comparing against fast_iload means that
457 // the next bytecode is neither an iload nor a caload, and therefore
458 // an iload pair.
459 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);
460
461 __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
462 __ br(Assembler::equal, false, Assembler::pn, rewrite);
463 __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);
464
465 __ cmp(G3_scratch, (int)Bytecodes::_caload);
466 __ br(Assembler::equal, false, Assembler::pn, rewrite);
467 __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);
468
469 __ set(Bytecodes::_fast_iload, G4_scratch); // don't check again
470 // rewrite
471 // G4_scratch: fast bytecode
472 __ bind(rewrite);
473 patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
474 __ bind(done);
475 }
476
477 // Get the local value into tos
478 locals_index(G3_scratch);
479 __ access_local_int( G3_scratch, Otos_i );
480 }
481
482 void TemplateTable::fast_iload2() {
483 transition(vtos, itos);
484 locals_index(G3_scratch);
485 __ access_local_int( G3_scratch, Otos_i );
486 __ push_i();
487 locals_index(G3_scratch, 3); // get next bytecode's local index.
488 __ access_local_int( G3_scratch, Otos_i );
489 }
490
491 void TemplateTable::fast_iload() {
492 transition(vtos, itos);
493 locals_index(G3_scratch);
494 __ access_local_int( G3_scratch, Otos_i );
495 }
496
497 void TemplateTable::lload() {
498 transition(vtos, ltos);
499 locals_index(G3_scratch);
500 __ access_local_long( G3_scratch, Otos_l );
501 }
502
503
504 void TemplateTable::fload() {
505 transition(vtos, ftos);
506 locals_index(G3_scratch);
507 __ access_local_float( G3_scratch, Ftos_f );
508 }
509
510
511 void TemplateTable::dload() {
512 transition(vtos, dtos);
513 locals_index(G3_scratch);
514 __ access_local_double( G3_scratch, Ftos_d );
515 }
516
517
518 void TemplateTable::aload() {
519 transition(vtos, atos);
520 locals_index(G3_scratch);
521 __ access_local_ptr( G3_scratch, Otos_i);
522 }
523
524
525 void TemplateTable::wide_iload() {
526 transition(vtos, itos);
527 locals_index_wide(G3_scratch);
528 __ access_local_int( G3_scratch, Otos_i );
529 }
530
531
532 void TemplateTable::wide_lload() {
533 transition(vtos, ltos);
534 locals_index_wide(G3_scratch);
535 __ access_local_long( G3_scratch, Otos_l );
536 }
537
538
539 void TemplateTable::wide_fload() {
540 transition(vtos, ftos);
541 locals_index_wide(G3_scratch);
542 __ access_local_float( G3_scratch, Ftos_f );
543 }
544
545
546 void TemplateTable::wide_dload() {
547 transition(vtos, dtos);
548 locals_index_wide(G3_scratch);
549 __ access_local_double( G3_scratch, Ftos_d );
550 }
551
552
553 void TemplateTable::wide_aload() {
554 transition(vtos, atos);
555 locals_index_wide(G3_scratch);
556 __ access_local_ptr( G3_scratch, Otos_i );
557 __ verify_oop(Otos_i);
558 }
559
560
561 void TemplateTable::iaload() {
562 transition(itos, itos);
563 // Otos_i: index
564 // tos: array
565 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
566 __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
567 }
568
569
570 void TemplateTable::laload() {
571 transition(itos, ltos);
572 // Otos_i: index
573 // O2: array
574 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
575 __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
576 }
577
578
579 void TemplateTable::faload() {
580 transition(itos, ftos);
581 // Otos_i: index
582 // O2: array
583 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
584 __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
585 }
586
587
588 void TemplateTable::daload() {
589 transition(itos, dtos);
590 // Otos_i: index
591 // O2: array
592 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
593 __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
594 }
595
596
597 void TemplateTable::aaload() {
598 transition(itos, atos);
599 // Otos_i: index
600 // tos: array
601 __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
602 __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
603 __ verify_oop(Otos_i);
604 }
605
606
607 void TemplateTable::baload() {
608 transition(itos, itos);
609 // Otos_i: index
610 // tos: array
611 __ index_check(O2, Otos_i, 0, G3_scratch, O3);
612 __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
613 }
614
615
616 void TemplateTable::caload() {
617 transition(itos, itos);
618 // Otos_i: index
619 // tos: array
620 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
621 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
622 }
623
624 void TemplateTable::fast_icaload() {
625 transition(vtos, itos);
626 // Otos_i: index
627 // tos: array
628 locals_index(G3_scratch);
629 __ access_local_int( G3_scratch, Otos_i );
630 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
631 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
632 }
633
634
635 void TemplateTable::saload() {
636 transition(itos, itos);
637 // Otos_i: index
638 // tos: array
639 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
640 __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
641 }
642
643
644 void TemplateTable::iload(int n) {
645 transition(vtos, itos);
646 __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
647 }
648
649
650 void TemplateTable::lload(int n) {
651 transition(vtos, ltos);
652 assert(n+1 < Argument::n_register_parameters, "would need more code");
653 __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
654 }
655
656
657 void TemplateTable::fload(int n) {
658 transition(vtos, ftos);
659 assert(n < Argument::n_register_parameters, "would need more code");
660 __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
661 }
662
663
664 void TemplateTable::dload(int n) {
665 transition(vtos, dtos);
666 FloatRegister dst = Ftos_d;
667 __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
668 }
669
670
671 void TemplateTable::aload(int n) {
672 transition(vtos, atos);
673 __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
674 }
675
676
677 void TemplateTable::aload_0() {
678 transition(vtos, atos);
679
680 // According to bytecode histograms, the pairs:
681 //
682 // _aload_0, _fast_igetfield (itos)
683 // _aload_0, _fast_agetfield (atos)
684 // _aload_0, _fast_fgetfield (ftos)
685 //
686 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
687 // bytecode checks the next bytecode and then rewrites the current
688 // bytecode into a pair bytecode; otherwise it rewrites the current
689 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
690 //
691 if (RewriteFrequentPairs) {
692 Label rewrite, done;
693
694 // get next byte
695 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);
696
697 // do actual aload_0
698 aload(0);
699
700 // if _getfield then wait with rewrite
701 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
702
703 // if _igetfield then rewrite to _fast_iaccess_0
704 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
705 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
706 __ br(Assembler::equal, false, Assembler::pn, rewrite);
707 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);
708
709 // if _agetfield then rewrite to _fast_aaccess_0
710 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
711 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
712 __ br(Assembler::equal, false, Assembler::pn, rewrite);
713 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);
714
715 // if _fgetfield then rewrite to _fast_faccess_0
716 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
717 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
718 __ br(Assembler::equal, false, Assembler::pn, rewrite);
719 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);
720
721 // else rewrite to _fast_aload0
722 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
723 __ set(Bytecodes::_fast_aload_0, G4_scratch);
724
725 // rewrite
726 // G4_scratch: fast bytecode
727 __ bind(rewrite);
728 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
729 __ bind(done);
730 } else {
731 aload(0);
732 }
733 }
734
735
736 void TemplateTable::istore() {
737 transition(itos, vtos);
738 locals_index(G3_scratch);
739 __ store_local_int( G3_scratch, Otos_i );
740 }
741
742
743 void TemplateTable::lstore() {
744 transition(ltos, vtos);
745 locals_index(G3_scratch);
746 __ store_local_long( G3_scratch, Otos_l );
747 }
748
749
750 void TemplateTable::fstore() {
751 transition(ftos, vtos);
752 locals_index(G3_scratch);
753 __ store_local_float( G3_scratch, Ftos_f );
754 }
755
756
757 void TemplateTable::dstore() {
758 transition(dtos, vtos);
759 locals_index(G3_scratch);
760 __ store_local_double( G3_scratch, Ftos_d );
761 }
762
763
764 void TemplateTable::astore() {
765 transition(vtos, vtos);
766 __ load_ptr(0, Otos_i);
767 __ inc(Lesp, Interpreter::stackElementSize);
768 __ verify_oop_or_return_address(Otos_i, G3_scratch);
769 locals_index(G3_scratch);
770 __ store_local_ptr(G3_scratch, Otos_i);
771 }
772
773
774 void TemplateTable::wide_istore() {
775 transition(vtos, vtos);
776 __ pop_i();
777 locals_index_wide(G3_scratch);
778 __ store_local_int( G3_scratch, Otos_i );
779 }
780
781
782 void TemplateTable::wide_lstore() {
783 transition(vtos, vtos);
784 __ pop_l();
785 locals_index_wide(G3_scratch);
786 __ store_local_long( G3_scratch, Otos_l );
787 }
788
789
790 void TemplateTable::wide_fstore() {
791 transition(vtos, vtos);
792 __ pop_f();
793 locals_index_wide(G3_scratch);
794 __ store_local_float( G3_scratch, Ftos_f );
795 }
796
797
798 void TemplateTable::wide_dstore() {
799 transition(vtos, vtos);
800 __ pop_d();
801 locals_index_wide(G3_scratch);
802 __ store_local_double( G3_scratch, Ftos_d );
803 }
804
805
806 void TemplateTable::wide_astore() {
807 transition(vtos, vtos);
808 __ load_ptr(0, Otos_i);
809 __ inc(Lesp, Interpreter::stackElementSize);
810 __ verify_oop_or_return_address(Otos_i, G3_scratch);
811 locals_index_wide(G3_scratch);
812 __ store_local_ptr(G3_scratch, Otos_i);
813 }
814
815
816 void TemplateTable::iastore() {
817 transition(itos, vtos);
818 __ pop_i(O2); // index
819 // Otos_i: val
820 // O3: array
821 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
822 __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
823 }
824
825
826 void TemplateTable::lastore() {
827 transition(ltos, vtos);
828 __ pop_i(O2); // index
829 // Otos_l: val
830 // O3: array
831 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
832 __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
833 }
834
835
836 void TemplateTable::fastore() {
837 transition(ftos, vtos);
838 __ pop_i(O2); // index
839 // Ftos_f: val
840 // O3: array
841 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
842 __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
843 }
844
845
846 void TemplateTable::dastore() {
847 transition(dtos, vtos);
848 __ pop_i(O2); // index
849 // Ftos_d: val
850 // O3: array
851 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
852 __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
853 }
854
855
856 void TemplateTable::aastore() {
857 Label store_ok, is_null, done;
858 transition(vtos, vtos);
859 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
860 __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index
861 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array
862 // Otos_i: val
863 // O2: index
864 // O3: array
865 __ verify_oop(Otos_i);
866 __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
867
868 // do array store check - check for NULL value first
869 __ br_null_short( Otos_i, Assembler::pn, is_null );
870
871 __ load_klass(O3, O4); // get array klass
872 __ load_klass(Otos_i, O5); // get value klass
873
874 // do fast instanceof cache test
875
876 __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);
877
878 assert(Otos_i == O0, "just checking");
879
880 // Otos_i: value
881 // O1: addr - offset
882 // O2: index
883 // O3: array
884 // O4: array element klass
885 // O5: value klass
886
887 // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
888
889 // Generate a fast subtype check. Branch to store_ok if no
890 // failure. Throw if failure.
891 __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
892
893 // Not a subtype; so must throw exception
894 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );
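// (throw_if_not_x with Assembler::never throws unconditionally; this point is reached only
// when the subtype check above falls through on failure.)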
895
896 // Store is OK.
897 __ bind(store_ok);
898 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
899
900 __ ba(done);
901 __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
902
903 __ bind(is_null);
904 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
905
906 __ profile_null_seen(G3_scratch);
907 __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
908 __ bind(done);
909 }
910
911
912 void TemplateTable::bastore() {
913 transition(itos, vtos);
914 __ pop_i(O2); // index
915 // Otos_i: val
916 // O3: array
917 __ index_check(O3, O2, 0, G3_scratch, O2);
918 __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
919 }
920
921
922 void TemplateTable::castore() {
923 transition(itos, vtos);
924 __ pop_i(O2); // index
925 // Otos_i: val
926 // O3: array
927 __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
928 __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
929 }
930
931
932 void TemplateTable::sastore() {
933 // %%%%% Factor across platforms
934 castore();
935 }
936
937
938 void TemplateTable::istore(int n) {
939 transition(itos, vtos);
940 __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
941 }
942
943
944 void TemplateTable::lstore(int n) {
945 transition(ltos, vtos);
946 assert(n+1 < Argument::n_register_parameters, "only handle register cases");
947 __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
948
949 }
950
951
952 void TemplateTable::fstore(int n) {
953 transition(ftos, vtos);
954 assert(n < Argument::n_register_parameters, "only handle register cases");
955 __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
956 }
957
958
959 void TemplateTable::dstore(int n) {
960 transition(dtos, vtos);
961 FloatRegister src = Ftos_d;
962 __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
963 }
964
965
966 void TemplateTable::astore(int n) {
967 transition(vtos, vtos);
968 __ load_ptr(0, Otos_i);
969 __ inc(Lesp, Interpreter::stackElementSize);
970 __ verify_oop_or_return_address(Otos_i, G3_scratch);
971 __ store_local_ptr(n, Otos_i);
972 }
973
974
975 void TemplateTable::pop() {
976 transition(vtos, vtos);
977 __ inc(Lesp, Interpreter::stackElementSize);
978 }
979
980
981 void TemplateTable::pop2() {
982 transition(vtos, vtos);
983 __ inc(Lesp, 2 * Interpreter::stackElementSize);
984 }
985
986
987 void TemplateTable::dup() {
988 transition(vtos, vtos);
989 // stack: ..., a
990 // load a and tag
991 __ load_ptr(0, Otos_i);
992 __ push_ptr(Otos_i);
993 // stack: ..., a, a
994 }
995
996
997 void TemplateTable::dup_x1() {
998 transition(vtos, vtos);
999 // stack: ..., a, b
1000 __ load_ptr( 1, G3_scratch); // get a
1001 __ load_ptr( 0, Otos_l1); // get b
1002 __ store_ptr(1, Otos_l1); // put b
1003 __ store_ptr(0, G3_scratch); // put a - like swap
1004 __ push_ptr(Otos_l1); // push b
1005 // stack: ..., b, a, b
1006 }
1007
1008
1009 void TemplateTable::dup_x2() {
1010 transition(vtos, vtos);
1011 // stack: ..., a, b, c
1012 // get c and push on stack, reuse registers
1013 __ load_ptr( 0, G3_scratch); // get c
1014 __ push_ptr(G3_scratch); // push c with tag
1015 // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
1016 // (stack offsets n+1 now)
1017 __ load_ptr( 3, Otos_l1); // get a
1018 __ store_ptr(3, G3_scratch); // put c at 3
1019 // stack: ..., c, b, c, c (a in reg)
1020 __ load_ptr( 2, G3_scratch); // get b
1021 __ store_ptr(2, Otos_l1); // put a at 2
1022 // stack: ..., c, a, c, c (b in reg)
1023 __ store_ptr(1, G3_scratch); // put b at 1
1024 // stack: ..., c, a, b, c
1025 }
1026
1027
1028 void TemplateTable::dup2() {
1029 transition(vtos, vtos);
1030 __ load_ptr(1, G3_scratch); // get a
1031 __ load_ptr(0, Otos_l1); // get b
1032 __ push_ptr(G3_scratch); // push a
1033 __ push_ptr(Otos_l1); // push b
1034 // stack: ..., a, b, a, b
1035 }
1036
1037
1038 void TemplateTable::dup2_x1() {
1039 transition(vtos, vtos);
1040 // stack: ..., a, b, c
1041 __ load_ptr( 1, Lscratch); // get b
1042 __ load_ptr( 2, Otos_l1); // get a
1043 __ store_ptr(2, Lscratch); // put b at a
1044 // stack: ..., b, b, c
1045 __ load_ptr( 0, G3_scratch); // get c
1046 __ store_ptr(1, G3_scratch); // put c at b
1047 // stack: ..., b, c, c
1048 __ store_ptr(0, Otos_l1); // put a at c
1049 // stack: ..., b, c, a
1050 __ push_ptr(Lscratch); // push b
1051 __ push_ptr(G3_scratch); // push c
1052 // stack: ..., b, c, a, b, c
1053 }
1054
1055
1056 // The spec says that these types can be a mixture of category 1 (one-word)
1057 // types and/or category 2 types (longs and doubles).
1058 void TemplateTable::dup2_x2() {
1059 transition(vtos, vtos);
1060 // stack: ..., a, b, c, d
1061 __ load_ptr( 1, Lscratch); // get c
1062 __ load_ptr( 3, Otos_l1); // get a
1063 __ store_ptr(3, Lscratch); // put c at 3
1064 __ store_ptr(1, Otos_l1); // put a at 1
1065 // stack: ..., c, b, a, d
1066 __ load_ptr( 2, G3_scratch); // get b
1067 __ load_ptr( 0, Otos_l1); // get d
1068 __ store_ptr(0, G3_scratch); // put b at 0
1069 __ store_ptr(2, Otos_l1); // put d at 2
1070 // stack: ..., c, d, a, b
1071 __ push_ptr(Lscratch); // push c
1072 __ push_ptr(Otos_l1); // push d
1073 // stack: ..., c, d, a, b, c, d
1074 }
1075
1076
1077 void TemplateTable::swap() {
1078 transition(vtos, vtos);
1079 // stack: ..., a, b
1080 __ load_ptr( 1, G3_scratch); // get a
1081 __ load_ptr( 0, Otos_l1); // get b
1082 __ store_ptr(0, G3_scratch); // put b
1083 __ store_ptr(1, Otos_l1); // put a
1084 // stack: ..., b, a
1085 }
1086
1087
1088 void TemplateTable::iop2(Operation op) {
1089 transition(itos, itos);
1090 __ pop_i(O1);
1091 switch (op) {
1092 case add: __ add(O1, Otos_i, Otos_i); break;
1093 case sub: __ sub(O1, Otos_i, Otos_i); break;
1094 // %%%%% Mul may not exist: better to call .mul?
1095 case mul: __ smul(O1, Otos_i, Otos_i); break;
1096 case _and: __ and3(O1, Otos_i, Otos_i); break;
1097 case _or: __ or3(O1, Otos_i, Otos_i); break;
1098 case _xor: __ xor3(O1, Otos_i, Otos_i); break;
1099 case shl: __ sll(O1, Otos_i, Otos_i); break;
1100 case shr: __ sra(O1, Otos_i, Otos_i); break;
1101 case ushr: __ srl(O1, Otos_i, Otos_i); break;
1102 default: ShouldNotReachHere();
1103 }
1104 }
1105
1106
1107 void TemplateTable::lop2(Operation op) {
1108 transition(ltos, ltos);
1109 __ pop_l(O2);
1110 switch (op) {
1111 #ifdef _LP64
1112 case add: __ add(O2, Otos_l, Otos_l); break;
1113 case sub: __ sub(O2, Otos_l, Otos_l); break;
1114 case _and: __ and3(O2, Otos_l, Otos_l); break;
1115 case _or: __ or3(O2, Otos_l, Otos_l); break;
1116 case _xor: __ xor3(O2, Otos_l, Otos_l); break;
1117 #else
1118 case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
1119 case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
1120 case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
1121 case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
1122 case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
1123 #endif
1124 default: ShouldNotReachHere();
1125 }
1126 }
1127
1128
1129 void TemplateTable::idiv() {
1130 // %%%%% Later: For SPARC/V7 call .sdiv library routine,
1131 // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1132
1133 transition(itos, itos);
1134 __ pop_i(O1); // get 1st op
1135
1136 // Y contains upper 32 bits of result, set it to 0 or all ones
1137 __ wry(G0);
1138 __ mov(~0, G3_scratch);
1139
1140 __ tst(O1);
1141 Label neg;
1142 __ br(Assembler::negative, true, Assembler::pn, neg);
1143 __ delayed()->wry(G3_scratch);
1144 __ bind(neg);
1145
1146 Label ok;
1147 __ tst(Otos_i);
1148 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1149
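// Special case: min_int / -1 overflows; the JVM spec defines the result as min_int,
// so in that case return the dividend unchanged and skip the sdiv.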
1150 const int min_int = 0x80000000;
1151 Label regular;
1152 __ cmp(Otos_i, -1);
1153 __ br(Assembler::notEqual, false, Assembler::pt, regular);
1154 #ifdef _LP64
1155 // Don't put set in delay slot
1156 // Set will turn into multiple instructions in 64 bit mode
1157 __ delayed()->nop();
1158 __ set(min_int, G4_scratch);
1159 #else
1160 __ delayed()->set(min_int, G4_scratch);
1161 #endif
1162 Label done;
1163 __ cmp(O1, G4_scratch);
1164 __ br(Assembler::equal, true, Assembler::pt, done);
1165 __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)
1166
1167 __ bind(regular);
1168 __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1169 __ bind(done);
1170 }
1171
1172
1173 void TemplateTable::irem() {
1174 transition(itos, itos);
1175 __ mov(Otos_i, O2); // save divisor
1176 idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
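// idiv leaves the quotient in Otos_i and the dividend in O1: remainder = dividend - quotient * divisor.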
1177 __ smul(Otos_i, O2, Otos_i);
1178 __ sub(O1, Otos_i, Otos_i);
1179 }
1180
1181
1182 void TemplateTable::lmul() {
1183 transition(ltos, ltos);
1184 __ pop_l(O2);
1185 #ifdef _LP64
1186 __ mulx(Otos_l, O2, Otos_l);
1187 #else
1188 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
1189 #endif
1190
1191 }
1192
1193
1194 void TemplateTable::ldiv() {
1195 transition(ltos, ltos);
1196
1197 // check for zero
1198 __ pop_l(O2);
1199 #ifdef _LP64
1200 __ tst(Otos_l);
1201 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1202 __ sdivx(O2, Otos_l, Otos_l);
1203 #else
1204 __ orcc(Otos_l1, Otos_l2, G0);
1205 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1206 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1207 #endif
1208 }
1209
1210
1211 void TemplateTable::lrem() {
1212 transition(ltos, ltos);
1213
1214 // check for zero
1215 __ pop_l(O2);
1216 #ifdef _LP64
1217 __ tst(Otos_l);
1218 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1219 __ sdivx(O2, Otos_l, Otos_l2);
1220 __ mulx (Otos_l2, Otos_l, Otos_l2);
1221 __ sub (O2, Otos_l2, Otos_l);
1222 #else
1223 __ orcc(Otos_l1, Otos_l2, G0);
1224 __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1225 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1226 #endif
1227 }
1228
1229
1230 void TemplateTable::lshl() {
1231 transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1232
1233 __ pop_l(O2); // shift value in O2, O3
1234 #ifdef _LP64
1235 __ sllx(O2, Otos_i, Otos_l);
1236 #else
1237 __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1238 #endif
1239 }
1240
1241
1242 void TemplateTable::lshr() {
1243 transition(itos, ltos); // %%%% see lshl comment
1244
1245 __ pop_l(O2); // shift value in O2, O3
1246 #ifdef _LP64
1247 __ srax(O2, Otos_i, Otos_l);
1248 #else
1249 __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1250 #endif
1251 }
1252
1253
1254
1255 void TemplateTable::lushr() {
1256 transition(itos, ltos); // %%%% see lshl comment
1257
1258 __ pop_l(O2); // shift value in O2, O3
1259 #ifdef _LP64
1260 __ srlx(O2, Otos_i, Otos_l);
1261 #else
1262 __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1263 #endif
1264 }
1265
1266
1267 void TemplateTable::fop2(Operation op) {
1268 transition(ftos, ftos);
1269 switch (op) {
1270 case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1271 case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1272 case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1273 case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1274 case rem:
1275 assert(Ftos_f == F0, "just checking");
1276 #ifdef _LP64
1277 // LP64 calling conventions use F1, F3 for passing 2 floats
1278 __ pop_f(F1);
1279 __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
1280 #else
1281 __ pop_i(O0);
1282 __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
1283 __ ld( __ d_tmp, O1 );
1284 #endif
1285 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1286 assert( Ftos_f == F0, "fix this code" );
1287 break;
1288
1289 default: ShouldNotReachHere();
1290 }
1291 }
1292
1293
1294 void TemplateTable::dop2(Operation op) {
1295 transition(dtos, dtos);
1296 switch (op) {
1297 case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1298 case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1299 case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1300 case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1301 case rem:
1302 #ifdef _LP64
1303 // Pass arguments in D0, D2
1304 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1305 __ pop_d( F0 );
1306 #else
1307 // Pass arguments in O0O1, O2O3
1308 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1309 __ ldd( __ d_tmp, O2 );
1310 __ pop_d(Ftos_f);
1311 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1312 __ ldd( __ d_tmp, O0 );
1313 #endif
1314 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1315 assert( Ftos_d == F0, "fix this code" );
1316 break;
1317
1318 default: ShouldNotReachHere();
1319 }
1320 }
1321
1322
1323 void TemplateTable::ineg() {
1324 transition(itos, itos);
1325 __ neg(Otos_i);
1326 }
1327
1328
1329 void TemplateTable::lneg() {
1330 transition(ltos, ltos);
1331 #ifdef _LP64
1332 __ sub(G0, Otos_l, Otos_l);
1333 #else
1334 __ lneg(Otos_l1, Otos_l2);
1335 #endif
1336 }
1337
1338
1339 void TemplateTable::fneg() {
1340 transition(ftos, ftos);
1341 __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
1342 }
1343
1344
1345 void TemplateTable::dneg() {
1346 transition(dtos, dtos);
1347 __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
1348 }
1349
1350
1351 void TemplateTable::iinc() {
1352 transition(vtos, vtos);
1353 locals_index(G3_scratch);
1354 __ ldsb(Lbcp, 2, O2); // load constant
1355 __ access_local_int(G3_scratch, Otos_i);
1356 __ add(Otos_i, O2, Otos_i);
1357 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1358 }
1359
1360
1361 void TemplateTable::wide_iinc() {
1362 transition(vtos, vtos);
1363 locals_index_wide(G3_scratch);
1364 __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
1365 __ access_local_int(G3_scratch, Otos_i);
1366 __ add(Otos_i, O3, Otos_i);
1367 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1368 }
1369
1370
1371 void TemplateTable::convert() {
1372 // %%%%% Factor this first part across platforms
1373 #ifdef ASSERT
1374 TosState tos_in = ilgl;
1375 TosState tos_out = ilgl;
1376 switch (bytecode()) {
1377 case Bytecodes::_i2l: // fall through
1378 case Bytecodes::_i2f: // fall through
1379 case Bytecodes::_i2d: // fall through
1380 case Bytecodes::_i2b: // fall through
1381 case Bytecodes::_i2c: // fall through
1382 case Bytecodes::_i2s: tos_in = itos; break;
1383 case Bytecodes::_l2i: // fall through
1384 case Bytecodes::_l2f: // fall through
1385 case Bytecodes::_l2d: tos_in = ltos; break;
1386 case Bytecodes::_f2i: // fall through
1387 case Bytecodes::_f2l: // fall through
1388 case Bytecodes::_f2d: tos_in = ftos; break;
1389 case Bytecodes::_d2i: // fall through
1390 case Bytecodes::_d2l: // fall through
1391 case Bytecodes::_d2f: tos_in = dtos; break;
1392 default : ShouldNotReachHere();
1393 }
1394 switch (bytecode()) {
1395 case Bytecodes::_l2i: // fall through
1396 case Bytecodes::_f2i: // fall through
1397 case Bytecodes::_d2i: // fall through
1398 case Bytecodes::_i2b: // fall through
1399 case Bytecodes::_i2c: // fall through
1400 case Bytecodes::_i2s: tos_out = itos; break;
1401 case Bytecodes::_i2l: // fall through
1402 case Bytecodes::_f2l: // fall through
1403 case Bytecodes::_d2l: tos_out = ltos; break;
1404 case Bytecodes::_i2f: // fall through
1405 case Bytecodes::_l2f: // fall through
1406 case Bytecodes::_d2f: tos_out = ftos; break;
1407 case Bytecodes::_i2d: // fall through
1408 case Bytecodes::_l2d: // fall through
1409 case Bytecodes::_f2d: tos_out = dtos; break;
1410 default : ShouldNotReachHere();
1411 }
1412 transition(tos_in, tos_out);
1413 #endif
1414
1415
1416 // Conversion
1417 Label done;
1418 switch (bytecode()) {
1419 case Bytecodes::_i2l:
1420 #ifdef _LP64
1421 // Sign extend the 32 bits
1422 __ sra ( Otos_i, 0, Otos_l );
1423 #else
1424 __ addcc(Otos_i, 0, Otos_l2);
1425 __ br(Assembler::greaterEqual, true, Assembler::pt, done);
1426 __ delayed()->clr(Otos_l1);
1427 __ set(~0, Otos_l1);
1428 #endif
1429 break;
1430
1431 case Bytecodes::_i2f:
1432 __ st(Otos_i, __ d_tmp );
1433 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1434 __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1435 break;
1436
1437 case Bytecodes::_i2d:
1438 __ st(Otos_i, __ d_tmp);
1439 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1440 __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1441 break;
1442
1443 case Bytecodes::_i2b:
1444 __ sll(Otos_i, 24, Otos_i);
1445 __ sra(Otos_i, 24, Otos_i);
1446 break;
1447
1448 case Bytecodes::_i2c:
1449 __ sll(Otos_i, 16, Otos_i);
1450 __ srl(Otos_i, 16, Otos_i);
1451 break;
1452
1453 case Bytecodes::_i2s:
1454 __ sll(Otos_i, 16, Otos_i);
1455 __ sra(Otos_i, 16, Otos_i);
1456 break;
1457
1458 case Bytecodes::_l2i:
1459 #ifndef _LP64
1460 __ mov(Otos_l2, Otos_i);
1461 #else
1462 // Sign-extend into the high 32 bits
1463 __ sra(Otos_l, 0, Otos_i);
1464 #endif
1465 break;
1466
1467 case Bytecodes::_l2f:
1468 case Bytecodes::_l2d:
1469 __ st_long(Otos_l, __ d_tmp);
1470 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1471
1472 if (bytecode() == Bytecodes::_l2f) {
1473 __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1474 } else {
1475 __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1476 }
1477 break;
1478
1479 case Bytecodes::_f2i: {
1480 Label isNaN;
1481 // result must be 0 if value is NaN; test by comparing value to itself
1482 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1483 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1484 __ delayed()->clr(Otos_i); // NaN
1485 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1486 __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1487 __ ld(__ d_tmp, Otos_i);
1488 __ bind(isNaN);
1489 }
1490 break;
1491
1492 case Bytecodes::_f2l:
1493 // must uncache tos
1494 __ push_f();
1495 #ifdef _LP64
1496 __ pop_f(F1);
1497 #else
1498 __ pop_i(O0);
1499 #endif
1500 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1501 break;
1502
1503 case Bytecodes::_f2d:
1504 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1505 break;
1506
1507 case Bytecodes::_d2i:
1508 case Bytecodes::_d2l:
1509 // must uncache tos
1510 __ push_d();
1511 #ifdef _LP64
1512 // LP64 calling conventions pass first double arg in D0
1513 __ pop_d( Ftos_d );
1514 #else
1515 __ pop_i( O0 );
1516 __ pop_i( O1 );
1517 #endif
1518 __ call_VM_leaf(Lscratch,
1519 bytecode() == Bytecodes::_d2i
1520 ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1521 : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1522 break;
1523
1524 case Bytecodes::_d2f:
1525 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1526 break;
1527
1528 default: ShouldNotReachHere();
1529 }
1530 __ bind(done);
1531 }
1532
1533
1534 void TemplateTable::lcmp() {
1535 transition(ltos, itos);
1536
1537 #ifdef _LP64
1538 __ pop_l(O1); // pop off value 1, value 2 is in O0
1539 __ lcmp( O1, Otos_l, Otos_i );
1540 #else
1541 __ pop_l(O2); // cmp O2,3 to O0,1
1542 __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
1543 #endif
1544 }
1545
1546
1547 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1548
1549 if (is_float) __ pop_f(F2);
1550 else __ pop_d(F2);
1551
1552 assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");
1553
1554 __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1555 }
1556
1557 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1558 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1559 __ verify_thread();
1560
1561 const Register O2_bumped_count = O2;
1562 __ profile_taken_branch(G3_scratch, O2_bumped_count);
1563
1564 // get (wide) offset to O1_disp
1565 const Register O1_disp = O1;
1566 if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
1567 else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
1568
1569 // Handle all the JSR stuff here, then exit.
1570 // It's much shorter and cleaner than intermingling with the
1571 // non-JSR normal-branch stuff occurring below.
1572 if( is_jsr ) {
1573 // compute return address as bci in Otos_i
1574 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1575 __ sub(Lbcp, G3_scratch, G3_scratch);
1576 __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
1577
1578 // Bump Lbcp to target of JSR
1579 __ add(Lbcp, O1_disp, Lbcp);
1580 // Push returnAddress for "ret" on stack
1581 __ push_ptr(Otos_i);
1582 // And away we go!
1583 __ dispatch_next(vtos);
1584 return;
1585 }
1586
1587 // Normal (non-jsr) branch handling
1588
1589 // Save the current Lbcp
1590 const Register l_cur_bcp = Lscratch;
1591 __ mov( Lbcp, l_cur_bcp );
1592
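// Only backward branches (non-positive displacement) bump the backedge counter and may
// trigger OSR compilation; forward branches go straight to Lforward.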
1593 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1594 if ( increment_invocation_counter_for_backward_branches ) {
1595 Label Lforward;
1596 // check branch direction
1597 __ br( Assembler::positive, false, Assembler::pn, Lforward );
1598 // Bump bytecode pointer by displacement (take the branch)
1599 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
1600
1601 const Register G3_method_counters = G3_scratch;
1602 __ get_method_counters(Lmethod, G3_method_counters, Lforward);
1603
1604 if (TieredCompilation) {
1605 Label Lno_mdo, Loverflow;
1606 int increment = InvocationCounter::count_increment;
1607 if (ProfileInterpreter) {
1608 // If no method data exists, go to profile_continue.
1609 __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
1610 __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
1611
1612 // Increment backedge counter in the MDO
1613 Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
1614 in_bytes(InvocationCounter::counter_offset()));
1615 Address mask(G4_scratch, in_bytes(MethodData::backedge_mask_offset()));
1616 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
1617 Assembler::notZero, &Lforward);
1618 __ ba_short(Loverflow);
1619 }
1620
1621 // If there's no MDO, increment counter in MethodCounters*
1622 __ bind(Lno_mdo);
1623 Address backedge_counter(G3_method_counters,
1624 in_bytes(MethodCounters::backedge_counter_offset()) +
1625 in_bytes(InvocationCounter::counter_offset()));
1626 Address mask(G3_method_counters, in_bytes(MethodCounters::backedge_mask_offset()));
1627 __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
1628 Assembler::notZero, &Lforward);
1629 __ bind(Loverflow);
1630
1631 // notify point for loop, pass branch bytecode
1632 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);
1633
1634 // Was an OSR adapter generated?
1635 // O0 = osr nmethod
1636 __ br_null_short(O0, Assembler::pn, Lforward);
1637
1638 // Has the nmethod been invalidated already?
1639 __ ldub(O0, nmethod::state_offset(), O2);
1640 __ cmp_and_br_short(O2, nmethod::in_use, Assembler::notEqual, Assembler::pn, Lforward);
1641
1642 // migrate the interpreter frame off of the stack
1643
1644 __ mov(G2_thread, L7);
1645 // save nmethod
1646 __ mov(O0, L6);
1647 __ set_last_Java_frame(SP, noreg);
1648 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
1649 __ reset_last_Java_frame();
1650 __ mov(L7, G2_thread);
1651
1652 // move OSR nmethod to I1
1653 __ mov(L6, I1);
1654
1655 // OSR buffer to I0
1656 __ mov(O0, I0);
1657
1658 // remove the interpreter frame
1659 __ restore(I5_savedSP, 0, SP);
1660
1661 // Jump to the osr code.
1662 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1663 __ jmp(O2, G0);
1664 __ delayed()->nop();
1665
1666 } else { // not TieredCompilation
1667 // Update Backedge branch separately from invocations
1668 const Register G4_invoke_ctr = G4;
1669 __ increment_backedge_counter(G3_method_counters, G4_invoke_ctr, G1_scratch);
1670 if (ProfileInterpreter) {
1671 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_method_counters, G1_scratch, Lforward);
1672 if (UseOnStackReplacement) {
1673
1674 __ test_backedge_count_for_osr(O2_bumped_count, G3_method_counters, l_cur_bcp, G1_scratch);
1675 }
1676 } else {
1677 if (UseOnStackReplacement) {
1678 __ test_backedge_count_for_osr(G4_invoke_ctr, G3_method_counters, l_cur_bcp, G1_scratch);
1679 }
1680 }
1681 }
1682
1683 __ bind(Lforward);
1684 } else
1685 // Bump bytecode pointer by displacement (take the branch)
1686 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
1687
1688 // continue with bytecode @ target
1689 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1690 // %%%%% and changing dispatch_next to dispatch_only
1691 __ dispatch_next(vtos);
1692 }
1693
1694
1695 // Note: the Condition argument below is TemplateTable::Condition,
1696 // which is class-scoped (not Assembler::Condition).
1697
1698 void TemplateTable::if_0cmp(Condition cc) {
1699 // no pointers, integer only!
1700 transition(itos, vtos);
1701 // assume branch is more often taken than not (loops use backward branches)
1702 __ cmp( Otos_i, 0);
1703 __ if_cmp(ccNot(cc), false);
1704 }
1705
1706
1707 void TemplateTable::if_icmp(Condition cc) {
1708 transition(itos, vtos);
1709 __ pop_i(O1);
1710 __ cmp(O1, Otos_i);
1711 __ if_cmp(ccNot(cc), false);
1712 }
1713
1714
1715 void TemplateTable::if_nullcmp(Condition cc) {
1716 transition(atos, vtos);
1717 __ tst(Otos_i);
1718 __ if_cmp(ccNot(cc), true);
1719 }
1720
1721
1722 void TemplateTable::if_acmp(Condition cc) {
1723 transition(atos, vtos);
1724 __ pop_ptr(O1);
1725 __ verify_oop(O1);
1726 __ verify_oop(Otos_i);
1727 __ cmp(O1, Otos_i);
1728 __ if_cmp(ccNot(cc), true);
1729 }
1730
1731
1732
1733 void TemplateTable::ret() {
1734 transition(vtos, vtos);
1735 locals_index(G3_scratch);
1736 __ access_local_returnAddress(G3_scratch, Otos_i);
1737 // Otos_i contains the bci, compute the bcp from that
1738
1739 #ifdef _LP64
1740 #ifdef ASSERT
1741 // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1742 // the result. The return address (really a BCI) was stored with an
1743 // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
1744 // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1745 // loaded value.
1746 { Label zzz ;
1747 __ set (65536, G3_scratch) ;
1748 __ cmp (Otos_i, G3_scratch) ;
1749 __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1750 __ delayed()->nop();
1751 __ stop("BCI is in the wrong register half?");
1752 __ bind (zzz) ;
1753 }
1754 #endif
1755 #endif
1756
1757 __ profile_ret(vtos, Otos_i, G4_scratch);
1758
1759 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1760 __ add(G3_scratch, Otos_i, G3_scratch);
1761 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1762 __ dispatch_next(vtos);
1763 }
1764
1765
1766 void TemplateTable::wide_ret() {
1767 transition(vtos, vtos);
1768 locals_index_wide(G3_scratch);
1769 __ access_local_returnAddress(G3_scratch, Otos_i);
1770 // Otos_i contains the bci, compute the bcp from that
1771
1772 __ profile_ret(vtos, Otos_i, G4_scratch);
1773
1774 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1775 __ add(G3_scratch, Otos_i, G3_scratch);
1776 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1777 __ dispatch_next(vtos);
1778 }
1779
1780
1781 void TemplateTable::tableswitch() {
1782 transition(itos, vtos);
1783 Label default_case, continue_execution;
1784
1785 // align bcp
1786 __ add(Lbcp, BytesPerInt, O1);
1787 __ and3(O1, -BytesPerInt, O1);
1788 // load lo, hi
1789 __ ld(O1, 1 * BytesPerInt, O2); // low bound
1790 __ ld(O1, 2 * BytesPerInt, O3); // high bound
1791 #ifdef _LP64
1792 // Sign extend the 32 bits
1793 __ sra ( Otos_i, 0, Otos_i );
1794 #endif /* _LP64 */
1795
1796 // check against lo & hi
1797 __ cmp( Otos_i, O2);
1798 __ br( Assembler::less, false, Assembler::pn, default_case);
1799 __ delayed()->cmp( Otos_i, O3 );
1800 __ br( Assembler::greater, false, Assembler::pn, default_case);
1801 // lookup dispatch offset
1802 __ delayed()->sub(Otos_i, O2, O2);
1803 __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1804 __ sll(O2, LogBytesPerInt, O2);
1805 __ add(O2, 3 * BytesPerInt, O2);
1806 __ ba(continue_execution);
1807 __ delayed()->ld(O1, O2, O2);
1808 // handle default
1809 __ bind(default_case);
1810 __ profile_switch_default(O3);
1811 __ ld(O1, 0, O2); // get default offset
1812 // continue execution
1813 __ bind(continue_execution);
1814 __ add(Lbcp, O2, Lbcp);
1815 __ dispatch_next(vtos);
1816 }
1817
1818
1819 void TemplateTable::lookupswitch() {
1820 transition(itos, itos);
1821 __ stop("lookupswitch bytecode should have been rewritten");
1822 }
1823
1824 void TemplateTable::fast_linearswitch() {
1825 transition(itos, vtos);
1826 Label loop_entry, loop, found, continue_execution;
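// Linear scan of the match/offset pairs: O3 walks the table, O4 holds the current match value,
// and O2 marks the address just past the last pair.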
1827 // align bcp
1828 __ add(Lbcp, BytesPerInt, O1);
1829 __ and3(O1, -BytesPerInt, O1);
1830 // set counter
1831 __ ld(O1, BytesPerInt, O2);
1832 __ sll(O2, LogBytesPerInt + 1, O2); // pair count -> byte size of the pairs
1833 __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
1834 __ ba(loop_entry);
1835 __ delayed()->add(O3, O2, O2); // counter now points past last pair
1836
1837 // table search
1838 __ bind(loop);
1839 __ cmp(O4, Otos_i);
1840 __ br(Assembler::equal, true, Assembler::pn, found);
1841 __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
1842 __ inc(O3, 2 * BytesPerInt);
1843
1844 __ bind(loop_entry);
1845 __ cmp(O2, O3);
1846 __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
1847 __ delayed()->ld(O3, 0, O4);
1848
1849 // default case
1850 __ ld(O1, 0, O4); // get default offset
1851 if (ProfileInterpreter) {
1852 __ profile_switch_default(O3);
1853 __ ba_short(continue_execution);
1854 }
1855
1856 // entry found -> get offset
1857 __ bind(found);
1858 if (ProfileInterpreter) {
1859 __ sub(O3, O1, O3);
1860 __ sub(O3, 2*BytesPerInt, O3);
1861 __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
1862 __ profile_switch_case(O3, O1, O2, G3_scratch);
1863
1864 __ bind(continue_execution);
1865 }
1866 __ add(Lbcp, O4, Lbcp);
1867 __ dispatch_next(vtos);
1868 }
1869
1870
1871 void TemplateTable::fast_binaryswitch() {
1872 transition(itos, vtos);
1873 // Implementation using the following core algorithm: (copied from Intel)
1874 //
1875 // int binary_search(int key, LookupswitchPair* array, int n) {
1876 // // Binary search according to "Methodik des Programmierens" by
1877 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1878 // int i = 0;
1879 // int j = n;
1880 // while (i+1 < j) {
1881 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1882 // // with Q: for all i: 0 <= i < n: key < a[i]
1883 // // where a stands for the array and assuming that the (nonexistent)
1884 // // element a[n] is infinitely big.
1885 // int h = (i + j) >> 1;
1886 // // i < h < j
1887 // if (key < array[h].fast_match()) {
1888 // j = h;
1889 // } else {
1890 // i = h;
1891 // }
1892 // }
1893 // // R: a[i] <= key < a[i+1] or Q
1894 // // (i.e., if key is within array, i is the correct index)
1895 // return i;
1896 // }
1897
1898 // register allocation
1899 assert(Otos_i == O0, "alias checking");
1900 const Register Rkey = Otos_i; // already set (tosca)
1901 const Register Rarray = O1;
1902 const Register Ri = O2;
1903 const Register Rj = O3;
1904 const Register Rh = O4;
1905 const Register Rscratch = O5;
1906
1907 const int log_entry_size = 3;
1908 const int entry_size = 1 << log_entry_size;
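// Each LookupswitchPair is a (match, offset) pair of two 4-byte ints,
// which is the 8-byte entry size assumed by log_entry_size above.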
1909
1910 Label found;
1911 // Find Array start
1912 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1913 __ and3(Rarray, -BytesPerInt, Rarray);
1914 // initialize i & j (in delay slot)
1915 __ clr( Ri );
1916
1917 // and start
1918 Label entry;
1919 __ ba(entry);
1920 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1921 // (Rj is already in the native byte-ordering.)
1922
1923 // binary search loop
1924 { Label loop;
1925 __ bind( loop );
1926 // int h = (i + j) >> 1;
1927 __ sra( Rh, 1, Rh );
1928 // if (key < array[h].fast_match()) {
1929 // j = h;
1930 // } else {
1931 // i = h;
1932 // }
1933 __ sll( Rh, log_entry_size, Rscratch );
1934 __ ld( Rarray, Rscratch, Rscratch );
1935 // (Rscratch is already in the native byte-ordering.)
1936 __ cmp( Rkey, Rscratch );
1937 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1938 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1939
1940 // while (i+1 < j)
1941 __ bind( entry );
1942 __ add( Ri, 1, Rscratch );
1943 __ cmp(Rscratch, Rj);
1944 __ br( Assembler::less, true, Assembler::pt, loop );
1945 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; shifted right by 1 at the loop head
1946 }
1947
1948 // end of binary search, result index is i (must check again!)
1949 Label default_case;
1950 Label continue_execution;
1951 if (ProfileInterpreter) {
1952 __ mov( Ri, Rh ); // Save index in i for profiling
1953 }
1954 __ sll( Ri, log_entry_size, Ri );
1955 __ ld( Rarray, Ri, Rscratch );
1956 // (Rscratch is already in the native byte-ordering.)
1957 __ cmp( Rkey, Rscratch );
1958 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1959 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1960
1961 // entry found -> j = offset
1962 __ inc( Ri, BytesPerInt );
1963 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1964 __ ld( Rarray, Ri, Rj );
1965 // (Rj is already in the native byte-ordering.)
1966
1967 if (ProfileInterpreter) {
1968 __ ba_short(continue_execution);
1969 }
1970
1971 __ bind(default_case); // fall through (if not profiling)
1972 __ profile_switch_default(Ri);
1973
1974 __ bind(continue_execution);
1975 __ add( Lbcp, Rj, Lbcp );
1976 __ dispatch_next( vtos );
1977 }
1978
1979
1980 void TemplateTable::_return(TosState state) {
1981 transition(state, state);
1982 assert(_desc->calls_vm(), "inconsistent calls_vm information");
1983
1984 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1985 assert(state == vtos, "only valid state");
1986 __ mov(G0, G3_scratch);
1987 __ access_local_ptr(G3_scratch, Otos_i);
1988 __ load_klass(Otos_i, O2);
1989 __ set(JVM_ACC_HAS_FINALIZER, G3);
1990 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
1991 __ andcc(G3, O2, G0);
1992 Label skip_register_finalizer;
1993 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
1994 __ delayed()->nop();
1995
1996 // Call out to do finalizer registration
1997 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
1998
1999 __ bind(skip_register_finalizer);
2000 }
2001
2002 __ remove_activation(state, /* throw_monitor_exception */ true);
2003
2004 // The caller's SP was adjusted upon method entry to accommodate
2005 // the callee's non-argument locals. Undo that adjustment.
2006 __ ret(); // return to caller
2007 __ delayed()->restore(I5_savedSP, G0, SP);
2008 }
2009
2010
2011 // ----------------------------------------------------------------------------
2012 // Volatile variables demand their effects be made known to all CPUs in
2013 // order. Store buffers on most chips allow reads & writes to reorder; the
2014 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2015 // memory barrier (i.e., it's not sufficient that the interpreter does not
2016 // reorder volatile references; the hardware also must not reorder them).
2017 //
2018 // According to the new Java Memory Model (JMM):
2019 // (1) All volatiles are serialized with respect to each other.
2020 // ALSO reads & writes act as acquire & release, so:
2021 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2022 // the read float up to before the read. It's OK for non-volatile memory refs
2023 // that happen before the volatile read to float down below it.
2024 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2025 // that happen BEFORE the write float down to after the write. It's OK for
2026 // non-volatile memory refs that happen after the volatile write to float up
2027 // before it.
2028 //
2029 // We only put in barriers around volatile refs (they are expensive), not
2030 // _between_ memory refs (that would require us to track the flavor of the
2031 // previous memory refs). Requirements (2) and (3) require some barriers
2032 // before volatile stores and after volatile loads. These nearly cover
2033 // requirement (1) but miss the volatile-store-volatile-load case. This final
2034 // case is placed after volatile-stores although it could just as well go
2035 // before volatile-loads.
2036 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2037 // Helper function to insert an is-volatile test and memory barrier.
2038 // All current SPARC implementations run in TSO, needing only StoreLoad.
2039 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2040 __ membar( order_constraint );
2041 }
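// A rough sketch of how the field templates below place barriers around
// volatile accesses (illustrative only, not generated code):
//
//   volatile load :  ld(...);  volatile_barrier(LoadLoad | LoadStore);
//   volatile store:  volatile_barrier(LoadStore | StoreStore);
//                    st(...);
//                    volatile_barrier(StoreLoad);
//
// Under TSO only the trailing StoreLoad barrier actually emits a membar.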
2042
2043 // ----------------------------------------------------------------------------
2044 void TemplateTable::resolve_cache_and_index(int byte_no,
2045 Register Rcache,
2046 Register index,
2047 size_t index_size) {
2048 // Depends on cpCacheOop layout!
2049 Label resolved;
2050
2051 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2052 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2053 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2054 __ br(Assembler::equal, false, Assembler::pt, resolved);
2055 __ delayed()->set((int)bytecode(), O1);
2056
2057 address entry;
2058 switch (bytecode()) {
2059 case Bytecodes::_getstatic : // fall through
2060 case Bytecodes::_putstatic : // fall through
2061 case Bytecodes::_getfield : // fall through
2062 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2063 case Bytecodes::_invokevirtual : // fall through
2064 case Bytecodes::_invokespecial : // fall through
2065 case Bytecodes::_invokestatic : // fall through
2066 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2067 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2068 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2069 default:
2070 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2071 break;
2072 }
2073 // first time invocation - must resolve first
2074 __ call_VM(noreg, entry, O1);
2075 // Update registers with resolved info
2076 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2077 __ bind(resolved);
2078 }
2079
2080 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2081 Register method,
2082 Register itable_index,
2083 Register flags,
2084 bool is_invokevirtual,
2085 bool is_invokevfinal,
2086 bool is_invokedynamic) {
2087 // Uses both G3_scratch and G4_scratch
2088 Register cache = G3_scratch;
2089 Register index = G4_scratch;
2090 assert_different_registers(cache, method, itable_index);
2091
2092 // determine constant pool cache field offsets
2093 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2094 const int method_offset = in_bytes(
2095 ConstantPoolCache::base_offset() +
2096 ((byte_no == f2_byte)
2097 ? ConstantPoolCacheEntry::f2_offset()
2098 : ConstantPoolCacheEntry::f1_offset()
2099 )
2100 );
2101 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2102 ConstantPoolCacheEntry::flags_offset());
2103 // access constant pool cache fields
2104 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2105 ConstantPoolCacheEntry::f2_offset());
2106
2107 if (is_invokevfinal) {
2108 __ get_cache_and_index_at_bcp(cache, index, 1);
2109 __ ld_ptr(Address(cache, method_offset), method);
2110 } else {
2111 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2112 resolve_cache_and_index(byte_no, cache, index, index_size);
2113 __ ld_ptr(Address(cache, method_offset), method);
2114 }
2115
2116 if (itable_index != noreg) {
2117 // pick up itable or appendix index from f2 also:
2118 __ ld_ptr(Address(cache, index_offset), itable_index);
2119 }
2120 __ ld_ptr(Address(cache, flags_offset), flags);
2121 }
2122
2123 // The Rcache register must be set before the call.
2124 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2125 Register Rcache,
2126 Register index,
2127 Register Roffset,
2128 Register Rflags,
2129 bool is_static) {
2130 assert_different_registers(Rcache, Rflags, Roffset);
2131
2132 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2133
2134 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2135 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2136 if (is_static) {
2137 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2138 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2139 __ ld_ptr( Robj, mirror_offset, Robj);
2140 }
2141 }
2142
2143 // The registers Rcache and index are expected to be set before the call.
2144 // The correct values of the Rcache and index registers are preserved.
2145 void TemplateTable::jvmti_post_field_access(Register Rcache,
2146 Register index,
2147 bool is_static,
2148 bool has_tos) {
2149 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2150
2151 if (JvmtiExport::can_post_field_access()) {
2152 // Check to see if a field access watch has been set before we take
2153 // the time to call into the VM.
2154 Label Label1;
2155 assert_different_registers(Rcache, index, G1_scratch);
2156 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2157 __ load_contents(get_field_access_count_addr, G1_scratch);
2158 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2159
2160 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2161
2162 if (is_static) {
2163 __ clr(Otos_i);
2164 } else {
2165 if (has_tos) {
2166 // save object pointer before call_VM() clobbers it
2167 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2168 } else {
2169 // Load top of stack (do not pop the value off the stack);
2170 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2171 }
2172 __ verify_oop(Otos_i);
2173 }
2174 // Otos_i: object pointer or NULL if static
2175 // Rcache: cache entry pointer
2176 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2177 Otos_i, Rcache);
2178 if (!is_static && has_tos) {
2179 __ pop_ptr(Otos_i); // restore object pointer
2180 __ verify_oop(Otos_i);
2181 }
2182 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2183 __ bind(Label1);
2184 }
2185 }
2186
2187 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2188 transition(vtos, vtos);
2189
2190 Register Rcache = G3_scratch;
2191 Register index = G4_scratch;
2192 Register Rclass = Rcache;
2193 Register Roffset= G4_scratch;
2194 Register Rflags = G1_scratch;
2195 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2196
2197 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2198 jvmti_post_field_access(Rcache, index, is_static, false);
2199 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2200
2201 if (!is_static) {
2202 pop_and_check_object(Rclass);
2203 } else {
2204 __ verify_oop(Rclass);
2205 }
2206
2207 Label exit;
2208
2209 Assembler::Membar_mask_bits membar_bits =
2210 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2211
2212 if (__ membar_has_effect(membar_bits)) {
2213 // Get volatile flag
2214 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2215 __ and3(Rflags, Lscratch, Lscratch);
2216 }
2217
2218 Label checkVolatile;
2219
2220 // compute field type
2221 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2222 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2223 // Make sure we don't need to mask Rflags after the above shift
2224 ConstantPoolCacheEntry::verify_tos_state_shift();
2225
2226 // Check atos before itos for getstatic; atos is more likely (in Queens at least)
2227 __ cmp(Rflags, atos);
2228 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2229 __ delayed() ->cmp(Rflags, itos);
2230
2231 // atos
2232 __ load_heap_oop(Rclass, Roffset, Otos_i);
2233 __ verify_oop(Otos_i);
2234 __ push(atos);
2235 if (!is_static) {
2236 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2237 }
2238 __ ba(checkVolatile);
2239 __ delayed()->tst(Lscratch);
2240
2241 __ bind(notObj);
2242
2243 // cmp(Rflags, itos);
2244 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2245 __ delayed() ->cmp(Rflags, ltos);
2246
2247 // itos
2248 __ ld(Rclass, Roffset, Otos_i);
2249 __ push(itos);
2250 if (!is_static) {
2251 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2252 }
2253 __ ba(checkVolatile);
2254 __ delayed()->tst(Lscratch);
2255
2256 __ bind(notInt);
2257
2258 // cmp(Rflags, ltos);
2259 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2260 __ delayed() ->cmp(Rflags, btos);
2261
2262 // ltos
2263 // load must be atomic
2264 __ ld_long(Rclass, Roffset, Otos_l);
2265 __ push(ltos);
2266 if (!is_static) {
2267 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2268 }
2269 __ ba(checkVolatile);
2270 __ delayed()->tst(Lscratch);
2271
2272 __ bind(notLong);
2273
2274 // cmp(Rflags, btos);
2275 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2276 __ delayed() ->cmp(Rflags, ctos);
2277
2278 // btos
2279 __ ldsb(Rclass, Roffset, Otos_i);
2280 __ push(itos);
2281 if (!is_static) {
2282 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2283 }
2284 __ ba(checkVolatile);
2285 __ delayed()->tst(Lscratch);
2286
2287 __ bind(notByte);
2288
2289 // cmp(Rflags, ctos);
2290 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2291 __ delayed() ->cmp(Rflags, stos);
2292
2293 // ctos
2294 __ lduh(Rclass, Roffset, Otos_i);
2295 __ push(itos);
2296 if (!is_static) {
2297 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2298 }
2299 __ ba(checkVolatile);
2300 __ delayed()->tst(Lscratch);
2301
2302 __ bind(notChar);
2303
2304 // cmp(Rflags, stos);
2305 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2306 __ delayed() ->cmp(Rflags, ftos);
2307
2308 // stos
2309 __ ldsh(Rclass, Roffset, Otos_i);
2310 __ push(itos);
2311 if (!is_static) {
2312 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2313 }
2314 __ ba(checkVolatile);
2315 __ delayed()->tst(Lscratch);
2316
2317 __ bind(notShort);
2318
2319
2320 // cmp(Rflags, ftos);
2321 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2322 __ delayed() ->tst(Lscratch);
2323
2324 // ftos
2325 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2326 __ push(ftos);
2327 if (!is_static) {
2328 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2329 }
2330 __ ba(checkVolatile);
2331 __ delayed()->tst(Lscratch);
2332
2333 __ bind(notFloat);
2334
2335
2336 // dtos
2337 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2338 __ push(dtos);
2339 if (!is_static) {
2340 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2341 }
2342
2343 __ bind(checkVolatile);
2344 if (__ membar_has_effect(membar_bits)) {
2345 // __ tst(Lscratch); executed in delay slot
2346 __ br(Assembler::zero, false, Assembler::pt, exit);
2347 __ delayed()->nop();
2348 volatile_barrier(membar_bits);
2349 }
2350
2351 __ bind(exit);
2352 }
2353
2354
2355 void TemplateTable::getfield(int byte_no) {
2356 getfield_or_static(byte_no, false);
2357 }
2358
2359 void TemplateTable::getstatic(int byte_no) {
2360 getfield_or_static(byte_no, true);
2361 }
2362
2363
2364 void TemplateTable::fast_accessfield(TosState state) {
2365 transition(atos, state);
2366 Register Rcache = G3_scratch;
2367 Register index = G4_scratch;
2368 Register Roffset = G4_scratch;
2369 Register Rflags = Rcache;
2370 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2371
2372 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2373 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2374
2375 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2376
2377 __ null_check(Otos_i);
2378 __ verify_oop(Otos_i);
2379
2380 Label exit;
2381
2382 Assembler::Membar_mask_bits membar_bits =
2383 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2384 if (__ membar_has_effect(membar_bits)) {
2385 // Get volatile flag
2386 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2387 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2388 }
2389
2390 switch (bytecode()) {
2391 case Bytecodes::_fast_bgetfield:
2392 __ ldsb(Otos_i, Roffset, Otos_i);
2393 break;
2394 case Bytecodes::_fast_cgetfield:
2395 __ lduh(Otos_i, Roffset, Otos_i);
2396 break;
2397 case Bytecodes::_fast_sgetfield:
2398 __ ldsh(Otos_i, Roffset, Otos_i);
2399 break;
2400 case Bytecodes::_fast_igetfield:
2401 __ ld(Otos_i, Roffset, Otos_i);
2402 break;
2403 case Bytecodes::_fast_lgetfield:
2404 __ ld_long(Otos_i, Roffset, Otos_l);
2405 break;
2406 case Bytecodes::_fast_fgetfield:
2407 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2408 break;
2409 case Bytecodes::_fast_dgetfield:
2410 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2411 break;
2412 case Bytecodes::_fast_agetfield:
2413 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2414 break;
2415 default:
2416 ShouldNotReachHere();
2417 }
2418
2419 if (__ membar_has_effect(membar_bits)) {
2420 __ btst(Lscratch, Rflags);
2421 __ br(Assembler::zero, false, Assembler::pt, exit);
2422 __ delayed()->nop();
2423 volatile_barrier(membar_bits);
2424 __ bind(exit);
2425 }
2426
2427 if (state == atos) {
2428 __ verify_oop(Otos_i); // does not blow flags!
2429 }
2430 }
2431
2432 void TemplateTable::jvmti_post_fast_field_mod() {
2433 if (JvmtiExport::can_post_field_modification()) {
2434 // Check to see if a field modification watch has been set before we take
2435 // the time to call into the VM.
2436 Label done;
2437 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2438 __ load_contents(get_field_modification_count_addr, G4_scratch);
2439 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2440 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2441 __ verify_oop(G4_scratch);
2442 __ push_ptr(G4_scratch); // put the object pointer back on tos
2443 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2444 // Save tos values before call_VM() clobbers them. Since we have
2445 // to do it for every data type, we use the saved values as the
2446 // jvalue object.
2447 switch (bytecode()) { // save tos values before call_VM() clobbers them
2448 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2449 case Bytecodes::_fast_bputfield: // fall through
2450 case Bytecodes::_fast_sputfield: // fall through
2451 case Bytecodes::_fast_cputfield: // fall through
2452 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2453 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2454 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2455 // get words in right order for use as jvalue object
2456 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2457 }
2458 // setup pointer to jvalue object
2459 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2460 // G4_scratch: object pointer
2461 // G1_scratch: cache entry pointer
2462 // G3_scratch: jvalue object on the stack
2463 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2464 switch (bytecode()) { // restore tos values
2465 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2466 case Bytecodes::_fast_bputfield: // fall through
2467 case Bytecodes::_fast_sputfield: // fall through
2468 case Bytecodes::_fast_cputfield: // fall through
2469 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2470 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2471 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2472 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2473 }
2474 __ bind(done);
2475 }
2476 }
2477
2478 // The registers Rcache and index are expected to be set before the call.
2479 // The function may destroy various registers, just not the Rcache and index registers.
2480 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2481 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2482
2483 if (JvmtiExport::can_post_field_modification()) {
2484 // Check to see if a field modification watch has been set before we take
2485 // the time to call into the VM.
2486 Label Label1;
2487 assert_different_registers(Rcache, index, G1_scratch);
2488 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2489 __ load_contents(get_field_modification_count_addr, G1_scratch);
2490 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2491
2492 // The Rcache and index registers have already been set.
2493 // This would allow this call to be eliminated, but then the Rcache and index
2494 // registers would have to be used consistently after this line.
2495 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2496
2497 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2498 if (is_static) {
2499 // Life is simple. Null out the object pointer.
2500 __ clr(G4_scratch);
2501 } else {
2502 Register Rflags = G1_scratch;
2503 // Life is harder. The stack holds the value on top, followed by the
2504 // object. We don't know the size of the value, though; it could be
2505 // one or two words depending on its type. As a result, we must find
2506 // the type to determine where the object is.
2507
2508 Label two_word, valsizeknown;
2509 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2510 __ mov(Lesp, G4_scratch);
2511 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2512 // Make sure we don't need to mask Rflags after the above shift
2513 ConstantPoolCacheEntry::verify_tos_state_shift();
2514 __ cmp(Rflags, ltos);
2515 __ br(Assembler::equal, false, Assembler::pt, two_word);
2516 __ delayed()->cmp(Rflags, dtos);
2517 __ br(Assembler::equal, false, Assembler::pt, two_word);
2518 __ delayed()->nop();
2519 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2520 __ ba_short(valsizeknown);
2521 __ bind(two_word);
2522
2523 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2524
2525 __ bind(valsizeknown);
2526 // setup object pointer
2527 __ ld_ptr(G4_scratch, 0, G4_scratch);
2528 __ verify_oop(G4_scratch);
2529 }
2530 // setup pointer to jvalue object
2531 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2532 // G4_scratch: object pointer or NULL if static
2533 // G3_scratch: cache entry pointer
2534 // G1_scratch: jvalue object on the stack
2535 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2536 G4_scratch, G3_scratch, G1_scratch);
2537 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2538 __ bind(Label1);
2539 }
2540 }
2541
2542 void TemplateTable::pop_and_check_object(Register r) {
2543 __ pop_ptr(r);
2544 __ null_check(r); // for field access must check obj.
2545 __ verify_oop(r);
2546 }
2547
2548 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2549 transition(vtos, vtos);
2550 Register Rcache = G3_scratch;
2551 Register index = G4_scratch;
2552 Register Rclass = Rcache;
2553 Register Roffset= G4_scratch;
2554 Register Rflags = G1_scratch;
2555 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2556
2557 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2558 jvmti_post_field_mod(Rcache, index, is_static);
2559 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2560
2561 Assembler::Membar_mask_bits read_bits =
2562 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2563 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
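// For a volatile store, the read_bits barrier is issued before the store and
// the write_bits (StoreLoad) barrier after it, matching the placement
// described near volatile_barrier(); under TSO only StoreLoad emits a membar.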
2564
2565 Label notVolatile, checkVolatile, exit;
2566 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2567 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2568 __ and3(Rflags, Lscratch, Lscratch);
2569
2570 if (__ membar_has_effect(read_bits)) {
2571 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2572 volatile_barrier(read_bits);
2573 __ bind(notVolatile);
2574 }
2575 }
2576
2577 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2578 // Make sure we don't need to mask Rflags after the above shift
2579 ConstantPoolCacheEntry::verify_tos_state_shift();
2580
2581 // compute field type
2582 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2583
2584 if (is_static) {
2585 // putstatic with object type most likely, check that first
2586 __ cmp(Rflags, atos);
2587 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2588 __ delayed()->cmp(Rflags, itos);
2589
2590 // atos
2591 {
2592 __ pop_ptr();
2593 __ verify_oop(Otos_i);
2594 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2595 __ ba(checkVolatile);
2596 __ delayed()->tst(Lscratch);
2597 }
2598
2599 __ bind(notObj);
2600 // cmp(Rflags, itos);
2601 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2602 __ delayed()->cmp(Rflags, btos);
2603
2604 // itos
2605 {
2606 __ pop_i();
2607 __ st(Otos_i, Rclass, Roffset);
2608 __ ba(checkVolatile);
2609 __ delayed()->tst(Lscratch);
2610 }
2611
2612 __ bind(notInt);
2613 } else {
2614 // putfield with int type most likely, check that first
2615 __ cmp(Rflags, itos);
2616 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2617 __ delayed()->cmp(Rflags, atos);
2618
2619 // itos
2620 {
2621 __ pop_i();
2622 pop_and_check_object(Rclass);
2623 __ st(Otos_i, Rclass, Roffset);
2624 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2625 __ ba(checkVolatile);
2626 __ delayed()->tst(Lscratch);
2627 }
2628
2629 __ bind(notInt);
2630 // cmp(Rflags, atos);
2631 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2632 __ delayed()->cmp(Rflags, btos);
2633
2634 // atos
2635 {
2636 __ pop_ptr();
2637 pop_and_check_object(Rclass);
2638 __ verify_oop(Otos_i);
2639 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2640 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2641 __ ba(checkVolatile);
2642 __ delayed()->tst(Lscratch);
2643 }
2644
2645 __ bind(notObj);
2646 }
2647
2648 // cmp(Rflags, btos);
2649 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2650 __ delayed()->cmp(Rflags, ltos);
2651
2652 // btos
2653 {
2654 __ pop_i();
2655 if (!is_static) pop_and_check_object(Rclass);
2656 __ stb(Otos_i, Rclass, Roffset);
2657 if (!is_static) {
2658 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2659 }
2660 __ ba(checkVolatile);
2661 __ delayed()->tst(Lscratch);
2662 }
2663
2664 __ bind(notByte);
2665 // cmp(Rflags, ltos);
2666 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2667 __ delayed()->cmp(Rflags, ctos);
2668
2669 // ltos
2670 {
2671 __ pop_l();
2672 if (!is_static) pop_and_check_object(Rclass);
2673 __ st_long(Otos_l, Rclass, Roffset);
2674 if (!is_static) {
2675 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2676 }
2677 __ ba(checkVolatile);
2678 __ delayed()->tst(Lscratch);
2679 }
2680
2681 __ bind(notLong);
2682 // cmp(Rflags, ctos);
2683 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2684 __ delayed()->cmp(Rflags, stos);
2685
2686 // ctos (char)
2687 {
2688 __ pop_i();
2689 if (!is_static) pop_and_check_object(Rclass);
2690 __ sth(Otos_i, Rclass, Roffset);
2691 if (!is_static) {
2692 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2693 }
2694 __ ba(checkVolatile);
2695 __ delayed()->tst(Lscratch);
2696 }
2697
2698 __ bind(notChar);
2699 // cmp(Rflags, stos);
2700 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2701 __ delayed()->cmp(Rflags, ftos);
2702
2703 // stos (short)
2704 {
2705 __ pop_i();
2706 if (!is_static) pop_and_check_object(Rclass);
2707 __ sth(Otos_i, Rclass, Roffset);
2708 if (!is_static) {
2709 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2710 }
2711 __ ba(checkVolatile);
2712 __ delayed()->tst(Lscratch);
2713 }
2714
2715 __ bind(notShort);
2716 // cmp(Rflags, ftos);
2717 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2718 __ delayed()->nop();
2719
2720 // ftos
2721 {
2722 __ pop_f();
2723 if (!is_static) pop_and_check_object(Rclass);
2724 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2725 if (!is_static) {
2726 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2727 }
2728 __ ba(checkVolatile);
2729 __ delayed()->tst(Lscratch);
2730 }
2731
2732 __ bind(notFloat);
2733
2734 // dtos
2735 {
2736 __ pop_d();
2737 if (!is_static) pop_and_check_object(Rclass);
2738 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2739 if (!is_static) {
2740 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2741 }
2742 }
2743
2744 __ bind(checkVolatile);
2745 __ tst(Lscratch);
2746
2747 if (__ membar_has_effect(write_bits)) {
2748 // __ tst(Lscratch); in delay slot
2749 __ br(Assembler::zero, false, Assembler::pt, exit);
2750 __ delayed()->nop();
2751 volatile_barrier(Assembler::StoreLoad);
2752 __ bind(exit);
2753 }
2754 }
2755
2756 void TemplateTable::fast_storefield(TosState state) {
2757 transition(state, vtos);
2758 Register Rcache = G3_scratch;
2759 Register Rclass = Rcache;
2760 Register Roffset= G4_scratch;
2761 Register Rflags = G1_scratch;
2762 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2763
2764 jvmti_post_fast_field_mod();
2765
2766 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2767
2768 Assembler::Membar_mask_bits read_bits =
2769 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2770 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2771
2772 Label notVolatile, checkVolatile, exit;
2773 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2774 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2775 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2776 __ and3(Rflags, Lscratch, Lscratch);
2777 if (__ membar_has_effect(read_bits)) {
2778 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2779 volatile_barrier(read_bits);
2780 __ bind(notVolatile);
2781 }
2782 }
2783
2784 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2785 pop_and_check_object(Rclass);
2786
2787 switch (bytecode()) {
2788 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2789 case Bytecodes::_fast_cputfield: /* fall through */
2790 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2791 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2792 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2793 case Bytecodes::_fast_fputfield:
2794 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2795 break;
2796 case Bytecodes::_fast_dputfield:
2797 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2798 break;
2799 case Bytecodes::_fast_aputfield:
2800 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2801 break;
2802 default:
2803 ShouldNotReachHere();
2804 }
2805
2806 if (__ membar_has_effect(write_bits)) {
2807 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2808 volatile_barrier(Assembler::StoreLoad);
2809 __ bind(exit);
2810 }
2811 }
2812
2813
2814 void TemplateTable::putfield(int byte_no) {
2815 putfield_or_static(byte_no, false);
2816 }
2817
2818 void TemplateTable::putstatic(int byte_no) {
2819 putfield_or_static(byte_no, true);
2820 }
2821
2822
2823 void TemplateTable::fast_xaccess(TosState state) {
2824 transition(vtos, state);
2825 Register Rcache = G3_scratch;
2826 Register Roffset = G4_scratch;
2827 Register Rflags = G4_scratch;
2828 Register Rreceiver = Lscratch;
2829
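// Illustrative note: this template backs the rewritten (aload_0, fast getfield)
// pairs (_fast_<x>access_0), so local 0 holds the receiver and the constant
// pool cache index is read from bcp + 2.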
2830 __ ld_ptr(Llocals, 0, Rreceiver);
2831
2832 // access constant pool cache (is resolved)
2833 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2834 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2835 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2836
2837 __ verify_oop(Rreceiver);
2838 __ null_check(Rreceiver);
2839 if (state == atos) {
2840 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2841 } else if (state == itos) {
2842 __ ld (Rreceiver, Roffset, Otos_i) ;
2843 } else if (state == ftos) {
2844 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2845 } else {
2846 ShouldNotReachHere();
2847 }
2848
2849 Assembler::Membar_mask_bits membar_bits =
2850 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2851 if (__ membar_has_effect(membar_bits)) {
2852
2853 // Get is_volatile value in Rflags and check if membar is needed
2854 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2855
2856 // Test volatile
2857 Label notVolatile;
2858 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2859 __ btst(Rflags, Lscratch);
2860 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2861 __ delayed()->nop();
2862 volatile_barrier(membar_bits);
2863 __ bind(notVolatile);
2864 }
2865
2866 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2867 __ sub(Lbcp, 1, Lbcp);
2868 }
2869
2870 //----------------------------------------------------------------------------------------------------
2871 // Calls
2872
2873 void TemplateTable::count_calls(Register method, Register temp) {
2874 // implemented elsewhere
2875 ShouldNotReachHere();
2876 }
2877
2878 void TemplateTable::prepare_invoke(int byte_no,
2879 Register method, // linked method (or i-klass)
2880 Register ra, // return address
2881 Register index, // itable index, MethodType, etc.
2882 Register recv, // if caller wants to see it
2883 Register flags // if caller wants to test it
2884 ) {
2885 // determine flags
2886 const Bytecodes::Code code = bytecode();
2887 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2888 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2889 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2890 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2891 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2892 const bool load_receiver = (recv != noreg);
2893 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2894 assert(recv == noreg || recv == O0, "");
2895 assert(flags == noreg || flags == O1, "");
2896
2897 // setup registers & access constant pool cache
2898 if (recv == noreg) recv = O0;
2899 if (flags == noreg) flags = O1;
2900 const Register temp = O2;
2901 assert_different_registers(method, ra, index, recv, flags, temp);
2902
2903 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2904
2905 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2906
2907 // maybe push appendix to arguments
2908 if (is_invokedynamic || is_invokehandle) {
2909 Label L_no_push;
2910 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
2911 __ btst(flags, temp);
2912 __ br(Assembler::zero, false, Assembler::pt, L_no_push);
2913 __ delayed()->nop();
2914 // Push the appendix as a trailing parameter.
2915 // This must be done before we get the receiver,
2916 // since the parameter_size includes it.
2917 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2918 __ load_resolved_reference_at_index(temp, index);
2919 __ verify_oop(temp);
2920 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
2921 __ bind(L_no_push);
2922 }
2923
2924 // load receiver if needed (after appendix is pushed so parameter size is correct)
2925 if (load_receiver) {
2926 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
2927 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
2928 __ verify_oop(recv);
2929 }
2930
2931 // compute return type
2932 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
2933 // Make sure we don't need to mask flags after the above shift
2934 ConstantPoolCacheEntry::verify_tos_state_shift();
2935 // load return address
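// In effect (illustrative): ra = Interpreter::invoke_return_entry_table_for(code)[tos_state],
// where ra currently holds the tos state computed above.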
2936 {
2937 const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
2938 AddressLiteral table(table_addr);
2939 __ set(table, temp);
2940 __ sll(ra, LogBytesPerWord, ra);
2941 __ ld_ptr(Address(temp, ra), ra);
2942 }
2943 }
2944
2945
2946 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2947 Register Rcall = Rindex;
2948 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2949
2950 // get target Method* & entry point
2951 __ lookup_virtual_method(Rrecv, Rindex, G5_method);
2952 __ profile_arguments_type(G5_method, Rcall, Gargs, true);
2953 __ call_from_interpreter(Rcall, Gargs, Rret);
2954 }
2955
2956 void TemplateTable::invokevirtual(int byte_no) {
2957 transition(vtos, vtos);
2958 assert(byte_no == f2_byte, "use this argument");
2959
2960 Register Rscratch = G3_scratch;
2961 Register Rtemp = G4_scratch;
2962 Register Rret = Lscratch;
2963 Register O0_recv = O0;
2964 Label notFinal;
2965
2966 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2967 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2968
2969 // Check for vfinal
2970 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
2971 __ btst(Rret, G4_scratch);
2972 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2973 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2974
2975 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2976
2977 invokevfinal_helper(Rscratch, Rret);
2978
2979 __ bind(notFinal);
2980
2981 __ mov(G5_method, Rscratch); // better scratch register
2982 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop
2983 // receiver is in O0_recv
2984 __ verify_oop(O0_recv);
2985
2986 // get return address
2987 AddressLiteral table(Interpreter::invoke_return_entry_table());
2988 __ set(table, Rtemp);
2989 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
2990 // Make sure we don't need to mask Rret after the above shift
2991 ConstantPoolCacheEntry::verify_tos_state_shift();
2992 __ sll(Rret, LogBytesPerWord, Rret);
2993 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2994
2995 // get receiver klass
2996 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
2997 __ load_klass(O0_recv, O0_recv);
2998 __ verify_klass_ptr(O0_recv);
2999
3000 __ profile_virtual_call(O0_recv, O4);
3001
3002 generate_vtable_call(O0_recv, Rscratch, Rret);
3003 }
3004
3005 void TemplateTable::fast_invokevfinal(int byte_no) {
3006 transition(vtos, vtos);
3007 assert(byte_no == f2_byte, "use this argument");
3008
3009 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3010 /*is_invokevfinal*/true, false);
3011 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3012 invokevfinal_helper(G3_scratch, Lscratch);
3013 }
3014
3015 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3016 Register Rtemp = G4_scratch;
3017
3018 // Load receiver from stack slot
3019 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
3020 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
3021 __ load_receiver(G4_scratch, O0);
3022
3023 // receiver NULL check
3024 __ null_check(O0);
3025
3026 __ profile_final_call(O4);
3027 __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
3028
3029 // get return address
3030 AddressLiteral table(Interpreter::invoke_return_entry_table());
3031 __ set(table, Rtemp);
3032 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3033 // Make sure we don't need to mask Rret after the above shift
3034 ConstantPoolCacheEntry::verify_tos_state_shift();
3035 __ sll(Rret, LogBytesPerWord, Rret);
3036 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3037
3038
3039 // do the call
3040 __ call_from_interpreter(Rscratch, Gargs, Rret);
3041 }
3042
3043
3044 void TemplateTable::invokespecial(int byte_no) {
3045 transition(vtos, vtos);
3046 assert(byte_no == f1_byte, "use this argument");
3047
3048 const Register Rret = Lscratch;
3049 const Register O0_recv = O0;
3050 const Register Rscratch = G3_scratch;
3051
3052 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check
3053 __ null_check(O0_recv);
3054
3055 // do the call
3056 __ profile_call(O4);
3057 __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3058 __ call_from_interpreter(Rscratch, Gargs, Rret);
3059 }
3060
3061
3062 void TemplateTable::invokestatic(int byte_no) {
3063 transition(vtos, vtos);
3064 assert(byte_no == f1_byte, "use this argument");
3065
3066 const Register Rret = Lscratch;
3067 const Register Rscratch = G3_scratch;
3068
3069 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method*
3070
3071 // do the call
3072 __ profile_call(O4);
3073 __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3074 __ call_from_interpreter(Rscratch, Gargs, Rret);
3075 }
3076
3077 void TemplateTable::invokeinterface_object_method(Register RKlass,
3078 Register Rcall,
3079 Register Rret,
3080 Register Rflags) {
3081 Register Rscratch = G4_scratch;
3082 Register Rindex = Lscratch;
3083
3084 assert_different_registers(Rscratch, Rindex, Rret);
3085
3086 Label notFinal;
3087
3088 // Check for vfinal
3089 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
3090 __ btst(Rflags, Rscratch);
3091 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3092 __ delayed()->nop();
3093
3094 __ profile_final_call(O4);
3095
3096 // do the call - the index (f2) contains the Method*
3097 assert_different_registers(G5_method, Gargs, Rcall);
3098 __ mov(Rindex, G5_method);
3099 __ profile_arguments_type(G5_method, Rcall, Gargs, true);
3100 __ call_from_interpreter(Rcall, Gargs, Rret);
3101 __ bind(notFinal);
3102
3103 __ profile_virtual_call(RKlass, O4);
3104 generate_vtable_call(RKlass, Rindex, Rret);
3105 }
3106
3107
3108 void TemplateTable::invokeinterface(int byte_no) {
3109 transition(vtos, vtos);
3110 assert(byte_no == f1_byte, "use this argument");
3111
3112 const Register Rinterface = G1_scratch;
3113 const Register Rret = G3_scratch;
3114 const Register Rindex = Lscratch;
3115 const Register O0_recv = O0;
3116 const Register O1_flags = O1;
3117 const Register O2_Klass = O2;
3118 const Register Rscratch = G4_scratch;
3119 assert_different_registers(Rscratch, G5_method);
3120
3121 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3122
3123 // get receiver klass
3124 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3125 __ load_klass(O0_recv, O2_Klass);
3126
3127 // Special case of invokeinterface called for virtual method of
3128 // java.lang.Object. See cpCacheOop.cpp for details.
3129 // This code isn't produced by javac, but could be produced by
3130 // another compliant Java compiler.
3131 Label notMethod;
3132 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3133 __ btst(O1_flags, Rscratch);
3134 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3135 __ delayed()->nop();
3136
3137 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3138
3139 __ bind(notMethod);
3140
3141 __ profile_virtual_call(O2_Klass, O4);
3142
3143 //
3144 // find entry point to call
3145 //
3146
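// The search below is roughly (illustrative pseudo-code, not generated code):
//
//   ioe = (itableOffsetEntry*)(klass + vtable_start + vtable_length * wordSize);
//   while (true) {
//     if (ioe->interface() == NULL)       throw IncompatibleClassChangeError;
//     if (ioe->interface() == Rinterface) break;
//     ioe++;
//   }
//   Method* m = *(Method**)(klass + ioe->offset() + itable_index * entry_size);
//   if (m == NULL) throw AbstractMethodError;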
3147 // compute start of first itableOffsetEntry (which is at end of vtable)
3148 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3149 Label search;
3150 Register Rtemp = O1_flags;
3151
3152 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3153 if (align_object_offset(1) > 1) {
3154 __ round_to(Rtemp, align_object_offset(1));
3155 }
3156 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3157 if (Assembler::is_simm13(base)) {
3158 __ add(Rtemp, base, Rtemp);
3159 } else {
3160 __ set(base, Rscratch);
3161 __ add(Rscratch, Rtemp, Rtemp);
3162 }
3163 __ add(O2_Klass, Rtemp, Rscratch);
3164
3165 __ bind(search);
3166
3167 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3168 {
3169 Label ok;
3170
3171 // Check that entry is non-null. Null entries are probably a bytecode
3172 // problem. If the interface isn't implemented by the receiver class,
3173 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3174 // this too but that's only if the entry isn't already resolved, so we
3175 // need to check again.
3176 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3177 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3178 __ should_not_reach_here();
3179 __ bind(ok);
3180 }
3181
3182 __ cmp(Rinterface, Rtemp);
3183 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3184 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3185
3186 // entry found and Rscratch points to it
3187 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3188
3189 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3190 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= itableMethodEntry size in bytes
3191 __ add(Rscratch, Rindex, Rscratch);
3192 __ ld_ptr(O2_Klass, Rscratch, G5_method);
3193
3194 // Check for abstract method error.
3195 {
3196 Label ok;
3197 __ br_notnull_short(G5_method, Assembler::pt, ok);
3198 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3199 __ should_not_reach_here();
3200 __ bind(ok);
3201 }
3202
3203 Register Rcall = Rinterface;
3204 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3205
3206 __ profile_arguments_type(G5_method, Rcall, Gargs, true);
3207 __ call_from_interpreter(Rcall, Gargs, Rret);
3208 }
3209
3210 void TemplateTable::invokehandle(int byte_no) {
3211 transition(vtos, vtos);
3212 assert(byte_no == f1_byte, "use this argument");
3213
3214 const Register Rret = Lscratch;
3215 const Register G4_mtype = G4_scratch;
3216 const Register O0_recv = O0;
3217 const Register Rscratch = G3_scratch;
3218
3219 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3220 __ null_check(O0_recv);
3221
3222 // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
3223 // G5: MH.invokeExact_MT method (from f2)
3224
3225 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3226
3227 // do the call
3228 __ verify_oop(G4_mtype);
3229 __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3230 __ profile_arguments_type(G5_method, Rscratch, Gargs, true);
3231 __ call_from_interpreter(Rscratch, Gargs, Rret);
3232 }
3233
3234
3235 void TemplateTable::invokedynamic(int byte_no) {
3236 transition(vtos, vtos);
3237 assert(byte_no == f1_byte, "use this argument");
3238
3239 const Register Rret = Lscratch;
3240 const Register G4_callsite = G4_scratch;
3241 const Register Rscratch = G3_scratch;
3242
3243 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3244
3245 // G4: CallSite object (from cpool->resolved_references[f1])
3246 // G5: MH.linkToCallSite method (from f2)
3247
3248 // Note: G4_callsite is already pushed by prepare_invoke
3249
3250 // %%% should make a type profile for any invokedynamic that takes a ref argument
3251 // profile this call
3252 __ profile_call(O4);
3253
3254 // do the call
3255 __ verify_oop(G4_callsite);
3256 __ profile_arguments_type(G5_method, Rscratch, Gargs, false);
3257 __ call_from_interpreter(Rscratch, Gargs, Rret);
3258 }
3259
3260
3261 //----------------------------------------------------------------------------------------------------
3262 // Allocation
3263
3264 void TemplateTable::_new() {
3265 transition(vtos, atos);
3266
3267 Label slow_case;
3268 Label done;
3269 Label initialize_header;
3270 Label initialize_object; // including clearing the fields
3271
3272 Register RallocatedObject = Otos_i;
3273 Register RinstanceKlass = O1;
3274 Register Roffset = O3;
3275 Register Rscratch = O4;
3276
3277 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3278 __ get_cpool_and_tags(Rscratch, G3_scratch);
3279 // make sure the class we're about to instantiate has been resolved
3280 // This is done before loading the InstanceKlass to be consistent with the order
3281 // in which the constant pool is updated (see ConstantPool::klass_at_put)
3282 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3283 __ ldub(G3_scratch, Roffset, G3_scratch);
3284 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3285 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3286 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3287 // get InstanceKlass
3288 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3289 __ add(Roffset, sizeof(ConstantPool), Roffset);
3290 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3291
3292 // make sure klass is fully initialized:
3293 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3294 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3295 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3296 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3297
3298 // get instance_size in InstanceKlass (already aligned)
3299 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3300
3301 // make sure the klass has no finalizer and is not abstract, an interface, or java/lang/Class
3302 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3303 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3304 __ delayed()->nop();
3305
3306 // allocate the instance
3307 // 1) Try to allocate in the TLAB
3308 // 2) if that fails, and the TLAB is not full enough to discard, allocate in the shared Eden
3309 // 3) if the above fails (or is not applicable), go to a slow case
3310 // (creates a new TLAB, etc.)
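//
// The TLAB fast path below is essentially (illustrative pseudo-code):
//   obj     = thread->tlab_top();
//   new_top = obj + instance_size;
//   if (new_top <= thread->tlab_end()) {
//     thread->set_tlab_top(new_top);   // thread-local, so no CAS needed
//     goto initialize;                 // header/fields are initialized below
//   }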
3311
3312 const bool allow_shared_alloc =
3313 Universe::heap()->supports_inline_contig_alloc();
3314
3315 if(UseTLAB) {
3316 Register RoldTopValue = RallocatedObject;
3317 Register RtlabWasteLimitValue = G3_scratch;
3318 Register RnewTopValue = G1_scratch;
3319 Register RendValue = Rscratch;
3320 Register RfreeValue = RnewTopValue;
3321
3322 // check if we can allocate in the TLAB
3323 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3324 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3325 __ add(RoldTopValue, Roffset, RnewTopValue);
3326
3327 // if there is enough space, we do not CAS and do not clear
3328 __ cmp(RnewTopValue, RendValue);
3329 if(ZeroTLAB) {
3330 // the fields have already been cleared
3331 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3332 } else {
3333 // initialize both the header and fields
3334 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3335 }
3336 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3337
3338 if (allow_shared_alloc) {
3339 // Check if tlab should be discarded (refill_waste_limit >= free)
3340 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3341 __ sub(RendValue, RoldTopValue, RfreeValue);
3342 #ifdef _LP64
3343 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3344 #else
3345 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3346 #endif
3347 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3348
3349 // increment waste limit to prevent getting stuck on this slow path
3350 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3351 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3352 } else {
3353 // No allocation in the shared eden.
3354 __ ba_short(slow_case);
3355 }
3356 }
3357
3358 // Allocation in the shared Eden
3359 if (allow_shared_alloc) {
3360 Register RoldTopValue = G1_scratch;
3361 Register RtopAddr = G3_scratch;
3362 Register RnewTopValue = RallocatedObject;
3363 Register RendValue = Rscratch;
3364
3365 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3366
3367 Label retry;
3368 __ bind(retry);
3369 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3370 __ ld_ptr(RendValue, 0, RendValue);
3371 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3372 __ add(RoldTopValue, Roffset, RnewTopValue);
3373
3374 // RnewTopValue contains the top address after the new object
3375 // has been allocated.
3376 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3377
3378 __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
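// cas_ptr leaves the old top value in RnewTopValue.  On success this
// equals RoldTopValue, i.e. the start of the new object, which is why
// RnewTopValue was aliased to RallocatedObject above.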
3379
3380 // if someone beat us on the allocation, try again, otherwise continue
3381 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3382
3383 // bump total bytes allocated by this thread
3384 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3385 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3386 }
3387
3388 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3389 // clear object fields
3390 __ bind(initialize_object);
3391 __ deccc(Roffset, sizeof(oopDesc));
3392 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3393 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
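// Roffset now holds the object size minus the header (the field bytes to
// clear) and G3_scratch points just past the header.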
3394
3395 // initialize remaining object fields
3396 if (UseBlockZeroing) {
3397 // Use BIS for zeroing
3398 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3399 } else {
3400 Label loop;
3401 __ subcc(Roffset, wordSize, Roffset);
3402 __ bind(loop);
3403 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3404 __ st_ptr(G0, G3_scratch, Roffset);
3405 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3406 __ delayed()->subcc(Roffset, wordSize, Roffset);
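// Each pass stores a zero word at G3_scratch + Roffset; the subcc in the
// delay slot steps Roffset down a word and sets the condition codes for
// the next loop test, so the fields are cleared from the last word down
// to offset 0.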
3407 }
3408 __ ba_short(initialize_header);
3409 }
3410
3411 // slow case
3412 __ bind(slow_case);
3413 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3414 __ get_constant_pool(O1);
3415
3416 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
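// The runtime returns a fully initialized object in Otos_i, so the inline
// header/field initialization below is skipped.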
3417
3418 __ ba_short(done);
3419
3420 // Initialize the header: mark, klass
3421 __ bind(initialize_header);
3422
3423 if (UseBiasedLocking) {
3424 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
3425 } else {
3426 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3427 }
3428 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3429 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3430 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
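// Storing the klass pointer last matters for concurrent collectors such
// as CMS: once the klass becomes visible the object may be parsed
// concurrently, so the mark word and klass gap must already be in place.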
3431
3432 {
3433 SkipIfEqual skip_if(
3434 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3435 // Trigger dtrace event
3436 __ push(atos);
3437 __ call_VM_leaf(noreg,
3438 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3439 __ pop(atos);
3440 }
3441
3442 // continue
3443 __ bind(done);
3444 }
3445
3446
3447
3448 void TemplateTable::newarray() {
3449 transition(itos, atos);
3450 __ ldub(Lbcp, 1, O1);
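// O1 now holds the element type code (the atype operand of the bytecode).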
3451 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3452 }
3453
3454
3455 void TemplateTable::anewarray() {
3456 transition(itos, atos);
3457 __ get_constant_pool(O1);
3458 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3459 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3460 }
3461
3462
3463 void TemplateTable::arraylength() {
3464 transition(atos, itos);
3465 Label ok;
3466 __ verify_oop(Otos_i);
3467 __ tst(Otos_i);
3468 __ throw_if_not_1_x( Assembler::notZero, ok );
3469 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
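// The delayed load picks up the array length on the non-NULL path; a NULL
// reference falls through to the NullPointerException throw below.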
3470 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3471 }
3472
3473
3474 void TemplateTable::checkcast() {
3475 transition(atos, atos);
3476 Label done, is_null, quicked, cast_ok, resolved;
3477 Register Roffset = G1_scratch;
3478 Register RobjKlass = O5;
3479 Register RspecifiedKlass = O4;
3480
3481 // Check for casting a NULL
3482 __ br_null_short(Otos_i, Assembler::pn, is_null);
3483
3484 // Get value klass in RobjKlass
3485 __ load_klass(Otos_i, RobjKlass); // get value klass
3486
3487 // Get the constant pool index of the class operand
3488 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3489
3490 // See if the checkcast has been quickened
3491 __ get_cpool_and_tags(Lscratch, G3_scratch);
3492 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3493 __ ldub(G3_scratch, Roffset, G3_scratch);
3494 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3495 __ br(Assembler::equal, true, Assembler::pt, quicked);
3496 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
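// If the tag is already JVM_CONSTANT_Class the entry is resolved and the
// Klass* can be read straight out of the constant pool (the quicked path
// below); the annulled delay slot scales the cp index to a byte offset
// only when that branch is taken.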
3497
3498 __ push_ptr(); // save receiver for result, and for GC
3499 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3500 __ get_vm_result_2(RspecifiedKlass);
3501 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3502
3503 __ ba_short(resolved);
3504
3505 // Extract target class from constant pool
3506 __ bind(quicked);
3507 __ add(Roffset, sizeof(ConstantPool), Roffset);
3508 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3509 __ bind(resolved);
3510 __ load_klass(Otos_i, RobjKlass); // get value klass
3511
3512 // Generate a fast subtype check. Branch to cast_ok if no
3513 // failure. Throw exception if failure.
3514 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3515
3516 // Not a subtype; so must throw exception
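// Assembler::never makes the conditional throw unconditional: control
// only falls through to this point when the subtype check has failed.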
3517 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3518
3519 __ bind(cast_ok);
3520
3521 if (ProfileInterpreter) {
3522 __ ba_short(done);
3523 }
3524 __ bind(is_null);
3525 __ profile_null_seen(G3_scratch);
3526 __ bind(done);
3527 }
3528
3529
3530 void TemplateTable::instanceof() {
3531 Label done, is_null, quicked, resolved;
3532 transition(atos, itos);
3533 Register Roffset = G1_scratch;
3534 Register RobjKlass = O5;
3535 Register RspecifiedKlass = O4;
3536
3537 // Check for casting a NULL
3538 __ br_null_short(Otos_i, Assembler::pt, is_null);
3539
3540 // Get value klass in RobjKlass
3541 __ load_klass(Otos_i, RobjKlass); // get value klass
3542
3543 // Get the constant pool index of the class operand
3544 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3545
3546 // See if the instanceof has been quickened
3547 __ get_cpool_and_tags(Lscratch, G3_scratch);
3548 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3549 __ ldub(G3_scratch, Roffset, G3_scratch);
3550 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3551 __ br(Assembler::equal, true, Assembler::pt, quicked);
3552 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
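// Same quickening check as in checkcast(): a resolved entry is read
// straight from the constant pool below, otherwise quicken_io_cc is
// called to resolve it.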
3553
3554 __ push_ptr(); // save receiver for result, and for GC
3555 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3556 __ get_vm_result_2(RspecifiedKlass);
3557 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3558
3559 __ ba_short(resolved);
3560
3561 // Extract target class from constant pool
3562 __ bind(quicked);
3563 __ add(Roffset, sizeof(ConstantPool), Roffset);
3564 __ get_constant_pool(Lscratch);
3565 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3566 __ bind(resolved);
3567 __ load_klass(Otos_i, RobjKlass); // get value klass
3568
3569 // Generate a fast subtype check.  Branch to done if no
3570 // failure.  Return 0 if failure.
3571 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3572 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3573 // Not a subtype; return 0;
3574 __ clr( Otos_i );
3575
3576 if (ProfileInterpreter) {
3577 __ ba_short(done);
3578 }
3579 __ bind(is_null);
3580 __ profile_null_seen(G3_scratch);
3581 __ bind(done);
3582 }
3583
3584 void TemplateTable::_breakpoint() {
3585
3586 // Note: We get here even if we are single stepping.
3587 // jbug insists on setting breakpoints at every bytecode
3588 // even if we are in single step mode.
3589
3590 transition(vtos, vtos);
3591 // get the unpatched byte code
3592 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3593 __ mov(O0, Lbyte_code);
3594
3595 // post the breakpoint event
3596 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3597
3598 // complete the execution of original bytecode
3599 __ dispatch_normal(vtos);
3600 }
3601
3602
3603 //----------------------------------------------------------------------------------------------------
3604 // Exceptions
3605
3606 void TemplateTable::athrow() {
3607 transition(atos, vtos);
3608
3609 // This works because the exception is cached in Otos_i, which is the same as O0,
3610 // which in turn is the register that throw_exception_entry expects.
3611 assert(Otos_i == Oexception, "see explanation above");
3612
3613 __ verify_oop(Otos_i);
3614 __ null_check(Otos_i);
3615 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3616 }
3617
3618
3619 //----------------------------------------------------------------------------------------------------
3620 // Synchronization
3621
3622
3623 // See frame_sparc.hpp for monitor block layout.
3624 // Monitor elements are dynamically allocated by growing stack as needed.
3625
3626 void TemplateTable::monitorenter() {
3627 transition(atos, vtos);
3628 __ verify_oop(Otos_i);
3629 // Try to acquire a lock on the object
3630 // Repeat until successful (i.e., until
3631 // monitorenter returns true).
3632
3633 { Label ok;
3634 __ tst(Otos_i);
3635 __ throw_if_not_1_x( Assembler::notZero, ok);
3636 __ delayed()->mov(Otos_i, Lscratch); // save obj
3637 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3638 }
3639
3640 assert(O0 == Otos_i, "Be sure where the object to lock is");
3641
3642 // find a free slot in the monitor block
3643
3644
3645 // initialize entry pointer
3646 __ clr(O1); // points to free slot or NULL
3647
3648 {
3649 Label entry, loop, exit;
3650 __ add( __ top_most_monitor(), O2 ); // last one to check
3651 __ ba( entry );
3652 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3653
3654
3655 __ bind( loop );
3656
3657 __ verify_oop(O4); // verify each monitor's oop
3658 __ tst(O4); // is this entry unused?
3659 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
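// movcc copies the current entry pointer (O3) into O1 when the entry's
// obj field (O4) is NULL, so O1 remembers a free slot if one was seen.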
3660
3661 __ cmp(O4, O0); // check if current entry is for same object
3662 __ brx( Assembler::equal, false, Assembler::pn, exit );
3663 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3664
3665 __ bind( entry );
3666
3667 __ cmp( O3, O2 );
3668 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3669 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3670
3671 __ bind( exit );
3672 }
3673
3674 { Label allocated;
3675
3676 // found free slot?
3677 __ br_notnull_short(O1, Assembler::pn, allocated);
3678
3679 __ add_monitor_to_stack( false, O2, O3 );
3680 __ mov(Lmonitors, O1);
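// No free slot was found: add_monitor_to_stack grows the monitor block by
// one entry and Lmonitors then points at the new slot, which becomes ours.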
3681
3682 __ bind(allocated);
3683 }
3684
3685 // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
3686 // The object has already been popped from the stack, so the expression stack looks correct.
3687 __ inc(Lbcp);
3688
3689 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3690 __ lock_object(O1, O0);
3691
3692 // check if there's enough space on the stack for the monitors after locking
3693 __ generate_stack_overflow_check(0);
3694
3695 // The bcp has already been incremented. Just need to dispatch to next instruction.
3696 __ dispatch_next(vtos);
3697 }
3698
3699
3700 void TemplateTable::monitorexit() {
3701 transition(atos, vtos);
3702 __ verify_oop(Otos_i);
3703 __ tst(Otos_i);
3704 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3705
3706 assert(O0 == Otos_i, "just checking");
3707
3708 { Label entry, loop, found;
3709 __ add( __ top_most_monitor(), O2 ); // last one to check
3710 __ ba(entry);
3711 // Use Lscratch to hold the monitor element to check, starting with the most recent monitor.
3712 // By using a local register it survives the call to the C routine.
3713 __ delayed()->mov( Lmonitors, Lscratch );
3714
3715 __ bind( loop );
3716
3717 __ verify_oop(O4); // verify each monitor's oop
3718 __ cmp(O4, O0); // check if current entry is for desired object
3719 __ brx( Assembler::equal, true, Assembler::pt, found );
3720 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3721
3722 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3723
3724 __ bind( entry );
3725
3726 __ cmp( Lscratch, O2 );
3727 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3728 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3729
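// Falling out of the scan means no monitor in this frame locks the
// object: this is an unbalanced monitorexit, so throw.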
3730 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3731 __ should_not_reach_here();
3732
3733 __ bind(found);
3734 }
3735 __ unlock_object(O1);
3736 }
3737
3738
3739 //----------------------------------------------------------------------------------------------------
3740 // Wide instructions
3741
3742 void TemplateTable::wide() {
3743 transition(vtos, vtos);
3744 __ ldub(Lbcp, 1, G3_scratch);  // get next bc
3745 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3746 AddressLiteral ep(Interpreter::_wentry_point);
3747 __ set(ep, G4_scratch);
3748 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
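// G3_scratch now holds the entry point of the wide variant of the next
// bytecode.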
3749 __ jmp(G3_scratch, G0);
3750 __ delayed()->nop();
3751 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3752 }
3753
3754
3755 //----------------------------------------------------------------------------------------------------
3756 // Multi arrays
3757
3758 void TemplateTable::multianewarray() {
3759 transition(vtos, atos);
3760 // put ndims * wordSize into Lscratch
3761 __ ldub( Lbcp, 3, Lscratch);
3762 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3763 // Lesp points past last_dim, so set O1 to the first_dim address
3764 __ add( Lesp, Lscratch, O1);
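// O1 now points at the first dimension word on the expression stack; the
// runtime re-reads the number of dimensions from the bytecode itself.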
3765 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3766 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3767 }
3768 #endif /* !CC_INTERP */
--- EOF ---