static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators) {
  __ load_heap_oop(dst, src, r10, r1, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no)
{
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_qputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movw(bc_reg, bc);
      __ cbzw(temp_reg, L_patch_done); // don't patch
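In other words, the quickened form is only installed once resolution has filled in a non-zero put_code. A minimal sketch of the guard, assuming `put_code` stands for the bytecode byte read back from the ConstantPoolCacheEntry (the name is illustrative):

static bool should_patch(unsigned put_code) {
  return put_code != 0; // zero means still unresolved: keep calling resolve_get_put
}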
// ...

void TemplateTable::daload()
{
  transition(itos, dtos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(3)), noreg, noreg);
}
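The add-then-scale idiom above folds the array header into the index before the scaled addressing mode applies the shift; it works because the header offset is a multiple of the element size. A minimal standalone sketch of the arithmetic, assuming an illustrative 16-byte header:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t base_offset = 16;  // illustrative base_offset_in_bytes(T_DOUBLE)
  const unsigned  shift       = 3;   // log2(sizeof(jdouble))
  uintptr_t array = 0x1000;          // pretend array address
  uintptr_t index = 5;
  // r1 = index + (base_offset >> shift); address = r0 + (r1 << shift)
  uintptr_t addr = array + ((index + (base_offset >> shift)) << shift);
  assert(addr == array + base_offset + index * 8);
  return 0;
}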

void TemplateTable::aaload()
{
  transition(itos, atos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  if (EnableValhalla && ValueArrayFlatten) {
    Label is_flat_array, done;

    __ test_flat_array_oop(r0, r10 /*temp*/, is_flat_array);
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);

    __ b(done);
    __ bind(is_flat_array);
    __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), r0, r1);
    __ bind(done);
  } else {
    __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
    do_oop_load(_masm, Address(r0, r1, Address::uxtw(LogBytesPerHeapOop)), r0, IS_ARRAY);
  }
}
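When value-array flattening is on, the load has to split on the array's runtime layout. A hypothetical C++ mirror of the control flow above (the types and helpers are illustrative stand-ins, not HotSpot declarations):

struct oopDesc;
typedef oopDesc* oop;
typedef oopDesc* arrayOop;

bool is_flat_array(arrayOop a);           // cf. test_flat_array_oop above
oop  value_array_load(arrayOop a, int i); // runtime call; buffers the element on the heap
oop  load_heap_oop_at(arrayOop a, int i); // ordinary oop load with GC barrier

oop aaload_sketch(arrayOop a, int i) {
  if (is_flat_array(a)) {
    return value_array_load(a, i);
  }
  return load_heap_oop_at(a, i);
}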

void TemplateTable::baload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
  // r1: index
  index_check(r0, r1); // leaves index in r1, kills rscratch1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_BYTE) >> 0);
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, r0, Address(r0, r1, Address::uxtw(0)), noreg, noreg);
}

void TemplateTable::caload()
{
  transition(itos, itos);
  __ mov(r1, r0);
  __ pop_ptr(r0);
  // r0: array
// ...

  __ pop_ptr(r3);
  // v0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1
  __ add(r1, r1, arrayOopDesc::base_offset_in_bytes(T_DOUBLE) >> 3);
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY, Address(r3, r1, Address::uxtw(3)), noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ ldr(r0, at_tos());    // value
  __ ldr(r2, at_tos_p1()); // index
  __ ldr(r3, at_tos_p2()); // array

  Address element_address(r3, r4, Address::uxtw(LogBytesPerHeapOop));

  index_check(r3, r2); // kills r1

  // Compute the element index in r4: fold the header offset (expressed in
  // heap-oop-sized units) into the index, so that scaling r4 by
  // LogBytesPerHeapOop yields array + header + index * oopSize. Frees r2.
  __ add(r4, r2, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);

  // do array store check - check for NULL value first
  __ cbz(r0, is_null);

  // Load the array klass into r1; if the array is flat, bail out to the
  // flat-array path below
  Label is_flat_array;
  if (ValueArrayFlatten) {
    __ load_klass(r1, r3);
    __ test_flat_array_klass(r1, r10 /*temp*/, is_flat_array);
  }

  // Move subklass into r1
  __ load_klass(r1, r0);
  // Move superklass into r0
  __ load_klass(r0, r3);
  __ ldr(r0, Address(r0,
                     ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows r2, r5
  // Superklass in r0.  Subklass in r1.
  __ gen_subtype_check(r1, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ b(Interpreter::_throw_ArrayStoreException_entry);

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ ldr(r0, at_tos());
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, r0, IS_ARRAY);
  __ b(done);

  // Have a NULL in r0, r3=array, r2=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(r2);

  if (EnableValhalla) {
    Label is_null_into_value_array_npe, store_null;

    __ load_klass(r0, r3);
    // There is no way to store null in a flat array
    __ test_flat_array_klass(r0, r1, is_null_into_value_array_npe);

    // An objArray whose element_klass is a value type (one that could not
    // be flattened for some reason) must have the same semantics as a flat
    // array, i.e. storing null throws NPE
    __ ldr(r0, Address(r0, ObjArrayKlass::element_klass_offset()));
    __ test_klass_is_value(r0, r1, is_null_into_value_array_npe);
    __ b(store_null);

    __ bind(is_null_into_value_array_npe);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }

  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ b(done);

  if (EnableValhalla) {
    // r0 - value, r2 - index, r3 - array.  r1 - loaded array klass
    // store non-null value
    __ bind(is_flat_array);

    // Simplistic type check...
    Label is_type_ok;

    // Profile the not-null value's klass.
    // Load value class
    __ load_klass(r10, r0);
    __ profile_typecheck(r2, r1, r0); // blows r2, and r0

    // A flat value array needs an exact type match: is r10 == r0, i.e. does
    // the value's klass match the array's element klass exactly?

    // Move element klass into r0
    __ ldr(r0, Address(r1, ArrayKlass::element_klass_offset()));
    __ cmp(r0, r10);
    __ br(Assembler::EQ, is_type_ok);

    __ profile_typecheck_failed(r2);
    __ b(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
    __ bind(is_type_ok);

    // Reload the operands from TOS, because profile_typecheck above blows
    // r2 and r0.
    // DMS CHECK: should we really reload here?
    __ ldr(r1, at_tos());    // value
    __ mov(r2, r3);          // array
    __ ldr(r3, at_tos_p1()); // index
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), r1, r2, r3);
  }

  // Pop stack arguments
  __ bind(done);
  __ add(esp, esp, 3 * Interpreter::stackElementSize);
}
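Taken together, the paths above implement roughly the following store semantics. A hypothetical C++ mirror (all types and helpers are illustrative stand-ins, not HotSpot declarations):

struct oopDesc;
typedef oopDesc* oop;
typedef oopDesc* arrayOop;

bool is_flat_array(arrayOop a);
bool element_type_is_value(arrayOop a);
bool is_subtype_of_element(oop v, arrayOop a);
void store_oop(arrayOop a, int i, oop v);          // store with GC barrier
void value_array_store(arrayOop a, int i, oop v);  // runtime call, exact-type store
[[noreturn]] void throw_NPE();
[[noreturn]] void throw_ArrayStoreException();

void aastore_sketch(arrayOop a, int i, oop v) {
  if (v == nullptr) {
    // flat arrays and value-typed element klasses are null-free
    if (is_flat_array(a) || element_type_is_value(a)) throw_NPE();
    store_oop(a, i, nullptr);
    return;
  }
  if (is_flat_array(a)) {
    value_array_store(a, i, v);  // needs an exact element-type match
    return;
  }
  if (!is_subtype_of_element(v, a)) throw_ArrayStoreException();
  store_oop(a, i, v);
}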

void TemplateTable::bastore()
{
  transition(itos, vtos);
  __ pop_i(r1);
  __ pop_ptr(r3);
  // r0: value
  // r1: index
  // r3: array
  index_check(r3, r1); // prefer index in r1

  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(r2, r3);
  __ ldrw(r2, Address(r2, Klass::layout_helper_offset()));
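The excerpt breaks off here, but the point of the check is simple: boolean and byte arrays share the bastore bytecode while their layout helpers differ in one bit, and a T_BOOLEAN store must be narrowed to its t/f bit. A minimal sketch assuming that encoding (`boolean_diffbit` is an illustrative constant standing in for Klass::layout_helper_boolean_diffbit()):

#include <cstdint>

typedef int8_t jbyte;

jbyte narrow_for_bastore(uint32_t layout_helper, uint32_t boolean_diffbit, int value) {
  bool is_boolean = (layout_helper & boolean_diffbit) != 0;
  return is_boolean ? (jbyte)(value & 1)  // T_BOOLEAN: keep only the t/f bit
                    : (jbyte) value;      // T_BYTE: plain truncation
}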
// ...
}

void TemplateTable::if_nullcmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label not_taken;
  if (cc == equal)
    __ cbnz(r0, not_taken);
  else
    __ cbz(r0, not_taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}

void TemplateTable::if_acmp(Condition cc)
{
  transition(atos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  Label taken, not_taken;
  __ pop_ptr(r1);
  __ cmpoop(r1, r0);

  if (EnableValhalla) {
    guarantee(UsePointerPerturbation == false, "UsePointerPerturbation is not implemented");

    // Here a value object is never acmp-equal, even to itself, so when the
    // pointers compare equal we must additionally check whether the operand
    // is a value object (always-locked mark pattern) and invert the result
    // if it is.
    __ br(Assembler::NE, (cc == not_equal) ? taken : not_taken);
    __ cbz(r1, (cc == equal) ? taken : not_taken);
    __ ldr(r2, Address(r1, oopDesc::mark_offset_in_bytes()));
    // Bitwise AND (the original code had a logical &&, which collapses the
    // mask to 0/1) to isolate the pattern bits before comparing.
    __ andr(r2, r2, markOopDesc::always_locked_pattern);
    __ cmp(r2, (u1) markOopDesc::always_locked_pattern);
    cc = (cc == equal) ? not_equal : equal;
  }

  __ br(j_not(cc), not_taken);
  __ bind(taken);
  branch(false, false);
  __ bind(not_taken);
  __ profile_not_taken_branch(r0);
}
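Before the fix, the masking line read `__ andr(r2, r2, markOopDesc::always_locked_pattern && 0xF);`. A logical AND of two non-zero constants is just `true` (1), so the subsequent compare against the multi-bit pattern could never succeed. A tiny standalone demonstration of the difference:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t pattern = 0x405;  // illustrative multi-bit pattern, not HotSpot's value
  uintptr_t mark = 0x7f000405;      // pretend mark word with the pattern set

  assert((pattern && 0xF) == 1);                // logical AND: merely "both non-zero"
  assert((mark & pattern) == pattern);          // bitwise AND: isolates the pattern bits
  assert((mark & (pattern && 0xF)) != pattern); // the buggy test can never pass
  return 0;
}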

void TemplateTable::ret() {
  transition(vtos, vtos);
  // We might be moving to a safepoint.  The thread which calls
  // Interpreter::notice_safepoints() will effectively flush its cache
  // when it makes a system call, but we need to do something to
  // ensure that we see the changed dispatch table.
  __ membar(MacroAssembler::LoadLoad);

  locals_index(r1);
  __ ldr(r1, aaddress(r1)); // get return bci, compute return bcp
  __ profile_ret(r1, r2);
  __ ldr(rbcp, Address(rmethod, Method::const_offset()));
  __ lea(rbcp, Address(rbcp, r1));
  __ add(rbcp, rbcp, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos, 0, /*generate_poll*/true);
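  // Editor's note: the three instructions above recompute the bytecode
  // pointer as rbcp = (address) ConstMethod* + bci + codes_offset, i.e. the
  // address of the bytecode at the saved bci inside the method's code array.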
// ...

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (! UseBarriersForVolatile) {
    Label notVolatile;
    __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  const Address field(obj, off);

  Label Done, notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  // x86 uses a shift and mask or wings it with a shift plus assert
  // the mask is not needed. aarch64 just uses bitfield extract
  __ ubfxw(flags, raw_flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);
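  // Editor's note: ubfxw(dst, src, shift, bits) extracts a bitfield; in C
  // terms it computes dst = (src >> shift) & ((1u << bits) - 1), leaving
  // just the tos_state in 'flags'.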

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(flags, notByte);

  // Don't rewrite getstatic, only getfield
  if (is_static) rc = may_not_rewrite;

  // btos
  __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
  __ push(btos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notByte);
  __ cmp(flags, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos (same code as btos)
  __ access_load_at(T_BOOLEAN, IN_HEAP, r0, field, noreg, noreg);
  __ push(ztos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    // use btos rewriting, no truncating to t/f bit is needed for getfield.
    patch_bytecode(Bytecodes::_fast_bgetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notBool);
  __ cmp(flags, (u1)atos);
  __ br(Assembler::NE, notObj);
  // atos
  if (!EnableValhalla) {
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ push(atos);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
    }
    __ b(Done);
  } else { // Valhalla
    if (is_static) {
      __ load_heap_oop(r0, field);
      Label isFlattenable, isUninitialized;
      // Extra handling below if the static field has not been initialized yet
      __ test_field_is_flattenable(raw_flags, r10, isFlattenable);
      // Not flattenable case
      __ push(atos);
      __ b(Done);
      // Flattenable case, must not return null even if uninitialized
      __ bind(isFlattenable);
      __ cbz(r0, isUninitialized);
      __ push(atos);
      __ b(Done);
      __ bind(isUninitialized);
      __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field), obj, raw_flags);
      __ verify_oop(r0);
      __ push(atos);
      __ b(Done);
    } else {
      Label isFlattened, isInitialized, isFlattenable, rewriteFlattenable;
      __ test_field_is_flattenable(raw_flags, r10, isFlattenable);
      // Non-flattenable field case, also covers the object case
      __ load_heap_oop(r0, field);
      __ push(atos);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_agetfield, bc, r1);
      }
      __ b(Done);
      __ bind(isFlattenable);
      __ test_field_is_flattened(raw_flags, r10, isFlattened);
      // Non-flattened field case
      __ load_heap_oop(r0, field);
      __ cbnz(r0, isInitialized);
      __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), obj, raw_flags);
      __ bind(isInitialized);
      __ verify_oop(r0);
      __ push(atos);
      __ b(rewriteFlattenable);
      __ bind(isFlattened);
      __ ldr(r10, Address(cache, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
      __ andw(raw_flags, raw_flags, ConstantPoolCacheEntry::field_index_mask);
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), obj, raw_flags, r10);
      __ verify_oop(r0);
      __ push(atos);
      __ bind(rewriteFlattenable);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_qgetfield, bc, r1);
      }
      __ b(Done);
    }
  }
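
  // Editor's summary of the Valhalla atos paths above (a hedged sketch):
  //   getstatic: not flattenable       -> push the loaded oop
  //              flattenable, null     -> InterpreterRuntime::uninitialized_static_value_field
  //   getfield:  not flattenable       -> plain oop load (+ rewrite to fast_agetfield)
  //              flattenable, not flat -> oop load; null means uninitialized -> runtime call
  //              flattened             -> InterpreterRuntime::read_flattened_field
  //              (both flattenable cases rewrite to fast_qgetfield)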

  __ bind(notObj);
  __ cmp(flags, (u1)itos);
  __ br(Assembler::NE, notInt);
  // itos
  __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
  __ push(itos);
  // Rewrite bytecode to be faster
  if (rc == may_rewrite) {
    patch_bytecode(Bytecodes::_fast_igetfield, bc, r1);
  }
  __ b(Done);

  __ bind(notInt);
  __ cmp(flags, (u1)ctos);
  __ br(Assembler::NE, notChar);
  // ctos
  __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
  __ push(ctos);
  // Rewrite bytecode to be faster
// ...

    // c_rarg1: object pointer set up above (NULL if static)
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               c_rarg1, c_rarg2, c_rarg3);
    __ get_cache_and_index_at_bcp(cache, index, 1);
    __ bind(L1);
  }
}

void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
  transition(vtos, vtos);

  const Register cache  = r2;
  const Register index  = r3;
  const Register obj    = r2;
  const Register off    = r19;
  const Register flags  = r0;
  const Register flags2 = r6;
  const Register bc     = r4;

  resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
  jvmti_post_field_mod(cache, index, is_static);
  load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);

  Label Done;
  __ mov(r5, flags);

  {
    Label notVolatile;
    __ tbz(r5, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }

  // field address
  const Address field(obj, off);

  Label notByte, notBool, notInt, notShort, notChar,
        notLong, notFloat, notObj, notDouble;

  __ mov(flags2, flags);

  // x86 uses a shift and mask or wings it with a shift plus assert
  // the mask is not needed. aarch64 just uses bitfield extract
  __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, ConstantPoolCacheEntry::tos_state_bits);

  assert(btos == 0, "change code, btos != 0");
  __ cbnz(flags, notByte);

  // Don't rewrite putstatic, only putfield
  if (is_static) rc = may_not_rewrite;

  // btos
  {
    __ pop(btos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_bputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notByte);
  __ cmp(flags, (u1)ztos);
  __ br(Assembler::NE, notBool);

  // ztos
  {
    __ pop(ztos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_zputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notBool);
  __ cmp(flags, (u1)atos);
  __ br(Assembler::NE, notObj);

  // atos
  {
    if (!EnableValhalla) {
      __ pop(atos);
      if (!is_static) pop_and_check_object(obj);
      // Store into the field
      do_oop_store(_masm, field, r0, IN_HEAP);
      if (rc == may_rewrite) {
        patch_bytecode(Bytecodes::_fast_aputfield, bc, r1, true, byte_no);
      }
      __ b(Done);
    } else { // Valhalla
      __ pop(atos);
      if (is_static) {
        Label notFlattenable;
        __ test_field_is_not_flattenable(flags2, r10, notFlattenable);
        __ null_check(r0);
        __ bind(notFlattenable);
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(Done);
      } else {
        Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable;
        __ test_field_is_flattenable(flags2, r10, isFlattenable);
        // Not flattenable case, covers not flattenable values and objects
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ bind(rewriteNotFlattenable);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_aputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
        // Implementation of the flattenable semantic
        __ bind(isFlattenable);
        __ null_check(r0);
        __ test_field_is_flattened(flags2, r10, isFlattened);
        // Not flattened case
        pop_and_check_object(obj);
        // Store into the field
        do_oop_store(_masm, field, r0, IN_HEAP);
        __ b(rewriteFlattenable);
        __ bind(isFlattened);
        pop_and_check_object(obj);
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, off, obj);
        __ bind(rewriteFlattenable);
        if (rc == may_rewrite) {
          patch_bytecode(Bytecodes::_fast_qputfield, bc, r19, true, byte_no);
        }
        __ b(Done);
      }
    } // Valhalla
  }
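
  // Editor's summary of the Valhalla atos paths above (a hedged sketch):
  //   putstatic: flattenable           -> null_check(value), then plain oop store
  //   putfield:  not flattenable       -> oop store (+ rewrite to fast_aputfield)
  //              flattenable, not flat -> null_check, oop store (+ rewrite to fast_qputfield)
  //              flattened             -> InterpreterRuntime::write_flattened_value
  //              (+ rewrite to fast_qputfield)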

  __ bind(notObj);
  __ cmp(flags, (u1)itos);
  __ br(Assembler::NE, notInt);

  // itos
  {
    __ pop(itos);
    if (!is_static) pop_and_check_object(obj);
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
    if (rc == may_rewrite) {
      patch_bytecode(Bytecodes::_fast_iputfield, bc, r1, true, byte_no);
    }
    __ b(Done);
  }

  __ bind(notInt);
  __ cmp(flags, (u1)ctos);
  __ br(Assembler::NE, notChar);
// ...

void TemplateTable::putstatic(int byte_no) {
  putfield_or_static(byte_no, true);
}

void TemplateTable::jvmti_post_fast_field_mod()
{
  if (JvmtiExport::can_post_field_modification()) {
    // Check to see if a field modification watch has been set before
    // we take the time to call into the VM.
    Label L2;
    __ lea(rscratch1, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
    __ ldrw(c_rarg3, Address(rscratch1));
    __ cbzw(c_rarg3, L2);
    __ pop_ptr(r19);  // copy the object pointer from tos
    __ verify_oop(r19);
    __ push_ptr(r19); // put the object pointer back on tos
    // Save tos values before call_VM() clobbers them. Since we have
    // to do it for every data type, we use the saved values as the
    // jvalue object.
    switch (bytecode()) { // load values into the jvalue object
    case Bytecodes::_fast_qputfield: // fall through
    case Bytecodes::_fast_aputfield: __ push_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ push_i(r0); break;
    case Bytecodes::_fast_dputfield: __ push_d(); break;
    case Bytecodes::_fast_fputfield: __ push_f(); break;
    case Bytecodes::_fast_lputfield: __ push_l(r0); break;

    default:
      ShouldNotReachHere();
    }
    __ mov(c_rarg3, esp); // points to jvalue on the stack
    // access constant pool cache entry
    __ get_cache_entry_pointer_at_bcp(c_rarg2, r0, 1);
    __ verify_oop(r19);
    // r19: object pointer copied above
    // c_rarg2: cache entry pointer
    // c_rarg3: jvalue object on the stack
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::post_field_modification),
               r19, c_rarg2, c_rarg3);

    switch (bytecode()) { // restore tos values
    case Bytecodes::_fast_qputfield: // fall through
    case Bytecodes::_fast_aputfield: __ pop_ptr(r0); break;
    case Bytecodes::_fast_bputfield: // fall through
    case Bytecodes::_fast_zputfield: // fall through
    case Bytecodes::_fast_sputfield: // fall through
    case Bytecodes::_fast_cputfield: // fall through
    case Bytecodes::_fast_iputfield: __ pop_i(r0); break;
    case Bytecodes::_fast_dputfield: __ pop_d(); break;
    case Bytecodes::_fast_fputfield: __ pop_f(); break;
    case Bytecodes::_fast_lputfield: __ pop_l(r0); break;
    default: break;
    }
    __ bind(L2);
  }
}

void TemplateTable::fast_storefield(TosState state)
{
  transition(state, vtos);

  ByteSize base = ConstantPoolCache::base_offset();

  // ...

  // replace index with field offset from cache entry
  __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));

  {
    Label notVolatile;
    __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::StoreStore);
    __ bind(notVolatile);
  }

  Label notVolatile;

  // Get object from stack
  pop_and_check_object(r2);

  // field address
  const Address field(r2, r1);

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_qputfield:
    {
      Label isFlattened, done;
      __ null_check(r0);
      __ test_field_is_flattened(r3, r10, isFlattened);
      // Not flattened case
      do_oop_store(_masm, field, r0, IN_HEAP);
      __ b(done);
      __ bind(isFlattened);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value), r0, r1, r2);
      __ bind(done);
    }
    break;
  case Bytecodes::_fast_aputfield:
    do_oop_store(_masm, field, r0, IN_HEAP);
    break;
  case Bytecodes::_fast_lputfield:
    __ access_store_at(T_LONG, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_iputfield:
    __ access_store_at(T_INT, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_zputfield:
    __ access_store_at(T_BOOLEAN, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_bputfield:
    __ access_store_at(T_BYTE, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_sputfield:
    __ access_store_at(T_SHORT, IN_HEAP, field, r0, noreg, noreg);
    break;
  case Bytecodes::_fast_cputfield:
    __ access_store_at(T_CHAR, IN_HEAP, field, r0, noreg, noreg);
// ...

  // r0: object
  __ verify_oop(r0);
  __ null_check(r0);
  const Address field(r0, r1);

  // 8179954: We need to make sure that the code generated for
  // volatile accesses forms a sequentially-consistent set of
  // operations when combined with STLR and LDAR.  Without a leading
  // membar it's possible for a simple Dekker test to fail if loads
  // use LDR;DMB but stores use STLR.  This can happen if C2 compiles
  // the stores in one method and we interpret the loads in another.
  if (! UseBarriersForVolatile) {
    Label notVolatile;
    __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
    __ membar(MacroAssembler::AnyAny);
    __ bind(notVolatile);
  }

  // access field
  switch (bytecode()) {
  case Bytecodes::_fast_qgetfield:
    {
      Label isFlattened, isInitialized, Done;
      // DMS CHECK: the flags word is reloaded several times below; we don't
      // strictly need to, but this stays close to the original x86 code
      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
      __ test_field_is_flattened(r10, r10, isFlattened);
      // Non-flattened field case
      __ mov(r10, r0);
      __ load_heap_oop(r0, field);
      __ cbnz(r0, isInitialized);
      __ mov(r0, r10);
      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
      __ andw(r10, r10, ConstantPoolCacheEntry::field_index_mask);
      __ call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field), r0, r10);
      __ bind(isInitialized);
      __ verify_oop(r0);
      __ b(Done);
      __ bind(isFlattened);
      __ ldrw(r10, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset())));
      __ andw(r10, r10, ConstantPoolCacheEntry::field_index_mask);
      __ ldr(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f1_offset())));
      call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field), r0, r10, r3);
      __ verify_oop(r0);
      __ bind(Done);
    }
    break;
  case Bytecodes::_fast_agetfield:
    do_oop_load(_masm, field, r0, IN_HEAP);
    __ verify_oop(r0);
    break;
  case Bytecodes::_fast_lgetfield:
    __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_igetfield:
    __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_bgetfield:
    __ access_load_at(T_BYTE, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_sgetfield:
    __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_cgetfield:
    __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
    break;
  case Bytecodes::_fast_fgetfield:
// ...

                   CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), r0);
      __ pop(atos); // restore the return value

    }
    __ b(done);
  }

  // slow case
  __ bind(slow_case);
  __ get_constant_pool(c_rarg1);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), c_rarg1, c_rarg2);
  __ verify_oop(r0);

  // continue
  __ bind(done);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
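The trailing StoreStore barrier keeps the initializing stores from being reordered past the store that later publishes the new object. The same idiom in portable C++ (a sketch using a release fence, which is at least as strong as StoreStore):

#include <atomic>

struct Obj { int header; };
std::atomic<Obj*> published{nullptr};

void allocate_and_publish() {
  Obj* o = new Obj;
  o->header = 42;                                       // initializing store
  std::atomic_thread_fence(std::memory_order_release);  // ~ StoreStore barrier
  published.store(o, std::memory_order_relaxed);        // publishing store
}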

void TemplateTable::defaultvalue() {
  transition(vtos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
          c_rarg1, c_rarg2);
  __ verify_oop(r0);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::withfield() {
  transition(vtos, atos);
  resolve_cache_and_index(f2_byte, c_rarg1 /*cache*/, c_rarg2 /*index*/, sizeof(u2));

  // n.b. unlike x86, the cache is now rcpool plus the indexed offset,
  // so we pass rcpool to meet the shared code's expectations
  call_VM(r1, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), rcpool);
  __ verify_oop(r1);
  __ add(esp, esp, r0);
  __ mov(r0, r1);
}

void TemplateTable::newarray() {
  transition(itos, atos);
  __ load_unsigned_byte(c_rarg1, at_bcp(1));
  __ mov(c_rarg2, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
          c_rarg1, c_rarg2);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);
  __ get_unsigned_2_byte_index_at_bcp(c_rarg2, 1);
  __ get_constant_pool(c_rarg1);
  __ mov(c_rarg3, r0);
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
          c_rarg1, c_rarg2, c_rarg3);
  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}
// ...

  __ bind(quicked);
  __ mov(r3, r0); // Save object in r3; r0 needed for subtype check
  __ load_resolved_klass_at_offset(r2, r19, r0, rscratch1); // r0 = klass

  __ bind(resolved);
  __ load_klass(r19, r3);

  // Generate subtype check.  Blows r2, r5.  Object in r3.
  // Superklass in r0.  Subklass in r19.
  __ gen_subtype_check(r19, ok_is_subtype);

  // Come here on failure
  __ push(r3);
  // object is at TOS
  __ b(Interpreter::_throw_ClassCastException_entry);

  // Come here on success
  __ bind(ok_is_subtype);
  __ mov(r0, r3); // Restore object from r3

  __ b(done);
  __ bind(is_null);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ profile_null_seen(r2);
  }

  if (EnableValhalla) {
    // A checkcast against a Q-descriptor (null-free value type) must throw
    // NPE for a null reference, so inspect the constant pool entry.
    // Get cpool & tags index
    __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
    __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
    __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
    __ lea(r1, Address(rscratch1, r19));
    __ ldarb(r1, r1);
    // See if the CP entry is a Q-descriptor
    __ andr(r1, r1, JVM_CONSTANT_QDESC_BIT);
    __ cmp(r1, (u1) JVM_CONSTANT_QDESC_BIT);
    __ br(Assembler::NE, done);
    __ b(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
  }

  __ bind(done);
}
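In other words, checkcast of null succeeds for ordinary reference types but must throw for null-free value types. A hypothetical C++ mirror of the null path (names are illustrative stand-ins):

[[noreturn]] void throw_NPE();

void checkcast_null_path_sketch(unsigned tag, unsigned qdesc_bit) {
  if ((tag & qdesc_bit) != 0) {
    throw_NPE();  // null is not assignable to a null-free (Q) type
  }
  // otherwise: checkcast of null always succeeds, result stays null
}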

void TemplateTable::instanceof() {
  transition(atos, itos);
  Label done, is_null, ok_is_subtype, quicked, resolved;
  __ cbz(r0, is_null);

  // Get cpool & tags index
  __ get_cpool_and_tags(r2, r3); // r2=cpool, r3=tags array
  __ get_unsigned_2_byte_index_at_bcp(r19, 1); // r19=index
  // See if the bytecode has already been quickened
  __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
  __ lea(r1, Address(rscratch1, r19));
  __ ldarb(r1, r1);
  __ cmp(r1, (u1)JVM_CONSTANT_Class);
  __ br(Assembler::EQ, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(r0, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));