rev 58025 : Shenandoah: C1: Resolve into registers of correct type rev 58026 : [mq]: JDK-8238851-2.patch
1 /* 2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "precompiled.hpp"
#include "c1/c1_IR.hpp"
#include "gc/shared/satbMarkQueue.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"

// Shorthand for emitting LIR through the current LIRGenerator.
// The ASSERT build variant records file/line for LIR debugging.
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

// Slow-path code for the SATB pre-barrier stub is emitted by the
// platform-specific barrier set assembler.
void ShenandoahPreBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

// Slow-path code for the load-reference-barrier stub is emitted by the
// platform-specific barrier set assembler.
void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_load_reference_barrier_stub(ce, this);
}

// Runtime stub blobs are generated lazily in generate_c1_runtime_stubs().
ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
  _pre_barrier_c1_runtime_code_blob(NULL),
  _load_reference_barrier_rt_code_blob(NULL) {}

// Emit the SATB (snapshot-at-the-beginning) pre-write barrier.
//
// Fast path: load the per-thread satb_mark_queue active flag and skip the
// barrier when it is zero (i.e. marking not in progress). Otherwise branch
// to a ShenandoahPreBarrierStub slow path.
//
// Two modes, distinguished by the caller passing exactly one of the operands:
//  - do_load (pre_val == illegalOpr): addr_opr is supplied; the stub is given
//    the address so the previous value can be obtained on the slow path.
//    Supports C1 field patching (C1_NEEDS_PATCHING).
//  - !do_load (addr_opr == illegalOpr): the caller already has the previous
//    value in pre_val (must be a T_OBJECT register) and only that is recorded.
void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
    // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    // The previous value must be loaded on the slow path; hand the stub the
    // address (wrapped as a LIR_Address if the caller passed a raw register).
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
  } else {
    // Previous value already available; no load and no patching needed.
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");

    slow = new ShenandoahPreBarrierStub(pre_val);
  }

  // Branch to the slow path when the marking-active flag is non-zero;
  // the stub continues back here.
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

// Apply the load-reference barrier to obj (loaded from addr), returning the
// (possibly updated) result operand. No-op passthrough when
// ShenandoahLoadRefBarrier is disabled.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  if (ShenandoahLoadRefBarrier) {
    return load_reference_barrier_impl(gen, obj, addr);
  } else {
    return obj;
  }
}

// Emit the load-reference barrier:
//  - copy obj into a result register,
//  - read the per-thread gc-state byte and mask it with
//    HAS_FORWARDED | EVACUATION | TRAVERSAL,
//  - if any of those bits are set, branch to a
//    ShenandoahLoadReferenceBarrierStub slow path which may replace result.
// Both obj and addr are forced into registers first; tmp1/tmp2 are scratch
// registers for the stub.
LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, LIR_Opr addr) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");

  obj = ensure_in_register(gen, obj, T_OBJECT);
  assert(obj->is_register(), "must be a register at this point");
  addr = ensure_in_register(gen, addr, T_ADDRESS);
  assert(addr->is_register(), "must be a register at this point");
  LIR_Opr result = gen->result_register_for(obj->value_type());
  __ move(obj, result);
  LIR_Opr tmp1 = gen->new_register(T_ADDRESS);
  LIR_Opr tmp2 = gen->new_register(T_ADDRESS);

  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
                    T_BYTE);
  // Read and check the gc-state-flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(active_flag_addr, flag_val);
  LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::HAS_FORWARDED |
                                       ShenandoahHeap::EVACUATION |
                                       ShenandoahHeap::TRAVERSAL);
  LIR_Opr mask_reg = gen->new_register(T_INT);
  __ move(mask, mask_reg);

  // On two-operand LIR platforms the AND must clobber one of its inputs;
  // otherwise mask into a fresh register.
  if (TwoOperandLIRForm) {
    __ logical_and(flag_val, mask_reg, flag_val);
  } else {
    LIR_Opr masked_flag = gen->new_register(T_INT);
    __ logical_and(flag_val, mask_reg, masked_flag);
    flag_val = masked_flag;
  }
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, addr, result, tmp1, tmp2);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());

  return result;
}

// Make sure obj is held in a register of the given type: constants are moved
// into a fresh register; addresses are materialized with leal. Already-register
// operands are returned unchanged.
LIR_Opr ShenandoahBarrierSetC1::ensure_in_register(LIRGenerator* gen, LIR_Opr obj, BasicType type) {
  if (!obj->is_register()) {
    LIR_Opr obj_reg;
    if (obj->is_constant()) {
      obj_reg = gen->new_register(type);
      __ move(obj, obj_reg);
    } else {
#ifdef AARCH64
      // AArch64 expects double-size register.
      obj_reg = gen->new_pointer_register();
#else
      // x86 expects single-size register.
      obj_reg = gen->new_register(type);
#endif
      __ leal(obj, obj_reg);
    }
    obj = obj_reg;
  }
  return obj;
}

// Store-value barrier: when ShenandoahStoreValEnqueueBarrier is enabled,
// enqueue the value being stored via the SATB pre-barrier (pre_val mode,
// no address). Returns obj, forced into a register when the barrier runs.
LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) {
  if (ShenandoahStoreValEnqueueBarrier) {
    obj = ensure_in_register(gen, obj, T_OBJECT);
    pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj);
  }
  return obj;
}

// Oop stores get the SATB pre-barrier (loading the previous value from the
// resolved address) and the store-value barrier before the actual store.
void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  if (access.is_oop()) {
    if (ShenandoahSATBBarrier) {
      pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), access.resolved_addr(), LIR_OprFact::illegalOpr /* pre_val */);
    }
    value = storeval_barrier(access.gen(), value, access.access_emit_info(), access.decorators());
  }
  BarrierSetC1::store_at_resolved(access, value);
}

LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  // We must resolve in register when patching. This is to avoid
  // having a patch area in the load barrier stub, since the call
  // into the runtime to patch will not have the proper oop map.
  const bool patch_before_barrier = access.is_oop() && (access.decorators() & C1_NEEDS_PATCHING) != 0;
  return BarrierSetC1::resolve_address(access, resolve_in_register || patch_before_barrier);
}

// Emit a load plus the barriers an oop load needs: the load-reference barrier
// (either via a runtime call for the "native" flavor, or inline via
// load_reference_barrier()) and, for Reference-type loads, the keep-alive
// (SATB) barrier on the loaded referent.
void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  // 1: non-reference load, no additional barrier is needed
  if (!access.is_oop()) {
    BarrierSetC1::load_at_resolved(access, result);
    return;
  }

  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  BasicType type = access.type();

  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
      // Native flavor: call ShenandoahRuntime::load_reference_barrier_native
      // with (result, addr) and move the call's result back into result.
      BarrierSetC1::load_at_resolved(access, result);
      LIR_OprList* args = new LIR_OprList();
      LIR_Opr addr = access.resolved_addr();
      addr = ensure_in_register(gen, addr, T_ADDRESS);
      args->append(result);
      args->append(addr);
      BasicTypeList signature;
      signature.append(T_OBJECT);
      signature.append(T_ADDRESS);
      LIR_Opr call_result = gen->call_runtime(&signature, args,
                                              CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native),
                                              objectType, NULL);
      __ move(call_result, result);
    } else {
      // Inline flavor: load into a temp, run the barrier, move into result.
      LIR_Opr tmp = gen->new_register(T_OBJECT);
      BarrierSetC1::load_at_resolved(access, tmp);
      tmp = load_reference_barrier(gen, tmp, access.resolved_addr());
      __ move(tmp, result);
    }
  } else {
    BarrierSetC1::load_at_resolved(access, result);
  }

  // 3: apply keep-alive barrier if ShenandoahKeepAliveBarrier is set
  if (ShenandoahKeepAliveBarrier) {
    bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
    bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
    bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
    bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
    // In traversal mode the pre-barrier is applied even for AS_NO_KEEPALIVE.
    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;

    if ((is_weak || is_phantom || is_anonymous) && keep_alive) {
      // Register the value in the referent field with the pre-barrier
      LabelObj *Lcont_anonymous;
      if (is_anonymous) {
        // Unknown reference strength: emit a runtime check that skips the
        // barrier unless this really is a Reference.referent load.
        Lcont_anonymous = new LabelObj();
        generate_referent_check(access, Lcont_anonymous);
      }
      pre_barrier(gen, access.access_emit_info(), decorators, LIR_OprFact::illegalOpr /* addr_opr */,
                  result /* pre_val */);
      if (is_anonymous) {
        __ branch_destination(Lcont_anonymous->label());
      }
    }
  }
}

// Closure used by Runtime1::generate_blob to emit the pre-barrier runtime stub.
class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return NULL;
  }
};

// Closure used by Runtime1::generate_blob to emit the load-reference-barrier
// runtime stub.
class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_load_reference_barrier_runtime_stub(sasm);
    return NULL;
  }
};

// Generate the C1 runtime stubs this barrier set needs. The pre-barrier stub
// is always generated; the load-reference-barrier stub only when
// ShenandoahLoadRefBarrier is enabled.
void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                              "shenandoah_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  if (ShenandoahLoadRefBarrier) {
    C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_code_gen_cl;
    _load_reference_barrier_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
                                                                   "shenandoah_load_reference_barrier_slow",
                                                                   false, &lrb_code_gen_cl);
  }
}

// Human-readable name for the runtime entry points this barrier set calls
// directly (used for printing/debugging); NULL for unknown entries.
const char* ShenandoahBarrierSetC1::rtcall_name_for_address(address entry) {
  if (entry == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native)) {
    return "ShenandoahRuntime::load_reference_barrier_native";
  }
  return NULL;
}