/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"

#define __ masm->

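// As throughout HotSpot's assembler sources, '__' abbreviates 'masm->'.

// load_at emits a plain, GC-agnostic load of a 'type'-sized value from
// 'src' into 'dst' (floating-point values go to v0).  Heap oops are
// decoded from their compressed form when UseCompressedOops is enabled.
// GC-specific subclasses override this to wrap the load in read barriers.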
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

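// store_at is the store-side counterpart: it emits a plain store of 'val'
// to 'dst', encoding heap oops as compressed oops when UseCompressedOops
// is enabled.  A 'val' of noreg means "store null".  GC-specific
// subclasses override this to emit pre/post write barriers around it.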
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
        if (UseCompressedOops) {
          __ strw(zr, dst);
        } else {
          __ str(zr, dst);
        }
      } else {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ strw(val, dst);
        } else {
          __ str(val, dst);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0,  dst); break;
  case T_DOUBLE:  __ strd(v0,  dst); break;
  default: Unimplemented();
  }
}

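// Default object equality is a plain pointer compare.  Collectors that
// can present two valid addresses for one object (e.g. Shenandoah with
// its forwarding pointers) override this with a barrier-aware comparison.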
void BarrierSetAssembler::obj_equals(MacroAssembler* masm,
                                     Register obj1, Register obj2) {
  __ cmp(obj1, obj2);
}

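// try_resolve_jobject_in_native resolves a jobject into 'obj' without
// leaving native state.  A jobject is a tagged pointer to an oop slot:
// weak global handles have the low bit (JNIHandles::weak_tag_mask) set.
// In C-like pseudocode:
//
//   oop* slot = (oop*)(handle & ~weak_tag_mask);  // strip the tag
//   obj = *slot;                                  // load the oop
//
// This base version never takes 'slowpath'; GCs that need a read barrier
// here override it and branch to 'slowpath' instead.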
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate
  STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
  __ andr(obj, obj, ~JNIHandles::weak_tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
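// TLAB allocation is a simple bump-the-pointer scheme on the current
// thread's local buffer; no atomics are needed because the TLAB is
// thread-private.  In C-like pseudocode:
//
//   obj = thread->tlab_top();
//   end = obj + size;                        // size is con_ or var_size_in_bytes
//   if (end > thread->tlab_end()) goto slow_case;
//   thread->set_tlab_top(end);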
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

// Defines obj, preserves var_size_in_bytes
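// Eden allocation races with every other allocating thread, so the shared
// heap top is advanced with a load-acquire/store-release exclusive pair
// (LDAXR/STLXR), i.e. a CAS retry loop on *Universe::heap()->top_addr():
//
//   do {
//     obj = *top_addr;                           // ldaxr: load-exclusive
//     end = obj + size;
//     if (end < obj || end > heap_end) goto slow_case;
//   } while (!store_exclusive(top_addr, end));   // stlxr fails if we raced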
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ b(slow_case);
  } else {
    Register end = t1;
    Register heap_end = rscratch2;
    Label retry;
    __ bind(retry);
    {
      unsigned long offset;
      __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
      __ ldr(heap_end, Address(rscratch1, offset));
    }

    ExternalAddress heap_top((address) Universe::heap()->top_addr());

    // Get the current top of the heap
    {
      unsigned long offset;
      __ adrp(rscratch1, heap_top, offset);
      // Use add() here after ADRP, rather than lea().
      // lea() does not generate anything if its offset is zero.
      // However, relocs expect to find either an ADD or a load/store
      // insn after an ADRP.  add() always generates an ADD insn, even
      // for add(Rn, Rn, 0).
      __ add(rscratch1, rscratch1, offset);
      __ ldaxr(obj, rscratch1);
    }

    // Adjust it by the size of our new object
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes));
    }

    // if end < obj then we wrapped around high memory
    __ cmp(end, obj);
    __ br(Assembler::LO, slow_case);

    __ cmp(end, heap_end);
    __ br(Assembler::HI, slow_case);

    // If heap_top hasn't been changed by some other thread, update it.
    __ stlxr(rscratch2, end, rscratch1);
    __ cbnzw(rscratch2, retry);

    incr_allocated_bytes(masm, var_size_in_bytes, con_size_in_bytes, t1);
  }
}

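// Bumps the calling thread's cumulative allocated-bytes counter by the
// size of the new object.  A plain load/add/store is enough: the counter
// lives in the JavaThread and is only ever written by its own thread.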
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  assert(t1->is_valid(), "need temp reg");

  __ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
  if (var_size_in_bytes->is_valid()) {
    __ add(t1, t1, var_size_in_bytes);
  } else {
    __ add(t1, t1, con_size_in_bytes);
  }
  __ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
// FIXME: 8210498: nmethod entry barriers are not implemented
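// The disabled sketch below shows the intended pattern: load the
// per-thread "disarmed" word and, if it is non-zero, call the entry
// barrier stub before letting execution fall into the method body.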
#if 0
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }
  Label continuation;
  Address disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_offset()));
  __ align(8);
  __ ldr(rscratch1, disarmed_addr);
  __ cbz(rscratch1, continuation);
  __ blr(RuntimeAddress(StubRoutines::aarch64::method_entry_barrier()));
  __ bind(continuation);
#endif
}