/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"


#define __ ce->masm()->

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  __ mov_metadata(rscratch1, m);
  ce->store_parameter(rscratch1, 1);
  ce->store_parameter(_bci, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ far_call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_cpu_register()) {
    __ mov(rscratch1, _index->as_register());
  } else {
    __ mov(rscratch1, _index->as_jint());
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    assert(_array != NULL, "sanity");
    __ mov(rscratch2, _array->as_pointer_register());
    stub_id = Runtime1::throw_range_check_failed_id;
  }
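  // Call the throw stub via lr: blr then overwrites lr with the return
  // address, so this sequence clobbers no register beyond the two scratch
  // registers that already carry the arguments (index in rscratch1 and,
  // for the range check, the array in rscratch2).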
  __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
  __ blr(lr);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ far_call(Address(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}

// Implementation of LoadFlattenedArrayStub

LoadFlattenedArrayStub::LoadFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _result = result;
  _scratch_reg = FrameMap::r0_oop_opr;
  _info = new CodeEmitInfo(info);
}

void LoadFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 1);
  ce->store_parameter(_index->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::load_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  if (_result->as_register() != r0) {
    __ mov(_result->as_register(), r0);
  }
  __ b(_continuation);
}


// Implementation of StoreFlattenedArrayStub

StoreFlattenedArrayStub::StoreFlattenedArrayStub(LIR_Opr array, LIR_Opr index, LIR_Opr value, CodeEmitInfo* info) {
  _array = array;
  _index = index;
  _value = value;
  _scratch_reg = FrameMap::r0_oop_opr;
  _info = new CodeEmitInfo(info);
}


void StoreFlattenedArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_array->as_register(), 2);
  ce->store_parameter(_index->as_register(), 1);
  ce->store_parameter(_value->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::store_flattened_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

// Implementation of SubstitutabilityCheckStub

SubstitutabilityCheckStub::SubstitutabilityCheckStub(LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  _left = left;
  _right = right;
  _scratch_reg = FrameMap::r0_oop_opr;
  _info = new CodeEmitInfo(info);
}

void SubstitutabilityCheckStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_left->as_register(), 1);
  ce->store_parameter(_right->as_register(), 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::substitutability_check_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}

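// The allocation stubs below share a fixed register convention, which the
// asserts in each emit_code document: the klass is expected in r3, an
// array length in r19, and every Runtime1 allocation entry returns its
// result in r0.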

// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ mov(r3, _klass_reg->as_register());
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info, bool is_value_type) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
  _is_value_type = is_value_type;
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == r19, "length must be in r19");
  assert(_klass_reg->as_register() == r3, "klass_reg must be in r3");

  if (_is_value_type) {
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_value_array_id)));
  } else {
    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  }

  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == r0, "result must be in r0");
  __ b(_continuation);
}

// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info, CodeStub* throw_imse_stub, LIR_Opr scratch_reg)
  : MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
  _scratch_reg = scratch_reg;
  _throw_imse_stub = throw_imse_stub;
  if (_throw_imse_stub != NULL) {
    assert(_scratch_reg != LIR_OprFact::illegalOpr, "must be");
  }
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  if (_throw_imse_stub != NULL) {
    // When we come here, _obj_reg has already been checked to be non-null.
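    // Value types can never be synchronized on: their mark word permanently
    // carries markOopDesc::always_locked_pattern. Mask the mark with the
    // pattern and compare; when the object turns out to be a value type,
    // divert to _throw_imse_stub, which raises
    // IllegalMonitorStateException.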
    __ ldr(rscratch1, Address(_obj_reg->as_register(), oopDesc::mark_offset_in_bytes()));
    __ mov(rscratch2, markOopDesc::always_locked_pattern);
    __ andr(rscratch1, rscratch1, rscratch2);

    __ cmp(rscratch1, rscratch2);
    __ br(Assembler::EQ, *_throw_imse_stub->entry());
  }

  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ b(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
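  // Tail call: materialize the continuation address in lr first, so that
  // the monitorexit runtime routine returns straight to _continuation in
  // compiled code instead of back into this stub.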
  __ adr(lr, _continuation);
  __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(false, "AArch64 should not use C1 runtime patching");
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ far_call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object in a scratch register because all other registers
  // must be preserved
  if (_obj->is_cpu_register()) {
    __ mov(rscratch1, _obj->as_register());
  }
  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), NULL, rscratch2);
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // next registers will get stored on the stack
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ str(r[i], Address(sp, st_off));
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  Address resolve(SharedRuntime::get_resolve_static_call_stub(),
                  relocInfo::static_call_type);
  address call = __ trampoline_call(resolve);
  if (call == NULL) {
    ce->bailout("trampoline stub overflow");
    return;
  }
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ lea(rscratch2, ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
  __ incrementw(Address(rscratch2));
#endif

  __ b(_continuation);
}

#undef __