/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/output.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
//  To force FullGCALot inside a runtime function, add the following two lines
//
//  Universe::release_fullgc_alot_dummy();
//  MarkSweep::invoke(0, "Debugging");
//
// At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000




// Compiled code entry points
address OptoRuntime::_new_instance_Java                           = NULL;
address OptoRuntime::_new_array_Java                              = NULL;
address OptoRuntime::_new_array_nozero_Java                       = NULL;
address OptoRuntime::_multianewarray2_Java                        = NULL;
address OptoRuntime::_multianewarray3_Java                        = NULL;
address OptoRuntime::_multianewarray4_Java                        = NULL;
address OptoRuntime::_multianewarray5_Java                        = NULL;
address OptoRuntime::_multianewarrayN_Java                        = NULL;
address OptoRuntime::_vtable_must_compile_Java                    = NULL;
address OptoRuntime::_complete_monitor_locking_Java               = NULL;
address OptoRuntime::_monitor_notify_Java                         = NULL;
address OptoRuntime::_monitor_notifyAll_Java                      = NULL;
address OptoRuntime::_rethrow_Java                                = NULL;

address OptoRuntime::_slow_arraycopy_Java                         = NULL;
address OptoRuntime::_register_finalizer_Java                     = NULL;

ExceptionBlob* OptoRuntime::_exception_blob;

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread, false);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled like code");
  return true;
}
#endif // ASSERT


#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
  if (var == NULL) { return false; }

bool OptoRuntime::generate(ciEnv* env) {

  generate_exception_blob();

  // Note: tls: Means fetching the return oop out of the thread-local storage
  //
  //   variable/name                      type-function-gen            , runtime method                           , fncy_jp, tls, save_args, retpc
  // -------------------------------------------------------------------------------------------------------------------------------
  gen(env, _new_instance_Java            , new_instance_Type           , new_instance_C                           , 0, true , false, false);
  gen(env, _new_array_Java               , new_array_Type              , new_array_C                              , 0, true , false, false);
  gen(env, _new_array_nozero_Java        , new_array_Type              , new_array_nozero_C                       , 0, true , false, false);
  gen(env, _multianewarray2_Java         , multianewarray2_Type        , multianewarray2_C                        , 0, true , false, false);
  gen(env, _multianewarray3_Java         , multianewarray3_Type        , multianewarray3_C                        , 0, true , false, false);
  gen(env, _multianewarray4_Java         , multianewarray4_Type        , multianewarray4_C                        , 0, true , false, false);
  gen(env, _multianewarray5_Java         , multianewarray5_Type        , multianewarray5_C                        , 0, true , false, false);
  gen(env, _multianewarrayN_Java         , multianewarrayN_Type        , multianewarrayN_C                        , 0, true , false, false);
  gen(env, _complete_monitor_locking_Java, complete_monitor_enter_Type , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
  gen(env, _monitor_notify_Java          , monitor_notify_Type         , monitor_notify_C                         , 0, false, false, false);
  gen(env, _monitor_notifyAll_Java       , monitor_notify_Type         , monitor_notifyAll_C                      , 0, false, false, false);
  gen(env, _rethrow_Java                 , rethrow_Type                , rethrow_C                                , 2, true , false, true );

  gen(env, _slow_arraycopy_Java          , slow_arraycopy_Type         , SharedRuntime::slow_arraycopy_C          , 0, false, false, false);
  gen(env, _register_finalizer_Java      , register_finalizer_Type     , register_finalizer                       , 0, false, false, false);

  return true;
}

#undef gen

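// For illustration: a line such as
//   gen(env, _new_instance_Java, new_instance_Type, new_instance_C, 0, true, false, false);
// in generate() above expands, per the gen macro, to roughly
//   _new_instance_Java = generate_stub(env, new_instance_Type,
//                                      CAST_FROM_FN_PTR(address, new_instance_C),
//                                      "_new_instance_Java", 0, true, false, false);
//   if (_new_instance_Java == NULL) { return false; }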

// Helper method to do generation of RuntimeStubs
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen,
                                   address C_function,
                                   const char* name,
                                   int is_fancy_jump,
                                   bool pass_tls,
                                   bool save_argument_registers,
                                   bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
  ResourceMark rm;
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls, save_argument_registers, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs =(RuntimeStub *)cb;
  assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(thread), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    thread->set_vm_result(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END


// array allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread *thread))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(thread), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    // Although the oopFactory likes to work with the elem_type,
    // the compiler prefers the array_type, since it must already have
    // that latter value in hand for the fast path.
    Handle holder(THREAD, array_type->klass_holder()); // keep the array klass alive
    Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
    result = oopFactory::new_objArray(elem_type, len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread *thread))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(thread), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(thread);

  oop result = thread->vm_result();
  if ((len > 0) && (result != NULL) &&
      is_deoptimized_caller_frame(thread)) {
    // Zero array here if the caller is deoptimized.
    int size = ((typeArrayOop)result)->object_size();
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    const size_t hs = arrayOopDesc::header_size(elem_type);
    // Align to next 8 bytes to avoid trashing the array's length.
    const size_t aligned_hs = align_object_offset(hs);
    HeapWord* obj = cast_from_oop<HeapWord*>(result);
    if (aligned_hs > hs) {
      Copy::zero_to_words(obj+hs, aligned_hs-hs);
    }
    // Optimized zeroing.
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
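// Each multianewarray<K>_C entry point below packs its K length arguments into a
// local jint dims[K] array and hands them to ArrayKlass::multi_allocate;
// multianewarrayN_C instead receives the dimensions as a Java int[] and copies
// them into a resource array first.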

// multianewarray for 2 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread *thread))
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread *thread))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(THREAD, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread *thread))

  if (!SafepointSynchronize::is_synchronizing() ) {
    if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(THREAD, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

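// The *_Type() factories below describe runtime call signatures to the compiler.
// TypeTuple::fields(n) reserves the first TypeFunc::Parms slots for the standard
// call edges (control, I/O, memory, frame pointer, return address), so the first
// real argument or result always sits at index TypeFunc::Parms+0.  Long and
// double values occupy two adjacent slots, with Type::HALF marking the second half.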
const TypeFunc *OptoRuntime::new_instance_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}


const TypeFunc *OptoRuntime::athrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Exception oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


const TypeFunc *OptoRuntime::new_array_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;          // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type **fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  for( int i = 1; i < nargs; i++ )
    fields[TypeFunc::Parms + i] = TypeInt::INT;      // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::multianewarray2_Type() {
  return multianewarray_Type(2);
}

const TypeFunc *OptoRuntime::multianewarray3_Type() {
  return multianewarray_Type(3);
}

const TypeFunc *OptoRuntime::multianewarray4_Type() {
  return multianewarray_Type(4);
}

const TypeFunc *OptoRuntime::multianewarray5_Type() {
  return multianewarray_Type(5);
}

const TypeFunc *OptoRuntime::multianewarrayN_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;  // array of dim sizes
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;   // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::uncommon_trap_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling
const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

  return TypeFunc::make(domain,range);
}


//-----------------------------------------------------------------------------
const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(3);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock - BasicLock
  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM;    // Thread pointer (Self)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::monitor_notify_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::flush_windows_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::l2f_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::modf_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  fields[TypeFunc::Parms+1] = Type::FLOAT;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::Math_D_D_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::Math_DD_D_Type() {
  const Type **fields = TypeTuple::fields(4);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  fields[TypeFunc::Parms+2] = Type::DOUBLE;
  fields[TypeFunc::Parms+3] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

//-------------- currentTimeMillis, currentTimeNanos, etc

const TypeFunc* OptoRuntime::void_long_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

// arraycopy stub variations:
enum ArrayCopyType {
  ac_fast,                      // void(ptr, ptr, size_t)
  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
  ac_slow,                      // void(ptr, int, ptr, int, int)
  ac_generic                    //  int(ptr, int, ptr, int, int)
};

static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args      = (act == ac_fast ? 3 : 5);
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;        // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;          // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL;        // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;          // dest_pos
    fields[argp++] = TypeInt::INT;          // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL;      // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = NULL; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::fast_arraycopy_Type() {
  // This signature is simple:  Two base pointers and a size_t.
  return make_arraycopy_Type(ac_fast);
}

const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() {
  // An extension of fast_arraycopy_Type which adds type checking.
  return make_arraycopy_Type(ac_checkcast);
}

const TypeFunc* OptoRuntime::slow_arraycopy_Type() {
  // This signature is exactly the same as System.arraycopy.
  // There are no intptr_t (int/long) arguments.
  return make_arraycopy_Type(ac_slow);
}

const TypeFunc* OptoRuntime::generic_arraycopy_Type() {
  // This signature is like System.arraycopy, except that it returns status.
  return make_arraycopy_Type(ac_generic);
}


const TypeFunc* OptoRuntime::array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  const TypeTuple *domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

// for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant)
const TypeFunc* OptoRuntime::aescrypt_block_Type() {
  // create input type (domain)
  int num_args = 3;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 4;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;  // original k array
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesCRC32(int crc, byte* b, int len)
 */
const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table)
 */
const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // buf
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // table
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesAdler32(int adler, byte* b, int off, int len)
 */
const TypeFunc* OptoRuntime::updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // src + offset
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
  // create input type (domain)
  int num_args = 5;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 6;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  fields[argp++] = TypeInt::INT;        // src len
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;  // original k array
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// for electronicCodeBook calls of aescrypt encrypt/decrypt, three pointers and a length, returning int
const TypeFunc* OptoRuntime::electronicCodeBook_aescrypt_Type() {
  // create input type (domain)
  int num_args = 4;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 5;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypeInt::INT;        // src len
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;  // original k array
  }
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// for counterMode calls of aescrypt encrypt/decrypt, six pointers and a length, returning int
const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 7;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 8;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypePtr::NOTNULL;    // counter array
  fields[argp++] = TypeInt::INT;        // src len
  fields[argp++] = TypePtr::NOTNULL;    // saved_encCounter
  fields[argp++] = TypePtr::NOTNULL;    // saved used addr
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;  // original k array
  }
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

/*
 * void implCompress(byte[] buf, int ofs)
 */
const TypeFunc* OptoRuntime::digestBase_implCompress_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 3 : 2;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;            // buf
  fields[argp++] = TypePtr::NOTNULL;            // state
  if (is_sha3) fields[argp++] = TypeInt::INT;   // digest_length
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/*
 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
 */
const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type(bool is_sha3) {
  // create input type (domain)
  int num_args = is_sha3 ? 5 : 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;            // buf
  fields[argp++] = TypePtr::NOTNULL;            // state
  if (is_sha3) fields[argp++] = TypeInt::INT;   // digest_length
  fields[argp++] = TypeInt::INT;                // ofs
  fields[argp++] = TypeInt::INT;                // limit
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning ofs (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::multiplyToLen_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // xlen
  fields[argp++] = TypePtr::NOTNULL;    // y
  fields[argp++] = TypeInt::INT;        // ylen
  fields[argp++] = TypePtr::NOTNULL;    // z
  fields[argp++] = TypeInt::INT;        // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // z
  fields[argp++] = TypeInt::INT;        // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// for mulAdd calls, 2 pointers and 3 ints, returning int
const TypeFunc* OptoRuntime::mulAdd_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // out
  fields[argp++] = TypePtr::NOTNULL;    // in
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeInt::INT;        // k
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning carry (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // b
  fields[argp++] = TypePtr::NOTNULL;    // n
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeLong::LONG;      // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;    // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // n
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeLong::LONG;      // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;    // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc * OptoRuntime::bigIntegerShift_Type() {
  int argcnt = 5;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // newArr
  fields[argp++] = TypePtr::NOTNULL;    // oldArr
  fields[argp++] = TypeInt::INT;        // newIdx
  fields[argp++] = TypeInt::INT;        // shiftCount
  fields[argp++] = TypeInt::INT;        // numIter
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::vectorizedMismatch_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // obja
  fields[argp++] = TypePtr::NOTNULL;    // objb
  fields[argp++] = TypeInt::INT;        // length, number of elements
  fields[argp++] = TypeInt::INT;        // log2scale, element size
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // return mismatch index (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// GHASH block processing
const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // state
  fields[argp++] = TypePtr::NOTNULL;    // subkeyH
  fields[argp++] = TypePtr::NOTNULL;    // data
  fields[argp++] = TypeInt::INT;        // blocks
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
// Base64 encode function
const TypeFunc* OptoRuntime::base64_encodeBlock_Type() {
  int argcnt = 6;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dp
  fields[argp++] = TypeInt::BOOL;       // isURL
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//------------- Interpreter state access for on stack replacement
const TypeFunc* OptoRuntime::osr_end_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type
  fields = TypeTuple::fields(1);
  // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//-------------- methodData update helpers

const TypeFunc* OptoRuntime::profile_receiver_type_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeAryPtr::NOTNULL;    // methodData pointer
  fields[TypeFunc::Parms+1] = TypeInstPtr::BOTTOM;    // receiver oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain,range);
}

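// Raw-cell update of a ReceiverTypeData row: find the row whose receiver matches
// (or the first free row) and bump its count; if all rows are taken, bump the
// total counter to record the polymorphic case.  The commented-out vc-> calls
// below show the equivalent accessor-based form.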
JRT_LEAF(void, OptoRuntime::profile_receiver_type_C(DataLayout* data, oopDesc* receiver))
  if (receiver == NULL) return;
  Klass* receiver_klass = receiver->klass();

  intptr_t* mdp = ((intptr_t*)(data)) + DataLayout::header_size_in_cells();
  int empty_row = -1;           // free row, if any is encountered

  // ReceiverTypeData* vc = new ReceiverTypeData(mdp);
  for (uint row = 0; row < ReceiverTypeData::row_limit(); row++) {
    // if (vc->receiver(row) == receiver_klass)
    int receiver_off = ReceiverTypeData::receiver_cell_index(row);
    intptr_t row_recv = *(mdp + receiver_off);
    if (row_recv == (intptr_t) receiver_klass) {
      // vc->set_receiver_count(row, vc->receiver_count(row) + DataLayout::counter_increment);
      int count_off = ReceiverTypeData::receiver_count_cell_index(row);
      *(mdp + count_off) += DataLayout::counter_increment;
      return;
    } else if (row_recv == 0) {
      // else if (vc->receiver(row) == NULL)
      empty_row = (int) row;
    }
  }

  if (empty_row != -1) {
    int receiver_off = ReceiverTypeData::receiver_cell_index(empty_row);
    // vc->set_receiver(empty_row, receiver_klass);
    *(mdp + receiver_off) = (intptr_t) receiver_klass;
    // vc->set_receiver_count(empty_row, DataLayout::counter_increment);
    int count_off = ReceiverTypeData::receiver_count_cell_index(empty_row);
    *(mdp + count_off) = DataLayout::counter_increment;
  } else {
    // Receiver did not match any saved receiver and there is no empty row for it.
    // Increment total counter to indicate polymorphic case.
    intptr_t* count_p = (intptr_t*)(((uint8_t*)(data)) + in_bytes(CounterData::count_offset()));
    *count_p += DataLayout::counter_increment;
  }
JRT_END

//-------------------------------------------------------------------------------------
// register policy

bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
  assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
  switch (register_save_policy[reg]) {
    case 'C': return false; //SOC
    case 'E': return true ; //SOE
    case 'N': return false; //NS
    case 'A': return false; //AS
  }
  ShouldNotReachHere();
  return false;
}

//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// The method is an entry that is always called by a C++ method not
// directly from compiled code. Compiled code will call the C++ method following.
// We can't allow async exception to be installed during exception processing.
JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* thread, nmethod* &nm))

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling.  DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs check this on exit.
  assert(thread->exception_oop() != NULL, "exception oop is found");
  address handler_address = NULL;

  Handle exception(thread, thread->exception_oop());
  address pc = thread->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  thread->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to old paradigm: search for exception handler in caller_frame
    // instead of in the exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(thread);
    }

    // Check the stack guard pages.  If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !thread->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(thread, false);
      frame deoptee = thread->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      handler_address =
        force_unwind ? NULL : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == NULL) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert (handler_address != NULL, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and no other exception occurred during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception, pc, handler_address);
        }
      } else {
#ifdef ASSERT
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    thread->set_exception_pc(pc);
    thread->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore correct return pc.  Was saved above.
  thread->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note we enter without the usual JRT wrapper. We will call a helper routine that
// will do the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been
// we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* thread) {
//
// We are in Java not VM and in debug mode we have a NoHandleMark
//
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  debug_only(NoHandleMark __hm;)
  nmethod* nm = NULL;
  address handler_address = NULL;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(thread, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != NULL) {
    RegisterMap map(thread, false);
    frame caller = thread->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}

//------------------------------rethrow----------------------------------------
// We get here after compiled code has executed a 'RethrowNode'.  The callee
// is either throwing or rethrowing an exception.  The callee-save registers
// have been restored, synchronized objects have been unlocked and the callee
// stack frame has been removed.  The return address was passed in.
// Exception oop is passed as the 1st argument.  This routine is then called
// from the stub.  On exit, we know where to jump in the caller's code.
// After this C code exits, the stub will pop its frame and end in a jump
// (instead of a return).  We enter the caller's default handler.
//
// This must be JRT_LEAF:
//     - caller will not change its state as we cannot block on exit,
//       therefore raw_exception_handler_for_return_address is all it takes
//       to handle deoptimized blobs
//
// However, there needs to be a safepoint check in the middle!  So compiled
// safepoints are completely watertight.
So compiled 1462 // safepoints are completely watertight. 1463 // 1464 // Thus, it cannot be a leaf since it contains the NoSafepointVerifier. 1465 // 1466 // *THIS IS NOT RECOMMENDED PROGRAMMING STYLE* 1467 // 1468 address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) { 1469 #ifndef PRODUCT 1470 SharedRuntime::_rethrow_ctr++; // count rethrows 1471 #endif 1472 assert (exception != NULL, "should have thrown a NULLPointerException"); 1473 #ifdef ASSERT 1474 if (!(exception->is_a(SystemDictionary::Throwable_klass()))) { 1475 // should throw an exception here 1476 ShouldNotReachHere(); 1477 } 1478 #endif 1479 1480 thread->set_vm_result(exception); 1481 // Frame not compiled (handles deoptimization blob) 1482 return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc); 1483 } 1484 1485 1486 const TypeFunc *OptoRuntime::rethrow_Type() { 1487 // create input type (domain) 1488 const Type **fields = TypeTuple::fields(1); 1489 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1490 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1491 1492 // create result type (range) 1493 fields = TypeTuple::fields(1); 1494 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop 1495 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields); 1496 1497 return TypeFunc::make(domain, range); 1498 } 1499 1500 1501 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) { 1502 // Deoptimize the caller before continuing, as the compiled 1503 // exception handler table may not be valid. 1504 if (!StressCompiledExceptionHandlers && doit) { 1505 deoptimize_caller_frame(thread); 1506 } 1507 } 1508 1509 void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) { 1510 // Called from within the owner thread, so no need for safepoint 1511 RegisterMap reg_map(thread); 1512 frame stub_frame = thread->last_frame(); 1513 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1514 frame caller_frame = stub_frame.sender(®_map); 1515 1516 // Deoptimize the caller frame. 1517 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 1518 } 1519 1520 1521 bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) { 1522 // Called from within the owner thread, so no need for safepoint 1523 RegisterMap reg_map(thread); 1524 frame stub_frame = thread->last_frame(); 1525 assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check"); 1526 frame caller_frame = stub_frame.sender(®_map); 1527 return caller_frame.is_deoptimized_frame(); 1528 } 1529 1530 1531 const TypeFunc *OptoRuntime::register_finalizer_Type() { 1532 // create input type (domain) 1533 const Type **fields = TypeTuple::fields(1); 1534 fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // oop; Receiver 1535 // // The JavaThread* is passed to each routine as the last argument 1536 // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // JavaThread *; Executing thread 1537 const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1,fields); 1538 1539 // create result type (range) 1540 fields = TypeTuple::fields(0); 1541 1542 const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields); 1543 1544 return TypeFunc::make(domain,range); 1545 } 1546 1547 1548 //----------------------------------------------------------------------------- 1549 // Dtrace support. 

//-----------------------------------------------------------------------------
// Dtrace support.  entry and exit probes have the same signature
const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;      // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM; // Method*; Method we are entering
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;   // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL; // oop; newly allocated object

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* thread))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

//-----------------------------------------------------------------------------

NamedCounter * volatile OptoRuntime::_named_counters = NULL;

//
// dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
    } else if (c->tag() == NamedCounter::BiasedLockingCounter) {
      BiasedLockingCounters* blc = ((BiasedLockingNamedCounter*)c)->counters();
      if (blc->nonzero()) {
        tty->print_cr("%s", c->name());
        blc->print_on(tty);
      }
#if INCLUDE_RTM_OPT
    } else if (c->tag() == NamedCounter::RTMLockingCounter) {
      RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
      if (rlc->nonzero()) {
        tty->print_cr("%s", c->name());
        rlc->print_on(tty);
      }
#endif
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}
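
// For reference (added, purely illustrative; names and numbers are made up),
// the Verbose output produced above looks like:
//
//   1200 java/lang/StringBuffer.append@14 (eliminated)
//   4200 Foo.bar@3
//   dynamic locks: 5400
//   eliminated locks: 1200 (22%)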

//
// Allocate a new NamedCounter.  The JVMState is used to generate the
// name, which consists of method@bci entries for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : NULL;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    if (bci < 0) bci = 0;
    if (m != NULL) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print linenumbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c;
  if (tag == NamedCounter::BiasedLockingCounter) {
    c = new BiasedLockingNamedCounter(st.as_string());
  } else if (tag == NamedCounter::RTMLockingCounter) {
    c = new RTMLockingNamedCounter(st.as_string());
  } else {
    c = new NamedCounter(st.as_string(), tag);
  }

  // Atomically add the new counter to the head of the list.  We only
  // add counters, so this is safe.
  NamedCounter* head;
  do {
    c->set_next(NULL);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}

int trace_exception_counter = 0;
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_compiled()) {
    CompiledMethod* cm = blob->as_compiled_method_or_null();
    cm->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.as_string());
}
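
// A line emitted by trace_exception() has the general shape (added note; the
// exact oop and method text comes from the respective print_value_on()):
//
//   <counter> [Exception (<msg>): <exception value> in <method, runtime stub or unknown> at 0x<pc>]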