/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/copy.hpp"
#include "utilities/preserveException.hpp"


// For debugging purposes:
//  To force FullGCALot inside a runtime function, add the following two lines
//
//  Universe::release_fullgc_alot_dummy();
//  MarkSweep::invoke(0, "Debugging");
//
//  At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000




// Compiled code entry points
address OptoRuntime::_new_instance_Java                           = NULL;
address OptoRuntime::_new_array_Java                              = NULL;
address OptoRuntime::_new_array_nozero_Java                       = NULL;
address OptoRuntime::_multianewarray2_Java                        = NULL;
address OptoRuntime::_multianewarray3_Java                        = NULL;
address OptoRuntime::_multianewarray4_Java                        = NULL;
address OptoRuntime::_multianewarray5_Java                        = NULL;
address OptoRuntime::_multianewarrayN_Java                        = NULL;
address OptoRuntime::_vtable_must_compile_Java                    = NULL;
address OptoRuntime::_complete_monitor_locking_Java               = NULL;
address OptoRuntime::_monitor_notify_Java                         = NULL;
address OptoRuntime::_monitor_notifyAll_Java                      = NULL;
address OptoRuntime::_rethrow_Java                                = NULL;

address OptoRuntime::_slow_arraycopy_Java                         = NULL;
address OptoRuntime::_register_finalizer_Java                     = NULL;

ExceptionBlob* OptoRuntime::_exception_blob;

// This should be called in an assertion at the start of OptoRuntime routines
// which are entered from compiled code (all of them)
#ifdef ASSERT
static bool check_compiled_frame(JavaThread* thread) {
  assert(thread->last_frame().is_runtime_frame(), "cannot call runtime directly from compiled code");
  RegisterMap map(thread, false);
  frame caller = thread->last_frame().sender(&map);
  assert(caller.is_compiled_frame(), "not being called from compiled like code");
  return true;
}
#endif // ASSERT


#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
  if (var == NULL) { return false; }

bool OptoRuntime::generate(ciEnv* env) {

  generate_exception_blob();

  // Note: tls: Means fetching the return oop out of the thread-local storage
  //
  //   variable/name                      type-function-gen            , runtime method                           ,fncy_jp, tls,save_args,retpc
  // -------------------------------------------------------------------------------------------------------------------------------
  gen(env, _new_instance_Java             , new_instance_Type          , new_instance_C                           ,    0 , true , false, false);
  gen(env, _new_array_Java                , new_array_Type             , new_array_C                              ,    0 , true , false, false);
  gen(env, _new_array_nozero_Java         , new_array_Type             , new_array_nozero_C                       ,    0 , true , false, false);
  gen(env, _multianewarray2_Java          , multianewarray2_Type       , multianewarray2_C                        ,    0 , true , false, false);
  gen(env, _multianewarray3_Java          , multianewarray3_Type       , multianewarray3_C                        ,    0 , true , false, false);
  gen(env, _multianewarray4_Java          , multianewarray4_Type       , multianewarray4_C                        ,    0 , true , false, false);
  gen(env, _multianewarray5_Java          , multianewarray5_Type       , multianewarray5_C                        ,    0 , true , false, false);
  gen(env, _multianewarrayN_Java          , multianewarrayN_Type       , multianewarrayN_C                        ,    0 , true , false, false);
  gen(env, _complete_monitor_locking_Java , complete_monitor_enter_Type, SharedRuntime::complete_monitor_locking_C,    0 , false, false, false);
  gen(env, _monitor_notify_Java           , monitor_notify_Type        , monitor_notify_C                         ,    0 , false, false, false);
  gen(env, _monitor_notifyAll_Java        , monitor_notify_Type        , monitor_notifyAll_C                      ,    0 , false, false, false);
  gen(env, _rethrow_Java                  , rethrow_Type               , rethrow_C                                ,    2 , true , false, true );

  gen(env, _slow_arraycopy_Java           , slow_arraycopy_Type        , SharedRuntime::slow_arraycopy_C          ,    0 , false, false, false);
  gen(env, _register_finalizer_Java       , register_finalizer_Type    , register_finalizer                       ,    0 , false, false, false);

  return true;
}

#undef gen


// Helper method to do generation of RunTimeStub's
address OptoRuntime::generate_stub( ciEnv* env,
                                    TypeFunc_generator gen, address C_function,
                                    const char *name, int is_fancy_jump,
                                    bool pass_tls,
                                    bool save_argument_registers,
                                    bool return_pc) {

  // Matching the default directive, we currently have no method to match.
  DirectiveSet* directive = DirectivesStack::getDefaultDirective(CompileBroker::compiler(CompLevel_full_optimization));
  ResourceMark rm;
  Compile C( env, gen, C_function, name, is_fancy_jump, pass_tls, save_argument_registers, return_pc, directive);
  DirectivesStack::release(directive);
  return C.stub_entry_point();
}

const char* OptoRuntime::stub_name(address entry) {
#ifndef PRODUCT
  CodeBlob* cb = CodeCache::find_blob(entry);
  RuntimeStub* rs =(RuntimeStub *)cb;
  assert(rs != NULL && rs->is_runtime_stub(), "not a runtime stub");
  return rs->name();
#else
  // Fast implementation for product mode (maybe it should be inlined too)
  return "runtime stub";
#endif
}


//=============================================================================
// Opto compiler runtime routines
//=============================================================================


//=============================allocation======================================
// We failed the fast-path allocation.  Now we need to do a scavenge or GC
// and try allocation again.

// object allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_instance_ctr++;         // new instance requires GC
#endif
  assert(check_compiled_frame(thread), "incorrect caller");

  // These checks are cheap to make and support reflective allocation.
  int lh = klass->layout_helper();
  if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
    Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
    klass->check_valid_for_instantiation(false, THREAD);
    if (!HAS_PENDING_EXCEPTION) {
      InstanceKlass::cast(klass)->initialize(THREAD);
    }
  }

  if (!HAS_PENDING_EXCEPTION) {
    // Scavenge and allocate an instance.
    Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
    oop result = InstanceKlass::cast(klass)->allocate_instance(THREAD);
    thread->set_vm_result(result);

    // Pass oops back through thread local storage.  Our apparent type to Java
    // is that we return an oop, but we can block on exit from this routine and
    // a GC can trash the oop in C's return register.  The generated stub will
    // fetch the oop from TLS after any possible GC.
  }

  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END


// array allocation
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_C(Klass* array_type, int len, JavaThread *thread))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(thread), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  if (array_type->is_typeArray_klass()) {
    // The oopFactory likes to work with the element type.
    // (We could bypass the oopFactory, since it doesn't add much value.)
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    result = oopFactory::new_typeArray(elem_type, len, THREAD);
  } else {
    // Although the oopFactory likes to work with the elem_type,
    // the compiler prefers the array_type, since it must already have
    // that latter value in hand for the fast path.
    Handle holder(THREAD, array_type->klass_holder()); // keep the array klass alive
    Klass* elem_type = ObjArrayKlass::cast(array_type)->element_klass();
    result = oopFactory::new_objArray(elem_type, len, THREAD);
  }

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(result);
  JRT_BLOCK_END;

  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(thread);
JRT_END

// array allocation without zeroing
JRT_BLOCK_ENTRY(void, OptoRuntime::new_array_nozero_C(Klass* array_type, int len, JavaThread *thread))
  JRT_BLOCK;
#ifndef PRODUCT
  SharedRuntime::_new_array_ctr++;            // new array requires GC
#endif
  assert(check_compiled_frame(thread), "incorrect caller");

  // Scavenge and allocate an instance.
  oop result;

  assert(array_type->is_typeArray_klass(), "should be called only for type array");
  // The oopFactory likes to work with the element type.
  BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
  result = oopFactory::new_typeArray_nozero(elem_type, len, THREAD);

  // Pass oops back through thread local storage.  Our apparent type to Java
  // is that we return an oop, but we can block on exit from this routine and
  // a GC can trash the oop in C's return register.  The generated stub will
  // fetch the oop from TLS after any possible GC.
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(result);
  JRT_BLOCK_END;


  // inform GC that we won't do card marks for initializing writes.
  SharedRuntime::on_slowpath_allocation_exit(thread);

  oop result = thread->vm_result();
  if ((len > 0) && (result != NULL) &&
      is_deoptimized_caller_frame(thread)) {
    // Zero array here if the caller is deoptimized.
    int size = ((typeArrayOop)result)->object_size();
    BasicType elem_type = TypeArrayKlass::cast(array_type)->element_type();
    const size_t hs = arrayOopDesc::header_size(elem_type);
    // Align to next 8 bytes to avoid trashing array's length.
    const size_t aligned_hs = align_object_offset(hs);
    HeapWord* obj = (HeapWord*)result;
    if (aligned_hs > hs) {
      Copy::zero_to_words(obj+hs, aligned_hs-hs);
    }
    // Optimized zeroing.
    Copy::fill_to_aligned_words(obj+aligned_hs, size-aligned_hs);
  }

JRT_END

// Note: multianewarray for one dimension is handled inline by GraphKit::new_array.
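// The multianewarray*_C entry points below all follow the same pattern: the
// per-dimension lengths coming from compiled code are packed into a jint array
// and handed to ArrayKlass::multi_allocate; the resulting oop is passed back to
// the generated stub through thread-local storage (set_vm_result), just like
// the other allocation slow paths above.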

// multianewarray for 2 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray2_C(Klass* elem_type, int len1, int len2, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi2_ctr++;                // multianewarray for 2 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[2];
  dims[0] = len1;
  dims[1] = len2;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(2, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

// multianewarray for 3 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray3_C(Klass* elem_type, int len1, int len2, int len3, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi3_ctr++;                // multianewarray for 3 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[3];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(3, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

// multianewarray for 4 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray4_C(Klass* elem_type, int len1, int len2, int len3, int len4, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi4_ctr++;                // multianewarray for 4 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[4];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(4, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

// multianewarray for 5 dimensions
JRT_ENTRY(void, OptoRuntime::multianewarray5_C(Klass* elem_type, int len1, int len2, int len3, int len4, int len5, JavaThread *thread))
#ifndef PRODUCT
  SharedRuntime::_multi5_ctr++;                // multianewarray for 5 dimensions
#endif
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  jint dims[5];
  dims[0] = len1;
  dims[1] = len2;
  dims[2] = len3;
  dims[3] = len4;
  dims[4] = len5;
  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(5, dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, OptoRuntime::multianewarrayN_C(Klass* elem_type, arrayOopDesc* dims, JavaThread *thread))
  assert(check_compiled_frame(thread), "incorrect caller");
  assert(elem_type->is_klass(), "not a class");
  assert(oop(dims)->is_typeArray(), "not an array");

  ResourceMark rm;
  jint len = dims->length();
  assert(len > 0, "Dimensions array should contain data");
  jint *c_dims = NEW_RESOURCE_ARRAY(jint, len);
  ArrayAccess<>::arraycopy_to_native<>(dims, typeArrayOopDesc::element_offset<jint>(0),
                                       c_dims, len);

  Handle holder(THREAD, elem_type->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(elem_type)->multi_allocate(len, c_dims, THREAD);
  deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION);
  thread->set_vm_result(obj);
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notify_C(oopDesc* obj, JavaThread *thread))

  // Very few notify/notifyAll operations find any threads on the waitset, so
  // the dominant fast-path is to simply return.
  // Relatedly, it's critical that notify/notifyAll be fast in order to
  // reduce lock hold times.
  if (!SafepointSynchronize::is_synchronizing()) {
    if (ObjectSynchronizer::quick_notify(obj, thread, false)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(THREAD, obj);
  ObjectSynchronizer::notify(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

JRT_BLOCK_ENTRY(void, OptoRuntime::monitor_notifyAll_C(oopDesc* obj, JavaThread *thread))

  if (!SafepointSynchronize::is_synchronizing() ) {
    if (ObjectSynchronizer::quick_notify(obj, thread, true)) {
      return;
    }
  }

  // This is the case the fast-path above isn't provisioned to handle.
  // The fast-path is designed to handle frequently arising cases in an efficient manner.
  // (The fast-path is just a degenerate variant of the slow-path).
  // Perform the dreaded state transition and pass control into the slow-path.
  JRT_BLOCK;
  Handle h_obj(THREAD, obj);
  ObjectSynchronizer::notifyall(h_obj, CHECK);
  JRT_BLOCK_END;
JRT_END

const TypeFunc *OptoRuntime::new_instance_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Klass to be allocated
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}


const TypeFunc *OptoRuntime::athrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop to be thrown
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


const TypeFunc *OptoRuntime::new_array_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
  fields[TypeFunc::Parms+1] = TypeInt::INT;           // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

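// The *_Type() helpers in this file describe the call signatures of these
// runtime entry points to the compiler: the domain tuple lists the incoming
// arguments starting at slot TypeFunc::Parms, and the range tuple lists the
// results. 64-bit values (long/double) occupy two adjacent slots, with
// Type::HALF filling the second one (see l2f_Type() and void_long_Type() below).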
const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
  // create input type (domain)
  const int nargs = ndim + 1;
  const Type **fields = TypeTuple::fields(nargs);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
  for( int i = 1; i < nargs; i++ )
    fields[TypeFunc::Parms + i] = TypeInt::INT;       // array size
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+nargs, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::multianewarray2_Type() {
  return multianewarray_Type(2);
}

const TypeFunc *OptoRuntime::multianewarray3_Type() {
  return multianewarray_Type(3);
}

const TypeFunc *OptoRuntime::multianewarray4_Type() {
  return multianewarray_Type(4);
}

const TypeFunc *OptoRuntime::multianewarray5_Type() {
  return multianewarray_Type(5);
}

const TypeFunc *OptoRuntime::multianewarrayN_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;   // element klass
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // array of dim sizes
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::uncommon_trap_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // trap_reason (deopt reason and action)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

//-----------------------------------------------------------------------------
// Monitor Handling
const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2,fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

  return TypeFunc::make(domain,range);
}


//-----------------------------------------------------------------------------
const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(3);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock - BasicLock
  fields[TypeFunc::Parms+2] = TypeRawPtr::BOTTOM;    // Thread pointer (Self)
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::monitor_notify_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::flush_windows_Type() {
  // create input type (domain)
  const Type** fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::l2f_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::modf_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::FLOAT;
  fields[TypeFunc::Parms+1] = Type::FLOAT;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = Type::FLOAT;

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::Math_D_D_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::Math_DD_D_Type() {
  const Type **fields = TypeTuple::fields(4);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  fields[TypeFunc::Parms+2] = Type::DOUBLE;
  fields[TypeFunc::Parms+3] = Type::HALF;
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+4, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = Type::DOUBLE;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

//-------------- currentTimeMillis, currentTimeNanos, etc

const TypeFunc* OptoRuntime::void_long_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(0);
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+0, fields);

  // create result type (range)
  fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeLong::LONG;
  fields[TypeFunc::Parms+1] = Type::HALF;
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+2, fields);

  return TypeFunc::make(domain, range);
}

// arraycopy stub variations:
enum ArrayCopyType {
  ac_fast,                      // void(ptr, ptr, size_t)
  ac_checkcast,                 //  int(ptr, ptr, size_t, size_t, ptr)
  ac_slow,                      // void(ptr, int, ptr, int, int)
  ac_generic                    //  int(ptr, int, ptr, int, int)
};

static const TypeFunc* make_arraycopy_Type(ArrayCopyType act) {
  // create input type (domain)
  int num_args      = (act == ac_fast ? 3 : 5);
  int num_size_args = (act == ac_fast ? 1 : act == ac_checkcast ? 2 : 0);
  int argcnt = num_args;
  LP64_ONLY(argcnt += num_size_args); // halfwords for lengths
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // src_pos
  }
  fields[argp++] = TypePtr::NOTNULL;    // dest
  if (num_size_args == 0) {
    fields[argp++] = TypeInt::INT;      // dest_pos
    fields[argp++] = TypeInt::INT;      // length
  }
  while (num_size_args-- > 0) {
    fields[argp++] = TypeX_X;               // size in whatevers (size_t)
    LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  }
  if (act == ac_checkcast) {
    fields[argp++] = TypePtr::NOTNULL;  // super_klass
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding of act");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // create result type if needed
  int retcnt = (act == ac_checkcast || act == ac_generic ? 1 : 0);
  fields = TypeTuple::fields(1);
  if (retcnt == 0)
    fields[TypeFunc::Parms+0] = NULL; // void
  else
    fields[TypeFunc::Parms+0] = TypeInt::INT; // status result, if needed
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+retcnt, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::fast_arraycopy_Type() {
  // This signature is simple:  Two base pointers and a size_t.
  return make_arraycopy_Type(ac_fast);
}

const TypeFunc* OptoRuntime::checkcast_arraycopy_Type() {
  // An extension of fast_arraycopy_Type which adds type checking.
  return make_arraycopy_Type(ac_checkcast);
}

const TypeFunc* OptoRuntime::slow_arraycopy_Type() {
  // This signature is exactly the same as System.arraycopy.
  // There are no intptr_t (int/long) arguments.
  return make_arraycopy_Type(ac_slow);
}

const TypeFunc* OptoRuntime::generic_arraycopy_Type() {
  // This signature is like System.arraycopy, except that it returns status.
  return make_arraycopy_Type(ac_generic);
}


const TypeFunc* OptoRuntime::array_fill_Type() {
  const Type** fields;
  int argp = TypeFunc::Parms;
  // create input type (domain): pointer, int, size_t
  fields = TypeTuple::fields(3 LP64_ONLY( + 1));
  fields[argp++] = TypePtr::NOTNULL;
  fields[argp++] = TypeInt::INT;
  fields[argp++] = TypeX_X;               // size in whatevers (size_t)
  LP64_ONLY(fields[argp++] = Type::HALF); // other half of long length
  const TypeTuple *domain = TypeTuple::make(argp, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

// for aescrypt encrypt/decrypt operations, just three pointers returning void (length is constant)
const TypeFunc* OptoRuntime::aescrypt_block_Type() {
  // create input type (domain)
  int num_args = 3;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 4;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;    // original k array
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesCRC32(int crc, byte* b, int len)
 */
const TypeFunc* OptoRuntime::updateBytesCRC32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesCRC32C(int crc, byte* buf, int len, int* table)
 */
const TypeFunc* OptoRuntime::updateBytesCRC32C_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // crc
  fields[argp++] = TypePtr::NOTNULL;    // buf
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // table
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

/**
 * int updateBytesAdler32(int adler, byte* b, int off, int len)
 */
const TypeFunc* OptoRuntime::updateBytesAdler32_Type() {
  // create input type (domain)
  int num_args = 3;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypeInt::INT;        // adler
  fields[argp++] = TypePtr::NOTNULL;    // src + offset
  fields[argp++] = TypeInt::INT;        // len
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // adler result
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning int
const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
  // create input type (domain)
  int num_args = 5;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 6;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypePtr::NOTNULL;    // r array
  fields[argp++] = TypeInt::INT;        // src len
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;    // original k array
  }
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

// for electronicCodeBook calls of aescrypt encrypt/decrypt, three pointers and a length, returning int
const TypeFunc* OptoRuntime::electronicCodeBook_aescrypt_Type() {
  // create input type (domain)
  int num_args = 4;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 5;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypeInt::INT;        // src len
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;    // original k array
  }
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// for counterMode calls of aescrypt encrypt/decrypt, six pointers and a length, returning int
const TypeFunc* OptoRuntime::counterMode_aescrypt_Type() {
  // create input type (domain)
  int num_args = 7;
  if (Matcher::pass_original_key_for_aes()) {
    num_args = 8;
  }
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src
  fields[argp++] = TypePtr::NOTNULL;    // dest
  fields[argp++] = TypePtr::NOTNULL;    // k array
  fields[argp++] = TypePtr::NOTNULL;    // counter array
  fields[argp++] = TypeInt::INT;        // src len
  fields[argp++] = TypePtr::NOTNULL;    // saved_encCounter
  fields[argp++] = TypePtr::NOTNULL;    // saved used addr
  if (Matcher::pass_original_key_for_aes()) {
    fields[argp++] = TypePtr::NOTNULL;    // original k array
  }
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);
  // returning cipher len (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

/*
 * void implCompress(byte[] buf, int ofs)
 */
const TypeFunc* OptoRuntime::sha_implCompress_Type() {
  // create input type (domain)
  int num_args = 2;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // buf
  fields[argp++] = TypePtr::NOTNULL; // state
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

/*
 * int implCompressMultiBlock(byte[] b, int ofs, int limit)
 */
const TypeFunc* OptoRuntime::digestBase_implCompressMB_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL; // buf
  fields[argp++] = TypePtr::NOTNULL; // state
  fields[argp++] = TypeInt::INT;     // ofs
  fields[argp++] = TypeInt::INT;     // limit
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning ofs (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT; // ofs
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::multiplyToLen_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // xlen
  fields[argp++] = TypePtr::NOTNULL;    // y
  fields[argp++] = TypeInt::INT;        // ylen
  fields[argp++] = TypePtr::NOTNULL;    // z
  fields[argp++] = TypeInt::INT;        // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::squareToLen_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // x
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypePtr::NOTNULL;    // z
  fields[argp++] = TypeInt::INT;        // zlen
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

// for mulAdd calls, 2 pointers and 3 ints, returning int
const TypeFunc* OptoRuntime::mulAdd_Type() {
  // create input type (domain)
  int num_args = 5;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // out
  fields[argp++] = TypePtr::NOTNULL;    // in
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeInt::INT;        // k
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // returning carry (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::montgomeryMultiply_Type() {
  // create input type (domain)
  int num_args = 7;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // b
  fields[argp++] = TypePtr::NOTNULL;    // n
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeLong::LONG;      // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;    // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::montgomerySquare_Type() {
  // create input type (domain)
  int num_args = 6;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // a
  fields[argp++] = TypePtr::NOTNULL;    // n
  fields[argp++] = TypeInt::INT;        // len
  fields[argp++] = TypeLong::LONG;      // inv
  fields[argp++] = Type::HALF;
  fields[argp++] = TypePtr::NOTNULL;    // result
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypePtr::NOTNULL;

  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc * OptoRuntime::bigIntegerShift_Type() {
  int argcnt = 5;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // newArr
  fields[argp++] = TypePtr::NOTNULL;    // oldArr
  fields[argp++] = TypeInt::INT;        // newIdx
  fields[argp++] = TypeInt::INT;        // shiftCount
  fields[argp++] = TypeInt::INT;        // numIter
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // no result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = NULL;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

const TypeFunc* OptoRuntime::vectorizedMismatch_Type() {
  // create input type (domain)
  int num_args = 4;
  int argcnt = num_args;
  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // obja
  fields[argp++] = TypePtr::NOTNULL;    // objb
  fields[argp++] = TypeInt::INT;        // length, number of elements
  fields[argp++] = TypeInt::INT;        // log2scale, element size
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms + argcnt, fields);

  // return mismatch index (int)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT;
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms + 1, fields);
  return TypeFunc::make(domain, range);
}

// GHASH block processing
const TypeFunc* OptoRuntime::ghash_processBlocks_Type() {
  int argcnt = 4;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // state
  fields[argp++] = TypePtr::NOTNULL;    // subkeyH
  fields[argp++] = TypePtr::NOTNULL;    // data
  fields[argp++] = TypeInt::INT;        // blocks
  assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
// Base64 encode function
const TypeFunc* OptoRuntime::base64_encodeBlock_Type() {
  int argcnt = 6;

  const Type** fields = TypeTuple::fields(argcnt);
  int argp = TypeFunc::Parms;
  fields[argp++] = TypePtr::NOTNULL;    // src array
  fields[argp++] = TypeInt::INT;        // offset
  fields[argp++] = TypeInt::INT;        // length
  fields[argp++] = TypePtr::NOTNULL;    // dest array
  fields[argp++] = TypeInt::INT;        // dp
  fields[argp++] = TypeInt::BOOL;       // isURL
  assert(argp == TypeFunc::Parms + argcnt, "correct decoding");
  const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = NULL; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//------------- Interpreter state access for on stack replacement
const TypeFunc* OptoRuntime::osr_end_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // OSR temp buf
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type
  fields = TypeTuple::fields(1);
  // fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // locked oop
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

//-------------- methodData update helpers

const TypeFunc* OptoRuntime::profile_receiver_type_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeAryPtr::NOTNULL; // methodData pointer
  fields[TypeFunc::Parms+1] = TypeInstPtr::BOTTOM; // receiver oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = NULL; // void
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain,range);
}

JRT_LEAF(void, OptoRuntime::profile_receiver_type_C(DataLayout* data, oopDesc* receiver))
  if (receiver == NULL) return;
  Klass* receiver_klass = receiver->klass();

  intptr_t* mdp = ((intptr_t*)(data)) + DataLayout::header_size_in_cells();
  int empty_row = -1;           // free row, if any is encountered

  // ReceiverTypeData* vc = new ReceiverTypeData(mdp);
  for (uint row = 0; row < ReceiverTypeData::row_limit(); row++) {
    // if (vc->receiver(row) == receiver_klass)
    int receiver_off = ReceiverTypeData::receiver_cell_index(row);
    intptr_t row_recv = *(mdp + receiver_off);
    if (row_recv == (intptr_t) receiver_klass) {
      // vc->set_receiver_count(row, vc->receiver_count(row) + DataLayout::counter_increment);
      int count_off = ReceiverTypeData::receiver_count_cell_index(row);
      *(mdp + count_off) += DataLayout::counter_increment;
      return;
    } else if (row_recv == 0) {
      // else if (vc->receiver(row) == NULL)
      empty_row = (int) row;
    }
  }

  if (empty_row != -1) {
    int receiver_off = ReceiverTypeData::receiver_cell_index(empty_row);
    // vc->set_receiver(empty_row, receiver_klass);
    *(mdp + receiver_off) = (intptr_t) receiver_klass;
    // vc->set_receiver_count(empty_row, DataLayout::counter_increment);
    int count_off = ReceiverTypeData::receiver_count_cell_index(empty_row);
    *(mdp + count_off) = DataLayout::counter_increment;
  } else {
    // Receiver did not match any saved receiver and there is no empty row for it.
    // Increment total counter to indicate polymorphic case.
    intptr_t* count_p = (intptr_t*)(((uint8_t*)(data)) + in_bytes(CounterData::count_offset()));
    *count_p += DataLayout::counter_increment;
  }
JRT_END

//-------------------------------------------------------------------------------------
// register policy

bool OptoRuntime::is_callee_saved_register(MachRegisterNumbers reg) {
  assert(reg >= 0 && reg < _last_Mach_Reg, "must be a machine register");
  switch (register_save_policy[reg]) {
    case 'C': return false; //SOC
    case 'E': return true ; //SOE
    case 'N': return false; //NS
    case 'A': return false; //AS
  }
  ShouldNotReachHere();
  return false;
}
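// Note: register_save_policy[] comes from the ADLC-generated register tables;
// the letters stand for Save-On-Call, Save-On-Entry, No-Save and Always-Save.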

//-----------------------------------------------------------------------
// Exceptions
//

static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg);

// The method is an entry that is always called by a C++ method not
// directly from compiled code. Compiled code will call the C++ method following.
// We can't allow async exception to be installed during exception processing.
JRT_ENTRY_NO_ASYNC(address, OptoRuntime::handle_exception_C_helper(JavaThread* thread, nmethod* &nm))

  // Do not confuse exception_oop with pending_exception. The exception_oop
  // is only used to pass arguments into the method. Not for general
  // exception handling.  DO NOT CHANGE IT to use pending_exception, since
  // the runtime stubs checks this on exit.
  assert(thread->exception_oop() != NULL, "exception oop is found");
  address handler_address = NULL;

  Handle exception(thread, thread->exception_oop());
  address pc = thread->exception_pc();

  // Clear out the exception oop and pc since looking up an
  // exception handler can cause class loading, which might throw an
  // exception and those fields are expected to be clear during
  // normal bytecode execution.
  thread->clear_exception_oop_and_pc();

  LogTarget(Info, exceptions) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    trace_exception(&ls, exception(), pc, "");
  }

  // for AbortVMOnException flag
  Exceptions::debug_check_abort(exception);

#ifdef ASSERT
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  // new exception handling: this method is entered only from adapters
  // exceptions from compiled java methods are handled in compiled code
  // using rethrow node

  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "No NMethod found");
  if (nm->is_native_method()) {
    fatal("Native method should not have path to exception handling");
  } else {
    // we are switching to old paradigm: search for exception handler in caller_frame
    // instead in exception handler of caller_frame.sender()

    if (JvmtiExport::can_post_on_exceptions()) {
      // "Full-speed catching" is not necessary here,
      // since we're notifying the VM on every catch.
      // Force deoptimization and the rest of the lookup
      // will be fine.
      deoptimize_caller_frame(thread);
    }

    // Check the stack guard pages.  If enabled, look for handler in this frame;
    // otherwise, forcibly unwind the frame.
    //
    // 4826555: use default current sp for reguard_stack instead of &nm: it's more accurate.
    bool force_unwind = !thread->reguard_stack();
    bool deopting = false;
    if (nm->is_deopt_pc(pc)) {
      deopting = true;
      RegisterMap map(thread, false);
      frame deoptee = thread->last_frame().sender(&map);
      assert(deoptee.is_deoptimized_frame(), "must be deopted");
      // Adjust the pc back to the original throwing pc
      pc = deoptee.pc();
    }

    // If we are forcing an unwind because of stack overflow then deopt is
    // irrelevant since we are throwing the frame away anyway.

    if (deopting && !force_unwind) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {

      handler_address =
        force_unwind ? NULL : nm->handler_for_exception_and_pc(exception, pc);

      if (handler_address == NULL) {
        bool recursive_exception = false;
        handler_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        assert (handler_address != NULL, "must have compiled handler");
        // Update the exception cache only when the unwind was not forced
        // and there didn't happen another exception during the computation of the
        // compiled exception handler. Checking for exception oop equality is not
        // sufficient because some exceptions are pre-allocated and reused.
        if (!force_unwind && !recursive_exception) {
          nm->add_handler_for_exception_and_pc(exception,pc,handler_address);
        }
      } else {
#ifdef ASSERT
        bool recursive_exception = false;
        address computed_address = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, force_unwind, true, recursive_exception);
        vmassert(recursive_exception || (handler_address == computed_address), "Handler address inconsistency: " PTR_FORMAT " != " PTR_FORMAT,
                 p2i(handler_address), p2i(computed_address));
#endif
      }
    }

    thread->set_exception_pc(pc);
    thread->set_exception_handler_pc(handler_address);

    // Check if the exception PC is a MethodHandle call site.
    thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
  }

  // Restore correct return pc.  Was saved above.
  thread->set_exception_oop(exception());
  return handler_address;

JRT_END

// We are entering here from exception_blob
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of top frame method
// Note we enter without the usual JRT wrapper. We will call a helper routine that
// will do the normal VM entry. We do it this way so that we can see if the nmethod
// we looked up the handler for has been deoptimized in the meantime. If it has been
// we must not use the handler and instead return the deopt blob.
address OptoRuntime::handle_exception_C(JavaThread* thread) {
//
// We are in Java not VM and in debug mode we have a NoHandleMark
//
#ifndef PRODUCT
  SharedRuntime::_find_handler_ctr++;          // find exception handler
#endif
  debug_only(NoHandleMark __hm;)
  nmethod* nm = NULL;
  address handler_address = NULL;
  {
    // Enter the VM

    ResetNoHandleMark rnhm;
    handler_address = handle_exception_C_helper(thread, nm);
  }

  // Back in java: Use no oops, DON'T safepoint

  // Now check to see if the handler we are returning is in a now
  // deoptimized frame

  if (nm != NULL) {
    RegisterMap map(thread, false);
    frame caller = thread->last_frame().sender(&map);
#ifdef ASSERT
    assert(caller.is_compiled_frame(), "must be");
#endif // ASSERT
    if (caller.is_deoptimized_frame()) {
      handler_address = SharedRuntime::deopt_blob()->unpack_with_exception();
    }
  }
  return handler_address;
}

//------------------------------rethrow----------------------------------------
// We get here after compiled code has executed a 'RethrowNode'. The callee
// is either throwing or rethrowing an exception. The callee-save registers
// have been restored, synchronized objects have been unlocked and the callee
// stack frame has been removed. The return address was passed in.
// The return address was passed in.
// Exception oop is passed as the 1st argument. This routine is then called
// from the stub. On exit, we know where to jump in the caller's code.
// After this C code exits, the stub will pop its frame and end in a jump
// (instead of a return). We enter the caller's default handler.
//
// This must be JRT_LEAF:
//     - caller will not change its state as we cannot block on exit,
//       therefore raw_exception_handler_for_return_address is all it takes
//       to handle deoptimized blobs
//
// However, there needs to be a safepoint check in the middle!  So compiled
// safepoints are completely watertight.
//
// Thus, it cannot be a leaf since it contains the NoSafepointVerifier.
//
// *THIS IS NOT RECOMMENDED PROGRAMMING STYLE*
//
address OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc) {
#ifndef PRODUCT
  SharedRuntime::_rethrow_ctr++;               // count rethrows
#endif
  assert(exception != NULL, "should have thrown a NullPointerException");
#ifdef ASSERT
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    // should throw an exception here
    ShouldNotReachHere();
  }
#endif

  thread->set_vm_result(exception);
  // Frame not compiled (handles deoptimization blob)
  return SharedRuntime::raw_exception_handler_for_return_address(thread, ret_pc);
}


const TypeFunc *OptoRuntime::rethrow_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Exception oop
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

  return TypeFunc::make(domain, range);
}

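// Added note: in these *_Type() helpers the Java-visible arguments are indexed
// from TypeFunc::Parms; the tuple slots below Parms (control, I/O, memory,
// frame pointer, return address) are pre-populated by TypeTuple::fields(), so
// a helper only has to fill in the Parms+n slots it actually uses.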
void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
  // Deoptimize the caller before continuing, as the compiled
  // exception handler table may not be valid.
  if (!StressCompiledExceptionHandlers && doit) {
    deoptimize_caller_frame(thread);
  }
}

void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());
}


bool OptoRuntime::is_deoptimized_caller_frame(JavaThread *thread) {
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  return caller_frame.is_deoptimized_frame();
}


const TypeFunc *OptoRuntime::register_finalizer_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // oop;          Receiver
  // // The JavaThread* is passed to each routine as the last argument
  // fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // JavaThread *; Executing thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+1, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


//-----------------------------------------------------------------------------
// Dtrace support.  Entry and exit probes have the same signature.
const TypeFunc *OptoRuntime::dtrace_method_entry_exit_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;       // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeMetadataPtr::BOTTOM;  // Method*;    Method we are entering
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
  // create input type (domain)
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;     // Thread-local storage
  fields[TypeFunc::Parms+1] = TypeInstPtr::NOTNULL;   // oop;    newly allocated object

  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);

  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}


JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* thread))
  assert(oopDesc::is_oop(obj), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

//-----------------------------------------------------------------------------

NamedCounter * volatile OptoRuntime::_named_counters = NULL;

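// Added note: _named_counters is a singly linked list that only ever grows.
// new_named_counter() pushes new entries onto the front with a CAS and entries
// are never removed, so the dump below can walk the list without locking.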
//
// Dump the collected NamedCounters.
//
void OptoRuntime::print_named_counters() {
  int total_lock_count = 0;
  int eliminated_lock_count = 0;

  NamedCounter* c = _named_counters;
  while (c) {
    if (c->tag() == NamedCounter::LockCounter || c->tag() == NamedCounter::EliminatedLockCounter) {
      int count = c->count();
      if (count > 0) {
        bool eliminated = c->tag() == NamedCounter::EliminatedLockCounter;
        if (Verbose) {
          tty->print_cr("%d %s%s", count, c->name(), eliminated ? " (eliminated)" : "");
        }
        total_lock_count += count;
        if (eliminated) {
          eliminated_lock_count += count;
        }
      }
    } else if (c->tag() == NamedCounter::BiasedLockingCounter) {
      BiasedLockingCounters* blc = ((BiasedLockingNamedCounter*)c)->counters();
      if (blc->nonzero()) {
        tty->print_cr("%s", c->name());
        blc->print_on(tty);
      }
#if INCLUDE_RTM_OPT
    } else if (c->tag() == NamedCounter::RTMLockingCounter) {
      RTMLockingCounters* rlc = ((RTMLockingNamedCounter*)c)->counters();
      if (rlc->nonzero()) {
        tty->print_cr("%s", c->name());
        rlc->print_on(tty);
      }
#endif
    }
    c = c->next();
  }
  if (total_lock_count > 0) {
    tty->print_cr("dynamic locks: %d", total_lock_count);
    if (eliminated_lock_count) {
      tty->print_cr("eliminated locks: %d (%d%%)", eliminated_lock_count,
                    (int)(eliminated_lock_count * 100.0 / total_lock_count));
    }
  }
}

//
// Allocate a new NamedCounter.  The JVMState is used to generate the
// name, which consists of method@line for the inlining tree.
//

NamedCounter* OptoRuntime::new_named_counter(JVMState* youngest_jvms, NamedCounter::CounterTag tag) {
  int max_depth = youngest_jvms->depth();

  // Visit scopes from youngest to oldest.
  bool first = true;
  stringStream st;
  for (int depth = max_depth; depth >= 1; depth--) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    ciMethod* m = jvms->has_method() ? jvms->method() : NULL;
    if (!first) {
      st.print(" ");
    } else {
      first = false;
    }
    int bci = jvms->bci();
    if (bci < 0) bci = 0;
    if (m != NULL) {
      st.print("%s.%s", m->holder()->name()->as_utf8(), m->name()->as_utf8());
    } else {
      st.print("no method");
    }
    st.print("@%d", bci);
    // To print line numbers instead of bci use: m->line_number_from_bci(bci)
  }
  NamedCounter* c;
  if (tag == NamedCounter::BiasedLockingCounter) {
    c = new BiasedLockingNamedCounter(st.as_string());
  } else if (tag == NamedCounter::RTMLockingCounter) {
    c = new RTMLockingNamedCounter(st.as_string());
  } else {
    c = new NamedCounter(st.as_string(), tag);
  }

  // Atomically add the new counter to the head of the list.  We only
  // add counters, so this is safe.
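  // Added note: the generated name is one "Holder.method@bci" entry per
  // inlining level, youngest first (illustrative example only:
  // "java/lang/String.hashCode@11"). The push below is a lock-free prepend:
  // the CAS is retried if another thread installed a new head between the
  // read of _named_counters and the exchange.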
  NamedCounter* head;
  do {
    c->set_next(NULL);
    head = _named_counters;
    c->set_next(head);
  } while (Atomic::cmpxchg(&_named_counters, head, c) != head);
  return c;
}

int trace_exception_counter = 0;
static void trace_exception(outputStream* st, oop exception_oop, address exception_pc, const char* msg) {
  trace_exception_counter++;
  stringStream tempst;

  tempst.print("%d [Exception (%s): ", trace_exception_counter, msg);
  exception_oop->print_value_on(&tempst);
  tempst.print(" in ");
  CodeBlob* blob = CodeCache::find_blob(exception_pc);
  if (blob->is_compiled()) {
    CompiledMethod* cm = blob->as_compiled_method_or_null();
    cm->method()->print_value_on(&tempst);
  } else if (blob->is_runtime_stub()) {
    tempst.print("<runtime-stub>");
  } else {
    tempst.print("<unknown>");
  }
  tempst.print(" at " INTPTR_FORMAT, p2i(exception_pc));
  tempst.print("]");

  st->print_raw_cr(tempst.as_string());
}
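
// Added note: trace_exception() is only reached when the "exceptions" log tag
// is enabled at info level (e.g. -Xlog:exceptions=info); it emits one line per
// dispatched exception, roughly of the form:
//
//   <counter> [Exception (<msg>): <exception value> in <method or blob> at <pc>]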