1 /* 2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/systemDictionary.hpp" 27 #include "code/codeCache.hpp" 28 #include "code/debugInfoRec.hpp" 29 #include "code/nmethod.hpp" 30 #include "code/pcDesc.hpp" 31 #include "code/scopeDesc.hpp" 32 #include "interpreter/bytecode.hpp" 33 #include "interpreter/interpreter.hpp" 34 #include "interpreter/oopMapCache.hpp" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/oopFactory.hpp" 37 #include "memory/resourceArea.hpp" 38 #include "oops/method.hpp" 39 #include "oops/objArrayOop.inline.hpp" 40 #include "oops/oop.inline.hpp" 41 #include "oops/fieldStreams.hpp" 42 #include "oops/valueKlass.hpp" 43 #include "oops/verifyOopClosure.hpp" 44 #include "prims/jvmtiThreadState.hpp" 45 #include "runtime/biasedLocking.hpp" 46 #include "runtime/compilationPolicy.hpp" 47 #include "runtime/deoptimization.hpp" 48 #include "runtime/interfaceSupport.hpp" 49 #include "runtime/sharedRuntime.hpp" 50 #include "runtime/signature.hpp" 51 #include "runtime/stubRoutines.hpp" 52 #include "runtime/thread.hpp" 53 #include "runtime/vframe.hpp" 54 #include "runtime/vframeArray.hpp" 55 #include "runtime/vframe_hp.hpp" 56 #include "utilities/events.hpp" 57 #include "utilities/xmlstream.hpp" 58 59 #if INCLUDE_JVMCI 60 #include "jvmci/jvmciRuntime.hpp" 61 #include "jvmci/jvmciJavaClasses.hpp" 62 #endif 63 64 65 bool DeoptimizationMarker::_is_active = false; 66 67 Deoptimization::UnrollBlock::UnrollBlock(int size_of_deoptimized_frame, 68 int caller_adjustment, 69 int caller_actual_parameters, 70 int number_of_frames, 71 intptr_t* frame_sizes, 72 address* frame_pcs, 73 BasicType return_type, 74 int exec_mode) { 75 _size_of_deoptimized_frame = size_of_deoptimized_frame; 76 _caller_adjustment = caller_adjustment; 77 _caller_actual_parameters = caller_actual_parameters; 78 _number_of_frames = number_of_frames; 79 _frame_sizes = frame_sizes; 80 _frame_pcs = frame_pcs; 81 _register_block = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler); 82 _return_type = return_type; 83 _initial_info = 0; 84 // PD (x86 only) 85 _counter_temp = 0; 86 _unpack_kind = exec_mode; 87 _sender_sp_temp = 0; 88 89 _total_frame_sizes = size_of_frames(); 90 assert(exec_mode >= 0 && exec_mode < Unpack_LIMIT, "Unexpected exec_mode"); 91 } 92 93 94 Deoptimization::UnrollBlock::~UnrollBlock() { 95 FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes); 96 FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs); 97 FREE_C_HEAP_ARRAY(intptr_t, _register_block); 98 } 99 100 101 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const { 102 assert(register_number < RegisterMap::reg_count, "checking register number"); 103 return &_register_block[register_number * 2]; 104 } 105 106 107 108 int Deoptimization::UnrollBlock::size_of_frames() const { 109 // Acount first for the adjustment of the initial frame 110 int result = _caller_adjustment; 111 for (int index = 0; index < number_of_frames(); index++) { 112 result += frame_sizes()[index]; 113 } 114 return result; 115 } 116 117 118 void Deoptimization::UnrollBlock::print() { 119 ttyLocker ttyl; 120 tty->print_cr("UnrollBlock"); 121 tty->print_cr(" size_of_deoptimized_frame = %d", _size_of_deoptimized_frame); 122 tty->print( " frame_sizes: "); 123 for (int index = 0; index < number_of_frames(); index++) { 124 tty->print(INTX_FORMAT " ", frame_sizes()[index]); 125 } 126 tty->cr(); 127 } 128 129 130 // In order to make fetch_unroll_info work properly with escape 131 // analysis, The method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and 132 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation 133 // of previously eliminated objects occurs in realloc_objects, which is 134 // called from the method fetch_unroll_info_helper below. 135 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread, int exec_mode)) 136 // It is actually ok to allocate handles in a leaf method. It causes no safepoints, 137 // but makes the entry a little slower. There is however a little dance we have to 138 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro 139 140 // fetch_unroll_info() is called at the beginning of the deoptimization 141 // handler. Note this fact before we start generating temporary frames 142 // that can confuse an asynchronous stack walker. This counter is 143 // decremented at the end of unpack_frames(). 144 if (TraceDeoptimization) { 145 tty->print_cr("Deoptimizing thread " INTPTR_FORMAT, p2i(thread)); 146 } 147 thread->inc_in_deopt_handler(); 148 149 return fetch_unroll_info_helper(thread, exec_mode); 150 JRT_END 151 152 153 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap) 154 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread, int exec_mode) { 155 156 // Note: there is a safepoint safety issue here. No matter whether we enter 157 // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once 158 // the vframeArray is created. 159 // 160 161 // Allocate our special deoptimization ResourceMark 162 DeoptResourceMark* dmark = new DeoptResourceMark(thread); 163 assert(thread->deopt_mark() == NULL, "Pending deopt!"); 164 thread->set_deopt_mark(dmark); 165 166 frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect 167 RegisterMap map(thread, true); 168 RegisterMap dummy_map(thread, false); 169 // Now get the deoptee with a valid map 170 frame deoptee = stub_frame.sender(&map); 171 // Set the deoptee nmethod 172 assert(thread->deopt_nmethod() == NULL, "Pending deopt!"); 173 thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null()); 174 bool skip_internal = thread->deopt_nmethod() != NULL && !thread->deopt_nmethod()->compiler()->is_jvmci(); 175 176 if (VerifyStack) { 177 thread->validate_frame_layout(); 178 } 179 180 // Create a growable array of VFrames where each VFrame represents an inlined 181 // Java frame. This storage is allocated with the usual system arena. 182 assert(deoptee.is_compiled_frame(), "Wrong frame type"); 183 GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10); 184 vframe* vf = vframe::new_vframe(&deoptee, &map, thread); 185 while (!vf->is_top()) { 186 assert(vf->is_compiled_frame(), "Wrong frame type"); 187 chunk->push(compiledVFrame::cast(vf)); 188 vf = vf->sender(); 189 } 190 assert(vf->is_compiled_frame(), "Wrong frame type"); 191 chunk->push(compiledVFrame::cast(vf)); 192 193 ScopeDesc* trap_scope = chunk->at(0)->scope(); 194 Handle exceptionObject; 195 if (trap_scope->rethrow_exception()) { 196 if (PrintDeoptimizationDetails) { 197 tty->print_cr("Exception to be rethrown in the interpreter for method %s::%s at bci %d", trap_scope->method()->method_holder()->name()->as_C_string(), trap_scope->method()->name()->as_C_string(), trap_scope->bci()); 198 } 199 GrowableArray<ScopeValue*>* expressions = trap_scope->expressions(); 200 guarantee(expressions != NULL && expressions->length() > 0, "must have exception to throw"); 201 ScopeValue* topOfStack = expressions->top(); 202 exceptionObject = StackValue::create_stack_value(&deoptee, &map, topOfStack)->get_obj(); 203 assert(exceptionObject() != NULL, "exception oop can not be null"); 204 } 205 206 bool realloc_failures = false; 207 208 #if defined(COMPILER2) || INCLUDE_JVMCI 209 // Reallocate the non-escaping objects and restore their fields. Then 210 // relock objects if synchronization on them was eliminated. 211 #ifndef INCLUDE_JVMCI 212 if (DoEscapeAnalysis || EliminateNestedLocks) { 213 if (EliminateAllocations) { 214 #endif // INCLUDE_JVMCI 215 assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames"); 216 GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects(); 217 218 // The flag return_oop() indicates call sites which return oop 219 // in compiled code. Such sites include java method calls, 220 // runtime calls (for example, used to allocate new objects/arrays 221 // on slow code path) and any other calls generated in compiled code. 222 // It is not guaranteed that we can get such information here only 223 // by analyzing bytecode in deoptimized frames. This is why this flag 224 // is set during method compilation (see Compile::Process_OopMap_Node()). 225 // If the previous frame was popped, we don't have a result. 226 bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution(); 227 Handle return_value; 228 if (save_oop_result) { 229 // Reallocation may trigger GC. If deoptimization happened on return from 230 // call which returns oop we need to save it since it is not in oopmap. 231 oop result = deoptee.saved_oop_result(&map); 232 assert(result == NULL || result->is_oop(), "must be oop"); 233 return_value = Handle(thread, result); 234 assert(Universe::heap()->is_in_or_null(result), "must be heap pointer"); 235 if (TraceDeoptimization) { 236 ttyLocker ttyl; 237 tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread)); 238 } 239 } 240 if (objects != NULL) { 241 JRT_BLOCK 242 realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD); 243 reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal, THREAD); 244 JRT_END 245 #ifndef PRODUCT 246 if (TraceDeoptimization) { 247 ttyLocker ttyl; 248 tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, p2i(thread)); 249 print_objects(objects, realloc_failures); 250 } 251 #endif 252 } 253 if (save_oop_result) { 254 // Restore result. 255 deoptee.set_saved_oop_result(&map, return_value()); 256 } 257 #ifndef INCLUDE_JVMCI 258 } 259 if (EliminateLocks) { 260 #endif // INCLUDE_JVMCI 261 #ifndef PRODUCT 262 bool first = true; 263 #endif 264 for (int i = 0; i < chunk->length(); i++) { 265 compiledVFrame* cvf = chunk->at(i); 266 assert (cvf->scope() != NULL,"expect only compiled java frames"); 267 GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); 268 if (monitors->is_nonempty()) { 269 relock_objects(monitors, thread, realloc_failures); 270 #ifndef PRODUCT 271 if (PrintDeoptimizationDetails) { 272 ttyLocker ttyl; 273 for (int j = 0; j < monitors->length(); j++) { 274 MonitorInfo* mi = monitors->at(j); 275 if (mi->eliminated()) { 276 if (first) { 277 first = false; 278 tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, p2i(thread)); 279 } 280 if (mi->owner_is_scalar_replaced()) { 281 Klass* k = java_lang_Class::as_Klass(mi->owner_klass()); 282 tty->print_cr(" failed reallocation for klass %s", k->external_name()); 283 } else { 284 tty->print_cr(" object <" INTPTR_FORMAT "> locked", p2i(mi->owner())); 285 } 286 } 287 } 288 } 289 #endif // !PRODUCT 290 } 291 } 292 #ifndef INCLUDE_JVMCI 293 } 294 } 295 #endif // INCLUDE_JVMCI 296 #endif // COMPILER2 || INCLUDE_JVMCI 297 298 // Ensure that no safepoint is taken after pointers have been stored 299 // in fields of rematerialized objects. If a safepoint occurs from here on 300 // out the java state residing in the vframeArray will be missed. 301 NoSafepointVerifier no_safepoint; 302 303 vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures); 304 #if defined(COMPILER2) || INCLUDE_JVMCI 305 if (realloc_failures) { 306 pop_frames_failed_reallocs(thread, array); 307 } 308 #endif 309 310 assert(thread->vframe_array_head() == NULL, "Pending deopt!"); 311 thread->set_vframe_array_head(array); 312 313 // Now that the vframeArray has been created if we have any deferred local writes 314 // added by jvmti then we can free up that structure as the data is now in the 315 // vframeArray 316 317 if (thread->deferred_locals() != NULL) { 318 GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals(); 319 int i = 0; 320 do { 321 // Because of inlining we could have multiple vframes for a single frame 322 // and several of the vframes could have deferred writes. Find them all. 323 if (list->at(i)->id() == array->original().id()) { 324 jvmtiDeferredLocalVariableSet* dlv = list->at(i); 325 list->remove_at(i); 326 // individual jvmtiDeferredLocalVariableSet are CHeapObj's 327 delete dlv; 328 } else { 329 i++; 330 } 331 } while ( i < list->length() ); 332 if (list->length() == 0) { 333 thread->set_deferred_locals(NULL); 334 // free the list and elements back to C heap. 335 delete list; 336 } 337 338 } 339 340 #ifndef SHARK 341 // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info. 342 CodeBlob* cb = stub_frame.cb(); 343 // Verify we have the right vframeArray 344 assert(cb->frame_size() >= 0, "Unexpected frame size"); 345 intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size(); 346 347 // If the deopt call site is a MethodHandle invoke call site we have 348 // to adjust the unpack_sp. 349 nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null(); 350 if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc())) 351 unpack_sp = deoptee.unextended_sp(); 352 353 #ifdef ASSERT 354 assert(cb->is_deoptimization_stub() || 355 cb->is_uncommon_trap_stub() || 356 strcmp("Stub<DeoptimizationStub.deoptimizationHandler>", cb->name()) == 0 || 357 strcmp("Stub<UncommonTrapStub.uncommonTrapHandler>", cb->name()) == 0, 358 "unexpected code blob: %s", cb->name()); 359 #endif 360 #else 361 intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp(); 362 #endif // !SHARK 363 364 // This is a guarantee instead of an assert because if vframe doesn't match 365 // we will unpack the wrong deoptimized frame and wind up in strange places 366 // where it will be very difficult to figure out what went wrong. Better 367 // to die an early death here than some very obscure death later when the 368 // trail is cold. 369 // Note: on ia64 this guarantee can be fooled by frames with no memory stack 370 // in that it will fail to detect a problem when there is one. This needs 371 // more work in tiger timeframe. 372 guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack"); 373 374 int number_of_frames = array->frames(); 375 376 // Compute the vframes' sizes. Note that frame_sizes[] entries are ordered from outermost to innermost 377 // virtual activation, which is the reverse of the elements in the vframes array. 378 intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler); 379 // +1 because we always have an interpreter return address for the final slot. 380 address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler); 381 int popframe_extra_args = 0; 382 // Create an interpreter return address for the stub to use as its return 383 // address so the skeletal frames are perfectly walkable 384 frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0); 385 386 // PopFrame requires that the preserved incoming arguments from the recently-popped topmost 387 // activation be put back on the expression stack of the caller for reexecution 388 if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) { 389 popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words()); 390 } 391 392 // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized 393 // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather 394 // than simply use array->sender.pc(). This requires us to walk the current set of frames 395 // 396 frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame 397 deopt_sender = deopt_sender.sender(&dummy_map); // Now deoptee caller 398 399 // It's possible that the number of parameters at the call site is 400 // different than number of arguments in the callee when method 401 // handles are used. If the caller is interpreted get the real 402 // value so that the proper amount of space can be added to it's 403 // frame. 404 bool caller_was_method_handle = false; 405 if (deopt_sender.is_interpreted_frame()) { 406 methodHandle method = deopt_sender.interpreter_frame_method(); 407 Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci()); 408 if (cur.is_invokedynamic() || cur.is_invokehandle()) { 409 // Method handle invokes may involve fairly arbitrary chains of 410 // calls so it's impossible to know how much actual space the 411 // caller has for locals. 412 caller_was_method_handle = true; 413 } 414 } 415 416 // 417 // frame_sizes/frame_pcs[0] oldest frame (int or c2i) 418 // frame_sizes/frame_pcs[1] next oldest frame (int) 419 // frame_sizes/frame_pcs[n] youngest frame (int) 420 // 421 // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame 422 // owns the space for the return address to it's caller). Confusing ain't it. 423 // 424 // The vframe array can address vframes with indices running from 425 // 0.._frames-1. Index 0 is the youngest frame and _frame - 1 is the oldest (root) frame. 426 // When we create the skeletal frames we need the oldest frame to be in the zero slot 427 // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk. 428 // so things look a little strange in this loop. 429 // 430 int callee_parameters = 0; 431 int callee_locals = 0; 432 for (int index = 0; index < array->frames(); index++ ) { 433 // frame[number_of_frames - 1 ] = on_stack_size(youngest) 434 // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest)) 435 // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest))) 436 frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters, 437 callee_locals, 438 index == 0, 439 popframe_extra_args); 440 // This pc doesn't have to be perfect just good enough to identify the frame 441 // as interpreted so the skeleton frame will be walkable 442 // The correct pc will be set when the skeleton frame is completely filled out 443 // The final pc we store in the loop is wrong and will be overwritten below 444 frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset; 445 446 callee_parameters = array->element(index)->method()->size_of_parameters(); 447 callee_locals = array->element(index)->method()->max_locals(); 448 popframe_extra_args = 0; 449 } 450 451 // Compute whether the root vframe returns a float or double value. 452 BasicType return_type; 453 { 454 HandleMark hm; 455 methodHandle method(thread, array->element(0)->method()); 456 Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci()); 457 return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL; 458 } 459 460 // Compute information for handling adapters and adjusting the frame size of the caller. 461 int caller_adjustment = 0; 462 463 // Compute the amount the oldest interpreter frame will have to adjust 464 // its caller's stack by. If the caller is a compiled frame then 465 // we pretend that the callee has no parameters so that the 466 // extension counts for the full amount of locals and not just 467 // locals-parms. This is because without a c2i adapter the parm 468 // area as created by the compiled frame will not be usable by 469 // the interpreter. (Depending on the calling convention there 470 // may not even be enough space). 471 472 // QQQ I'd rather see this pushed down into last_frame_adjust 473 // and have it take the sender (aka caller). 474 475 if (deopt_sender.is_compiled_frame() || caller_was_method_handle) { 476 caller_adjustment = last_frame_adjust(0, callee_locals); 477 } else if (callee_locals > callee_parameters) { 478 // The caller frame may need extending to accommodate 479 // non-parameter locals of the first unpacked interpreted frame. 480 // Compute that adjustment. 481 caller_adjustment = last_frame_adjust(callee_parameters, callee_locals); 482 } 483 484 // If the sender is deoptimized the we must retrieve the address of the handler 485 // since the frame will "magically" show the original pc before the deopt 486 // and we'd undo the deopt. 487 488 frame_pcs[0] = deopt_sender.raw_pc(); 489 490 #ifndef SHARK 491 assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc"); 492 #endif // SHARK 493 494 #ifdef INCLUDE_JVMCI 495 if (exceptionObject() != NULL) { 496 thread->set_exception_oop(exceptionObject()); 497 exec_mode = Unpack_exception; 498 } 499 #endif 500 501 UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord, 502 caller_adjustment * BytesPerWord, 503 caller_was_method_handle ? 0 : callee_parameters, 504 number_of_frames, 505 frame_sizes, 506 frame_pcs, 507 return_type, 508 exec_mode); 509 // On some platforms, we need a way to pass some platform dependent 510 // information to the unpacking code so the skeletal frames come out 511 // correct (initial fp value, unextended sp, ...) 512 info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info()); 513 514 if (array->frames() > 1) { 515 if (VerifyStack && TraceDeoptimization) { 516 ttyLocker ttyl; 517 tty->print_cr("Deoptimizing method containing inlining"); 518 } 519 } 520 521 array->set_unroll_block(info); 522 return info; 523 } 524 525 // Called to cleanup deoptimization data structures in normal case 526 // after unpacking to stack and when stack overflow error occurs 527 void Deoptimization::cleanup_deopt_info(JavaThread *thread, 528 vframeArray *array) { 529 530 // Get array if coming from exception 531 if (array == NULL) { 532 array = thread->vframe_array_head(); 533 } 534 thread->set_vframe_array_head(NULL); 535 536 // Free the previous UnrollBlock 537 vframeArray* old_array = thread->vframe_array_last(); 538 thread->set_vframe_array_last(array); 539 540 if (old_array != NULL) { 541 UnrollBlock* old_info = old_array->unroll_block(); 542 old_array->set_unroll_block(NULL); 543 delete old_info; 544 delete old_array; 545 } 546 547 // Deallocate any resource creating in this routine and any ResourceObjs allocated 548 // inside the vframeArray (StackValueCollections) 549 550 delete thread->deopt_mark(); 551 thread->set_deopt_mark(NULL); 552 thread->set_deopt_nmethod(NULL); 553 554 555 if (JvmtiExport::can_pop_frame()) { 556 #ifndef CC_INTERP 557 // Regardless of whether we entered this routine with the pending 558 // popframe condition bit set, we should always clear it now 559 thread->clear_popframe_condition(); 560 #else 561 // C++ interpreter will clear has_pending_popframe when it enters 562 // with method_resume. For deopt_resume2 we clear it now. 563 if (thread->popframe_forcing_deopt_reexecution()) 564 thread->clear_popframe_condition(); 565 #endif /* CC_INTERP */ 566 } 567 568 // unpack_frames() is called at the end of the deoptimization handler 569 // and (in C2) at the end of the uncommon trap handler. Note this fact 570 // so that an asynchronous stack walker can work again. This counter is 571 // incremented at the beginning of fetch_unroll_info() and (in C2) at 572 // the beginning of uncommon_trap(). 573 thread->dec_in_deopt_handler(); 574 } 575 576 // Moved from cpu directories because none of the cpus has callee save values. 577 // If a cpu implements callee save values, move this to deoptimization_<cpu>.cpp. 578 void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) { 579 580 // This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in 581 // the days we had adapter frames. When we deoptimize a situation where a 582 // compiled caller calls a compiled caller will have registers it expects 583 // to survive the call to the callee. If we deoptimize the callee the only 584 // way we can restore these registers is to have the oldest interpreter 585 // frame that we create restore these values. That is what this routine 586 // will accomplish. 587 588 // At the moment we have modified c2 to not have any callee save registers 589 // so this problem does not exist and this routine is just a place holder. 590 591 assert(f->is_interpreted_frame(), "must be interpreted"); 592 } 593 594 // Return BasicType of value being returned 595 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)) 596 597 // We are already active int he special DeoptResourceMark any ResourceObj's we 598 // allocate will be freed at the end of the routine. 599 600 // It is actually ok to allocate handles in a leaf method. It causes no safepoints, 601 // but makes the entry a little slower. There is however a little dance we have to 602 // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro 603 ResetNoHandleMark rnhm; // No-op in release/product versions 604 HandleMark hm; 605 606 frame stub_frame = thread->last_frame(); 607 608 // Since the frame to unpack is the top frame of this thread, the vframe_array_head 609 // must point to the vframeArray for the unpack frame. 610 vframeArray* array = thread->vframe_array_head(); 611 612 #ifndef PRODUCT 613 if (TraceDeoptimization) { 614 ttyLocker ttyl; 615 tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", 616 p2i(thread), p2i(array), exec_mode); 617 } 618 #endif 619 Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d", 620 p2i(stub_frame.pc()), p2i(stub_frame.sp()), exec_mode); 621 622 UnrollBlock* info = array->unroll_block(); 623 624 // Unpack the interpreter frames and any adapter frame (c2 only) we might create. 625 array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters()); 626 627 BasicType bt = info->return_type(); 628 629 // If we have an exception pending, claim that the return type is an oop 630 // so the deopt_blob does not overwrite the exception_oop. 631 632 if (exec_mode == Unpack_exception) 633 bt = T_OBJECT; 634 635 // Cleanup thread deopt data 636 cleanup_deopt_info(thread, array); 637 638 #ifndef PRODUCT 639 if (VerifyStack) { 640 ResourceMark res_mark; 641 642 thread->validate_frame_layout(); 643 644 // Verify that the just-unpacked frames match the interpreter's 645 // notions of expression stack and locals 646 vframeArray* cur_array = thread->vframe_array_last(); 647 RegisterMap rm(thread, false); 648 rm.set_include_argument_oops(false); 649 bool is_top_frame = true; 650 int callee_size_of_parameters = 0; 651 int callee_max_locals = 0; 652 for (int i = 0; i < cur_array->frames(); i++) { 653 vframeArrayElement* el = cur_array->element(i); 654 frame* iframe = el->iframe(); 655 guarantee(iframe->is_interpreted_frame(), "Wrong frame type"); 656 657 // Get the oop map for this bci 658 InterpreterOopMap mask; 659 int cur_invoke_parameter_size = 0; 660 bool try_next_mask = false; 661 int next_mask_expression_stack_size = -1; 662 int top_frame_expression_stack_adjustment = 0; 663 methodHandle mh(thread, iframe->interpreter_frame_method()); 664 OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask); 665 BytecodeStream str(mh); 666 str.set_start(iframe->interpreter_frame_bci()); 667 int max_bci = mh->code_size(); 668 // Get to the next bytecode if possible 669 assert(str.bci() < max_bci, "bci in interpreter frame out of bounds"); 670 // Check to see if we can grab the number of outgoing arguments 671 // at an uncommon trap for an invoke (where the compiler 672 // generates debug info before the invoke has executed) 673 Bytecodes::Code cur_code = str.next(); 674 if (cur_code == Bytecodes::_invokevirtual || 675 cur_code == Bytecodes::_invokedirect || 676 cur_code == Bytecodes::_invokespecial || 677 cur_code == Bytecodes::_invokestatic || 678 cur_code == Bytecodes::_invokeinterface || 679 cur_code == Bytecodes::_invokedynamic) { 680 Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci()); 681 Symbol* signature = invoke.signature(); 682 ArgumentSizeComputer asc(signature); 683 cur_invoke_parameter_size = asc.size(); 684 if (invoke.has_receiver()) { 685 // Add in receiver 686 ++cur_invoke_parameter_size; 687 } 688 if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) { 689 callee_size_of_parameters++; 690 } 691 } 692 if (str.bci() < max_bci) { 693 Bytecodes::Code bc = str.next(); 694 if (bc >= 0) { 695 // The interpreter oop map generator reports results before 696 // the current bytecode has executed except in the case of 697 // calls. It seems to be hard to tell whether the compiler 698 // has emitted debug information matching the "state before" 699 // a given bytecode or the state after, so we try both 700 switch (cur_code) { 701 case Bytecodes::_invokevirtual: 702 case Bytecodes::_invokedirect: 703 case Bytecodes::_invokespecial: 704 case Bytecodes::_invokestatic: 705 case Bytecodes::_invokeinterface: 706 case Bytecodes::_invokedynamic: 707 case Bytecodes::_athrow: 708 break; 709 default: { 710 InterpreterOopMap next_mask; 711 OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask); 712 next_mask_expression_stack_size = next_mask.expression_stack_size(); 713 // Need to subtract off the size of the result type of 714 // the bytecode because this is not described in the 715 // debug info but returned to the interpreter in the TOS 716 // caching register 717 BasicType bytecode_result_type = Bytecodes::result_type(cur_code); 718 if (bytecode_result_type != T_ILLEGAL) { 719 top_frame_expression_stack_adjustment = type2size[bytecode_result_type]; 720 } 721 assert(top_frame_expression_stack_adjustment >= 0, ""); 722 try_next_mask = true; 723 break; 724 } 725 } 726 } 727 } 728 729 // Verify stack depth and oops in frame 730 // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc) 731 if (!( 732 /* SPARC */ 733 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) || 734 /* x86 */ 735 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) || 736 (try_next_mask && 737 (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size - 738 top_frame_expression_stack_adjustment))) || 739 (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) || 740 (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) && 741 (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size)) 742 )) { 743 ttyLocker ttyl; 744 745 // Print out some information that will help us debug the problem 746 tty->print_cr("Wrong number of expression stack elements during deoptimization"); 747 tty->print_cr(" Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1); 748 tty->print_cr(" Fabricated interpreter frame had %d expression stack elements", 749 iframe->interpreter_frame_expression_stack_size()); 750 tty->print_cr(" Interpreter oop map had %d expression stack elements", mask.expression_stack_size()); 751 tty->print_cr(" try_next_mask = %d", try_next_mask); 752 tty->print_cr(" next_mask_expression_stack_size = %d", next_mask_expression_stack_size); 753 tty->print_cr(" callee_size_of_parameters = %d", callee_size_of_parameters); 754 tty->print_cr(" callee_max_locals = %d", callee_max_locals); 755 tty->print_cr(" top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment); 756 tty->print_cr(" exec_mode = %d", exec_mode); 757 tty->print_cr(" cur_invoke_parameter_size = %d", cur_invoke_parameter_size); 758 tty->print_cr(" Thread = " INTPTR_FORMAT ", thread ID = %d", p2i(thread), thread->osthread()->thread_id()); 759 tty->print_cr(" Interpreted frames:"); 760 for (int k = 0; k < cur_array->frames(); k++) { 761 vframeArrayElement* el = cur_array->element(k); 762 tty->print_cr(" %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci()); 763 } 764 cur_array->print_on_2(tty); 765 guarantee(false, "wrong number of expression stack elements during deopt"); 766 } 767 VerifyOopClosure verify; 768 iframe->oops_interpreted_do(&verify, NULL, &rm, false); 769 callee_size_of_parameters = mh->size_of_parameters(); 770 callee_max_locals = mh->max_locals(); 771 is_top_frame = false; 772 } 773 } 774 #endif /* !PRODUCT */ 775 776 777 return bt; 778 JRT_END 779 780 781 int Deoptimization::deoptimize_dependents() { 782 Threads::deoptimized_wrt_marked_nmethods(); 783 return 0; 784 } 785 786 Deoptimization::DeoptAction Deoptimization::_unloaded_action 787 = Deoptimization::Action_reinterpret; 788 789 #if defined(COMPILER2) || INCLUDE_JVMCI 790 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) { 791 Handle pending_exception(thread->pending_exception()); 792 const char* exception_file = thread->exception_file(); 793 int exception_line = thread->exception_line(); 794 thread->clear_pending_exception(); 795 796 bool failures = false; 797 798 for (int i = 0; i < objects->length(); i++) { 799 assert(objects->at(i)->is_object(), "invalid debug information"); 800 ObjectValue* sv = (ObjectValue*) objects->at(i); 801 802 KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()())); 803 oop obj = NULL; 804 805 if (k->is_instance_klass()) { 806 InstanceKlass* ik = InstanceKlass::cast(k()); 807 obj = ik->allocate_instance(THREAD); 808 } else if (k->is_typeArray_klass()) { 809 TypeArrayKlass* ak = TypeArrayKlass::cast(k()); 810 assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length"); 811 int len = sv->field_size() / type2size[ak->element_type()]; 812 obj = ak->allocate(len, THREAD); 813 } else if (k->is_objArray_klass()) { 814 ObjArrayKlass* ak = ObjArrayKlass::cast(k()); 815 obj = ak->allocate(sv->field_size(), THREAD); 816 } 817 818 if (obj == NULL) { 819 failures = true; 820 } 821 822 assert(sv->value().is_null(), "redundant reallocation"); 823 assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception"); 824 CLEAR_PENDING_EXCEPTION; 825 sv->set_value(obj); 826 } 827 828 if (failures) { 829 THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures); 830 } else if (pending_exception.not_null()) { 831 thread->set_pending_exception(pending_exception(), exception_file, exception_line); 832 } 833 834 return failures; 835 } 836 837 // restore elements of an eliminated type array 838 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) { 839 int index = 0; 840 intptr_t val; 841 842 for (int i = 0; i < sv->field_size(); i++) { 843 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); 844 switch(type) { 845 case T_LONG: case T_DOUBLE: { 846 assert(value->type() == T_INT, "Agreement."); 847 StackValue* low = 848 StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); 849 #ifdef _LP64 850 jlong res = (jlong)low->get_int(); 851 #else 852 #ifdef SPARC 853 // For SPARC we have to swap high and low words. 854 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); 855 #else 856 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); 857 #endif //SPARC 858 #endif 859 obj->long_at_put(index, res); 860 break; 861 } 862 863 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. 864 case T_INT: case T_FLOAT: { // 4 bytes. 865 assert(value->type() == T_INT, "Agreement."); 866 bool big_value = false; 867 if (i + 1 < sv->field_size() && type == T_INT) { 868 if (sv->field_at(i)->is_location()) { 869 Location::Type type = ((LocationValue*) sv->field_at(i))->location().type(); 870 if (type == Location::dbl || type == Location::lng) { 871 big_value = true; 872 } 873 } else if (sv->field_at(i)->is_constant_int()) { 874 ScopeValue* next_scope_field = sv->field_at(i + 1); 875 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) { 876 big_value = true; 877 } 878 } 879 } 880 881 if (big_value) { 882 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++i)); 883 #ifdef _LP64 884 jlong res = (jlong)low->get_int(); 885 #else 886 #ifdef SPARC 887 // For SPARC we have to swap high and low words. 888 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); 889 #else 890 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); 891 #endif //SPARC 892 #endif 893 obj->int_at_put(index, (jint)*((jint*)&res)); 894 obj->int_at_put(++index, (jint)*(((jint*)&res) + 1)); 895 } else { 896 val = value->get_int(); 897 obj->int_at_put(index, (jint)*((jint*)&val)); 898 } 899 break; 900 } 901 902 case T_SHORT: case T_CHAR: // 2 bytes 903 assert(value->type() == T_INT, "Agreement."); 904 val = value->get_int(); 905 obj->short_at_put(index, (jshort)*((jint*)&val)); 906 break; 907 908 case T_BOOLEAN: case T_BYTE: // 1 byte 909 assert(value->type() == T_INT, "Agreement."); 910 val = value->get_int(); 911 obj->bool_at_put(index, (jboolean)*((jint*)&val)); 912 break; 913 914 default: 915 ShouldNotReachHere(); 916 } 917 index++; 918 } 919 } 920 921 922 // restore fields of an eliminated object array 923 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) { 924 for (int i = 0; i < sv->field_size(); i++) { 925 StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i)); 926 assert(value->type() == T_OBJECT, "object element expected"); 927 obj->obj_at_put(i, value->get_obj()()); 928 } 929 } 930 931 class ReassignedField { 932 public: 933 int _offset; 934 BasicType _type; 935 InstanceKlass* _klass; 936 public: 937 ReassignedField() { 938 _offset = 0; 939 _type = T_ILLEGAL; 940 _klass = NULL; 941 } 942 }; 943 944 int compare(ReassignedField* left, ReassignedField* right) { 945 return left->_offset - right->_offset; 946 } 947 948 // Restore fields of an eliminated instance object using the same field order 949 // returned by HotSpotResolvedObjectTypeImpl.getInstanceFields(true) 950 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool skip_internal, int base_offset, TRAPS) { 951 if (klass->superklass() != NULL) { 952 svIndex = reassign_fields_by_klass(klass->superklass(), fr, reg_map, sv, svIndex, obj, skip_internal, 0, CHECK_0); 953 } 954 955 GrowableArray<ReassignedField>* fields = new GrowableArray<ReassignedField>(); 956 for (AllFieldStream fs(klass); !fs.done(); fs.next()) { 957 if (!fs.access_flags().is_static() && (!skip_internal || !fs.access_flags().is_internal())) { 958 ReassignedField field; 959 field._offset = fs.offset(); 960 field._type = FieldType::basic_type(fs.signature()); 961 if (field._type == T_VALUETYPE) { 962 // Resolve klass of flattened value type field 963 SignatureStream ss(fs.signature(), false); 964 Klass* vk = ss.as_klass(Handle(klass->class_loader()), Handle(klass->protection_domain()), SignatureStream::NCDFError, THREAD); 965 guarantee(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending"); 966 assert(vk->is_value(), "must be a ValueKlass"); 967 field._klass = InstanceKlass::cast(vk); 968 } 969 fields->append(field); 970 } 971 } 972 fields->sort(compare); 973 for (int i = 0; i < fields->length(); i++) { 974 intptr_t val; 975 ScopeValue* scope_field = sv->field_at(svIndex); 976 StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field); 977 int offset = base_offset + fields->at(i)._offset; 978 BasicType type = fields->at(i)._type; 979 switch (type) { 980 case T_OBJECT: case T_ARRAY: 981 assert(value->type() == T_OBJECT, "Agreement."); 982 obj->obj_field_put(offset, value->get_obj()()); 983 break; 984 985 case T_VALUETYPE: { 986 // Recursively re-assign flattened value type fields 987 InstanceKlass* vk = fields->at(i)._klass; 988 assert(vk != NULL, "must be resolved"); 989 offset -= ValueKlass::cast(vk)->first_field_offset(); // Adjust offset to omit oop header 990 svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, skip_internal, offset, CHECK_0); 991 continue; // Continue because we don't need to increment svIndex 992 } 993 994 // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem. 995 case T_INT: case T_FLOAT: { // 4 bytes. 996 assert(value->type() == T_INT, "Agreement."); 997 bool big_value = false; 998 if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) { 999 if (scope_field->is_location()) { 1000 Location::Type type = ((LocationValue*) scope_field)->location().type(); 1001 if (type == Location::dbl || type == Location::lng) { 1002 big_value = true; 1003 } 1004 } 1005 if (scope_field->is_constant_int()) { 1006 ScopeValue* next_scope_field = sv->field_at(svIndex + 1); 1007 if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) { 1008 big_value = true; 1009 } 1010 } 1011 } 1012 1013 if (big_value) { 1014 i++; 1015 assert(i < fields->length(), "second T_INT field needed"); 1016 assert(fields->at(i)._type == T_INT, "T_INT field needed"); 1017 } else { 1018 val = value->get_int(); 1019 obj->int_field_put(offset, (jint)*((jint*)&val)); 1020 break; 1021 } 1022 } 1023 /* no break */ 1024 1025 case T_LONG: case T_DOUBLE: { 1026 assert(value->type() == T_INT, "Agreement."); 1027 StackValue* low = StackValue::create_stack_value(fr, reg_map, sv->field_at(++svIndex)); 1028 #ifdef _LP64 1029 jlong res = (jlong)low->get_int(); 1030 #else 1031 #ifdef SPARC 1032 // For SPARC we have to swap high and low words. 1033 jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int()); 1034 #else 1035 jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int()); 1036 #endif //SPARC 1037 #endif 1038 obj->long_field_put(offset, res); 1039 break; 1040 } 1041 1042 case T_SHORT: case T_CHAR: // 2 bytes 1043 assert(value->type() == T_INT, "Agreement."); 1044 val = value->get_int(); 1045 obj->short_field_put(offset, (jshort)*((jint*)&val)); 1046 break; 1047 1048 case T_BOOLEAN: case T_BYTE: // 1 byte 1049 assert(value->type() == T_INT, "Agreement."); 1050 val = value->get_int(); 1051 obj->bool_field_put(offset, (jboolean)*((jint*)&val)); 1052 break; 1053 1054 default: 1055 ShouldNotReachHere(); 1056 } 1057 svIndex++; 1058 } 1059 return svIndex; 1060 } 1061 1062 // restore fields of all eliminated objects and arrays 1063 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool skip_internal, TRAPS) { 1064 for (int i = 0; i < objects->length(); i++) { 1065 ObjectValue* sv = (ObjectValue*) objects->at(i); 1066 KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()())); 1067 Handle obj = sv->value(); 1068 assert(obj.not_null() || realloc_failures, "reallocation was missed"); 1069 if (PrintDeoptimizationDetails) { 1070 tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string()); 1071 } 1072 if (obj.is_null()) { 1073 continue; 1074 } 1075 1076 if (k->is_instance_klass()) { 1077 InstanceKlass* ik = InstanceKlass::cast(k()); 1078 reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), skip_internal, 0, CHECK); 1079 } else if (k->is_typeArray_klass()) { 1080 TypeArrayKlass* ak = TypeArrayKlass::cast(k()); 1081 reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type()); 1082 } else if (k->is_objArray_klass()) { 1083 reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj()); 1084 } 1085 } 1086 } 1087 1088 1089 // relock objects for which synchronization was eliminated 1090 void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) { 1091 for (int i = 0; i < monitors->length(); i++) { 1092 MonitorInfo* mon_info = monitors->at(i); 1093 if (mon_info->eliminated()) { 1094 assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed"); 1095 if (!mon_info->owner_is_scalar_replaced()) { 1096 Handle obj = Handle(mon_info->owner()); 1097 markOop mark = obj->mark(); 1098 if (UseBiasedLocking && mark->has_bias_pattern()) { 1099 // New allocated objects may have the mark set to anonymously biased. 1100 // Also the deoptimized method may called methods with synchronization 1101 // where the thread-local object is bias locked to the current thread. 1102 assert(mark->is_biased_anonymously() || 1103 mark->biased_locker() == thread, "should be locked to current thread"); 1104 // Reset mark word to unbiased prototype. 1105 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); 1106 obj->set_mark(unbiased_prototype); 1107 } 1108 BasicLock* lock = mon_info->lock(); 1109 ObjectSynchronizer::slow_enter(obj, lock, thread); 1110 assert(mon_info->owner()->is_locked(), "object must be locked now"); 1111 } 1112 } 1113 } 1114 } 1115 1116 1117 #ifndef PRODUCT 1118 // print information about reallocated objects 1119 void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) { 1120 fieldDescriptor fd; 1121 1122 for (int i = 0; i < objects->length(); i++) { 1123 ObjectValue* sv = (ObjectValue*) objects->at(i); 1124 KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()())); 1125 Handle obj = sv->value(); 1126 1127 tty->print(" object <" INTPTR_FORMAT "> of type ", p2i(sv->value()())); 1128 k->print_value(); 1129 assert(obj.not_null() || realloc_failures, "reallocation was missed"); 1130 if (obj.is_null()) { 1131 tty->print(" allocation failed"); 1132 } else { 1133 tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize); 1134 } 1135 tty->cr(); 1136 1137 if (Verbose && !obj.is_null()) { 1138 k->oop_print_on(obj(), tty); 1139 } 1140 } 1141 } 1142 #endif 1143 #endif // COMPILER2 || INCLUDE_JVMCI 1144 1145 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) { 1146 Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, p2i(fr.pc()), p2i(fr.sp())); 1147 1148 #ifndef PRODUCT 1149 if (PrintDeoptimizationDetails) { 1150 ttyLocker ttyl; 1151 tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", p2i(thread)); 1152 fr.print_on(tty); 1153 tty->print_cr(" Virtual frames (innermost first):"); 1154 for (int index = 0; index < chunk->length(); index++) { 1155 compiledVFrame* vf = chunk->at(index); 1156 tty->print(" %2d - ", index); 1157 vf->print_value(); 1158 int bci = chunk->at(index)->raw_bci(); 1159 const char* code_name; 1160 if (bci == SynchronizationEntryBCI) { 1161 code_name = "sync entry"; 1162 } else { 1163 Bytecodes::Code code = vf->method()->code_at(bci); 1164 code_name = Bytecodes::name(code); 1165 } 1166 tty->print(" - %s", code_name); 1167 tty->print_cr(" @ bci %d ", bci); 1168 if (Verbose) { 1169 vf->print(); 1170 tty->cr(); 1171 } 1172 } 1173 } 1174 #endif 1175 1176 // Register map for next frame (used for stack crawl). We capture 1177 // the state of the deopt'ing frame's caller. Thus if we need to 1178 // stuff a C2I adapter we can properly fill in the callee-save 1179 // register locations. 1180 frame caller = fr.sender(reg_map); 1181 int frame_size = caller.sp() - fr.sp(); 1182 1183 frame sender = caller; 1184 1185 // Since the Java thread being deoptimized will eventually adjust it's own stack, 1186 // the vframeArray containing the unpacking information is allocated in the C heap. 1187 // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames(). 1188 vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures); 1189 1190 // Compare the vframeArray to the collected vframes 1191 assert(array->structural_compare(thread, chunk), "just checking"); 1192 1193 #ifndef PRODUCT 1194 if (PrintDeoptimizationDetails) { 1195 ttyLocker ttyl; 1196 tty->print_cr(" Created vframeArray " INTPTR_FORMAT, p2i(array)); 1197 } 1198 #endif // PRODUCT 1199 1200 return array; 1201 } 1202 1203 #if defined(COMPILER2) || INCLUDE_JVMCI 1204 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) { 1205 // Reallocation of some scalar replaced objects failed. Record 1206 // that we need to pop all the interpreter frames for the 1207 // deoptimized compiled frame. 1208 assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?"); 1209 thread->set_frames_to_pop_failed_realloc(array->frames()); 1210 // Unlock all monitors here otherwise the interpreter will see a 1211 // mix of locked and unlocked monitors (because of failed 1212 // reallocations of synchronized objects) and be confused. 1213 for (int i = 0; i < array->frames(); i++) { 1214 MonitorChunk* monitors = array->element(i)->monitors(); 1215 if (monitors != NULL) { 1216 for (int j = 0; j < monitors->number_of_monitors(); j++) { 1217 BasicObjectLock* src = monitors->at(j); 1218 if (src->obj() != NULL) { 1219 ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread); 1220 } 1221 } 1222 array->element(i)->free_monitors(thread); 1223 #ifdef ASSERT 1224 array->element(i)->set_removed_monitors(); 1225 #endif 1226 } 1227 } 1228 } 1229 #endif 1230 1231 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) { 1232 GrowableArray<MonitorInfo*>* monitors = cvf->monitors(); 1233 for (int i = 0; i < monitors->length(); i++) { 1234 MonitorInfo* mon_info = monitors->at(i); 1235 if (!mon_info->eliminated() && mon_info->owner() != NULL) { 1236 objects_to_revoke->append(Handle(mon_info->owner())); 1237 } 1238 } 1239 } 1240 1241 1242 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) { 1243 if (!UseBiasedLocking) { 1244 return; 1245 } 1246 1247 GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>(); 1248 1249 // Unfortunately we don't have a RegisterMap available in most of 1250 // the places we want to call this routine so we need to walk the 1251 // stack again to update the register map. 1252 if (map == NULL || !map->update_map()) { 1253 StackFrameStream sfs(thread, true); 1254 bool found = false; 1255 while (!found && !sfs.is_done()) { 1256 frame* cur = sfs.current(); 1257 sfs.next(); 1258 found = cur->id() == fr.id(); 1259 } 1260 assert(found, "frame to be deoptimized not found on target thread's stack"); 1261 map = sfs.register_map(); 1262 } 1263 1264 vframe* vf = vframe::new_vframe(&fr, map, thread); 1265 compiledVFrame* cvf = compiledVFrame::cast(vf); 1266 // Revoke monitors' biases in all scopes 1267 while (!cvf->is_top()) { 1268 collect_monitors(cvf, objects_to_revoke); 1269 cvf = compiledVFrame::cast(cvf->sender()); 1270 } 1271 collect_monitors(cvf, objects_to_revoke); 1272 1273 if (SafepointSynchronize::is_at_safepoint()) { 1274 BiasedLocking::revoke_at_safepoint(objects_to_revoke); 1275 } else { 1276 BiasedLocking::revoke(objects_to_revoke); 1277 } 1278 } 1279 1280 1281 void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) { 1282 if (!UseBiasedLocking) { 1283 return; 1284 } 1285 1286 assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint"); 1287 GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>(); 1288 for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) { 1289 if (jt->has_last_Java_frame()) { 1290 StackFrameStream sfs(jt, true); 1291 while (!sfs.is_done()) { 1292 frame* cur = sfs.current(); 1293 if (cb->contains(cur->pc())) { 1294 vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt); 1295 compiledVFrame* cvf = compiledVFrame::cast(vf); 1296 // Revoke monitors' biases in all scopes 1297 while (!cvf->is_top()) { 1298 collect_monitors(cvf, objects_to_revoke); 1299 cvf = compiledVFrame::cast(cvf->sender()); 1300 } 1301 collect_monitors(cvf, objects_to_revoke); 1302 } 1303 sfs.next(); 1304 } 1305 } 1306 } 1307 BiasedLocking::revoke_at_safepoint(objects_to_revoke); 1308 } 1309 1310 1311 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr, Deoptimization::DeoptReason reason) { 1312 assert(fr.can_be_deoptimized(), "checking frame type"); 1313 1314 gather_statistics(reason, Action_none, Bytecodes::_illegal); 1315 1316 if (LogCompilation && xtty != NULL) { 1317 nmethod* nm = fr.cb()->as_nmethod_or_null(); 1318 assert(nm != NULL, "only compiled methods can deopt"); 1319 1320 ttyLocker ttyl; 1321 xtty->begin_head("deoptimized thread='" UINTX_FORMAT "'", (uintx)thread->osthread()->thread_id()); 1322 nm->log_identity(xtty); 1323 xtty->end_head(); 1324 for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) { 1325 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1326 xtty->method(sd->method()); 1327 xtty->end_elem(); 1328 if (sd->is_top()) break; 1329 } 1330 xtty->tail("deoptimized"); 1331 } 1332 1333 // Patch the compiled method so that when execution returns to it we will 1334 // deopt the execution state and return to the interpreter. 1335 fr.deoptimize(thread); 1336 } 1337 1338 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) { 1339 deoptimize(thread, fr, map, Reason_constraint); 1340 } 1341 1342 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map, DeoptReason reason) { 1343 // Deoptimize only if the frame comes from compile code. 1344 // Do not deoptimize the frame which is already patched 1345 // during the execution of the loops below. 1346 if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) { 1347 return; 1348 } 1349 ResourceMark rm; 1350 DeoptimizationMarker dm; 1351 if (UseBiasedLocking) { 1352 revoke_biases_of_monitors(thread, fr, map); 1353 } 1354 deoptimize_single_frame(thread, fr, reason); 1355 1356 } 1357 1358 1359 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) { 1360 assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(), 1361 "can only deoptimize other thread at a safepoint"); 1362 // Compute frame and register map based on thread and sp. 1363 RegisterMap reg_map(thread, UseBiasedLocking); 1364 frame fr = thread->last_frame(); 1365 while (fr.id() != id) { 1366 fr = fr.sender(®_map); 1367 } 1368 deoptimize(thread, fr, ®_map, reason); 1369 } 1370 1371 1372 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id, DeoptReason reason) { 1373 if (thread == Thread::current()) { 1374 Deoptimization::deoptimize_frame_internal(thread, id, reason); 1375 } else { 1376 VM_DeoptimizeFrame deopt(thread, id, reason); 1377 VMThread::execute(&deopt); 1378 } 1379 } 1380 1381 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) { 1382 deoptimize_frame(thread, id, Reason_constraint); 1383 } 1384 1385 // JVMTI PopFrame support 1386 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address)) 1387 { 1388 thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address); 1389 } 1390 JRT_END 1391 1392 MethodData* 1393 Deoptimization::get_method_data(JavaThread* thread, methodHandle m, 1394 bool create_if_missing) { 1395 Thread* THREAD = thread; 1396 MethodData* mdo = m()->method_data(); 1397 if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) { 1398 // Build an MDO. Ignore errors like OutOfMemory; 1399 // that simply means we won't have an MDO to update. 1400 Method::build_interpreter_method_data(m, THREAD); 1401 if (HAS_PENDING_EXCEPTION) { 1402 assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here"); 1403 CLEAR_PENDING_EXCEPTION; 1404 } 1405 mdo = m()->method_data(); 1406 } 1407 return mdo; 1408 } 1409 1410 #if defined(COMPILER2) || defined(SHARK) || INCLUDE_JVMCI 1411 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index, TRAPS) { 1412 // in case of an unresolved klass entry, load the class. 1413 if (constant_pool->tag_at(index).is_unresolved_klass()) { 1414 Klass* tk = constant_pool->klass_at_ignore_error(index, CHECK); 1415 return; 1416 } 1417 1418 if (!constant_pool->tag_at(index).is_symbol()) return; 1419 1420 Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader()); 1421 Symbol* symbol = constant_pool->symbol_at(index); 1422 1423 // class name? 1424 if (symbol->byte_at(0) != '(') { 1425 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain()); 1426 SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK); 1427 return; 1428 } 1429 1430 // then it must be a signature! 1431 ResourceMark rm(THREAD); 1432 for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) { 1433 if (ss.is_object()) { 1434 Symbol* class_name = ss.as_symbol(CHECK); 1435 Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain()); 1436 SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK); 1437 } 1438 } 1439 } 1440 1441 1442 void Deoptimization::load_class_by_index(const constantPoolHandle& constant_pool, int index) { 1443 EXCEPTION_MARK; 1444 load_class_by_index(constant_pool, index, THREAD); 1445 if (HAS_PENDING_EXCEPTION) { 1446 // Exception happened during classloading. We ignore the exception here, since it 1447 // is going to be rethrown since the current activation is going to be deoptimized and 1448 // the interpreter will re-execute the bytecode. 1449 CLEAR_PENDING_EXCEPTION; 1450 // Class loading called java code which may have caused a stack 1451 // overflow. If the exception was thrown right before the return 1452 // to the runtime the stack is no longer guarded. Reguard the 1453 // stack otherwise if we return to the uncommon trap blob and the 1454 // stack bang causes a stack overflow we crash. 1455 assert(THREAD->is_Java_thread(), "only a java thread can be here"); 1456 JavaThread* thread = (JavaThread*)THREAD; 1457 bool guard_pages_enabled = thread->stack_guards_enabled(); 1458 if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack(); 1459 assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash"); 1460 } 1461 } 1462 1463 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) { 1464 HandleMark hm; 1465 1466 // uncommon_trap() is called at the beginning of the uncommon trap 1467 // handler. Note this fact before we start generating temporary frames 1468 // that can confuse an asynchronous stack walker. This counter is 1469 // decremented at the end of unpack_frames(). 1470 thread->inc_in_deopt_handler(); 1471 1472 // We need to update the map if we have biased locking. 1473 #if INCLUDE_JVMCI 1474 // JVMCI might need to get an exception from the stack, which in turn requires the register map to be valid 1475 RegisterMap reg_map(thread, true); 1476 #else 1477 RegisterMap reg_map(thread, UseBiasedLocking); 1478 #endif 1479 frame stub_frame = thread->last_frame(); 1480 frame fr = stub_frame.sender(®_map); 1481 // Make sure the calling nmethod is not getting deoptimized and removed 1482 // before we are done with it. 1483 nmethodLocker nl(fr.pc()); 1484 1485 // Log a message 1486 Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT " relative=" INTPTR_FORMAT, 1487 trap_request, p2i(fr.pc()), fr.pc() - fr.cb()->code_begin()); 1488 1489 { 1490 ResourceMark rm; 1491 1492 // Revoke biases of any monitors in the frame to ensure we can migrate them 1493 revoke_biases_of_monitors(thread, fr, ®_map); 1494 1495 DeoptReason reason = trap_request_reason(trap_request); 1496 DeoptAction action = trap_request_action(trap_request); 1497 #if INCLUDE_JVMCI 1498 int debug_id = trap_request_debug_id(trap_request); 1499 #endif 1500 jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1 1501 1502 vframe* vf = vframe::new_vframe(&fr, ®_map, thread); 1503 compiledVFrame* cvf = compiledVFrame::cast(vf); 1504 1505 nmethod* nm = cvf->code(); 1506 1507 ScopeDesc* trap_scope = cvf->scope(); 1508 1509 if (TraceDeoptimization) { 1510 ttyLocker ttyl; 1511 tty->print_cr(" bci=%d pc=" INTPTR_FORMAT ", relative_pc=" INTPTR_FORMAT ", method=%s" JVMCI_ONLY(", debug_id=%d"), trap_scope->bci(), p2i(fr.pc()), fr.pc() - nm->code_begin(), trap_scope->method()->name_and_sig_as_C_string() 1512 #if INCLUDE_JVMCI 1513 , debug_id 1514 #endif 1515 ); 1516 } 1517 1518 methodHandle trap_method = trap_scope->method(); 1519 int trap_bci = trap_scope->bci(); 1520 #if INCLUDE_JVMCI 1521 oop speculation = thread->pending_failed_speculation(); 1522 if (nm->is_compiled_by_jvmci()) { 1523 if (speculation != NULL) { 1524 oop speculation_log = nm->speculation_log(); 1525 if (speculation_log != NULL) { 1526 if (TraceDeoptimization || TraceUncollectedSpeculations) { 1527 if (HotSpotSpeculationLog::lastFailed(speculation_log) != NULL) { 1528 tty->print_cr("A speculation that was not collected by the compiler is being overwritten"); 1529 } 1530 } 1531 if (TraceDeoptimization) { 1532 tty->print_cr("Saving speculation to speculation log"); 1533 } 1534 HotSpotSpeculationLog::set_lastFailed(speculation_log, speculation); 1535 } else { 1536 if (TraceDeoptimization) { 1537 tty->print_cr("Speculation present but no speculation log"); 1538 } 1539 } 1540 thread->set_pending_failed_speculation(NULL); 1541 } else { 1542 if (TraceDeoptimization) { 1543 tty->print_cr("No speculation"); 1544 } 1545 } 1546 } else { 1547 assert(speculation == NULL, "There should not be a speculation for method compiled by non-JVMCI compilers"); 1548 } 1549 1550 if (trap_bci == SynchronizationEntryBCI) { 1551 trap_bci = 0; 1552 thread->set_pending_monitorenter(true); 1553 } 1554 1555 if (reason == Deoptimization::Reason_transfer_to_interpreter) { 1556 thread->set_pending_transfer_to_interpreter(true); 1557 } 1558 #endif 1559 1560 Bytecodes::Code trap_bc = trap_method->java_code_at(trap_bci); 1561 // Record this event in the histogram. 1562 gather_statistics(reason, action, trap_bc); 1563 1564 // Ensure that we can record deopt. history: 1565 // Need MDO to record RTM code generation state. 1566 bool create_if_missing = ProfileTraps || UseCodeAging RTM_OPT_ONLY( || UseRTMLocking ); 1567 1568 methodHandle profiled_method; 1569 #if INCLUDE_JVMCI 1570 if (nm->is_compiled_by_jvmci()) { 1571 profiled_method = nm->method(); 1572 } else { 1573 profiled_method = trap_method; 1574 } 1575 #else 1576 profiled_method = trap_method; 1577 #endif 1578 1579 MethodData* trap_mdo = 1580 get_method_data(thread, profiled_method, create_if_missing); 1581 1582 // Log a message 1583 Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d", 1584 trap_reason_name(reason), trap_action_name(action), p2i(fr.pc()), 1585 trap_method->name_and_sig_as_C_string(), trap_bci); 1586 1587 // Print a bunch of diagnostics, if requested. 1588 if (TraceDeoptimization || LogCompilation) { 1589 ResourceMark rm; 1590 ttyLocker ttyl; 1591 char buf[100]; 1592 if (xtty != NULL) { 1593 xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT "' %s", 1594 os::current_thread_id(), 1595 format_trap_request(buf, sizeof(buf), trap_request)); 1596 nm->log_identity(xtty); 1597 } 1598 Symbol* class_name = NULL; 1599 bool unresolved = false; 1600 if (unloaded_class_index >= 0) { 1601 constantPoolHandle constants (THREAD, trap_method->constants()); 1602 if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) { 1603 class_name = constants->klass_name_at(unloaded_class_index); 1604 unresolved = true; 1605 if (xtty != NULL) 1606 xtty->print(" unresolved='1'"); 1607 } else if (constants->tag_at(unloaded_class_index).is_symbol()) { 1608 class_name = constants->symbol_at(unloaded_class_index); 1609 } 1610 if (xtty != NULL) 1611 xtty->name(class_name); 1612 } 1613 if (xtty != NULL && trap_mdo != NULL && (int)reason < (int)MethodData::_trap_hist_limit) { 1614 // Dump the relevant MDO state. 1615 // This is the deopt count for the current reason, any previous 1616 // reasons or recompiles seen at this point. 1617 int dcnt = trap_mdo->trap_count(reason); 1618 if (dcnt != 0) 1619 xtty->print(" count='%d'", dcnt); 1620 ProfileData* pdata = trap_mdo->bci_to_data(trap_bci); 1621 int dos = (pdata == NULL)? 0: pdata->trap_state(); 1622 if (dos != 0) { 1623 xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos)); 1624 if (trap_state_is_recompiled(dos)) { 1625 int recnt2 = trap_mdo->overflow_recompile_count(); 1626 if (recnt2 != 0) 1627 xtty->print(" recompiles2='%d'", recnt2); 1628 } 1629 } 1630 } 1631 if (xtty != NULL) { 1632 xtty->stamp(); 1633 xtty->end_head(); 1634 } 1635 if (TraceDeoptimization) { // make noise on the tty 1636 tty->print("Uncommon trap occurred in"); 1637 nm->method()->print_short_name(tty); 1638 tty->print(" compiler=%s compile_id=%d", nm->compiler() == NULL ? "" : nm->compiler()->name(), nm->compile_id()); 1639 #if INCLUDE_JVMCI 1640 oop installedCode = nm->jvmci_installed_code(); 1641 if (installedCode != NULL) { 1642 oop installedCodeName = NULL; 1643 if (installedCode->is_a(InstalledCode::klass())) { 1644 installedCodeName = InstalledCode::name(installedCode); 1645 } 1646 if (installedCodeName != NULL) { 1647 tty->print(" (JVMCI: installedCodeName=%s) ", java_lang_String::as_utf8_string(installedCodeName)); 1648 } else { 1649 tty->print(" (JVMCI: installed code has no name) "); 1650 } 1651 } else if (nm->is_compiled_by_jvmci()) { 1652 tty->print(" (JVMCI: no installed code) "); 1653 } 1654 #endif 1655 tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d" JVMCI_ONLY(" debug_id=%d"), 1656 p2i(fr.pc()), 1657 os::current_thread_id(), 1658 trap_reason_name(reason), 1659 trap_action_name(action), 1660 unloaded_class_index 1661 #if INCLUDE_JVMCI 1662 , debug_id 1663 #endif 1664 ); 1665 if (class_name != NULL) { 1666 tty->print(unresolved ? " unresolved class: " : " symbol: "); 1667 class_name->print_symbol_on(tty); 1668 } 1669 tty->cr(); 1670 } 1671 if (xtty != NULL) { 1672 // Log the precise location of the trap. 1673 for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) { 1674 xtty->begin_elem("jvms bci='%d'", sd->bci()); 1675 xtty->method(sd->method()); 1676 xtty->end_elem(); 1677 if (sd->is_top()) break; 1678 } 1679 xtty->tail("uncommon_trap"); 1680 } 1681 } 1682 // (End diagnostic printout.) 1683 1684 // Load class if necessary 1685 if (unloaded_class_index >= 0) { 1686 constantPoolHandle constants(THREAD, trap_method->constants()); 1687 load_class_by_index(constants, unloaded_class_index); 1688 } 1689 1690 // Flush the nmethod if necessary and desirable. 1691 // 1692 // We need to avoid situations where we are re-flushing the nmethod 1693 // because of a hot deoptimization site. Repeated flushes at the same 1694 // point need to be detected by the compiler and avoided. If the compiler 1695 // cannot avoid them (or has a bug and "refuses" to avoid them), this 1696 // module must take measures to avoid an infinite cycle of recompilation 1697 // and deoptimization. There are several such measures: 1698 // 1699 // 1. If a recompilation is ordered a second time at some site X 1700 // and for the same reason R, the action is adjusted to 'reinterpret', 1701 // to give the interpreter time to exercise the method more thoroughly. 1702 // If this happens, the method's overflow_recompile_count is incremented. 1703 // 1704 // 2. If the compiler fails to reduce the deoptimization rate, then 1705 // the method's overflow_recompile_count will begin to exceed the set 1706 // limit PerBytecodeRecompilationCutoff. If this happens, the action 1707 // is adjusted to 'make_not_compilable', and the method is abandoned 1708 // to the interpreter. This is a performance hit for hot methods, 1709 // but is better than a disastrous infinite cycle of recompilations. 1710 // (Actually, only the method containing the site X is abandoned.) 1711 // 1712 // 3. In parallel with the previous measures, if the total number of 1713 // recompilations of a method exceeds the much larger set limit 1714 // PerMethodRecompilationCutoff, the method is abandoned. 1715 // This should only happen if the method is very large and has 1716 // many "lukewarm" deoptimizations. The code which enforces this 1717 // limit is elsewhere (class nmethod, class Method). 1718 // 1719 // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance 1720 // to recompile at each bytecode independently of the per-BCI cutoff. 1721 // 1722 // The decision to update code is up to the compiler, and is encoded 1723 // in the Action_xxx code. If the compiler requests Action_none 1724 // no trap state is changed, no compiled code is changed, and the 1725 // computation suffers along in the interpreter. 1726 // 1727 // The other action codes specify various tactics for decompilation 1728 // and recompilation. Action_maybe_recompile is the loosest, and 1729 // allows the compiled code to stay around until enough traps are seen, 1730 // and until the compiler gets around to recompiling the trapping method. 1731 // 1732 // The other actions cause immediate removal of the present code. 1733 1734 // Traps caused by injected profile shouldn't pollute trap counts. 1735 bool injected_profile_trap = trap_method->has_injected_profile() && 1736 (reason == Reason_intrinsic || reason == Reason_unreached); 1737 1738 bool update_trap_state = (reason != Reason_tenured) && !injected_profile_trap; 1739 bool make_not_entrant = false; 1740 bool make_not_compilable = false; 1741 bool reprofile = false; 1742 switch (action) { 1743 case Action_none: 1744 // Keep the old code. 1745 update_trap_state = false; 1746 break; 1747 case Action_maybe_recompile: 1748 // Do not need to invalidate the present code, but we can 1749 // initiate another 1750 // Start compiler without (necessarily) invalidating the nmethod. 1751 // The system will tolerate the old code, but new code should be 1752 // generated when possible. 1753 break; 1754 case Action_reinterpret: 1755 // Go back into the interpreter for a while, and then consider 1756 // recompiling form scratch. 1757 make_not_entrant = true; 1758 // Reset invocation counter for outer most method. 1759 // This will allow the interpreter to exercise the bytecodes 1760 // for a while before recompiling. 1761 // By contrast, Action_make_not_entrant is immediate. 1762 // 1763 // Note that the compiler will track null_check, null_assert, 1764 // range_check, and class_check events and log them as if they 1765 // had been traps taken from compiled code. This will update 1766 // the MDO trap history so that the next compilation will 1767 // properly detect hot trap sites. 1768 reprofile = true; 1769 break; 1770 case Action_make_not_entrant: 1771 // Request immediate recompilation, and get rid of the old code. 1772 // Make them not entrant, so next time they are called they get 1773 // recompiled. Unloaded classes are loaded now so recompile before next 1774 // time they are called. Same for uninitialized. The interpreter will 1775 // link the missing class, if any. 1776 make_not_entrant = true; 1777 break; 1778 case Action_make_not_compilable: 1779 // Give up on compiling this method at all. 1780 make_not_entrant = true; 1781 make_not_compilable = true; 1782 break; 1783 default: 1784 ShouldNotReachHere(); 1785 } 1786 1787 // Setting +ProfileTraps fixes the following, on all platforms: 1788 // 4852688: ProfileInterpreter is off by default for ia64. The result is 1789 // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the 1790 // recompile relies on a MethodData* to record heroic opt failures. 1791 1792 // Whether the interpreter is producing MDO data or not, we also need 1793 // to use the MDO to detect hot deoptimization points and control 1794 // aggressive optimization. 1795 bool inc_recompile_count = false; 1796 ProfileData* pdata = NULL; 1797 if (ProfileTraps && update_trap_state && trap_mdo != NULL) { 1798 assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity"); 1799 uint this_trap_count = 0; 1800 bool maybe_prior_trap = false; 1801 bool maybe_prior_recompile = false; 1802 pdata = query_update_method_data(trap_mdo, trap_bci, reason, true, 1803 #if INCLUDE_JVMCI 1804 nm->is_compiled_by_jvmci() && nm->is_osr_method(), 1805 #endif 1806 nm->method(), 1807 //outputs: 1808 this_trap_count, 1809 maybe_prior_trap, 1810 maybe_prior_recompile); 1811 // Because the interpreter also counts null, div0, range, and class 1812 // checks, these traps from compiled code are double-counted. 1813 // This is harmless; it just means that the PerXTrapLimit values 1814 // are in effect a little smaller than they look. 1815 1816 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 1817 if (per_bc_reason != Reason_none) { 1818 // Now take action based on the partially known per-BCI history. 1819 if (maybe_prior_trap 1820 && this_trap_count >= (uint)PerBytecodeTrapLimit) { 1821 // If there are too many traps at this BCI, force a recompile. 1822 // This will allow the compiler to see the limit overflow, and 1823 // take corrective action, if possible. The compiler generally 1824 // does not use the exact PerBytecodeTrapLimit value, but instead 1825 // changes its tactics if it sees any traps at all. This provides 1826 // a little hysteresis, delaying a recompile until a trap happens 1827 // several times. 1828 // 1829 // Actually, since there is only one bit of counter per BCI, 1830 // the possible per-BCI counts are {0,1,(per-method count)}. 1831 // This produces accurate results if in fact there is only 1832 // one hot trap site, but begins to get fuzzy if there are 1833 // many sites. For example, if there are ten sites each 1834 // trapping two or more times, they each get the blame for 1835 // all of their traps. 1836 make_not_entrant = true; 1837 } 1838 1839 // Detect repeated recompilation at the same BCI, and enforce a limit. 1840 if (make_not_entrant && maybe_prior_recompile) { 1841 // More than one recompile at this point. 1842 inc_recompile_count = maybe_prior_trap; 1843 } 1844 } else { 1845 // For reasons which are not recorded per-bytecode, we simply 1846 // force recompiles unconditionally. 1847 // (Note that PerMethodRecompilationCutoff is enforced elsewhere.) 1848 make_not_entrant = true; 1849 } 1850 1851 // Go back to the compiler if there are too many traps in this method. 1852 if (this_trap_count >= per_method_trap_limit(reason)) { 1853 // If there are too many traps in this method, force a recompile. 1854 // This will allow the compiler to see the limit overflow, and 1855 // take corrective action, if possible. 1856 // (This condition is an unlikely backstop only, because the 1857 // PerBytecodeTrapLimit is more likely to take effect first, 1858 // if it is applicable.) 1859 make_not_entrant = true; 1860 } 1861 1862 // Here's more hysteresis: If there has been a recompile at 1863 // this trap point already, run the method in the interpreter 1864 // for a while to exercise it more thoroughly. 1865 if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) { 1866 reprofile = true; 1867 } 1868 } 1869 1870 // Take requested actions on the method: 1871 1872 // Recompile 1873 if (make_not_entrant) { 1874 if (!nm->make_not_entrant()) { 1875 return; // the call did not change nmethod's state 1876 } 1877 1878 if (pdata != NULL) { 1879 // Record the recompilation event, if any. 1880 int tstate0 = pdata->trap_state(); 1881 int tstate1 = trap_state_set_recompiled(tstate0, true); 1882 if (tstate1 != tstate0) 1883 pdata->set_trap_state(tstate1); 1884 } 1885 1886 #if INCLUDE_RTM_OPT 1887 // Restart collecting RTM locking abort statistic if the method 1888 // is recompiled for a reason other than RTM state change. 1889 // Assume that in new recompiled code the statistic could be different, 1890 // for example, due to different inlining. 1891 if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) && 1892 UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) { 1893 trap_mdo->atomic_set_rtm_state(ProfileRTM); 1894 } 1895 #endif 1896 // For code aging we count traps separately here, using make_not_entrant() 1897 // as a guard against simultaneous deopts in multiple threads. 1898 if (reason == Reason_tenured && trap_mdo != NULL) { 1899 trap_mdo->inc_tenure_traps(); 1900 } 1901 } 1902 1903 if (inc_recompile_count) { 1904 trap_mdo->inc_overflow_recompile_count(); 1905 if ((uint)trap_mdo->overflow_recompile_count() > 1906 (uint)PerBytecodeRecompilationCutoff) { 1907 // Give up on the method containing the bad BCI. 1908 if (trap_method() == nm->method()) { 1909 make_not_compilable = true; 1910 } else { 1911 trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff"); 1912 // But give grace to the enclosing nm->method(). 1913 } 1914 } 1915 } 1916 1917 // Reprofile 1918 if (reprofile) { 1919 CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method()); 1920 } 1921 1922 // Give up compiling 1923 if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) { 1924 assert(make_not_entrant, "consistent"); 1925 nm->method()->set_not_compilable(CompLevel_full_optimization); 1926 } 1927 1928 } // Free marked resources 1929 1930 } 1931 JRT_END 1932 1933 ProfileData* 1934 Deoptimization::query_update_method_data(MethodData* trap_mdo, 1935 int trap_bci, 1936 Deoptimization::DeoptReason reason, 1937 bool update_total_trap_count, 1938 #if INCLUDE_JVMCI 1939 bool is_osr, 1940 #endif 1941 Method* compiled_method, 1942 //outputs: 1943 uint& ret_this_trap_count, 1944 bool& ret_maybe_prior_trap, 1945 bool& ret_maybe_prior_recompile) { 1946 bool maybe_prior_trap = false; 1947 bool maybe_prior_recompile = false; 1948 uint this_trap_count = 0; 1949 if (update_total_trap_count) { 1950 uint idx = reason; 1951 #if INCLUDE_JVMCI 1952 if (is_osr) { 1953 idx += Reason_LIMIT; 1954 } 1955 #endif 1956 uint prior_trap_count = trap_mdo->trap_count(idx); 1957 this_trap_count = trap_mdo->inc_trap_count(idx); 1958 1959 // If the runtime cannot find a place to store trap history, 1960 // it is estimated based on the general condition of the method. 1961 // If the method has ever been recompiled, or has ever incurred 1962 // a trap with the present reason , then this BCI is assumed 1963 // (pessimistically) to be the culprit. 1964 maybe_prior_trap = (prior_trap_count != 0); 1965 maybe_prior_recompile = (trap_mdo->decompile_count() != 0); 1966 } 1967 ProfileData* pdata = NULL; 1968 1969 1970 // For reasons which are recorded per bytecode, we check per-BCI data. 1971 DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason); 1972 assert(per_bc_reason != Reason_none || update_total_trap_count, "must be"); 1973 if (per_bc_reason != Reason_none) { 1974 // Find the profile data for this BCI. If there isn't one, 1975 // try to allocate one from the MDO's set of spares. 1976 // This will let us detect a repeated trap at this point. 1977 pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL); 1978 1979 if (pdata != NULL) { 1980 if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) { 1981 if (LogCompilation && xtty != NULL) { 1982 ttyLocker ttyl; 1983 // no more room for speculative traps in this MDO 1984 xtty->elem("speculative_traps_oom"); 1985 } 1986 } 1987 // Query the trap state of this profile datum. 1988 int tstate0 = pdata->trap_state(); 1989 if (!trap_state_has_reason(tstate0, per_bc_reason)) 1990 maybe_prior_trap = false; 1991 if (!trap_state_is_recompiled(tstate0)) 1992 maybe_prior_recompile = false; 1993 1994 // Update the trap state of this profile datum. 1995 int tstate1 = tstate0; 1996 // Record the reason. 1997 tstate1 = trap_state_add_reason(tstate1, per_bc_reason); 1998 // Store the updated state on the MDO, for next time. 1999 if (tstate1 != tstate0) 2000 pdata->set_trap_state(tstate1); 2001 } else { 2002 if (LogCompilation && xtty != NULL) { 2003 ttyLocker ttyl; 2004 // Missing MDP? Leave a small complaint in the log. 2005 xtty->elem("missing_mdp bci='%d'", trap_bci); 2006 } 2007 } 2008 } 2009 2010 // Return results: 2011 ret_this_trap_count = this_trap_count; 2012 ret_maybe_prior_trap = maybe_prior_trap; 2013 ret_maybe_prior_recompile = maybe_prior_recompile; 2014 return pdata; 2015 } 2016 2017 void 2018 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) { 2019 ResourceMark rm; 2020 // Ignored outputs: 2021 uint ignore_this_trap_count; 2022 bool ignore_maybe_prior_trap; 2023 bool ignore_maybe_prior_recompile; 2024 assert(!reason_is_speculate(reason), "reason speculate only used by compiler"); 2025 // JVMCI uses the total counts to determine if deoptimizations are happening too frequently -> do not adjust total counts 2026 bool update_total_counts = JVMCI_ONLY(false) NOT_JVMCI(true); 2027 query_update_method_data(trap_mdo, trap_bci, 2028 (DeoptReason)reason, 2029 update_total_counts, 2030 #if INCLUDE_JVMCI 2031 false, 2032 #endif 2033 NULL, 2034 ignore_this_trap_count, 2035 ignore_maybe_prior_trap, 2036 ignore_maybe_prior_recompile); 2037 } 2038 2039 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request, jint exec_mode) { 2040 if (TraceDeoptimization) { 2041 tty->print("Uncommon trap "); 2042 } 2043 // Still in Java no safepoints 2044 { 2045 // This enters VM and may safepoint 2046 uncommon_trap_inner(thread, trap_request); 2047 } 2048 return fetch_unroll_info_helper(thread, exec_mode); 2049 } 2050 2051 // Local derived constants. 2052 // Further breakdown of DataLayout::trap_state, as promised by DataLayout. 2053 const int DS_REASON_MASK = DataLayout::trap_mask >> 1; 2054 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK; 2055 2056 //---------------------------trap_state_reason--------------------------------- 2057 Deoptimization::DeoptReason 2058 Deoptimization::trap_state_reason(int trap_state) { 2059 // This assert provides the link between the width of DataLayout::trap_bits 2060 // and the encoding of "recorded" reasons. It ensures there are enough 2061 // bits to store all needed reasons in the per-BCI MDO profile. 2062 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2063 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2064 trap_state -= recompile_bit; 2065 if (trap_state == DS_REASON_MASK) { 2066 return Reason_many; 2067 } else { 2068 assert((int)Reason_none == 0, "state=0 => Reason_none"); 2069 return (DeoptReason)trap_state; 2070 } 2071 } 2072 //-------------------------trap_state_has_reason------------------------------- 2073 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { 2074 assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason"); 2075 assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits"); 2076 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2077 trap_state -= recompile_bit; 2078 if (trap_state == DS_REASON_MASK) { 2079 return -1; // true, unspecifically (bottom of state lattice) 2080 } else if (trap_state == reason) { 2081 return 1; // true, definitely 2082 } else if (trap_state == 0) { 2083 return 0; // false, definitely (top of state lattice) 2084 } else { 2085 return 0; // false, definitely 2086 } 2087 } 2088 //-------------------------trap_state_add_reason------------------------------- 2089 int Deoptimization::trap_state_add_reason(int trap_state, int reason) { 2090 assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason"); 2091 int recompile_bit = (trap_state & DS_RECOMPILE_BIT); 2092 trap_state -= recompile_bit; 2093 if (trap_state == DS_REASON_MASK) { 2094 return trap_state + recompile_bit; // already at state lattice bottom 2095 } else if (trap_state == reason) { 2096 return trap_state + recompile_bit; // the condition is already true 2097 } else if (trap_state == 0) { 2098 return reason + recompile_bit; // no condition has yet been true 2099 } else { 2100 return DS_REASON_MASK + recompile_bit; // fall to state lattice bottom 2101 } 2102 } 2103 //-----------------------trap_state_is_recompiled------------------------------ 2104 bool Deoptimization::trap_state_is_recompiled(int trap_state) { 2105 return (trap_state & DS_RECOMPILE_BIT) != 0; 2106 } 2107 //-----------------------trap_state_set_recompiled----------------------------- 2108 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) { 2109 if (z) return trap_state | DS_RECOMPILE_BIT; 2110 else return trap_state & ~DS_RECOMPILE_BIT; 2111 } 2112 //---------------------------format_trap_state--------------------------------- 2113 // This is used for debugging and diagnostics, including LogFile output. 2114 const char* Deoptimization::format_trap_state(char* buf, size_t buflen, 2115 int trap_state) { 2116 assert(buflen > 0, "sanity"); 2117 DeoptReason reason = trap_state_reason(trap_state); 2118 bool recomp_flag = trap_state_is_recompiled(trap_state); 2119 // Re-encode the state from its decoded components. 2120 int decoded_state = 0; 2121 if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many) 2122 decoded_state = trap_state_add_reason(decoded_state, reason); 2123 if (recomp_flag) 2124 decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag); 2125 // If the state re-encodes properly, format it symbolically. 2126 // Because this routine is used for debugging and diagnostics, 2127 // be robust even if the state is a strange value. 2128 size_t len; 2129 if (decoded_state != trap_state) { 2130 // Random buggy state that doesn't decode?? 2131 len = jio_snprintf(buf, buflen, "#%d", trap_state); 2132 } else { 2133 len = jio_snprintf(buf, buflen, "%s%s", 2134 trap_reason_name(reason), 2135 recomp_flag ? " recompiled" : ""); 2136 } 2137 return buf; 2138 } 2139 2140 2141 //--------------------------------statics-------------------------------------- 2142 const char* Deoptimization::_trap_reason_name[] = { 2143 // Note: Keep this in sync. with enum DeoptReason. 2144 "none", 2145 "null_check", 2146 "null_assert" JVMCI_ONLY("_or_unreached0"), 2147 "range_check", 2148 "class_check", 2149 "array_check", 2150 "intrinsic" JVMCI_ONLY("_or_type_checked_inlining"), 2151 "bimorphic" JVMCI_ONLY("_or_optimized_type_check"), 2152 "unloaded", 2153 "uninitialized", 2154 "unreached", 2155 "unhandled", 2156 "constraint", 2157 "div0_check", 2158 "age", 2159 "predicate", 2160 "loop_limit_check", 2161 "speculate_class_check", 2162 "speculate_null_check", 2163 "rtm_state_change", 2164 "unstable_if", 2165 "unstable_fused_if", 2166 #if INCLUDE_JVMCI 2167 "aliasing", 2168 "transfer_to_interpreter", 2169 "not_compiled_exception_handler", 2170 "unresolved", 2171 "jsr_mismatch", 2172 #endif 2173 "tenured" 2174 }; 2175 const char* Deoptimization::_trap_action_name[] = { 2176 // Note: Keep this in sync. with enum DeoptAction. 2177 "none", 2178 "maybe_recompile", 2179 "reinterpret", 2180 "make_not_entrant", 2181 "make_not_compilable" 2182 }; 2183 2184 const char* Deoptimization::trap_reason_name(int reason) { 2185 // Check that every reason has a name 2186 STATIC_ASSERT(sizeof(_trap_reason_name)/sizeof(const char*) == Reason_LIMIT); 2187 2188 if (reason == Reason_many) return "many"; 2189 if ((uint)reason < Reason_LIMIT) 2190 return _trap_reason_name[reason]; 2191 static char buf[20]; 2192 sprintf(buf, "reason%d", reason); 2193 return buf; 2194 } 2195 const char* Deoptimization::trap_action_name(int action) { 2196 // Check that every action has a name 2197 STATIC_ASSERT(sizeof(_trap_action_name)/sizeof(const char*) == Action_LIMIT); 2198 2199 if ((uint)action < Action_LIMIT) 2200 return _trap_action_name[action]; 2201 static char buf[20]; 2202 sprintf(buf, "action%d", action); 2203 return buf; 2204 } 2205 2206 // This is used for debugging and diagnostics, including LogFile output. 2207 const char* Deoptimization::format_trap_request(char* buf, size_t buflen, 2208 int trap_request) { 2209 jint unloaded_class_index = trap_request_index(trap_request); 2210 const char* reason = trap_reason_name(trap_request_reason(trap_request)); 2211 const char* action = trap_action_name(trap_request_action(trap_request)); 2212 #if INCLUDE_JVMCI 2213 int debug_id = trap_request_debug_id(trap_request); 2214 #endif 2215 size_t len; 2216 if (unloaded_class_index < 0) { 2217 len = jio_snprintf(buf, buflen, "reason='%s' action='%s'" JVMCI_ONLY(" debug_id='%d'"), 2218 reason, action 2219 #if INCLUDE_JVMCI 2220 ,debug_id 2221 #endif 2222 ); 2223 } else { 2224 len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'" JVMCI_ONLY(" debug_id='%d'"), 2225 reason, action, unloaded_class_index 2226 #if INCLUDE_JVMCI 2227 ,debug_id 2228 #endif 2229 ); 2230 } 2231 return buf; 2232 } 2233 2234 juint Deoptimization::_deoptimization_hist 2235 [Deoptimization::Reason_LIMIT] 2236 [1 + Deoptimization::Action_LIMIT] 2237 [Deoptimization::BC_CASE_LIMIT] 2238 = {0}; 2239 2240 enum { 2241 LSB_BITS = 8, 2242 LSB_MASK = right_n_bits(LSB_BITS) 2243 }; 2244 2245 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, 2246 Bytecodes::Code bc) { 2247 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2248 assert(action >= 0 && action < Action_LIMIT, "oob"); 2249 _deoptimization_hist[Reason_none][0][0] += 1; // total 2250 _deoptimization_hist[reason][0][0] += 1; // per-reason total 2251 juint* cases = _deoptimization_hist[reason][1+action]; 2252 juint* bc_counter_addr = NULL; 2253 juint bc_counter = 0; 2254 // Look for an unused counter, or an exact match to this BC. 2255 if (bc != Bytecodes::_illegal) { 2256 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { 2257 juint* counter_addr = &cases[bc_case]; 2258 juint counter = *counter_addr; 2259 if ((counter == 0 && bc_counter_addr == NULL) 2260 || (Bytecodes::Code)(counter & LSB_MASK) == bc) { 2261 // this counter is either free or is already devoted to this BC 2262 bc_counter_addr = counter_addr; 2263 bc_counter = counter | bc; 2264 } 2265 } 2266 } 2267 if (bc_counter_addr == NULL) { 2268 // Overflow, or no given bytecode. 2269 bc_counter_addr = &cases[BC_CASE_LIMIT-1]; 2270 bc_counter = (*bc_counter_addr & ~LSB_MASK); // clear LSB 2271 } 2272 *bc_counter_addr = bc_counter + (1 << LSB_BITS); 2273 } 2274 2275 jint Deoptimization::total_deoptimization_count() { 2276 return _deoptimization_hist[Reason_none][0][0]; 2277 } 2278 2279 jint Deoptimization::deoptimization_count(DeoptReason reason) { 2280 assert(reason >= 0 && reason < Reason_LIMIT, "oob"); 2281 return _deoptimization_hist[reason][0][0]; 2282 } 2283 2284 void Deoptimization::print_statistics() { 2285 juint total = total_deoptimization_count(); 2286 juint account = total; 2287 if (total != 0) { 2288 ttyLocker ttyl; 2289 if (xtty != NULL) xtty->head("statistics type='deoptimization'"); 2290 tty->print_cr("Deoptimization traps recorded:"); 2291 #define PRINT_STAT_LINE(name, r) \ 2292 tty->print_cr(" %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name); 2293 PRINT_STAT_LINE("total", total); 2294 // For each non-zero entry in the histogram, print the reason, 2295 // the action, and (if specifically known) the type of bytecode. 2296 for (int reason = 0; reason < Reason_LIMIT; reason++) { 2297 for (int action = 0; action < Action_LIMIT; action++) { 2298 juint* cases = _deoptimization_hist[reason][1+action]; 2299 for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) { 2300 juint counter = cases[bc_case]; 2301 if (counter != 0) { 2302 char name[1*K]; 2303 Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK); 2304 if (bc_case == BC_CASE_LIMIT && (int)bc == 0) 2305 bc = Bytecodes::_illegal; 2306 sprintf(name, "%s/%s/%s", 2307 trap_reason_name(reason), 2308 trap_action_name(action), 2309 Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other"); 2310 juint r = counter >> LSB_BITS; 2311 tty->print_cr(" %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total); 2312 account -= r; 2313 } 2314 } 2315 } 2316 } 2317 if (account != 0) { 2318 PRINT_STAT_LINE("unaccounted", account); 2319 } 2320 #undef PRINT_STAT_LINE 2321 if (xtty != NULL) xtty->tail("statistics"); 2322 } 2323 } 2324 #else // COMPILER2 || SHARK || INCLUDE_JVMCI 2325 2326 2327 // Stubs for C1 only system. 2328 bool Deoptimization::trap_state_is_recompiled(int trap_state) { 2329 return false; 2330 } 2331 2332 const char* Deoptimization::trap_reason_name(int reason) { 2333 return "unknown"; 2334 } 2335 2336 void Deoptimization::print_statistics() { 2337 // no output 2338 } 2339 2340 void 2341 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) { 2342 // no udpate 2343 } 2344 2345 int Deoptimization::trap_state_has_reason(int trap_state, int reason) { 2346 return 0; 2347 } 2348 2349 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action, 2350 Bytecodes::Code bc) { 2351 // no update 2352 } 2353 2354 const char* Deoptimization::format_trap_state(char* buf, size_t buflen, 2355 int trap_state) { 2356 jio_snprintf(buf, buflen, "#%d", trap_state); 2357 return buf; 2358 } 2359 2360 #endif // COMPILER2 || SHARK || INCLUDE_JVMCI --- EOF ---