/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciValueKlass.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compileLog.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc *call_type    = OptoRuntime::dtrace_method_entry_exit_Type();
  address         call_address = is_entry ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry) :
                                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char     *call_name    = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new ThreadLocalNode() );

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node *method_node = _gvn.transform(ConNode::make(method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address,
                    call_name, raw_adr_type,
                    thread, method_node);
}


//=============================================================================
//------------------------------do_checkcast-----------------------------------
void Parse::do_checkcast() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  Node *obj = peek();

  // Throw uncommon trap if class is not loaded or the value we are casting
  // _from_ is not loaded, and value is not null.  If the value _is_ NULL,
  // then the checkcast does nothing.
  const TypeOopPtr *tp = _gvn.type(obj)->isa_oopptr();
  if (!will_link || (tp && tp->klass() && !tp->klass()->is_loaded())) {
    if (C->log() != NULL) {
      if (!will_link) {
        C->log()->elem("assert_null reason='checkcast' klass='%d'",
                       C->log()->identify(klass));
      }
      if (tp && tp->klass() && !tp->klass()->is_loaded()) {
        // %%% Cannot happen?
        C->log()->elem("assert_null reason='checkcast source' klass='%d'",
                       C->log()->identify(tp->klass()));
      }
    }
    null_assert(obj);
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      profile_null_checkcast();
    }
    return;
  }

  Node *res = gen_checkcast(obj, makecon(TypeKlassPtr::make(klass)) );

  // Pop from stack AFTER gen_checkcast because it can uncommon trap and
  // the debug info has to be correct.
  pop();
  push(res);
}


//------------------------------do_instanceof----------------------------------
void Parse::do_instanceof() {
  if (stopped()) return;
  // We would like to return false if class is not loaded, emitting a
  // dependency, but Java requires instanceof to load its operand.

  // Throw uncommon trap if class is not loaded
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  if (!will_link) {
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='instanceof' klass='%d'",
                     C->log()->identify(klass));
    }
    null_assert(peek());
    assert( stopped() || _gvn.type(peek())->higher_equal(TypePtr::NULL_PTR), "what's left behind is null" );
    if (!stopped()) {
      // The object is now known to be null.
      // Shortcut the effect of gen_instanceof and return "false" directly.
      pop();                   // pop the null
      push(_gvn.intcon(0));    // push false answer
    }
    return;
  }

  // Push the bool result back on stack
  Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass)), true);

  // Pop from stack AFTER gen_instanceof because it can uncommon trap.
  pop();
  push(res);
}

//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
    // Regarding the fourth condition in the if-statement from above:
    //
    // If the compiler has determined that the type of array 'ary' (represented
    // by 'array_klass') is java/lang/Object, the compiler must not assume that
    // the array 'ary' is monomorphic.
    //
    // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
    // because it is not possible to perform an arraystore into an object that is not
    // a "proper" array.
    //
    // Therefore, obtain the type of 'ary' at runtime and check whether the store
    // can still be performed successfully.
    //
    // The implementation reasons for the condition are the following:
    //
    // java/lang/Object is the superclass of all arrays, but it is represented by the VM
    // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
    // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
    //
    // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl = control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass, con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast(obj, a_e_klass);
}


void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new ThreadLocalNode() );
  Node* merge = new RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_thread_offset()));
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS, MemNode::unordered);
  Node *tst = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(in_bytes(InstanceKlass::init_state_offset()));
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
  // can generate code to load it as unsigned byte.
  Node* init_state = make_load(NULL, adr_node, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
  Node* being_init = _gvn.intcon(InstanceKlass::being_initialized);
  tst = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}


//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if ((!klass->is_initialized() && !klass->is_being_initialized()) ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }

  // Keep track of boxed values for EliminateAutoBox optimizations.
  if (C->eliminate_boxing() && klass->is_box_klass()) {
    C->set_has_boxed_value(true);
  }
}

//------------------------------do_vnew-----------------------------------------
void Parse::do_vnew() {
  kill_dead_locals();
  // FIXME: additional checks needed? (see InterpreterRuntime::_vnew)

  // Create a new ValueTypeNode
  ciValueKlass* vk = iter().method()->holder()->as_value_klass();
  ValueTypeNode* vt = ValueTypeNode::make(_gvn, vk)->as_ValueType();

  // Pop values from stack (last argument is first) and
  // connect them to the ValueTypeNode in reverse order.
  for (int arg_index = vk->param_count() - 1; arg_index >= 0; --arg_index) {
    int field_index = vk->field_index_for_argument(arg_index);
    ciType* field_type = vt->get_field_type(field_index);
    Node* value = field_type->size() == 1 ? pop() : pop_pair();
    vt->set_field_value(field_index, value);
  }
  push(_gvn.transform(vt));
}

#ifndef PRODUCT
//------------------------------dump_map_adr_mem-------------------------------
// Debug dump of the mapping from address types to MergeMemNode indices.
void Parse::dump_map_adr_mem() const {
  tty->print_cr("--- Mapping from address types to memory Nodes ---");
  MergeMemNode *mem = map() == NULL ? NULL : (map()->memory()->is_MergeMem() ?
                                              map()->memory()->as_MergeMem() : NULL);
  for (uint i = 0; i < (uint)C->num_alias_types(); i++) {
    C->alias_type(i)->print_on(tty);
    tty->print("\t");
    // Node mapping, if any
    if (mem && i < mem->req() && mem->in(i) && mem->in(i) != mem->empty_memory()) {
      mem->in(i)->dump();
    } else {
      tty->cr();
    }
  }
}

#endif


//=============================================================================
//
// parser methods for profiling


//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk = _gvn.transform( new CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst = _gvn.transform( new BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}

//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the address of the MethodCounters for this method.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}

//----------------------------method_data_addressing---------------------------
Node* Parse::method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  // Get offset within MethodData* of the data array
  ByteSize data_offset = MethodData::data_offset();

  // Get cell offset of the ProfileData within data array
  int cell_offset = md->dp_to_di(data->dp());

  // Add in counter_offset, the # of bytes into the ProfileData of counter or flag
  int offset = in_bytes(data_offset) + cell_offset + in_bytes(counter_offset);

  const TypePtr* adr_type = TypeMetadataPtr::make(md);
  Node* mdo = makecon(adr_type);
  Node* ptr = basic_plus_adr(mdo, mdo, offset);

  if (stride != 0) {
    Node* str = _gvn.MakeConX(stride);
    Node* scale = _gvn.transform( new MulXNode( idx, str ) );
    ptr = _gvn.transform( new AddPNode( mdo, ptr, scale ) );
  }

  return ptr;
}

//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type, MemNode::unordered);
}

//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);
}

//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type, MemNode::unordered);
  Node* incr = _gvn.transform(new OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type, MemNode::unordered);
}
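
// The routines below use the MethodData addressing and counter helpers above
// to update profile cells for specific bytecodes (branches, calls, ret,
// checkcast and switch).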

//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below.  These duplicate
  // the similar limit calculations in the interpreter.

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2 based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}

//--------------------------profile_not_taken_branch---------------------------
void Parse::profile_not_taken_branch(bool force_update) {

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(bci());
    assert(data->is_BranchData(), "need BranchData for not taken branch");
    increment_md_counter_at(md, data, BranchData::not_taken_offset());
  }

}

//---------------------------------profile_call--------------------------------
void Parse::profile_call(Node* receiver) {
  if (!method_data_update()) return;

  switch (bc()) {
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokedirect:
  case Bytecodes::_invokeinterface:
    profile_receiver_type(receiver);
    break;
  case Bytecodes::_invokestatic:
  case Bytecodes::_invokedynamic:
  case Bytecodes::_invokespecial:
    profile_generic_call();
    break;
  default: fatal("unexpected call bytecode");
  }
}

//------------------------------profile_generic_call---------------------------
void Parse::profile_generic_call() {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_CounterData(), "need CounterData for generic call site");
  increment_md_counter_at(md, data, CounterData::count_offset());
}

//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address,
                                     OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}

//---------------------------------profile_ret---------------------------------
void Parse::profile_ret(int target_bci) {
  if (!method_data_update()) return;

  // Skip if we aren't tracking ret targets
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_RetData(), "need RetData for ret");
  ciRetData* ret_data = (ciRetData*)data->as_RetData();

  // Look for whether the target_bci is already in the table.
  uint row;
  bool table_full = true;
  for (row = 0; row < ret_data->row_limit(); row++) {
    int key = ret_data->bci(row);
    table_full &= (key != RetData::no_bci);
    if (key == target_bci) break;
  }

  if (row >= ret_data->row_limit()) {
    // The target_bci was not found in the table.
    if (!table_full) {
      // XXX: Make slow call to update RetData
    }
    return;
  }

  // The target_bci is already in the table.
  increment_md_counter_at(md, data, RetData::bci_count_offset(row));
}

//--------------------------profile_null_checkcast----------------------------
void Parse::profile_null_checkcast() {
  // Set the null-seen flag, done in conjunction with the usual null check. We
  // never unset the flag, so this is a one-way switch.
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_BitData(), "need BitData for checkcast");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}

//-----------------------------profile_switch_case-----------------------------
void Parse::profile_switch_case(int table_index) {
  if (!method_data_update()) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");

  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_MultiBranchData(), "need MultiBranchData for switch case");
  if (table_index >= 0) {
    increment_md_counter_at(md, data, MultiBranchData::case_count_offset(table_index));
  } else {
    increment_md_counter_at(md, data, MultiBranchData::default_count_offset());
  }
}