/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern int explicit_null_checks_inserted,
           explicit_null_checks_elided;
#endif

//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, &elemtype);
  if (stopped())  return;     // guaranteed null or range check

  Node* idx = pop();
  Node* ary = pop();

  // Handle value type arrays
  const TypeOopPtr* elemptr = elemtype->make_oopptr();
  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (elemtype->isa_valuetype() != NULL) {
    // Load from flattened value type array
    ciValueKlass* vk = elemtype->is_valuetype()->value_klass();
    Node* vt = ValueTypeNode::make_from_flattened(this, vk, ary, adr);
    push(vt);
    return;
  } else if (elemptr != NULL && elemptr->is_valuetypeptr()) {
    // Load from non-flattened value type array (elements can never be null)
    bt = T_VALUETYPE;
    assert(elemptr->meet(TypePtr::NULL_PTR) != elemptr, "value type array elements should never be null");
  } else if (ValueArrayFlatten && elemptr != NULL && elemptr->can_be_value_type() &&
             !ary_t->klass_is_exact()) {
    // Cannot statically determine if array is flattened, emit runtime check
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    Node* kls = load_object_klass(ary);
    Node* tag = load_lh_array_tag(kls);
    ideal.if_then(tag, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
      // non flattened
      sync_kit(ideal);
      const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
      elemtype = ary_t->elem()->make_oopptr();
      Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                                IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
      ideal.sync_kit(this);
      ideal.set(res, ld);
    } ideal.else_(); {
      // flattened
      sync_kit(ideal);
      Node* k_adr = basic_plus_adr(kls, in_bytes(ArrayKlass::element_klass_offset()));
      Node* elem_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
      Node* obj_size = NULL;
      kill_dead_locals();
      inc_sp(2);
      Node* alloc_obj = new_instance(elem_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
      dec_sp(2);

      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
      assert(alloc->maybe_set_complete(&_gvn), "");
      alloc->initialization()->set_complete_with_arraycopy();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      // Unknown value type so might have reference fields
      if (!bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing)) {
        int base_off = sizeof(instanceOopDesc);
        Node* dst_base = basic_plus_adr(alloc_obj, base_off);
        Node* countx = obj_size;
        countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off)));
        countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong)));

        assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
        Node* lhp = basic_plus_adr(kls, in_bytes(Klass::layout_helper_offset()));
        Node* elem_shift = make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
        uint header = arrayOopDesc::base_offset_in_bytes(T_VALUETYPE);
        Node* base = basic_plus_adr(ary, header);
        idx = Compile::conv_I2X_index(&_gvn, idx, TypeInt::POS, control());
        Node* scale = _gvn.transform(new LShiftXNode(idx, elem_shift));
        Node* adr = basic_plus_adr(ary, base, scale);

        access_clone(adr, dst_base, countx, false);
      } else {
        ideal.sync_kit(this);
        ideal.make_leaf_call(OptoRuntime::load_unknown_value_Type(),
                             CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value),
                             "load_unknown_value",
                             ary, idx, alloc_obj);
        sync_kit(ideal);
      }

      insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

      ideal.sync_kit(this);
      ideal.set(res, alloc_obj);
    } ideal.end_if();
    sync_kit(ideal);
    push_node(bt, ideal.value(res));
    return;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = ary_t->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(ary, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  if (bt == T_VALUETYPE) {
    // Loading a non-flattened (but flattenable) value type from an array
    assert(!gvn().type(ld)->maybe_null(), "value type array elements should never be null");
    if (elemptr->value_klass()->is_scalarizable()) {
      ld = ValueTypeNode::make_from_oop(this, ld, elemptr->value_klass());
    }
  }

  push_node(bt, ld);
}


//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], &elemtype);
  if (stopped())  return;     // guaranteed null or range check
  Node* cast_val = NULL;
  if (bt == T_OBJECT) {
    cast_val = array_store_check();
    if (stopped())  return;
  }
  Node* val = pop_node(bt); // Value to store
  Node* idx = pop();        // Index in the array
  Node* ary = pop();        // The array itself

  const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
  if (bt == T_OBJECT) {
    const TypeOopPtr* elemptr = elemtype->make_oopptr();
    const Type* val_t = _gvn.type(val);
    if (elemtype->isa_valuetype() != NULL) {
      // Store to flattened value type array
      if (!cast_val->is_ValueType()) {
        inc_sp(3);
        cast_val = null_check(cast_val);
        if (stopped()) return;
        dec_sp(3);
        cast_val = ValueTypeNode::make_from_oop(this, cast_val, elemtype->is_valuetype()->value_klass());
      }
      cast_val->as_ValueType()->store_flattened(this, ary, adr);
      return;
    } else if (elemptr->is_valuetypeptr()) {
      // Store to non-flattened value type array
      if (!cast_val->is_ValueType()) {
        // Can not store null into a value type array
        inc_sp(3);
        cast_val = null_check(cast_val);
        if (stopped()) return;
        dec_sp(3);
      }
    } else if (elemptr->can_be_value_type() && !ary_t->klass_is_exact() &&
               (val->is_ValueType() || val_t == TypePtr::NULL_PTR || val_t->is_oopptr()->can_be_value_type())) {
      if (ValueArrayFlatten) {
        IdealKit ideal(this);
        Node* kls = load_object_klass(ary);
        Node* layout_val = load_lh_array_tag(kls);
        ideal.if_then(layout_val, BoolTest::ne, intcon(Klass::_lh_array_tag_vt_value)); {
          // non flattened
          sync_kit(ideal);

          if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
            gen_value_type_array_guard(ary, val, 3);
          }

          const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
          elemtype = ary_t->elem()->make_oopptr();
          access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
          ideal.sync_kit(this);
        } ideal.else_(); {
          // flattened
          // Object/interface array must be flattened, cast it
          if (val->is_ValueType()) {
            sync_kit(ideal);
            const TypeValueType* vt = _gvn.type(val)->is_valuetype();
            ciArrayKlass* array_klass = ciArrayKlass::make(vt->value_klass());
            const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
            ary = _gvn.transform(new CheckCastPPNode(control(), ary, arytype));
            adr = array_element_address(ary, idx, T_OBJECT, arytype->size(), control());
            val->as_ValueType()->store_flattened(this, ary, adr);
            ideal.sync_kit(this);
          } else {
            if (TypePtr::NULL_PTR->higher_equal(val_t)) {
              sync_kit(ideal);
              Node* null_ctl = top();
              val = null_check_oop(val, &null_ctl);
              if (null_ctl != top()) {
                PreserveJVMState pjvms(this);
                inc_sp(3);
                set_control(null_ctl);
                uncommon_trap(Deoptimization::Reason_null_check, Deoptimization::Action_none);
                dec_sp(3);
              }
              ideal.sync_kit(this);
            }
            if (!ideal.ctrl()->is_top()) {
              ideal.make_leaf_call(OptoRuntime::store_unknown_value_Type(),
                                   CAST_FROM_FN_PTR(address, OptoRuntime::store_unknown_value),
                                   "store_unknown_value",
                                   val, ary, idx);
            }
          }
        } ideal.end_if();
        sync_kit(ideal);
        return;
      } else {
        if (!val->is_ValueType() && TypePtr::NULL_PTR->higher_equal(val_t)) {
          gen_value_type_array_guard(ary, val, 3);
        }
      }
    }
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = ary_t->elem()->make_oopptr();
  }

  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  access_store_at(ary, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
}


//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
  Node* idx = peek(0+vals);   // Get from stack without popping
  Node* ary = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  const Type*       elemtype = arytype->elem();

  if (UseUniqueSubclasses && result2 != NULL) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  // Check for big class initializers with all constant offsets
  // feeding into a known-size array.
  const TypeInt* idxtype = _gvn.type(idx)->is_int();
  // See if the highest idx value is less than the lowest array bound,
  // and if the idx value cannot be negative:
  bool need_range_check = true;
  if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
    need_range_check = false;
    if (C->log() != NULL)  C->log()->elem("observe that='!need_range_check'");
  }

  ciKlass* arytype_klass = arytype->klass();
  if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  arytype->klass(), "!loaded array");
    return top();
  }

  // Do the range check
  if (GenerateRangeChecks && need_range_check) {
    Node* tst;
    if (sizetype->_hi <= 0) {
      // The greatest array bound is negative, so we can conclude that we're
      // compiling unreachable code, but the unsigned compare trick used below
      // only works with non-negative lengths.  Instead, hack "tst" to be zero so
      // the uncommon_trap path will always be taken.
      tst = _gvn.intcon(0);
    } else {
      // Range is constant in array-oop, so we can use the original state of mem
      Node* len = load_array_length(ary);

      // Test length vs index (standard trick using unsigned compare)
      Node* chk = _gvn.transform( new CmpUNode(idx, len) );
      BoolTest::mask btest = BoolTest::lt;
      tst = _gvn.transform( new BoolNode(chk, btest) );
    }
    RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
    _gvn.set_type(rc, rc->Value(&_gvn));
    if (!tst->is_Con()) {
      record_for_igvn(rc);
    }
    set_control(_gvn.transform(new IfTrueNode(rc)));
    // Branch to failure if out of bounds
    {
      PreserveJVMState pjvms(this);
      set_control(_gvn.transform(new IfFalseNode(rc)));
      if (C->allow_range_check_smearing()) {
        // Do not use builtin_throw, since range checks are sometimes
        // made more stringent by an optimistic transformation.
358 // This creates "tentative" range checks at this point, 359 // which are not guaranteed to throw exceptions. 360 // See IfNode::Ideal, is_range_check, adjust_check. 361 uncommon_trap(Deoptimization::Reason_range_check, 362 Deoptimization::Action_make_not_entrant, 363 NULL, "range_check"); 364 } else { 365 // If we have already recompiled with the range-check-widening 366 // heroic optimization turned off, then we must really be throwing 367 // range check exceptions. 368 builtin_throw(Deoptimization::Reason_range_check, idx); 369 } 370 } 371 } 372 // Check for always knowing you are throwing a range-check exception 373 if (stopped()) return top(); 374 375 // Make array address computation control dependent to prevent it 376 // from floating above the range check during loop optimizations. 377 Node* ptr = array_element_address(ary, idx, type, sizetype, control()); 378 379 if (result2 != NULL) *result2 = elemtype; 380 381 assert(ptr != top(), "top should go hand-in-hand with stopped"); 382 383 return ptr; 384 } 385 386 387 // returns IfNode 388 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) { 389 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32 390 Node *tst = _gvn.transform(new BoolNode(cmp, mask)); 391 IfNode *iff = create_and_map_if(control(), tst, prob, cnt); 392 return iff; 393 } 394 395 // return Region node 396 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) { 397 Node *region = new RegionNode(3); // 2 results 398 record_for_igvn(region); 399 region->init_req(1, iffalse); 400 region->init_req(2, iftrue ); 401 _gvn.set_type(region, Type::CONTROL); 402 region = _gvn.transform(region); 403 set_control (region); 404 return region; 405 } 406 407 // sentinel value for the target bci to mark never taken branches 408 // (according to profiling) 409 static const int never_reached = INT_MAX; 410 411 //------------------------------helper for tableswitch------------------------- 412 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) { 413 // True branch, use existing map info 414 { PreserveJVMState pjvms(this); 415 Node *iftrue = _gvn.transform( new IfTrueNode (iff) ); 416 set_control( iftrue ); 417 if (unc) { 418 repush_if_args(); 419 uncommon_trap(Deoptimization::Reason_unstable_if, 420 Deoptimization::Action_reinterpret, 421 NULL, 422 "taken always"); 423 } else { 424 assert(dest_bci_if_true != never_reached, "inconsistent dest"); 425 profile_switch_case(prof_table_index); 426 merge_new_path(dest_bci_if_true); 427 } 428 } 429 430 // False branch 431 Node *iffalse = _gvn.transform( new IfFalseNode(iff) ); 432 set_control( iffalse ); 433 } 434 435 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index, bool unc) { 436 // True branch, use existing map info 437 { PreserveJVMState pjvms(this); 438 Node *iffalse = _gvn.transform( new IfFalseNode (iff) ); 439 set_control( iffalse ); 440 if (unc) { 441 repush_if_args(); 442 uncommon_trap(Deoptimization::Reason_unstable_if, 443 Deoptimization::Action_reinterpret, 444 NULL, 445 "taken never"); 446 } else { 447 assert(dest_bci_if_true != never_reached, "inconsistent dest"); 448 profile_switch_case(prof_table_index); 449 merge_new_path(dest_bci_if_true); 450 } 451 } 452 453 // False branch 454 Node *iftrue = _gvn.transform( new IfTrueNode(iff) ); 455 set_control( iftrue ); 456 } 457 458 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index, bool unc) { 459 // 
False branch, use existing map and control() 460 if (unc) { 461 repush_if_args(); 462 uncommon_trap(Deoptimization::Reason_unstable_if, 463 Deoptimization::Action_reinterpret, 464 NULL, 465 "taken never"); 466 } else { 467 assert(dest_bci != never_reached, "inconsistent dest"); 468 profile_switch_case(prof_table_index); 469 merge_new_path(dest_bci); 470 } 471 } 472 473 474 extern "C" { 475 static int jint_cmp(const void *i, const void *j) { 476 int a = *(jint *)i; 477 int b = *(jint *)j; 478 return a > b ? 1 : a < b ? -1 : 0; 479 } 480 } 481 482 483 // Default value for methodData switch indexing. Must be a negative value to avoid 484 // conflict with any legal switch index. 485 #define NullTableIndex -1 486 487 class SwitchRange : public StackObj { 488 // a range of integers coupled with a bci destination 489 jint _lo; // inclusive lower limit 490 jint _hi; // inclusive upper limit 491 int _dest; 492 int _table_index; // index into method data table 493 float _cnt; // how many times this range was hit according to profiling 494 495 public: 496 jint lo() const { return _lo; } 497 jint hi() const { return _hi; } 498 int dest() const { return _dest; } 499 int table_index() const { return _table_index; } 500 bool is_singleton() const { return _lo == _hi; } 501 float cnt() const { return _cnt; } 502 503 void setRange(jint lo, jint hi, int dest, int table_index, float cnt) { 504 assert(lo <= hi, "must be a non-empty range"); 505 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; _cnt = cnt; 506 assert(_cnt >= 0, ""); 507 } 508 bool adjoinRange(jint lo, jint hi, int dest, int table_index, float cnt, bool trim_ranges) { 509 assert(lo <= hi, "must be a non-empty range"); 510 if (lo == _hi+1 && table_index == _table_index) { 511 // see merge_ranges() comment below 512 if (trim_ranges) { 513 if (cnt == 0) { 514 if (_cnt != 0) { 515 return false; 516 } 517 if (dest != _dest) { 518 _dest = never_reached; 519 } 520 } else { 521 if (_cnt == 0) { 522 return false; 523 } 524 if (dest != _dest) { 525 return false; 526 } 527 } 528 } else { 529 if (dest != _dest) { 530 return false; 531 } 532 } 533 _hi = hi; 534 _cnt += cnt; 535 return true; 536 } 537 return false; 538 } 539 540 void set (jint value, int dest, int table_index, float cnt) { 541 setRange(value, value, dest, table_index, cnt); 542 } 543 bool adjoin(jint value, int dest, int table_index, float cnt, bool trim_ranges) { 544 return adjoinRange(value, value, dest, table_index, cnt, trim_ranges); 545 } 546 bool adjoin(SwitchRange& other) { 547 return adjoinRange(other._lo, other._hi, other._dest, other._table_index, other._cnt, false); 548 } 549 550 void print() { 551 if (is_singleton()) 552 tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt()); 553 else if (lo() == min_jint) 554 tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt()); 555 else if (hi() == max_jint) 556 tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt()); 557 else 558 tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt()); 559 } 560 }; 561 562 // We try to minimize the number of ranges and the size of the taken 563 // ones using profiling data. When ranges are created, 564 // SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge 565 // if both were never hit or both were hit to build longer unreached 566 // ranges. Here, we now merge adjoining ranges with the same 567 // destination and finally set destination of unreached ranges to the 568 // special value never_reached because it can help minimize the number 569 // of tests that are necessary. 
570 // 571 // For instance: 572 // [0, 1] to target1 sometimes taken 573 // [1, 2] to target1 never taken 574 // [2, 3] to target2 never taken 575 // would lead to: 576 // [0, 1] to target1 sometimes taken 577 // [1, 3] never taken 578 // 579 // (first 2 ranges to target1 are not merged) 580 static void merge_ranges(SwitchRange* ranges, int& rp) { 581 if (rp == 0) { 582 return; 583 } 584 int shift = 0; 585 for (int j = 0; j < rp; j++) { 586 SwitchRange& r1 = ranges[j-shift]; 587 SwitchRange& r2 = ranges[j+1]; 588 if (r1.adjoin(r2)) { 589 shift++; 590 } else if (shift > 0) { 591 ranges[j+1-shift] = r2; 592 } 593 } 594 rp -= shift; 595 for (int j = 0; j <= rp; j++) { 596 SwitchRange& r = ranges[j]; 597 if (r.cnt() == 0 && r.dest() != never_reached) { 598 r.setRange(r.lo(), r.hi(), never_reached, r.table_index(), r.cnt()); 599 } 600 } 601 } 602 603 //-------------------------------do_tableswitch-------------------------------- 604 void Parse::do_tableswitch() { 605 Node* lookup = pop(); 606 // Get information about tableswitch 607 int default_dest = iter().get_dest_table(0); 608 int lo_index = iter().get_int_table(1); 609 int hi_index = iter().get_int_table(2); 610 int len = hi_index - lo_index + 1; 611 612 if (len < 1) { 613 // If this is a backward branch, add safepoint 614 maybe_add_safepoint(default_dest); 615 merge(default_dest); 616 return; 617 } 618 619 ciMethodData* methodData = method()->method_data(); 620 ciMultiBranchData* profile = NULL; 621 if (methodData->is_mature() && UseSwitchProfiling) { 622 ciProfileData* data = methodData->bci_to_data(bci()); 623 if (data != NULL && data->is_MultiBranchData()) { 624 profile = (ciMultiBranchData*)data; 625 } 626 } 627 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 628 629 // generate decision tree, using trichotomy when possible 630 int rnum = len+2; 631 bool makes_backward_branch = false; 632 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 633 int rp = -1; 634 if (lo_index != min_jint) { 635 uint cnt = 1; 636 if (profile != NULL) { 637 cnt = profile->default_count() / (hi_index != max_jint ? 2 : 1); 638 } 639 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex, cnt); 640 } 641 for (int j = 0; j < len; j++) { 642 jint match_int = lo_index+j; 643 int dest = iter().get_dest_table(j+3); 644 makes_backward_branch |= (dest <= bci()); 645 int table_index = method_data_update() ? j : NullTableIndex; 646 uint cnt = 1; 647 if (profile != NULL) { 648 cnt = profile->count_at(j); 649 } 650 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) { 651 ranges[++rp].set(match_int, dest, table_index, cnt); 652 } 653 } 654 jint highest = lo_index+(len-1); 655 assert(ranges[rp].hi() == highest, ""); 656 if (highest != max_jint) { 657 uint cnt = 1; 658 if (profile != NULL) { 659 cnt = profile->default_count() / (lo_index != min_jint ? 
2 : 1); 660 } 661 if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, cnt, trim_ranges)) { 662 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, cnt); 663 } 664 } 665 assert(rp < len+2, "not too many ranges"); 666 667 if (trim_ranges) { 668 merge_ranges(ranges, rp); 669 } 670 671 // Safepoint in case if backward branch observed 672 if( makes_backward_branch && UseLoopSafepoints ) 673 add_safepoint(); 674 675 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 676 } 677 678 679 //------------------------------do_lookupswitch-------------------------------- 680 void Parse::do_lookupswitch() { 681 Node *lookup = pop(); // lookup value 682 // Get information about lookupswitch 683 int default_dest = iter().get_dest_table(0); 684 int len = iter().get_int_table(1); 685 686 if (len < 1) { // If this is a backward branch, add safepoint 687 maybe_add_safepoint(default_dest); 688 merge(default_dest); 689 return; 690 } 691 692 ciMethodData* methodData = method()->method_data(); 693 ciMultiBranchData* profile = NULL; 694 if (methodData->is_mature() && UseSwitchProfiling) { 695 ciProfileData* data = methodData->bci_to_data(bci()); 696 if (data != NULL && data->is_MultiBranchData()) { 697 profile = (ciMultiBranchData*)data; 698 } 699 } 700 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 701 702 // generate decision tree, using trichotomy when possible 703 jint* table = NEW_RESOURCE_ARRAY(jint, len*3); 704 { 705 for (int j = 0; j < len; j++) { 706 table[3*j+0] = iter().get_int_table(2+2*j); 707 table[3*j+1] = iter().get_dest_table(2+2*j+1); 708 table[3*j+2] = profile == NULL ? 1 : profile->count_at(j); 709 } 710 qsort(table, len, 3*sizeof(table[0]), jint_cmp); 711 } 712 713 float defaults = 0; 714 jint prev = min_jint; 715 for (int j = 0; j < len; j++) { 716 jint match_int = table[3*j+0]; 717 if (match_int != prev) { 718 defaults += (float)match_int - prev; 719 } 720 prev = match_int+1; 721 } 722 if (prev-1 != max_jint) { 723 defaults += (float)max_jint - prev + 1; 724 } 725 float default_cnt = 1; 726 if (profile != NULL) { 727 default_cnt = profile->default_count()/defaults; 728 } 729 730 int rnum = len*2+1; 731 bool makes_backward_branch = false; 732 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 733 int rp = -1; 734 for (int j = 0; j < len; j++) { 735 jint match_int = table[3*j+0]; 736 int dest = table[3*j+1]; 737 int cnt = table[3*j+2]; 738 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1; 739 int table_index = method_data_update() ? 
j : NullTableIndex; 740 makes_backward_branch |= (dest <= bci()); 741 float c = default_cnt * ((float)match_int - next_lo); 742 if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, NullTableIndex, c, trim_ranges))) { 743 assert(default_dest != never_reached, "sentinel value for dead destinations"); 744 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex, c); 745 } 746 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index, cnt, trim_ranges)) { 747 assert(dest != never_reached, "sentinel value for dead destinations"); 748 ranges[++rp].set(match_int, dest, table_index, cnt); 749 } 750 } 751 jint highest = table[3*(len-1)]; 752 assert(ranges[rp].hi() == highest, ""); 753 if (highest != max_jint && 754 !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest), trim_ranges)) { 755 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex, default_cnt * ((float)max_jint - highest)); 756 } 757 assert(rp < rnum, "not too many ranges"); 758 759 if (trim_ranges) { 760 merge_ranges(ranges, rp); 761 } 762 763 // Safepoint in case backward branch observed 764 if (makes_backward_branch && UseLoopSafepoints) 765 add_safepoint(); 766 767 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 768 } 769 770 static float if_prob(float taken_cnt, float total_cnt) { 771 assert(taken_cnt <= total_cnt, ""); 772 if (total_cnt == 0) { 773 return PROB_FAIR; 774 } 775 float p = taken_cnt / total_cnt; 776 return MIN2(MAX2(p, PROB_MIN), PROB_MAX); 777 } 778 779 static float if_cnt(float cnt) { 780 if (cnt == 0) { 781 return COUNT_UNKNOWN; 782 } 783 return cnt; 784 } 785 786 static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) { 787 float total_cnt = 0; 788 for (SwitchRange* sr = lo; sr <= hi; sr++) { 789 total_cnt += sr->cnt(); 790 } 791 return total_cnt; 792 } 793 794 class SwitchRanges : public ResourceObj { 795 public: 796 SwitchRange* _lo; 797 SwitchRange* _hi; 798 SwitchRange* _mid; 799 float _cost; 800 801 enum { 802 Start, 803 LeftDone, 804 RightDone, 805 Done 806 } _state; 807 808 SwitchRanges(SwitchRange *lo, SwitchRange *hi) 809 : _lo(lo), _hi(hi), _mid(NULL), 810 _cost(0), _state(Start) { 811 } 812 813 SwitchRanges() 814 : _lo(NULL), _hi(NULL), _mid(NULL), 815 _cost(0), _state(Start) {} 816 }; 817 818 // Estimate cost of performing a binary search on lo..hi 819 static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) { 820 GrowableArray<SwitchRanges> tree; 821 SwitchRanges root(lo, hi); 822 tree.push(root); 823 824 float cost = 0; 825 do { 826 SwitchRanges& r = *tree.adr_at(tree.length()-1); 827 if (r._hi != r._lo) { 828 if (r._mid == NULL) { 829 float r_cnt = sum_of_cnts(r._lo, r._hi); 830 831 if (r_cnt == 0) { 832 tree.pop(); 833 cost = 0; 834 continue; 835 } 836 837 SwitchRange* mid = NULL; 838 mid = r._lo; 839 for (float cnt = 0; ; ) { 840 assert(mid <= r._hi, "out of bounds"); 841 cnt += mid->cnt(); 842 if (cnt > r_cnt / 2) { 843 break; 844 } 845 mid++; 846 } 847 assert(mid <= r._hi, "out of bounds"); 848 r._mid = mid; 849 r._cost = r_cnt / total_cnt; 850 } 851 r._cost += cost; 852 if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) { 853 cost = 0; 854 r._state = SwitchRanges::LeftDone; 855 tree.push(SwitchRanges(r._lo, r._mid-1)); 856 } else if (r._state < SwitchRanges::RightDone) { 857 cost = 0; 858 r._state = SwitchRanges::RightDone; 859 tree.push(SwitchRanges(r._mid == r._lo ? 
r._mid+1 : r._mid, r._hi)); 860 } else { 861 tree.pop(); 862 cost = r._cost; 863 } 864 } else { 865 tree.pop(); 866 cost = r._cost; 867 } 868 } while (tree.length() > 0); 869 870 871 return cost; 872 } 873 874 // It sometimes pays off to test most common ranges before the binary search 875 void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) { 876 uint nr = hi - lo + 1; 877 float total_cnt = sum_of_cnts(lo, hi); 878 879 float min = compute_tree_cost(lo, hi, total_cnt); 880 float extra = 1; 881 float sub = 0; 882 883 SwitchRange* array1 = lo; 884 SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr); 885 886 SwitchRange* ranges = NULL; 887 888 while (nr >= 2) { 889 assert(lo == array1 || lo == array2, "one the 2 already allocated arrays"); 890 ranges = (lo == array1) ? array2 : array1; 891 892 // Find highest frequency range 893 SwitchRange* candidate = lo; 894 for (SwitchRange* sr = lo+1; sr <= hi; sr++) { 895 if (sr->cnt() > candidate->cnt()) { 896 candidate = sr; 897 } 898 } 899 SwitchRange most_freq = *candidate; 900 if (most_freq.cnt() == 0) { 901 break; 902 } 903 904 // Copy remaining ranges into another array 905 int shift = 0; 906 for (uint i = 0; i < nr; i++) { 907 SwitchRange* sr = &lo[i]; 908 if (sr != candidate) { 909 ranges[i-shift] = *sr; 910 } else { 911 shift++; 912 if (i > 0 && i < nr-1) { 913 SwitchRange prev = lo[i-1]; 914 prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.table_index(), prev.cnt()); 915 if (prev.adjoin(lo[i+1])) { 916 shift++; 917 i++; 918 } 919 ranges[i-shift] = prev; 920 } 921 } 922 } 923 nr -= shift; 924 925 // Evaluate cost of testing the most common range and performing a 926 // binary search on the other ranges 927 float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt); 928 if (cost >= min) { 929 break; 930 } 931 // swap arrays 932 lo = &ranges[0]; 933 hi = &ranges[nr-1]; 934 935 // It pays off: emit the test for the most common range 936 assert(most_freq.cnt() > 0, "must be taken"); 937 Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo()))); 938 Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(most_freq.hi() - most_freq.lo()))); 939 Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le)); 940 IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt())); 941 jump_if_true_fork(iff, most_freq.dest(), most_freq.table_index(), false); 942 943 sub += most_freq.cnt() / total_cnt; 944 extra += 1 - sub; 945 min = cost; 946 } 947 } 948 949 //----------------------------create_jump_tables------------------------------- 950 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) { 951 // Are jumptables enabled 952 if (!UseJumpTables) return false; 953 954 // Are jumptables supported 955 if (!Matcher::has_match_rule(Op_Jump)) return false; 956 957 // Don't make jump table if profiling 958 if (method_data_update()) return false; 959 960 bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 961 962 // Decide if a guard is needed to lop off big ranges at either (or 963 // both) end(s) of the input set. We'll call this the default target 964 // even though we can't be sure that it is the true "default". 

  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so that with a well-balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to
    // go through a JumpNode than a tree of IfNodes. The average cost of a
    // tree of IfNodes with MinJumpTableSize ranges is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
    if (cost < log2f(MinJumpTableSize)) {
      return false;
    }
  } else {
    if (num_cases < MinJumpTableSize)
      return false;
  }
  if (num_cases > (MaxJumpTableSparseness * num_range))
    return false;

  // Normalize table lookups to zero
  int lowval = lo->lo();
  key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );

  // Generate a guard to protect against input keyvals that aren't
  // in the switch domain.
  if (needs_guard) {
    Node*   size = _gvn.intcon(num_cases);
    Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
    Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
    jump_if_true_fork(iff, default_dest, NullTableIndex, trim_ranges && trimmed_cnt == 0);

    total -= trimmed_cnt;
  }

  // Create an ideal node JumpTable that has projections
  // of all possible ranges for a switch statement
  // The key_val input must be converted to a pointer offset and scaled.
  // Compare Parse::array_addressing above.

  // Clean the 32-bit int into a real 64-bit offset.
  // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
  const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin);
  // Make I2L conversion control dependent to prevent it from
  // floating above the range check during loop optimizations.
1052 key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control()); 1053 1054 // Shift the value by wordsize so we have an index into the table, rather 1055 // than a switch value 1056 Node *shiftWord = _gvn.MakeConX(wordSize); 1057 key_val = _gvn.transform( new MulXNode( key_val, shiftWord)); 1058 1059 // Create the JumpNode 1060 Arena* arena = C->comp_arena(); 1061 float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases); 1062 int i = 0; 1063 if (total == 0) { 1064 for (SwitchRange* r = lo; r <= hi; r++) { 1065 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1066 probs[i] = 1.0F / num_cases; 1067 } 1068 } 1069 } else { 1070 for (SwitchRange* r = lo; r <= hi; r++) { 1071 float prob = r->cnt()/total; 1072 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1073 probs[i] = prob / (r->hi() - r->lo() + 1); 1074 } 1075 } 1076 } 1077 1078 ciMethodData* methodData = method()->method_data(); 1079 ciMultiBranchData* profile = NULL; 1080 if (methodData->is_mature()) { 1081 ciProfileData* data = methodData->bci_to_data(bci()); 1082 if (data != NULL && data->is_MultiBranchData()) { 1083 profile = (ciMultiBranchData*)data; 1084 } 1085 } 1086 1087 Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == NULL ? COUNT_UNKNOWN : total)); 1088 1089 // These are the switch destinations hanging off the jumpnode 1090 i = 0; 1091 for (SwitchRange* r = lo; r <= hi; r++) { 1092 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 1093 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval))); 1094 { 1095 PreserveJVMState pjvms(this); 1096 set_control(input); 1097 jump_if_always_fork(r->dest(), r->table_index(), trim_ranges && r->cnt() == 0); 1098 } 1099 } 1100 } 1101 assert(i == num_cases, "miscount of cases"); 1102 stop_and_kill_map(); // no more uses for this JVMS 1103 return true; 1104 } 1105 1106 //----------------------------jump_switch_ranges------------------------------- 1107 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) { 1108 Block* switch_block = block(); 1109 bool trim_ranges = !method_data_update() && !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if); 1110 1111 if (switch_depth == 0) { 1112 // Do special processing for the top-level call. 1113 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT"); 1114 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT"); 1115 1116 // Decrement pred-numbers for the unique set of nodes. 1117 #ifdef ASSERT 1118 if (!trim_ranges) { 1119 // Ensure that the block's successors are a (duplicate-free) set. 1120 int successors_counted = 0; // block occurrences in [hi..lo] 1121 int unique_successors = switch_block->num_successors(); 1122 for (int i = 0; i < unique_successors; i++) { 1123 Block* target = switch_block->successor_at(i); 1124 1125 // Check that the set of successors is the same in both places. 1126 int successors_found = 0; 1127 for (SwitchRange* p = lo; p <= hi; p++) { 1128 if (p->dest() == target->start()) successors_found++; 1129 } 1130 assert(successors_found > 0, "successor must be known"); 1131 successors_counted += successors_found; 1132 } 1133 assert(successors_counted == (hi-lo)+1, "no unexpected successors"); 1134 } 1135 #endif 1136 1137 // Maybe prune the inputs, based on the type of key_val. 
1138 jint min_val = min_jint; 1139 jint max_val = max_jint; 1140 const TypeInt* ti = key_val->bottom_type()->isa_int(); 1141 if (ti != NULL) { 1142 min_val = ti->_lo; 1143 max_val = ti->_hi; 1144 assert(min_val <= max_val, "invalid int type"); 1145 } 1146 while (lo->hi() < min_val) { 1147 lo++; 1148 } 1149 if (lo->lo() < min_val) { 1150 lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index(), lo->cnt()); 1151 } 1152 while (hi->lo() > max_val) { 1153 hi--; 1154 } 1155 if (hi->hi() > max_val) { 1156 hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index(), hi->cnt()); 1157 } 1158 1159 linear_search_switch_ranges(key_val, lo, hi); 1160 } 1161 1162 #ifndef PRODUCT 1163 if (switch_depth == 0) { 1164 _max_switch_depth = 0; 1165 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1; 1166 } 1167 #endif 1168 1169 assert(lo <= hi, "must be a non-empty set of ranges"); 1170 if (lo == hi) { 1171 jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0); 1172 } else { 1173 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges"); 1174 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges"); 1175 1176 if (create_jump_tables(key_val, lo, hi)) return; 1177 1178 SwitchRange* mid = NULL; 1179 float total_cnt = sum_of_cnts(lo, hi); 1180 1181 int nr = hi - lo + 1; 1182 if (UseSwitchProfiling) { 1183 // Don't keep the binary search tree balanced: pick up mid point 1184 // that split frequencies in half. 1185 float cnt = 0; 1186 for (SwitchRange* sr = lo; sr <= hi; sr++) { 1187 cnt += sr->cnt(); 1188 if (cnt >= total_cnt / 2) { 1189 mid = sr; 1190 break; 1191 } 1192 } 1193 } else { 1194 mid = lo + nr/2; 1195 1196 // if there is an easy choice, pivot at a singleton: 1197 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--; 1198 1199 assert(lo < mid && mid <= hi, "good pivot choice"); 1200 assert(nr != 2 || mid == hi, "should pick higher of 2"); 1201 assert(nr != 3 || mid == hi-1, "should pick middle of 3"); 1202 } 1203 1204 1205 Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo()); 1206 1207 if (mid->is_singleton()) { 1208 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt())); 1209 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index(), trim_ranges && mid->cnt() == 0); 1210 1211 // Special Case: If there are exactly three ranges, and the high 1212 // and low range each go to the same place, omit the "gt" test, 1213 // since it will not discriminate anything. 1214 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo; 1215 1216 // if there is a higher range, test for it and process it: 1217 if (mid < hi && !eq_test_only) { 1218 // two comparisons of same values--should enable 1 test for 2 branches 1219 // Use BoolTest::le instead of BoolTest::gt 1220 float cnt = sum_of_cnts(lo, mid-1); 1221 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le, if_prob(cnt, total_cnt), if_cnt(cnt)); 1222 Node *iftrue = _gvn.transform( new IfTrueNode(iff_le) ); 1223 Node *iffalse = _gvn.transform( new IfFalseNode(iff_le) ); 1224 { PreserveJVMState pjvms(this); 1225 set_control(iffalse); 1226 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1); 1227 } 1228 set_control(iftrue); 1229 } 1230 1231 } else { 1232 // mid is a range, not a singleton, so treat mid..hi as a unit 1233 float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi); 1234 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? 
BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt)); 1235 1236 // if there is a higher range, test for it and process it: 1237 if (mid == hi) { 1238 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index(), trim_ranges && cnt == 0); 1239 } else { 1240 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) ); 1241 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) ); 1242 { PreserveJVMState pjvms(this); 1243 set_control(iftrue); 1244 jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1); 1245 } 1246 set_control(iffalse); 1247 } 1248 } 1249 1250 // in any case, process the lower range 1251 if (mid == lo) { 1252 if (mid->is_singleton()) { 1253 jump_switch_ranges(key_val, lo+1, hi, switch_depth+1); 1254 } else { 1255 jump_if_always_fork(lo->dest(), lo->table_index(), trim_ranges && lo->cnt() == 0); 1256 } 1257 } else { 1258 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1); 1259 } 1260 } 1261 1262 // Decrease pred_count for each successor after all is done. 1263 if (switch_depth == 0) { 1264 int unique_successors = switch_block->num_successors(); 1265 for (int i = 0; i < unique_successors; i++) { 1266 Block* target = switch_block->successor_at(i); 1267 // Throw away the pre-allocated path for each unique successor. 1268 target->next_path_num(); 1269 } 1270 } 1271 1272 #ifndef PRODUCT 1273 _max_switch_depth = MAX2(switch_depth, _max_switch_depth); 1274 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) { 1275 SwitchRange* r; 1276 int nsing = 0; 1277 for( r = lo; r <= hi; r++ ) { 1278 if( r->is_singleton() ) nsing++; 1279 } 1280 tty->print(">>> "); 1281 _method->print_short_name(); 1282 tty->print_cr(" switch decision tree"); 1283 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d", 1284 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth); 1285 if (_max_switch_depth > _est_switch_depth) { 1286 tty->print_cr("******** BAD SWITCH DEPTH ********"); 1287 } 1288 tty->print(" "); 1289 for( r = lo; r <= hi; r++ ) { 1290 r->print(); 1291 } 1292 tty->cr(); 1293 } 1294 #endif 1295 } 1296 1297 void Parse::modf() { 1298 Node *f2 = pop(); 1299 Node *f1 = pop(); 1300 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(), 1301 CAST_FROM_FN_PTR(address, SharedRuntime::frem), 1302 "frem", NULL, //no memory effects 1303 f1, f2); 1304 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1305 1306 push(res); 1307 } 1308 1309 void Parse::modd() { 1310 Node *d2 = pop_pair(); 1311 Node *d1 = pop_pair(); 1312 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), 1313 CAST_FROM_FN_PTR(address, SharedRuntime::drem), 1314 "drem", NULL, //no memory effects 1315 d1, top(), d2, top()); 1316 Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1317 1318 #ifdef ASSERT 1319 Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1)); 1320 assert(res_top == top(), "second value must be top"); 1321 #endif 1322 1323 push_pair(res_d); 1324 } 1325 1326 void Parse::l2f() { 1327 Node* f2 = pop(); 1328 Node* f1 = pop(); 1329 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(), 1330 CAST_FROM_FN_PTR(address, SharedRuntime::l2f), 1331 "l2f", NULL, //no memory effects 1332 f1, f2); 1333 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 1334 1335 push(res); 1336 } 1337 1338 void Parse::do_irem() { 1339 // Must keep both values on the expression-stack during null-check 1340 zero_check_int(peek()); 1341 // Compile-time detect of null-exception? 
1342 if (stopped()) return; 1343 1344 Node* b = pop(); 1345 Node* a = pop(); 1346 1347 const Type *t = _gvn.type(b); 1348 if (t != Type::TOP) { 1349 const TypeInt *ti = t->is_int(); 1350 if (ti->is_con()) { 1351 int divisor = ti->get_con(); 1352 // check for positive power of 2 1353 if (divisor > 0 && 1354 (divisor & ~(divisor-1)) == divisor) { 1355 // yes ! 1356 Node *mask = _gvn.intcon((divisor - 1)); 1357 // Sigh, must handle negative dividends 1358 Node *zero = _gvn.intcon(0); 1359 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt, PROB_FAIR, COUNT_UNKNOWN); 1360 Node *iff = _gvn.transform( new IfFalseNode(ifff) ); 1361 Node *ift = _gvn.transform( new IfTrueNode (ifff) ); 1362 Node *reg = jump_if_join(ift, iff); 1363 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT); 1364 // Negative path; negate/and/negate 1365 Node *neg = _gvn.transform( new SubINode(zero, a) ); 1366 Node *andn= _gvn.transform( new AndINode(neg, mask) ); 1367 Node *negn= _gvn.transform( new SubINode(zero, andn) ); 1368 phi->init_req(1, negn); 1369 // Fast positive case 1370 Node *andx = _gvn.transform( new AndINode(a, mask) ); 1371 phi->init_req(2, andx); 1372 // Push the merge 1373 push( _gvn.transform(phi) ); 1374 return; 1375 } 1376 } 1377 } 1378 // Default case 1379 push( _gvn.transform( new ModINode(control(),a,b) ) ); 1380 } 1381 1382 // Handle jsr and jsr_w bytecode 1383 void Parse::do_jsr() { 1384 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode"); 1385 1386 // Store information about current state, tagged with new _jsr_bci 1387 int return_bci = iter().next_bci(); 1388 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest(); 1389 1390 // Update method data 1391 profile_taken_branch(jsr_bci); 1392 1393 // The way we do things now, there is only one successor block 1394 // for the jsr, because the target code is cloned by ciTypeFlow. 1395 Block* target = successor_for_bci(jsr_bci); 1396 1397 // What got pushed? 1398 const Type* ret_addr = target->peek(); 1399 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)"); 1400 1401 // Effect on jsr on stack 1402 push(_gvn.makecon(ret_addr)); 1403 1404 // Flow to the jsr. 1405 merge(jsr_bci); 1406 } 1407 1408 // Handle ret bytecode 1409 void Parse::do_ret() { 1410 // Find to whom we return. 1411 assert(block()->num_successors() == 1, "a ret can only go one place now"); 1412 Block* target = block()->successor_at(0); 1413 assert(!target->is_ready(), "our arrival must be expected"); 1414 profile_ret(target->flow()->start()); 1415 int pnum = target->next_path_num(); 1416 merge_common(target, pnum); 1417 } 1418 1419 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) { 1420 if (btest != BoolTest::eq && btest != BoolTest::ne) { 1421 // Only ::eq and ::ne are supported for profile injection. 1422 return false; 1423 } 1424 if (test->is_Cmp() && 1425 test->in(1)->Opcode() == Op_ProfileBoolean) { 1426 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1); 1427 int false_cnt = profile->false_count(); 1428 int true_cnt = profile->true_count(); 1429 1430 // Counts matching depends on the actual test operation (::eq or ::ne). 1431 // No need to scale the counts because profile injection was designed 1432 // to feed exact counts into VM. 1433 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt; 1434 not_taken = (btest == BoolTest::eq) ? 
true_cnt : false_cnt; 1435 1436 profile->consume(); 1437 return true; 1438 } 1439 return false; 1440 } 1441 //--------------------------dynamic_branch_prediction-------------------------- 1442 // Try to gather dynamic branch prediction behavior. Return a probability 1443 // of the branch being taken and set the "cnt" field. Returns a -1.0 1444 // if we need to use static prediction for some reason. 1445 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) { 1446 ResourceMark rm; 1447 1448 cnt = COUNT_UNKNOWN; 1449 1450 int taken = 0; 1451 int not_taken = 0; 1452 1453 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken); 1454 1455 if (use_mdo) { 1456 // Use MethodData information if it is available 1457 // FIXME: free the ProfileData structure 1458 ciMethodData* methodData = method()->method_data(); 1459 if (!methodData->is_mature()) return PROB_UNKNOWN; 1460 ciProfileData* data = methodData->bci_to_data(bci()); 1461 if (data == NULL) { 1462 return PROB_UNKNOWN; 1463 } 1464 if (!data->is_JumpData()) return PROB_UNKNOWN; 1465 1466 // get taken and not taken values 1467 taken = data->as_JumpData()->taken(); 1468 not_taken = 0; 1469 if (data->is_BranchData()) { 1470 not_taken = data->as_BranchData()->not_taken(); 1471 } 1472 1473 // scale the counts to be commensurate with invocation counts: 1474 taken = method()->scale_count(taken); 1475 not_taken = method()->scale_count(not_taken); 1476 } 1477 1478 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. 1479 // We also check that individual counters are positive first, otherwise the sum can become positive. 1480 if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { 1481 if (C->log() != NULL) { 1482 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); 1483 } 1484 return PROB_UNKNOWN; 1485 } 1486 1487 // Compute frequency that we arrive here 1488 float sum = taken + not_taken; 1489 // Adjust, if this block is a cloned private block but the 1490 // Jump counts are shared. Taken the private counts for 1491 // just this path instead of the shared counts. 1492 if( block()->count() > 0 ) 1493 sum = block()->count(); 1494 cnt = sum / FreqCountInvocations; 1495 1496 // Pin probability to sane limits 1497 float prob; 1498 if( !taken ) 1499 prob = (0+PROB_MIN) / 2; 1500 else if( !not_taken ) 1501 prob = (1+PROB_MAX) / 2; 1502 else { // Compute probability of true path 1503 prob = (float)taken / (float)(taken + not_taken); 1504 if (prob > PROB_MAX) prob = PROB_MAX; 1505 if (prob < PROB_MIN) prob = PROB_MIN; 1506 } 1507 1508 assert((cnt > 0.0f) && (prob > 0.0f), 1509 "Bad frequency assignment in if"); 1510 1511 if (C->log() != NULL) { 1512 const char* prob_str = NULL; 1513 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always"; 1514 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? 
"min" : "never"; 1515 char prob_str_buf[30]; 1516 if (prob_str == NULL) { 1517 jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob); 1518 prob_str = prob_str_buf; 1519 } 1520 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 1521 iter().get_dest(), taken, not_taken, cnt, prob_str); 1522 } 1523 return prob; 1524 } 1525 1526 //-----------------------------branch_prediction------------------------------- 1527 float Parse::branch_prediction(float& cnt, 1528 BoolTest::mask btest, 1529 int target_bci, 1530 Node* test) { 1531 float prob = dynamic_branch_prediction(cnt, btest, test); 1532 // If prob is unknown, switch to static prediction 1533 if (prob != PROB_UNKNOWN) return prob; 1534 1535 prob = PROB_FAIR; // Set default value 1536 if (btest == BoolTest::eq) // Exactly equal test? 1537 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 1538 else if (btest == BoolTest::ne) 1539 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 1540 1541 // If this is a conditional test guarding a backwards branch, 1542 // assume its a loop-back edge. Make it a likely taken branch. 1543 if (target_bci < bci()) { 1544 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 1545 // Since it's an OSR, we probably have profile data, but since 1546 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 1547 // Let's make a special check here for completely zero counts. 1548 ciMethodData* methodData = method()->method_data(); 1549 if (!methodData->is_empty()) { 1550 ciProfileData* data = methodData->bci_to_data(bci()); 1551 // Only stop for truly zero counts, which mean an unknown part 1552 // of the OSR-ed method, and we want to deopt to gather more stats. 1553 // If you have ANY counts, then this loop is simply 'cold' relative 1554 // to the OSR loop. 1555 if (data == NULL || 1556 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 1557 // This is the only way to return PROB_UNKNOWN: 1558 return PROB_UNKNOWN; 1559 } 1560 } 1561 } 1562 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 1563 } 1564 1565 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 1566 return prob; 1567 } 1568 1569 // The magic constants are chosen so as to match the output of 1570 // branch_prediction() when the profile reports a zero taken count. 1571 // It is important to distinguish zero counts unambiguously, because 1572 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 1573 // very small but nonzero probabilities, which if confused with zero 1574 // counts would keep the program recompiling indefinitely. 1575 bool Parse::seems_never_taken(float prob) const { 1576 return prob < PROB_MIN; 1577 } 1578 1579 // True if the comparison seems to be the kind that will not change its 1580 // statistics from true to false. See comments in adjust_map_after_if. 1581 // This question is only asked along paths which are already 1582 // classifed as untaken (by seems_never_taken), so really, 1583 // if a path is never taken, its controlling comparison is 1584 // already acting in a stable fashion. If the comparison 1585 // seems stable, we will put an expensive uncommon trap 1586 // on the untaken path. 
1587 bool Parse::seems_stable_comparison() const { 1588 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { 1589 return false; 1590 } 1591 return true; 1592 } 1593 1594 //-------------------------------repush_if_args-------------------------------- 1595 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 1596 inline int Parse::repush_if_args() { 1597 if (PrintOpto && WizardMode) { 1598 tty->print("defending against excessive implicit null exceptions on %s @%d in ", 1599 Bytecodes::name(iter().cur_bc()), iter().cur_bci()); 1600 method()->print_name(); tty->cr(); 1601 } 1602 int bc_depth = - Bytecodes::depth(iter().cur_bc()); 1603 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); 1604 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms 1605 assert(argument(0) != NULL, "must exist"); 1606 assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); 1607 inc_sp(bc_depth); 1608 return bc_depth; 1609 } 1610 1611 //----------------------------------do_ifnull---------------------------------- 1612 void Parse::do_ifnull(BoolTest::mask btest, Node *c) { 1613 int target_bci = iter().get_dest(); 1614 1615 Block* branch_block = successor_for_bci(target_bci); 1616 Block* next_block = successor_for_bci(iter().next_bci()); 1617 1618 float cnt; 1619 float prob = branch_prediction(cnt, btest, target_bci, c); 1620 if (prob == PROB_UNKNOWN) { 1621 // (An earlier version of do_ifnull omitted this trap for OSR methods.) 1622 if (PrintOpto && Verbose) { 1623 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1624 } 1625 repush_if_args(); // to gather stats on loop 1626 // We need to mark this branch as taken so that if we recompile we will 1627 // see that it is possible. In the tiered system the interpreter doesn't 1628 // do profiling and by the time we get to the lower tier from the interpreter 1629 // the path may be cold again. Make sure it doesn't look untaken 1630 profile_taken_branch(target_bci, !ProfileInterpreter); 1631 uncommon_trap(Deoptimization::Reason_unreached, 1632 Deoptimization::Action_reinterpret, 1633 NULL, "cold"); 1634 if (C->eliminate_boxing()) { 1635 // Mark the successor blocks as parsed 1636 branch_block->next_path_num(); 1637 next_block->next_path_num(); 1638 } 1639 return; 1640 } 1641 1642 NOT_PRODUCT(explicit_null_checks_inserted++); 1643 1644 // Generate real control flow 1645 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1646 1647 // Sanity check the probability value 1648 assert(prob > 0.0f,"Bad probability in Parser"); 1649 // Need xform to put node in hash table 1650 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1651 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1652 // True branch 1653 { PreserveJVMState pjvms(this); 1654 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1655 set_control(iftrue); 1656 1657 if (stopped()) { // Path is dead? 1658 NOT_PRODUCT(explicit_null_checks_elided++); 1659 if (C->eliminate_boxing()) { 1660 // Mark the successor block as parsed 1661 branch_block->next_path_num(); 1662 } 1663 } else { // Path is live. 1664 // Update method data 1665 profile_taken_branch(target_bci); 1666 adjust_map_after_if(btest, c, prob, branch_block); 1667 if (!stopped()) { 1668 merge(target_bci); 1669 } 1670 } 1671 } 1672 1673 // False branch 1674 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1675 set_control(iffalse); 1676 1677 if (stopped()) { // Path is dead? 
1678 NOT_PRODUCT(explicit_null_checks_elided++); 1679 if (C->eliminate_boxing()) { 1680 // Mark the successor block as parsed 1681 next_block->next_path_num(); 1682 } 1683 } else { // Path is live. 1684 // Update method data 1685 profile_not_taken_branch(); 1686 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block); 1687 } 1688 } 1689 1690 //------------------------------------do_if------------------------------------ 1691 void Parse::do_if(BoolTest::mask btest, Node* c, bool new_path, Node** ctrl_taken) { 1692 int target_bci = iter().get_dest(); 1693 1694 Block* branch_block = successor_for_bci(target_bci); 1695 Block* next_block = successor_for_bci(iter().next_bci()); 1696 1697 float cnt; 1698 float prob = branch_prediction(cnt, btest, target_bci, c); 1699 float untaken_prob = 1.0 - prob; 1700 1701 if (prob == PROB_UNKNOWN) { 1702 if (PrintOpto && Verbose) { 1703 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1704 } 1705 repush_if_args(); // to gather stats on loop 1706 // We need to mark this branch as taken so that if we recompile we will 1707 // see that it is possible. In the tiered system the interpreter doesn't 1708 // do profiling and by the time we get to the lower tier from the interpreter 1709 // the path may be cold again. Make sure it doesn't look untaken 1710 profile_taken_branch(target_bci, !ProfileInterpreter); 1711 uncommon_trap(Deoptimization::Reason_unreached, 1712 Deoptimization::Action_reinterpret, 1713 NULL, "cold"); 1714 if (C->eliminate_boxing()) { 1715 // Mark the successor blocks as parsed 1716 branch_block->next_path_num(); 1717 next_block->next_path_num(); 1718 } 1719 return; 1720 } 1721 1722 // Sanity check the probability value 1723 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1724 1725 bool taken_if_true = true; 1726 // Convert BoolTest to canonical form: 1727 if (!BoolTest(btest).is_canonical()) { 1728 btest = BoolTest(btest).negate(); 1729 taken_if_true = false; 1730 // prob is NOT updated here; it remains the probability of the taken 1731 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1732 } 1733 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1734 1735 Node* tst0 = new BoolNode(c, btest); 1736 Node* tst = _gvn.transform(tst0); 1737 BoolTest::mask taken_btest = BoolTest::illegal; 1738 BoolTest::mask untaken_btest = BoolTest::illegal; 1739 1740 if (tst->is_Bool()) { 1741 // Refresh c from the transformed bool node, since it may be 1742 // simpler than the original c. Also re-canonicalize btest. 1743 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1744 // That can arise from statements like: if (x instanceof C) ... 1745 if (tst != tst0) { 1746 // Canonicalize one more time since transform can change it. 1747 btest = tst->as_Bool()->_test._test; 1748 if (!BoolTest(btest).is_canonical()) { 1749 // Reverse edges one more time... 1750 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1751 btest = tst->as_Bool()->_test._test; 1752 assert(BoolTest(btest).is_canonical(), "sanity"); 1753 taken_if_true = !taken_if_true; 1754 } 1755 c = tst->in(1); 1756 } 1757 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1758 taken_btest = taken_if_true ? btest : neg_btest; 1759 untaken_btest = taken_if_true ? neg_btest : btest; 1760 } 1761 1762 // Generate real control flow 1763 float true_prob = (taken_if_true ? 
prob : untaken_prob); 1764 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1765 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1766 Node* taken_branch = new IfTrueNode(iff); 1767 Node* untaken_branch = new IfFalseNode(iff); 1768 if (!taken_if_true) { // Finish conversion to canonical form 1769 Node* tmp = taken_branch; 1770 taken_branch = untaken_branch; 1771 untaken_branch = tmp; 1772 } 1773 1774 // Branch is taken: 1775 { PreserveJVMState pjvms(this); 1776 taken_branch = _gvn.transform(taken_branch); 1777 set_control(taken_branch); 1778 1779 if (stopped()) { 1780 if (C->eliminate_boxing() && !new_path) { 1781 // Mark the successor block as parsed (if we haven't created a new path) 1782 branch_block->next_path_num(); 1783 } 1784 } else { 1785 // Update method data 1786 profile_taken_branch(target_bci); 1787 adjust_map_after_if(taken_btest, c, prob, branch_block); 1788 if (!stopped()) { 1789 if (new_path) { 1790 // Merge by using a new path 1791 merge_new_path(target_bci); 1792 } else if (ctrl_taken != NULL) { 1793 // Don't merge but save taken branch to be wired by caller 1794 *ctrl_taken = control(); 1795 } else { 1796 merge(target_bci); 1797 } 1798 } 1799 } 1800 } 1801 1802 untaken_branch = _gvn.transform(untaken_branch); 1803 set_control(untaken_branch); 1804 1805 // Branch not taken. 1806 if (stopped() && ctrl_taken == NULL) { 1807 if (C->eliminate_boxing()) { 1808 // Mark the successor block as parsed (if caller does not re-wire control flow) 1809 next_block->next_path_num(); 1810 } 1811 } else { 1812 // Update method data 1813 profile_not_taken_branch(); 1814 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block); 1815 } 1816 } 1817 1818 void Parse::do_acmp(BoolTest::mask btest, Node* a, Node* b) { 1819 ciMethod* subst_method = ciEnv::current()->ValueBootstrapMethods_klass()->find_method(ciSymbol::isSubstitutable_name(), ciSymbol::object_object_boolean_signature()); 1820 // If current method is ValueBootstrapMethods::isSubstitutable(), 1821 // compile the acmp as a regular pointer comparison otherwise we 1822 // could call ValueBootstrapMethods::isSubstitutable() back 1823 if (ACmpOnValues == 0 || !EnableValhalla || method() == subst_method) { 1824 Node* cmp = CmpP(a, b); 1825 cmp = optimize_cmp_with_klass(cmp); 1826 do_if(btest, cmp); 1827 return; 1828 } 1829 1830 if (ACmpOnValues == 3) { 1831 // Substituability test 1832 if (a->is_ValueType()) { 1833 inc_sp(2); 1834 a = a->as_ValueType()->allocate(this, true)->get_oop(); 1835 dec_sp(2); 1836 } 1837 if (b->is_ValueType()) { 1838 inc_sp(2); 1839 b = b->as_ValueType()->allocate(this, true)->get_oop(); 1840 dec_sp(2); 1841 } 1842 1843 const TypeOopPtr* ta = _gvn.type(a)->isa_oopptr(); 1844 const TypeOopPtr* tb = _gvn.type(b)->isa_oopptr(); 1845 1846 if (ta == NULL || !ta->can_be_value_type() || 1847 tb == NULL || !tb->can_be_value_type()) { 1848 Node* cmp = CmpP(a, b); 1849 cmp = optimize_cmp_with_klass(cmp); 1850 do_if(btest, cmp); 1851 return; 1852 } 1853 1854 Node* cmp = CmpP(a, b); 1855 cmp = optimize_cmp_with_klass(cmp); 1856 Node* eq_region = NULL; 1857 if (btest == BoolTest::eq) { 1858 do_if(btest, cmp, true); 1859 if (stopped()) { 1860 return; 1861 } 1862 } else { 1863 assert(btest == BoolTest::ne, "only eq or ne"); 1864 Node* is_not_equal = NULL; 1865 eq_region = new RegionNode(3); 1866 { 1867 PreserveJVMState pjvms(this); 1868 do_if(btest, cmp, false, &is_not_equal); 1869 if (!stopped()) { 1870 eq_region->init_req(1, control()); 1871 } 1872 } 1873 if (is_not_equal == NULL || 
is_not_equal->is_top()) { 1874 record_for_igvn(eq_region); 1875 set_control(_gvn.transform(eq_region)); 1876 return; 1877 } 1878 set_control(is_not_equal); 1879 } 1880 // Pointers not equal, check for values 1881 Node* ne_region = new RegionNode(6); 1882 inc_sp(2); 1883 Node* null_ctl = top(); 1884 Node* not_null_a = null_check_oop(a, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); 1885 dec_sp(2); 1886 ne_region->init_req(1, null_ctl); 1887 if (stopped()) { 1888 record_for_igvn(ne_region); 1889 set_control(_gvn.transform(ne_region)); 1890 if (btest == BoolTest::ne) { 1891 { 1892 PreserveJVMState pjvms(this); 1893 int target_bci = iter().get_dest(); 1894 merge(target_bci); 1895 } 1896 record_for_igvn(eq_region); 1897 set_control(_gvn.transform(eq_region)); 1898 } 1899 return; 1900 } 1901 1902 Node* is_value = is_always_locked(not_null_a); 1903 Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); 1904 Node* is_value_cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); 1905 Node* is_value_bol = _gvn.transform(new BoolNode(is_value_cmp, BoolTest::ne)); 1906 IfNode* is_value_iff = create_and_map_if(control(), is_value_bol, PROB_FAIR, COUNT_UNKNOWN); 1907 Node* not_value = _gvn.transform(new IfTrueNode(is_value_iff)); 1908 set_control(_gvn.transform(new IfFalseNode(is_value_iff))); 1909 ne_region->init_req(2, not_value); 1910 1911 // One of the two pointers refers to a value, check if both are of 1912 // the same class 1913 inc_sp(2); 1914 null_ctl = top(); 1915 Node* not_null_b = null_check_oop(b, &null_ctl, !too_many_traps(Deoptimization::Reason_null_check), false, false); 1916 dec_sp(2); 1917 ne_region->init_req(3, null_ctl); 1918 if (stopped()) { 1919 record_for_igvn(ne_region); 1920 set_control(_gvn.transform(ne_region)); 1921 if (btest == BoolTest::ne) { 1922 { 1923 PreserveJVMState pjvms(this); 1924 int target_bci = iter().get_dest(); 1925 merge(target_bci); 1926 } 1927 record_for_igvn(eq_region); 1928 set_control(_gvn.transform(eq_region)); 1929 } 1930 return; 1931 } 1932 Node* kls_a = load_object_klass(not_null_a); 1933 Node* kls_b = load_object_klass(not_null_b); 1934 Node* kls_cmp = CmpP(kls_a, kls_b); 1935 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne)); 1936 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN); 1937 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff)); 1938 set_control(_gvn.transform(new IfFalseNode(kls_iff))); 1939 ne_region->init_req(4, kls_ne); 1940 1941 if (stopped()) { 1942 record_for_igvn(ne_region); 1943 set_control(_gvn.transform(ne_region)); 1944 if (btest == BoolTest::ne) { 1945 { 1946 PreserveJVMState pjvms(this); 1947 int target_bci = iter().get_dest(); 1948 merge(target_bci); 1949 } 1950 record_for_igvn(eq_region); 1951 set_control(_gvn.transform(eq_region)); 1952 } 1953 return; 1954 } 1955 // Both are values of the same class, so we need to perform a 1956 // substitutability test. Delegate to 1957 // ValueBootstrapMethods::isSubstitutable().
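// Roughly, the graph built below corresponds to the following pseudo-code
// (a sketch only; "isValueObject" stands for the mark-word check emitted
// above and is not an actual method):
//
//   if (a != b && a != null && b != null &&
//       isValueObject(a) && a.getClass() == b.getClass()) {
//     result = ValueBootstrapMethods.isSubstitutable(a, b);
//   }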
1958 1959 Node* ne_io_phi = PhiNode::make(ne_region, i_o()); 1960 Node* mem = reset_memory(); 1961 Node* ne_mem_phi = PhiNode::make(ne_region, mem); 1962 1963 Node* eq_io_phi = NULL; 1964 Node* eq_mem_phi = NULL; 1965 if (eq_region != NULL) { 1966 eq_io_phi = PhiNode::make(eq_region, i_o()); 1967 eq_mem_phi = PhiNode::make(eq_region, mem); 1968 } 1969 1970 set_all_memory(mem); 1971 1972 kill_dead_locals(); 1973 CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method, bci()); 1974 call->set_override_symbolic_info(true); 1975 call->init_req(TypeFunc::Parms, not_null_a); 1976 call->init_req(TypeFunc::Parms+1, not_null_b); 1977 inc_sp(2); 1978 set_edges_for_java_call(call, false, false); 1979 Node* ret = set_results_for_java_call(call, false, true); 1980 dec_sp(2); 1981 1982 // Test the return value of ValueBootstrapMethods::isSubstitutable() 1983 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1))); 1984 if (btest == BoolTest::eq) { 1985 do_if(btest, subst_cmp); 1986 } else { 1987 assert(btest == BoolTest::ne, "only eq or ne"); 1988 Node* is_not_equal = NULL; 1989 { 1990 PreserveJVMState pjvms(this); 1991 do_if(btest, subst_cmp, false, &is_not_equal); 1992 if (!stopped()) { 1993 eq_region->init_req(2, control()); 1994 eq_io_phi->init_req(2, i_o()); 1995 eq_mem_phi->init_req(2, reset_memory()); 1996 } 1997 } 1998 set_control(is_not_equal); 1999 } 2000 ne_region->init_req(5, control()); 2001 ne_io_phi->init_req(5, i_o()); 2002 ne_mem_phi->init_req(5, reset_memory()); 2003 2004 record_for_igvn(ne_region); 2005 set_control(_gvn.transform(ne_region)); 2006 set_i_o(_gvn.transform(ne_io_phi)); 2007 set_all_memory(_gvn.transform(ne_mem_phi)); 2008 2009 if (btest == BoolTest::ne) { 2010 { 2011 PreserveJVMState pjvms(this); 2012 int target_bci = iter().get_dest(); 2013 merge(target_bci); 2014 } 2015 2016 record_for_igvn(eq_region); 2017 set_control(_gvn.transform(eq_region)); 2018 set_i_o(_gvn.transform(eq_io_phi)); 2019 set_all_memory(_gvn.transform(eq_mem_phi)); 2020 } 2021 2022 return; 2023 } 2024 // In the case where both operands might be value types, we need to 2025 // use the new acmp implementation. Otherwise, i.e., if one operand 2026 // is not a value type, we can use the old acmp implementation.
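// As used just below, optimize_acmp() is expected to return a regular
// pointer compare when it can statically prove that at least one operand
// is not a value type, and NULL when a runtime check is still required.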
2027 Node* cmp = C->optimize_acmp(&_gvn, a, b); 2028 if (cmp != NULL) { 2029 // Use optimized/old acmp 2030 cmp = optimize_cmp_with_klass(_gvn.transform(cmp)); 2031 do_if(btest, cmp); 2032 return; 2033 } 2034 2035 Node* ctrl = NULL; 2036 bool safe_for_replace = true; 2037 if (ACmpOnValues != 1) { 2038 // Emit old acmp before new acmp for quick a != b check 2039 cmp = CmpP(a, b); 2040 cmp = optimize_cmp_with_klass(_gvn.transform(cmp)); 2041 if (btest == BoolTest::ne) { 2042 do_if(btest, cmp, true); 2043 if (stopped()) { 2044 return; // Never equal 2045 } 2046 } else if (btest == BoolTest::eq) { 2047 Node* is_equal = NULL; 2048 { 2049 PreserveJVMState pjvms(this); 2050 do_if(btest, cmp, false, &is_equal); 2051 if (!stopped()) { 2052 // Not equal, skip valuetype check 2053 ctrl = new RegionNode(3); 2054 ctrl->init_req(1, control()); 2055 _gvn.set_type(ctrl, Type::CONTROL); 2056 record_for_igvn(ctrl); 2057 safe_for_replace = false; 2058 } 2059 } 2060 if (is_equal == NULL) { 2061 assert(ctrl != NULL, "no control left"); 2062 set_control(_gvn.transform(ctrl)); 2063 return; // Never equal 2064 } 2065 set_control(is_equal); 2066 } 2067 } 2068 2069 // Null check operand before loading the is_value bit 2070 bool speculate = false; 2071 if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(b))) { 2072 // Operand 'b' is never null, swap operands to avoid null check 2073 swap(a, b); 2074 } else if (!too_many_traps(Deoptimization::Reason_speculate_null_check)) { 2075 // Speculate on non-nullness of one operand 2076 if (!_gvn.type(a)->speculative_maybe_null()) { 2077 speculate = true; 2078 } else if (!_gvn.type(b)->speculative_maybe_null()) { 2079 speculate = true; 2080 swap(a, b); 2081 } 2082 } 2083 inc_sp(2); 2084 Node* null_ctl = top(); 2085 Node* not_null_a = null_check_oop(a, &null_ctl, speculate, safe_for_replace, speculate); 2086 assert(!stopped(), "operand is always null"); 2087 dec_sp(2); 2088 Node* region = new RegionNode(2); 2089 Node* is_value = new PhiNode(region, TypeX_X); 2090 if (null_ctl != top()) { 2091 assert(!speculate, "should never be null"); 2092 region->add_req(null_ctl); 2093 is_value->add_req(_gvn.MakeConX(0)); 2094 } 2095 2096 Node* value_mask = _gvn.MakeConX(markOopDesc::always_locked_pattern); 2097 if (ACmpOnValues == 1) { 2098 Node* mark_addr = basic_plus_adr(not_null_a, oopDesc::mark_offset_in_bytes()); 2099 Node* mark = make_load(NULL, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered); 2100 Node* not_mark = _gvn.transform(new XorXNode(mark, _gvn.MakeConX(-1))); 2101 Node* andn = _gvn.transform(new AndXNode(not_mark, value_mask)); 2102 Node* neg_if_value = _gvn.transform(new SubXNode(andn, _gvn.MakeConX(1))); 2103 is_value->init_req(1, _gvn.transform(new RShiftXNode(neg_if_value, _gvn.intcon(63)))); 2104 } else { 2105 is_value->init_req(1, is_always_locked(not_null_a)); 2106 } 2107 region->init_req(1, control()); 2108 2109 set_control(_gvn.transform(region)); 2110 is_value = _gvn.transform(is_value); 2111 2112 if (ACmpOnValues == 1) { 2113 // Perturbe oop if operand is a value type to make comparison fail 2114 Node* pert = _gvn.transform(new AddPNode(a, a, is_value)); 2115 cmp = _gvn.transform(new CmpPNode(pert, b)); 2116 } else { 2117 // Check for a value type because we already know that operands are equal 2118 cmp = _gvn.transform(new CmpXNode(is_value, value_mask)); 2119 btest = (btest == BoolTest::eq) ? 
BoolTest::ne : BoolTest::eq; 2120 } 2121 cmp = optimize_cmp_with_klass(cmp); 2122 do_if(btest, cmp); 2123 2124 if (ctrl != NULL) { 2125 ctrl->init_req(2, control()); 2126 set_control(_gvn.transform(ctrl)); 2127 } 2128 } 2129 2130 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 2131 // Don't want to speculate on uncommon traps when running with -Xcomp 2132 if (!UseInterpreter) { 2133 return false; 2134 } 2135 return (seems_never_taken(prob) && seems_stable_comparison()); 2136 } 2137 2138 void Parse::maybe_add_predicate_after_if(Block* path) { 2139 if (path->is_SEL_head() && path->preds_parsed() == 0) { 2140 // Add predicates at bci of if dominating the loop so traps can be 2141 // recorded on the if's profile data 2142 int bc_depth = repush_if_args(); 2143 add_predicate(); 2144 dec_sp(bc_depth); 2145 path->set_has_predicates(); 2146 } 2147 } 2148 2149 2150 //----------------------------adjust_map_after_if------------------------------ 2151 // Adjust the JVM state to reflect the result of taking this path. 2152 // Basically, it means inspecting the CmpNode controlling this 2153 // branch, seeing how it constrains a tested value, and then 2154 // deciding if it's worth our while to encode this constraint 2155 // as graph nodes in the current abstract interpretation map. 2156 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) { 2157 if (!c->is_Cmp()) { 2158 maybe_add_predicate_after_if(path); 2159 return; 2160 } 2161 2162 if (stopped() || btest == BoolTest::illegal) { 2163 return; // nothing to do 2164 } 2165 2166 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 2167 2168 if (path_is_suitable_for_uncommon_trap(prob)) { 2169 repush_if_args(); 2170 uncommon_trap(Deoptimization::Reason_unstable_if, 2171 Deoptimization::Action_reinterpret, 2172 NULL, 2173 (is_fallthrough ? "taken always" : "taken never")); 2174 return; 2175 } 2176 2177 Node* val = c->in(1); 2178 Node* con = c->in(2); 2179 const Type* tcon = _gvn.type(con); 2180 const Type* tval = _gvn.type(val); 2181 bool have_con = tcon->singleton(); 2182 if (tval->singleton()) { 2183 if (!have_con) { 2184 // Swap, so constant is in con. 2185 con = val; 2186 tcon = tval; 2187 val = c->in(2); 2188 tval = _gvn.type(val); 2189 btest = BoolTest(btest).commute(); 2190 have_con = true; 2191 } else { 2192 // Do we have two constants? Then leave well enough alone. 2193 have_con = false; 2194 } 2195 } 2196 if (!have_con) { // remaining adjustments need a con 2197 maybe_add_predicate_after_if(path); 2198 return; 2199 } 2200 2201 sharpen_type_after_if(btest, con, tcon, val, tval); 2202 maybe_add_predicate_after_if(path); 2203 } 2204 2205 2206 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 2207 Node* ldk; 2208 if (n->is_DecodeNKlass()) { 2209 if (n->in(1)->Opcode() != Op_LoadNKlass) { 2210 return NULL; 2211 } else { 2212 ldk = n->in(1); 2213 } 2214 } else if (n->Opcode() != Op_LoadKlass) { 2215 return NULL; 2216 } else { 2217 ldk = n; 2218 } 2219 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 2220 2221 Node* adr = ldk->in(MemNode::Address); 2222 intptr_t off = 0; 2223 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 2224 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 2225 return NULL; 2226 const TypePtr* tp = gvn->type(obj)->is_ptr(); 2227 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 
2228 return NULL; 2229 2230 return obj; 2231 } 2232 2233 void Parse::sharpen_type_after_if(BoolTest::mask btest, 2234 Node* con, const Type* tcon, 2235 Node* val, const Type* tval) { 2236 // Look for opportunities to sharpen the type of a node 2237 // whose klass is compared with a constant klass. 2238 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 2239 Node* obj = extract_obj_from_klass_load(&_gvn, val); 2240 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 2241 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 2242 // Found: 2243 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 2244 // or the narrowOop equivalent. 2245 const Type* obj_type = _gvn.type(obj); 2246 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 2247 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 2248 tboth->higher_equal(obj_type)) { 2249 // obj has to be of the exact type Foo if the CmpP succeeds. 2250 int obj_in_map = map()->find_edge(obj); 2251 JVMState* jvms = this->jvms(); 2252 if (obj_in_map >= 0 && 2253 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { 2254 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); 2255 const Type* tcc = ccast->as_Type()->type(); 2256 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); 2257 // Delay transform() call to allow recovery of pre-cast value 2258 // at the control merge. 2259 _gvn.set_type_bottom(ccast); 2260 record_for_igvn(ccast); 2261 // Here's the payoff. 2262 replace_in_map(obj, ccast); 2263 } 2264 } 2265 } 2266 } 2267 2268 int val_in_map = map()->find_edge(val); 2269 if (val_in_map < 0) return; // replace_in_map would be useless 2270 { 2271 JVMState* jvms = this->jvms(); 2272 if (!(jvms->is_loc(val_in_map) || 2273 jvms->is_stk(val_in_map))) 2274 return; // again, it would be useless 2275 } 2276 2277 // Check for a comparison to a constant, and "know" that the compared 2278 // value is constrained on this path. 2279 assert(tcon->singleton(), ""); 2280 ConstraintCastNode* ccast = NULL; 2281 Node* cast = NULL; 2282 2283 switch (btest) { 2284 case BoolTest::eq: // Constant test? 2285 { 2286 const Type* tboth = tcon->join_speculative(tval); 2287 if (tboth == tval) break; // Nothing to gain. 2288 if (tcon->isa_int()) { 2289 ccast = new CastIINode(val, tboth); 2290 } else if (tcon == TypePtr::NULL_PTR) { 2291 // Cast to null, but keep the pointer identity temporarily live. 2292 ccast = new CastPPNode(val, tboth); 2293 } else { 2294 const TypeF* tf = tcon->isa_float_constant(); 2295 const TypeD* td = tcon->isa_double_constant(); 2296 // Exclude tests vs float/double 0 as these could be 2297 // either +0 or -0. Just because you are equal to +0 2298 // doesn't mean you ARE +0! 2299 // Note, following code also replaces Long and Oop values. 2300 if ((!tf || tf->_f != 0.0) && 2301 (!td || td->_d != 0.0)) 2302 cast = con; // Replace non-constant val by con. 2303 } 2304 } 2305 break; 2306 2307 case BoolTest::ne: 2308 if (tcon == TypePtr::NULL_PTR) { 2309 cast = cast_not_null(val, false); 2310 } 2311 break; 2312 2313 default: 2314 // (At this point we could record int range types with CastII.) 2315 break; 2316 } 2317 2318 if (ccast != NULL) { 2319 const Type* tcc = ccast->as_Type()->type(); 2320 assert(tcc != tval && tcc->higher_equal(tval), "must improve"); 2321 // Delay transform() call to allow recovery of pre-cast value 2322 // at the control merge. 
2323 ccast->set_req(0, control()); 2324 _gvn.set_type_bottom(ccast); 2325 record_for_igvn(ccast); 2326 cast = ccast; 2327 } 2328 2329 if (cast != NULL) { // Here's the payoff. 2330 replace_in_map(val, cast); 2331 } 2332 } 2333 2334 /** 2335 * Use speculative type to optimize CmpP node: if comparison is 2336 * against the low level class, cast the object to the speculative 2337 * type if any. CmpP should then go away. 2338 * 2339 * @param c expected CmpP node 2340 * @return result of CmpP on object casted to speculative type 2341 * 2342 */ 2343 Node* Parse::optimize_cmp_with_klass(Node* c) { 2344 // If this is transformed by the _gvn to a comparison with the low 2345 // level klass then we may be able to use speculation 2346 if (c->Opcode() == Op_CmpP && 2347 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && 2348 c->in(2)->is_Con()) { 2349 Node* load_klass = NULL; 2350 Node* decode = NULL; 2351 if (c->in(1)->Opcode() == Op_DecodeNKlass) { 2352 decode = c->in(1); 2353 load_klass = c->in(1)->in(1); 2354 } else { 2355 load_klass = c->in(1); 2356 } 2357 if (load_klass->in(2)->is_AddP()) { 2358 Node* addp = load_klass->in(2); 2359 Node* obj = addp->in(AddPNode::Address); 2360 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 2361 if (obj_type->speculative_type_not_null() != NULL) { 2362 ciKlass* k = obj_type->speculative_type(); 2363 inc_sp(2); 2364 obj = maybe_cast_profiled_obj(obj, k); 2365 dec_sp(2); 2366 if (obj->is_ValueType()) { 2367 assert(obj->as_ValueType()->is_allocated(&_gvn), "must be allocated"); 2368 obj = obj->as_ValueType()->get_oop(); 2369 } 2370 // Make the CmpP use the casted obj 2371 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); 2372 load_klass = load_klass->clone(); 2373 load_klass->set_req(2, addp); 2374 load_klass = _gvn.transform(load_klass); 2375 if (decode != NULL) { 2376 decode = decode->clone(); 2377 decode->set_req(1, load_klass); 2378 load_klass = _gvn.transform(decode); 2379 } 2380 c = c->clone(); 2381 c->set_req(1, load_klass); 2382 c = _gvn.transform(c); 2383 } 2384 } 2385 } 2386 return c; 2387 } 2388 2389 //------------------------------do_one_bytecode-------------------------------- 2390 // Parse this bytecode, and alter the Parsers JVM->Node mapping 2391 void Parse::do_one_bytecode() { 2392 Node *a, *b, *c, *d; // Handy temps 2393 BoolTest::mask btest; 2394 int i; 2395 2396 assert(!has_exceptions(), "bytecode entry state must be clear of throws"); 2397 2398 if (C->check_node_count(NodeLimitFudgeFactor * 5, 2399 "out of nodes parsing method")) { 2400 return; 2401 } 2402 2403 #ifdef ASSERT 2404 // for setting breakpoints 2405 if (TraceOptoParse) { 2406 tty->print(" @"); 2407 dump_bci(bci()); 2408 tty->cr(); 2409 } 2410 #endif 2411 2412 switch (bc()) { 2413 case Bytecodes::_nop: 2414 // do nothing 2415 break; 2416 case Bytecodes::_lconst_0: 2417 push_pair(longcon(0)); 2418 break; 2419 2420 case Bytecodes::_lconst_1: 2421 push_pair(longcon(1)); 2422 break; 2423 2424 case Bytecodes::_fconst_0: 2425 push(zerocon(T_FLOAT)); 2426 break; 2427 2428 case Bytecodes::_fconst_1: 2429 push(makecon(TypeF::ONE)); 2430 break; 2431 2432 case Bytecodes::_fconst_2: 2433 push(makecon(TypeF::make(2.0f))); 2434 break; 2435 2436 case Bytecodes::_dconst_0: 2437 push_pair(zerocon(T_DOUBLE)); 2438 break; 2439 2440 case Bytecodes::_dconst_1: 2441 push_pair(makecon(TypeD::ONE)); 2442 break; 2443 2444 case Bytecodes::_iconst_m1:push(intcon(-1)); break; 2445 case Bytecodes::_iconst_0: push(intcon( 0)); break; 2446 case Bytecodes::_iconst_1: 
push(intcon( 1)); break; 2447 case Bytecodes::_iconst_2: push(intcon( 2)); break; 2448 case Bytecodes::_iconst_3: push(intcon( 3)); break; 2449 case Bytecodes::_iconst_4: push(intcon( 4)); break; 2450 case Bytecodes::_iconst_5: push(intcon( 5)); break; 2451 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break; 2452 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break; 2453 case Bytecodes::_aconst_null: push(null()); break; 2454 case Bytecodes::_ldc: 2455 case Bytecodes::_ldc_w: 2456 case Bytecodes::_ldc2_w: 2457 // If the constant is unresolved, run this BC once in the interpreter. 2458 { 2459 ciConstant constant = iter().get_constant(); 2460 if (!constant.is_valid() || 2461 (constant.basic_type() == T_OBJECT && 2462 !constant.as_object()->is_loaded())) { 2463 int index = iter().get_constant_pool_index(); 2464 constantTag tag = iter().get_constant_pool_tag(index); 2465 uncommon_trap(Deoptimization::make_trap_request 2466 (Deoptimization::Reason_unloaded, 2467 Deoptimization::Action_reinterpret, 2468 index), 2469 NULL, tag.internal_name()); 2470 break; 2471 } 2472 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(), 2473 "must be java_mirror of klass"); 2474 const Type* con_type = Type::make_from_constant(constant); 2475 if (con_type != NULL) { 2476 push_node(con_type->basic_type(), makecon(con_type)); 2477 } 2478 } 2479 2480 break; 2481 2482 case Bytecodes::_aload_0: 2483 push( local(0) ); 2484 break; 2485 case Bytecodes::_aload_1: 2486 push( local(1) ); 2487 break; 2488 case Bytecodes::_aload_2: 2489 push( local(2) ); 2490 break; 2491 case Bytecodes::_aload_3: 2492 push( local(3) ); 2493 break; 2494 case Bytecodes::_aload: 2495 push( local(iter().get_index()) ); 2496 break; 2497 2498 case Bytecodes::_fload_0: 2499 case Bytecodes::_iload_0: 2500 push( local(0) ); 2501 break; 2502 case Bytecodes::_fload_1: 2503 case Bytecodes::_iload_1: 2504 push( local(1) ); 2505 break; 2506 case Bytecodes::_fload_2: 2507 case Bytecodes::_iload_2: 2508 push( local(2) ); 2509 break; 2510 case Bytecodes::_fload_3: 2511 case Bytecodes::_iload_3: 2512 push( local(3) ); 2513 break; 2514 case Bytecodes::_fload: 2515 case Bytecodes::_iload: 2516 push( local(iter().get_index()) ); 2517 break; 2518 case Bytecodes::_lload_0: 2519 push_pair_local( 0 ); 2520 break; 2521 case Bytecodes::_lload_1: 2522 push_pair_local( 1 ); 2523 break; 2524 case Bytecodes::_lload_2: 2525 push_pair_local( 2 ); 2526 break; 2527 case Bytecodes::_lload_3: 2528 push_pair_local( 3 ); 2529 break; 2530 case Bytecodes::_lload: 2531 push_pair_local( iter().get_index() ); 2532 break; 2533 2534 case Bytecodes::_dload_0: 2535 push_pair_local(0); 2536 break; 2537 case Bytecodes::_dload_1: 2538 push_pair_local(1); 2539 break; 2540 case Bytecodes::_dload_2: 2541 push_pair_local(2); 2542 break; 2543 case Bytecodes::_dload_3: 2544 push_pair_local(3); 2545 break; 2546 case Bytecodes::_dload: 2547 push_pair_local(iter().get_index()); 2548 break; 2549 case Bytecodes::_fstore_0: 2550 case Bytecodes::_istore_0: 2551 case Bytecodes::_astore_0: 2552 set_local( 0, pop() ); 2553 break; 2554 case Bytecodes::_fstore_1: 2555 case Bytecodes::_istore_1: 2556 case Bytecodes::_astore_1: 2557 set_local( 1, pop() ); 2558 break; 2559 case Bytecodes::_fstore_2: 2560 case Bytecodes::_istore_2: 2561 case Bytecodes::_astore_2: 2562 set_local( 2, pop() ); 2563 break; 2564 case Bytecodes::_fstore_3: 2565 case Bytecodes::_istore_3: 2566 case Bytecodes::_astore_3: 2567 set_local( 3, pop() ); 2568 break; 2569 case 
Bytecodes::_fstore: 2570 case Bytecodes::_istore: 2571 case Bytecodes::_astore: 2572 set_local( iter().get_index(), pop() ); 2573 break; 2574 // long stores 2575 case Bytecodes::_lstore_0: 2576 set_pair_local( 0, pop_pair() ); 2577 break; 2578 case Bytecodes::_lstore_1: 2579 set_pair_local( 1, pop_pair() ); 2580 break; 2581 case Bytecodes::_lstore_2: 2582 set_pair_local( 2, pop_pair() ); 2583 break; 2584 case Bytecodes::_lstore_3: 2585 set_pair_local( 3, pop_pair() ); 2586 break; 2587 case Bytecodes::_lstore: 2588 set_pair_local( iter().get_index(), pop_pair() ); 2589 break; 2590 2591 // double stores 2592 case Bytecodes::_dstore_0: 2593 set_pair_local( 0, dstore_rounding(pop_pair()) ); 2594 break; 2595 case Bytecodes::_dstore_1: 2596 set_pair_local( 1, dstore_rounding(pop_pair()) ); 2597 break; 2598 case Bytecodes::_dstore_2: 2599 set_pair_local( 2, dstore_rounding(pop_pair()) ); 2600 break; 2601 case Bytecodes::_dstore_3: 2602 set_pair_local( 3, dstore_rounding(pop_pair()) ); 2603 break; 2604 case Bytecodes::_dstore: 2605 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) ); 2606 break; 2607 2608 case Bytecodes::_pop: dec_sp(1); break; 2609 case Bytecodes::_pop2: dec_sp(2); break; 2610 case Bytecodes::_swap: 2611 a = pop(); 2612 b = pop(); 2613 push(a); 2614 push(b); 2615 break; 2616 case Bytecodes::_dup: 2617 a = pop(); 2618 push(a); 2619 push(a); 2620 break; 2621 case Bytecodes::_dup_x1: 2622 a = pop(); 2623 b = pop(); 2624 push( a ); 2625 push( b ); 2626 push( a ); 2627 break; 2628 case Bytecodes::_dup_x2: 2629 a = pop(); 2630 b = pop(); 2631 c = pop(); 2632 push( a ); 2633 push( c ); 2634 push( b ); 2635 push( a ); 2636 break; 2637 case Bytecodes::_dup2: 2638 a = pop(); 2639 b = pop(); 2640 push( b ); 2641 push( a ); 2642 push( b ); 2643 push( a ); 2644 break; 2645 2646 case Bytecodes::_dup2_x1: 2647 // before: .. c, b, a 2648 // after: .. b, a, c, b, a 2649 // not tested 2650 a = pop(); 2651 b = pop(); 2652 c = pop(); 2653 push( b ); 2654 push( a ); 2655 push( c ); 2656 push( b ); 2657 push( a ); 2658 break; 2659 case Bytecodes::_dup2_x2: 2660 // before: .. d, c, b, a 2661 // after: .. b, a, d, c, b, a 2662 // not tested 2663 a = pop(); 2664 b = pop(); 2665 c = pop(); 2666 d = pop(); 2667 push( b ); 2668 push( a ); 2669 push( d ); 2670 push( c ); 2671 push( b ); 2672 push( a ); 2673 break; 2674 2675 case Bytecodes::_arraylength: { 2676 // Must do null-check with value on expression stack 2677 Node *ary = null_check(peek(), T_ARRAY); 2678 // Compile-time detect of null-exception? 
2679 if (stopped()) return; 2680 a = pop(); 2681 push(load_array_length(a)); 2682 break; 2683 } 2684 2685 case Bytecodes::_baload: array_load(T_BYTE); break; 2686 case Bytecodes::_caload: array_load(T_CHAR); break; 2687 case Bytecodes::_iaload: array_load(T_INT); break; 2688 case Bytecodes::_saload: array_load(T_SHORT); break; 2689 case Bytecodes::_faload: array_load(T_FLOAT); break; 2690 case Bytecodes::_aaload: array_load(T_OBJECT); break; 2691 case Bytecodes::_laload: array_load(T_LONG); break; 2692 case Bytecodes::_daload: array_load(T_DOUBLE); break; 2693 case Bytecodes::_bastore: array_store(T_BYTE); break; 2694 case Bytecodes::_castore: array_store(T_CHAR); break; 2695 case Bytecodes::_iastore: array_store(T_INT); break; 2696 case Bytecodes::_sastore: array_store(T_SHORT); break; 2697 case Bytecodes::_fastore: array_store(T_FLOAT); break; 2698 case Bytecodes::_aastore: array_store(T_OBJECT); break; 2699 case Bytecodes::_lastore: array_store(T_LONG); break; 2700 case Bytecodes::_dastore: array_store(T_DOUBLE); break; 2701 2702 case Bytecodes::_getfield: 2703 do_getfield(); 2704 break; 2705 2706 case Bytecodes::_getstatic: 2707 do_getstatic(); 2708 break; 2709 2710 case Bytecodes::_putfield: 2711 do_putfield(); 2712 break; 2713 2714 case Bytecodes::_putstatic: 2715 do_putstatic(); 2716 break; 2717 2718 case Bytecodes::_irem: 2719 do_irem(); 2720 break; 2721 case Bytecodes::_idiv: 2722 // Must keep both values on the expression-stack during null-check 2723 zero_check_int(peek()); 2724 // Compile-time detect of null-exception? 2725 if (stopped()) return; 2726 b = pop(); 2727 a = pop(); 2728 push( _gvn.transform( new DivINode(control(),a,b) ) ); 2729 break; 2730 case Bytecodes::_imul: 2731 b = pop(); a = pop(); 2732 push( _gvn.transform( new MulINode(a,b) ) ); 2733 break; 2734 case Bytecodes::_iadd: 2735 b = pop(); a = pop(); 2736 push( _gvn.transform( new AddINode(a,b) ) ); 2737 break; 2738 case Bytecodes::_ineg: 2739 a = pop(); 2740 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) ); 2741 break; 2742 case Bytecodes::_isub: 2743 b = pop(); a = pop(); 2744 push( _gvn.transform( new SubINode(a,b) ) ); 2745 break; 2746 case Bytecodes::_iand: 2747 b = pop(); a = pop(); 2748 push( _gvn.transform( new AndINode(a,b) ) ); 2749 break; 2750 case Bytecodes::_ior: 2751 b = pop(); a = pop(); 2752 push( _gvn.transform( new OrINode(a,b) ) ); 2753 break; 2754 case Bytecodes::_ixor: 2755 b = pop(); a = pop(); 2756 push( _gvn.transform( new XorINode(a,b) ) ); 2757 break; 2758 case Bytecodes::_ishl: 2759 b = pop(); a = pop(); 2760 push( _gvn.transform( new LShiftINode(a,b) ) ); 2761 break; 2762 case Bytecodes::_ishr: 2763 b = pop(); a = pop(); 2764 push( _gvn.transform( new RShiftINode(a,b) ) ); 2765 break; 2766 case Bytecodes::_iushr: 2767 b = pop(); a = pop(); 2768 push( _gvn.transform( new URShiftINode(a,b) ) ); 2769 break; 2770 2771 case Bytecodes::_fneg: 2772 a = pop(); 2773 b = _gvn.transform(new NegFNode (a)); 2774 push(b); 2775 break; 2776 2777 case Bytecodes::_fsub: 2778 b = pop(); 2779 a = pop(); 2780 c = _gvn.transform( new SubFNode(a,b) ); 2781 d = precision_rounding(c); 2782 push( d ); 2783 break; 2784 2785 case Bytecodes::_fadd: 2786 b = pop(); 2787 a = pop(); 2788 c = _gvn.transform( new AddFNode(a,b) ); 2789 d = precision_rounding(c); 2790 push( d ); 2791 break; 2792 2793 case Bytecodes::_fmul: 2794 b = pop(); 2795 a = pop(); 2796 c = _gvn.transform( new MulFNode(a,b) ); 2797 d = precision_rounding(c); 2798 push( d ); 2799 break; 2800 2801 case Bytecodes::_fdiv: 2802 b = pop(); 2803 a 
= pop(); 2804 c = _gvn.transform( new DivFNode(0,a,b) ); 2805 d = precision_rounding(c); 2806 push( d ); 2807 break; 2808 2809 case Bytecodes::_frem: 2810 if (Matcher::has_match_rule(Op_ModF)) { 2811 // Generate a ModF node. 2812 b = pop(); 2813 a = pop(); 2814 c = _gvn.transform( new ModFNode(0,a,b) ); 2815 d = precision_rounding(c); 2816 push( d ); 2817 } 2818 else { 2819 // Generate a call. 2820 modf(); 2821 } 2822 break; 2823 2824 case Bytecodes::_fcmpl: 2825 b = pop(); 2826 a = pop(); 2827 c = _gvn.transform( new CmpF3Node( a, b)); 2828 push(c); 2829 break; 2830 case Bytecodes::_fcmpg: 2831 b = pop(); 2832 a = pop(); 2833 2834 // Same as fcmpl but need to flip the unordered case. Swap the inputs, 2835 // which negates the result sign except for unordered. Flip the unordered 2836 // as well by using CmpF3 which implements unordered-lesser instead of 2837 // unordered-greater semantics. Finally, commute the result bits. Result 2838 // is same as using a CmpF3Greater except we did it with CmpF3 alone. 2839 c = _gvn.transform( new CmpF3Node( b, a)); 2840 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2841 push(c); 2842 break; 2843 2844 case Bytecodes::_f2i: 2845 a = pop(); 2846 push(_gvn.transform(new ConvF2INode(a))); 2847 break; 2848 2849 case Bytecodes::_d2i: 2850 a = pop_pair(); 2851 b = _gvn.transform(new ConvD2INode(a)); 2852 push( b ); 2853 break; 2854 2855 case Bytecodes::_f2d: 2856 a = pop(); 2857 b = _gvn.transform( new ConvF2DNode(a)); 2858 push_pair( b ); 2859 break; 2860 2861 case Bytecodes::_d2f: 2862 a = pop_pair(); 2863 b = _gvn.transform( new ConvD2FNode(a)); 2864 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed) 2865 //b = _gvn.transform(new RoundFloatNode(0, b) ); 2866 push( b ); 2867 break; 2868 2869 case Bytecodes::_l2f: 2870 if (Matcher::convL2FSupported()) { 2871 a = pop_pair(); 2872 b = _gvn.transform( new ConvL2FNode(a)); 2873 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits. 2874 // Rather than storing the result into an FP register then pushing 2875 // out to memory to round, the machine instruction that implements 2876 // ConvL2D is responsible for rounding. 2877 // c = precision_rounding(b); 2878 c = _gvn.transform(b); 2879 push(c); 2880 } else { 2881 l2f(); 2882 } 2883 break; 2884 2885 case Bytecodes::_l2d: 2886 a = pop_pair(); 2887 b = _gvn.transform( new ConvL2DNode(a)); 2888 // For i486.ad, rounding is always necessary (see _l2f above). 
2889 // c = dprecision_rounding(b); 2890 c = _gvn.transform(b); 2891 push_pair(c); 2892 break; 2893 2894 case Bytecodes::_f2l: 2895 a = pop(); 2896 b = _gvn.transform( new ConvF2LNode(a)); 2897 push_pair(b); 2898 break; 2899 2900 case Bytecodes::_d2l: 2901 a = pop_pair(); 2902 b = _gvn.transform( new ConvD2LNode(a)); 2903 push_pair(b); 2904 break; 2905 2906 case Bytecodes::_dsub: 2907 b = pop_pair(); 2908 a = pop_pair(); 2909 c = _gvn.transform( new SubDNode(a,b) ); 2910 d = dprecision_rounding(c); 2911 push_pair( d ); 2912 break; 2913 2914 case Bytecodes::_dadd: 2915 b = pop_pair(); 2916 a = pop_pair(); 2917 c = _gvn.transform( new AddDNode(a,b) ); 2918 d = dprecision_rounding(c); 2919 push_pair( d ); 2920 break; 2921 2922 case Bytecodes::_dmul: 2923 b = pop_pair(); 2924 a = pop_pair(); 2925 c = _gvn.transform( new MulDNode(a,b) ); 2926 d = dprecision_rounding(c); 2927 push_pair( d ); 2928 break; 2929 2930 case Bytecodes::_ddiv: 2931 b = pop_pair(); 2932 a = pop_pair(); 2933 c = _gvn.transform( new DivDNode(0,a,b) ); 2934 d = dprecision_rounding(c); 2935 push_pair( d ); 2936 break; 2937 2938 case Bytecodes::_dneg: 2939 a = pop_pair(); 2940 b = _gvn.transform(new NegDNode (a)); 2941 push_pair(b); 2942 break; 2943 2944 case Bytecodes::_drem: 2945 if (Matcher::has_match_rule(Op_ModD)) { 2946 // Generate a ModD node. 2947 b = pop_pair(); 2948 a = pop_pair(); 2949 // a % b 2950 2951 c = _gvn.transform( new ModDNode(0,a,b) ); 2952 d = dprecision_rounding(c); 2953 push_pair( d ); 2954 } 2955 else { 2956 // Generate a call. 2957 modd(); 2958 } 2959 break; 2960 2961 case Bytecodes::_dcmpl: 2962 b = pop_pair(); 2963 a = pop_pair(); 2964 c = _gvn.transform( new CmpD3Node( a, b)); 2965 push(c); 2966 break; 2967 2968 case Bytecodes::_dcmpg: 2969 b = pop_pair(); 2970 a = pop_pair(); 2971 // Same as dcmpl but need to flip the unordered case. 2972 // Commute the inputs, which negates the result sign except for unordered. 2973 // Flip the unordered as well by using CmpD3 which implements 2974 // unordered-lesser instead of unordered-greater semantics. 2975 // Finally, negate the result bits. Result is same as using a 2976 // CmpD3Greater except we did it with CmpD3 alone. 
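// Worked example: for dcmpg with a NaN operand, CmpD3(b, a) yields -1
// (unordered-lesser), and SubI(0, -1) produces +1, which is exactly the
// value the dcmpg bytecode must push for an unordered comparison.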
2977 c = _gvn.transform( new CmpD3Node( b, a)); 2978 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2979 push(c); 2980 break; 2981 2982 2983 // Note for longs -> lo word is on TOS, hi word is on TOS - 1 2984 case Bytecodes::_land: 2985 b = pop_pair(); 2986 a = pop_pair(); 2987 c = _gvn.transform( new AndLNode(a,b) ); 2988 push_pair(c); 2989 break; 2990 case Bytecodes::_lor: 2991 b = pop_pair(); 2992 a = pop_pair(); 2993 c = _gvn.transform( new OrLNode(a,b) ); 2994 push_pair(c); 2995 break; 2996 case Bytecodes::_lxor: 2997 b = pop_pair(); 2998 a = pop_pair(); 2999 c = _gvn.transform( new XorLNode(a,b) ); 3000 push_pair(c); 3001 break; 3002 3003 case Bytecodes::_lshl: 3004 b = pop(); // the shift count 3005 a = pop_pair(); // value to be shifted 3006 c = _gvn.transform( new LShiftLNode(a,b) ); 3007 push_pair(c); 3008 break; 3009 case Bytecodes::_lshr: 3010 b = pop(); // the shift count 3011 a = pop_pair(); // value to be shifted 3012 c = _gvn.transform( new RShiftLNode(a,b) ); 3013 push_pair(c); 3014 break; 3015 case Bytecodes::_lushr: 3016 b = pop(); // the shift count 3017 a = pop_pair(); // value to be shifted 3018 c = _gvn.transform( new URShiftLNode(a,b) ); 3019 push_pair(c); 3020 break; 3021 case Bytecodes::_lmul: 3022 b = pop_pair(); 3023 a = pop_pair(); 3024 c = _gvn.transform( new MulLNode(a,b) ); 3025 push_pair(c); 3026 break; 3027 3028 case Bytecodes::_lrem: 3029 // Must keep both values on the expression-stack during null-check 3030 assert(peek(0) == top(), "long word order"); 3031 zero_check_long(peek(1)); 3032 // Compile-time detect of null-exception? 3033 if (stopped()) return; 3034 b = pop_pair(); 3035 a = pop_pair(); 3036 c = _gvn.transform( new ModLNode(control(),a,b) ); 3037 push_pair(c); 3038 break; 3039 3040 case Bytecodes::_ldiv: 3041 // Must keep both values on the expression-stack during null-check 3042 assert(peek(0) == top(), "long word order"); 3043 zero_check_long(peek(1)); 3044 // Compile-time detect of null-exception? 3045 if (stopped()) return; 3046 b = pop_pair(); 3047 a = pop_pair(); 3048 c = _gvn.transform( new DivLNode(control(),a,b) ); 3049 push_pair(c); 3050 break; 3051 3052 case Bytecodes::_ladd: 3053 b = pop_pair(); 3054 a = pop_pair(); 3055 c = _gvn.transform( new AddLNode(a,b) ); 3056 push_pair(c); 3057 break; 3058 case Bytecodes::_lsub: 3059 b = pop_pair(); 3060 a = pop_pair(); 3061 c = _gvn.transform( new SubLNode(a,b) ); 3062 push_pair(c); 3063 break; 3064 case Bytecodes::_lcmp: 3065 // Safepoints are now inserted _before_ branches. The long-compare 3066 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a 3067 // slew of control flow. These are usually followed by a CmpI vs zero and 3068 // a branch; this pattern then optimizes to the obvious long-compare and 3069 // branch. However, if the branch is backwards there's a Safepoint 3070 // inserted. The inserted Safepoint captures the JVM state at the 3071 // pre-branch point, i.e. it captures the 3-way value. Thus if a 3072 // long-compare is used to control a loop the debug info will force 3073 // computation of the 3-way value, even though the generated code uses a 3074 // long-compare and branch. We try to rectify the situation by inserting 3075 // a SafePoint here and have it dominate and kill the safepoint added at a 3076 // following backwards branch. At this point the JVM state merely holds 2 3077 // longs but not the 3-way value. 
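// Example: a loop bottom-tested with "lcmp; iflt <backwards target>" would
// otherwise keep the three-way CmpL3 result live in the debug info of the
// safepoint at the backwards branch; adding a safepoint here, before the
// lcmp result exists, leaves only the two long operands in the captured
// JVM state.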
3078 if( UseLoopSafepoints ) { 3079 switch( iter().next_bc() ) { 3080 case Bytecodes::_ifgt: 3081 case Bytecodes::_iflt: 3082 case Bytecodes::_ifge: 3083 case Bytecodes::_ifle: 3084 case Bytecodes::_ifne: 3085 case Bytecodes::_ifeq: 3086 // If this is a backwards branch in the bytecodes, add Safepoint 3087 maybe_add_safepoint(iter().next_get_dest()); 3088 default: 3089 break; 3090 } 3091 } 3092 b = pop_pair(); 3093 a = pop_pair(); 3094 c = _gvn.transform( new CmpL3Node( a, b )); 3095 push(c); 3096 break; 3097 3098 case Bytecodes::_lneg: 3099 a = pop_pair(); 3100 b = _gvn.transform( new SubLNode(longcon(0),a)); 3101 push_pair(b); 3102 break; 3103 case Bytecodes::_l2i: 3104 a = pop_pair(); 3105 push( _gvn.transform( new ConvL2INode(a))); 3106 break; 3107 case Bytecodes::_i2l: 3108 a = pop(); 3109 b = _gvn.transform( new ConvI2LNode(a)); 3110 push_pair(b); 3111 break; 3112 case Bytecodes::_i2b: 3113 // Sign extend 3114 a = pop(); 3115 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) ); 3116 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) ); 3117 push( a ); 3118 break; 3119 case Bytecodes::_i2s: 3120 a = pop(); 3121 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) ); 3122 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) ); 3123 push( a ); 3124 break; 3125 case Bytecodes::_i2c: 3126 a = pop(); 3127 push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) ); 3128 break; 3129 3130 case Bytecodes::_i2f: 3131 a = pop(); 3132 b = _gvn.transform( new ConvI2FNode(a) ) ; 3133 c = precision_rounding(b); 3134 push (b); 3135 break; 3136 3137 case Bytecodes::_i2d: 3138 a = pop(); 3139 b = _gvn.transform( new ConvI2DNode(a)); 3140 push_pair(b); 3141 break; 3142 3143 case Bytecodes::_iinc: // Increment local 3144 i = iter().get_index(); // Get local index 3145 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) ); 3146 break; 3147 3148 // Exit points of synchronized methods must have an unlock node 3149 case Bytecodes::_return: 3150 return_current(NULL); 3151 break; 3152 3153 case Bytecodes::_ireturn: 3154 case Bytecodes::_areturn: 3155 case Bytecodes::_freturn: 3156 return_current(pop()); 3157 break; 3158 case Bytecodes::_lreturn: 3159 return_current(pop_pair()); 3160 break; 3161 case Bytecodes::_dreturn: 3162 return_current(pop_pair()); 3163 break; 3164 3165 case Bytecodes::_athrow: 3166 // null exception oop throws NULL pointer exception 3167 null_check(peek()); 3168 if (stopped()) return; 3169 // Hook the thrown exception directly to subsequent handlers. 3170 if (BailoutToInterpreterForThrows) { 3171 // Keep method interpreted from now on. 3172 uncommon_trap(Deoptimization::Reason_unhandled, 3173 Deoptimization::Action_make_not_compilable); 3174 return; 3175 } 3176 if (env()->jvmti_can_post_on_exceptions()) { 3177 // check if we must post exception events, take uncommon trap if so (with must_throw = false) 3178 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false); 3179 } 3180 // Here if either can_post_on_exceptions or should_post_on_exceptions is false 3181 add_exception_state(make_exception_state(peek())); 3182 break; 3183 3184 case Bytecodes::_goto: // fall through 3185 case Bytecodes::_goto_w: { 3186 int target_bci = (bc() == Bytecodes::_goto) ? 
iter().get_dest() : iter().get_far_dest(); 3187 3188 // If this is a backwards branch in the bytecodes, add Safepoint 3189 maybe_add_safepoint(target_bci); 3190 3191 // Update method data 3192 profile_taken_branch(target_bci); 3193 3194 // Merge the current control into the target basic block 3195 merge(target_bci); 3196 3197 // See if we can get some profile data and hand it off to the next block 3198 Block *target_block = block()->successor_for_bci(target_bci); 3199 if (target_block->pred_count() != 1) break; 3200 ciMethodData* methodData = method()->method_data(); 3201 if (!methodData->is_mature()) break; 3202 ciProfileData* data = methodData->bci_to_data(bci()); 3203 assert(data != NULL && data->is_JumpData(), "need JumpData for taken branch"); 3204 int taken = ((ciJumpData*)data)->taken(); 3205 taken = method()->scale_count(taken); 3206 target_block->set_count(taken); 3207 break; 3208 } 3209 3210 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null; 3211 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null; 3212 handle_if_null: 3213 // If this is a backwards branch in the bytecodes, add Safepoint 3214 maybe_add_safepoint(iter().get_dest()); 3215 a = null(); 3216 b = pop(); 3217 if (b->is_ValueType()) { 3218 // Return constant false because 'b' is always non-null 3219 c = _gvn.makecon(TypeInt::CC_GT); 3220 } else { 3221 if (!_gvn.type(b)->speculative_maybe_null() && 3222 !too_many_traps(Deoptimization::Reason_speculate_null_check)) { 3223 inc_sp(1); 3224 Node* null_ctl = top(); 3225 b = null_check_oop(b, &null_ctl, true, true, true); 3226 assert(null_ctl->is_top(), "no null control here"); 3227 dec_sp(1); 3228 } else if (_gvn.type(b)->speculative_always_null() && 3229 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) { 3230 inc_sp(1); 3231 b = null_assert(b); 3232 dec_sp(1); 3233 } 3234 c = _gvn.transform( new CmpPNode(b, a) ); 3235 } 3236 do_ifnull(btest, c); 3237 break; 3238 3239 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp; 3240 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp; 3241 handle_if_acmp: 3242 // If this is a backwards branch in the bytecodes, add Safepoint 3243 maybe_add_safepoint(iter().get_dest()); 3244 a = access_resolve(pop(), 0); 3245 b = access_resolve(pop(), 0); 3246 do_acmp(btest, a, b); 3247 break; 3248 3249 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx; 3250 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx; 3251 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx; 3252 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx; 3253 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx; 3254 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx; 3255 handle_ifxx: 3256 // If this is a backwards branch in the bytecodes, add Safepoint 3257 maybe_add_safepoint(iter().get_dest()); 3258 a = _gvn.intcon(0); 3259 b = pop(); 3260 c = _gvn.transform( new CmpINode(b, a) ); 3261 do_if(btest, c); 3262 break; 3263 3264 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp; 3265 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp; 3266 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp; 3267 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp; 3268 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp; 3269 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp; 3270 handle_if_icmp: 3271 // If this is a backwards branch in the 
bytecodes, add Safepoint 3272 maybe_add_safepoint(iter().get_dest()); 3273 a = pop(); 3274 b = pop(); 3275 c = _gvn.transform( new CmpINode( b, a ) ); 3276 do_if(btest, c); 3277 break; 3278 3279 case Bytecodes::_tableswitch: 3280 do_tableswitch(); 3281 break; 3282 3283 case Bytecodes::_lookupswitch: 3284 do_lookupswitch(); 3285 break; 3286 3287 case Bytecodes::_invokestatic: 3288 case Bytecodes::_invokedynamic: 3289 case Bytecodes::_invokespecial: 3290 case Bytecodes::_invokevirtual: 3291 case Bytecodes::_invokeinterface: 3292 do_call(); 3293 break; 3294 case Bytecodes::_checkcast: 3295 do_checkcast(); 3296 break; 3297 case Bytecodes::_instanceof: 3298 do_instanceof(); 3299 break; 3300 case Bytecodes::_anewarray: 3301 do_newarray(); 3302 break; 3303 case Bytecodes::_newarray: 3304 do_newarray((BasicType)iter().get_index()); 3305 break; 3306 case Bytecodes::_multianewarray: 3307 do_multianewarray(); 3308 break; 3309 case Bytecodes::_new: 3310 do_new(); 3311 break; 3312 case Bytecodes::_defaultvalue: 3313 do_defaultvalue(); 3314 break; 3315 case Bytecodes::_withfield: 3316 do_withfield(); 3317 break; 3318 3319 case Bytecodes::_jsr: 3320 case Bytecodes::_jsr_w: 3321 do_jsr(); 3322 break; 3323 3324 case Bytecodes::_ret: 3325 do_ret(); 3326 break; 3327 3328 3329 case Bytecodes::_monitorenter: 3330 do_monitor_enter(); 3331 break; 3332 3333 case Bytecodes::_monitorexit: 3334 do_monitor_exit(); 3335 break; 3336 3337 case Bytecodes::_breakpoint: 3338 // Breakpoint set concurrently to compile 3339 // %%% use an uncommon trap? 3340 C->record_failure("breakpoint in method"); 3341 return; 3342 3343 default: 3344 #ifndef PRODUCT 3345 map()->dump(99); 3346 #endif 3347 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) ); 3348 ShouldNotReachHere(); 3349 } 3350 3351 #ifndef PRODUCT 3352 IdealGraphPrinter *printer = C->printer(); 3353 if (printer && printer->should_print(1)) { 3354 char buffer[256]; 3355 jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc())); 3356 bool old = printer->traverse_outs(); 3357 printer->set_traverse_outs(true); 3358 printer->print_method(buffer, 4); 3359 printer->set_traverse_outs(old); 3360 } 3361 #endif 3362 }