1 /* 2 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "ci/ciMethodData.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "classfile/vmSymbols.hpp" 29 #include "compiler/compileLog.hpp" 30 #include "interpreter/linkResolver.hpp" 31 #include "memory/resourceArea.hpp" 32 #include "memory/universe.inline.hpp" 33 #include "oops/oop.inline.hpp" 34 #include "opto/addnode.hpp" 35 #include "opto/castnode.hpp" 36 #include "opto/convertnode.hpp" 37 #include "opto/divnode.hpp" 38 #include "opto/idealGraphPrinter.hpp" 39 #include "opto/matcher.hpp" 40 #include "opto/memnode.hpp" 41 #include "opto/mulnode.hpp" 42 #include "opto/opaquenode.hpp" 43 #include "opto/parse.hpp" 44 #include "opto/runtime.hpp" 45 #include "opto/valuetypenode.hpp" 46 #include "runtime/deoptimization.hpp" 47 #include "runtime/sharedRuntime.hpp" 48 49 #ifndef PRODUCT 50 extern int explicit_null_checks_inserted, 51 explicit_null_checks_elided; 52 #endif 53 54 //---------------------------------array_load---------------------------------- 55 void Parse::array_load(BasicType elem_type) { 56 const Type* elem = Type::TOP; 57 Node* adr = array_addressing(elem_type, 0, &elem); 58 if (stopped()) return; // guaranteed null or range check 59 Node* idx = pop(); // Get from stack without popping 60 Node* ary = pop(); // in case of exception 61 //dec_sp(2); // Pop array and index 62 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr(); 63 if (arytype->klass()->is_value_array_klass()) { 64 ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass(); 65 Node* vt = ValueTypeNode::make(gvn(), vak->element_klass()->as_value_klass(), map()->memory(), ary, adr); 66 push(vt); 67 return; 68 } 69 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); 70 Node* ld = make_load(control(), adr, elem, elem_type, adr_type, MemNode::unordered); 71 push(ld); 72 } 73 74 75 //--------------------------------array_store---------------------------------- 76 void Parse::array_store(BasicType elem_type) { 77 const Type* elem = Type::TOP; 78 Node* adr = array_addressing(elem_type, 1, &elem); 79 if (stopped()) return; // guaranteed null or range check 80 Node* val = pop(); 81 dec_sp(2); // Pop array and index 82 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); 83 if (elem == TypeInt::BOOL) { 84 elem_type = T_BOOLEAN; 85 } 86 store_to_memory(control(), adr, val, elem_type, adr_type, StoreNode::release_if_reference(elem_type)); 87 } 88 89 90 //------------------------------array_addressing------------------------------- 91 // Pull array and index from the stack. Compute pointer-to-element. 92 Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) { 93 Node *idx = peek(0+vals); // Get from stack without popping 94 Node *ary = peek(1+vals); // in case of exception 95 96 // Null check the array base, with correct stack contents 97 ary = null_check(ary, T_ARRAY); 98 // Compile-time detect of null-exception? 99 if (stopped()) return top(); 100 101 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr(); 102 const TypeInt* sizetype = arytype->size(); 103 const Type* elemtype = arytype->elem(); 104 105 if (UseUniqueSubclasses && result2 != NULL) { 106 const Type* el = elemtype->make_ptr(); 107 if (el && el->isa_instptr()) { 108 const TypeInstPtr* toop = el->is_instptr(); 109 if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) { 110 // If we load from "AbstractClass[]" we must see "ConcreteSubClass". 111 const Type* subklass = Type::get_const_type(toop->klass()); 112 elemtype = subklass->join_speculative(el); 113 } 114 } 115 } 116 117 // Check for big class initializers with all constant offsets 118 // feeding into a known-size array. 119 const TypeInt* idxtype = _gvn.type(idx)->is_int(); 120 // See if the highest idx value is less than the lowest array bound, 121 // and if the idx value cannot be negative: 122 bool need_range_check = true; 123 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) { 124 need_range_check = false; 125 if (C->log() != NULL) C->log()->elem("observe that='!need_range_check'"); 126 } 127 128 ciKlass * arytype_klass = arytype->klass(); 129 if ((arytype_klass != NULL) && (!arytype_klass->is_loaded())) { 130 // Only fails for some -Xcomp runs 131 // The class is unloaded. We have to run this bytecode in the interpreter. 132 uncommon_trap(Deoptimization::Reason_unloaded, 133 Deoptimization::Action_reinterpret, 134 arytype->klass(), "!loaded array"); 135 return top(); 136 } 137 138 // Do the range check 139 if (GenerateRangeChecks && need_range_check) { 140 Node* tst; 141 if (sizetype->_hi <= 0) { 142 // The greatest array bound is negative, so we can conclude that we're 143 // compiling unreachable code, but the unsigned compare trick used below 144 // only works with non-negative lengths. Instead, hack "tst" to be zero so 145 // the uncommon_trap path will always be taken. 146 tst = _gvn.intcon(0); 147 } else { 148 // Range is constant in array-oop, so we can use the original state of mem 149 Node* len = load_array_length(ary); 150 151 // Test length vs index (standard trick using unsigned compare) 152 Node* chk = _gvn.transform( new CmpUNode(idx, len) ); 153 BoolTest::mask btest = BoolTest::lt; 154 tst = _gvn.transform( new BoolNode(chk, btest) ); 155 } 156 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN); 157 _gvn.set_type(rc, rc->Value(&_gvn)); 158 if (!tst->is_Con()) { 159 record_for_igvn(rc); 160 } 161 set_control(_gvn.transform(new IfTrueNode(rc))); 162 // Branch to failure if out of bounds 163 { 164 PreserveJVMState pjvms(this); 165 set_control(_gvn.transform(new IfFalseNode(rc))); 166 if (C->allow_range_check_smearing()) { 167 // Do not use builtin_throw, since range checks are sometimes 168 // made more stringent by an optimistic transformation. 169 // This creates "tentative" range checks at this point, 170 // which are not guaranteed to throw exceptions. 171 // See IfNode::Ideal, is_range_check, adjust_check. 172 uncommon_trap(Deoptimization::Reason_range_check, 173 Deoptimization::Action_make_not_entrant, 174 NULL, "range_check"); 175 } else { 176 // If we have already recompiled with the range-check-widening 177 // heroic optimization turned off, then we must really be throwing 178 // range check exceptions. 179 builtin_throw(Deoptimization::Reason_range_check, idx); 180 } 181 } 182 } 183 // Check for always knowing you are throwing a range-check exception 184 if (stopped()) return top(); 185 186 // Make array address computation control dependent to prevent it 187 // from floating above the range check during loop optimizations. 188 Node* ptr = array_element_address(ary, idx, type, sizetype, control()); 189 190 if (result2 != NULL) *result2 = elemtype; 191 192 assert(ptr != top(), "top should go hand-in-hand with stopped"); 193 194 return ptr; 195 } 196 197 198 // returns IfNode 199 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask) { 200 Node *cmp = _gvn.transform( new CmpINode( a, b)); // two cases: shiftcount > 32 and shiftcount <= 32 201 Node *tst = _gvn.transform( new BoolNode( cmp, mask)); 202 IfNode *iff = create_and_map_if( control(), tst, ((mask == BoolTest::eq) ? PROB_STATIC_INFREQUENT : PROB_FAIR), COUNT_UNKNOWN ); 203 return iff; 204 } 205 206 // return Region node 207 Node* Parse::jump_if_join(Node* iffalse, Node* iftrue) { 208 Node *region = new RegionNode(3); // 2 results 209 record_for_igvn(region); 210 region->init_req(1, iffalse); 211 region->init_req(2, iftrue ); 212 _gvn.set_type(region, Type::CONTROL); 213 region = _gvn.transform(region); 214 set_control (region); 215 return region; 216 } 217 218 219 //------------------------------helper for tableswitch------------------------- 220 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) { 221 // True branch, use existing map info 222 { PreserveJVMState pjvms(this); 223 Node *iftrue = _gvn.transform( new IfTrueNode (iff) ); 224 set_control( iftrue ); 225 profile_switch_case(prof_table_index); 226 merge_new_path(dest_bci_if_true); 227 } 228 229 // False branch 230 Node *iffalse = _gvn.transform( new IfFalseNode(iff) ); 231 set_control( iffalse ); 232 } 233 234 void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, int prof_table_index) { 235 // True branch, use existing map info 236 { PreserveJVMState pjvms(this); 237 Node *iffalse = _gvn.transform( new IfFalseNode (iff) ); 238 set_control( iffalse ); 239 profile_switch_case(prof_table_index); 240 merge_new_path(dest_bci_if_true); 241 } 242 243 // False branch 244 Node *iftrue = _gvn.transform( new IfTrueNode(iff) ); 245 set_control( iftrue ); 246 } 247 248 void Parse::jump_if_always_fork(int dest_bci, int prof_table_index) { 249 // False branch, use existing map and control() 250 profile_switch_case(prof_table_index); 251 merge_new_path(dest_bci); 252 } 253 254 255 extern "C" { 256 static int jint_cmp(const void *i, const void *j) { 257 int a = *(jint *)i; 258 int b = *(jint *)j; 259 return a > b ? 1 : a < b ? -1 : 0; 260 } 261 } 262 263 264 // Default value for methodData switch indexing. Must be a negative value to avoid 265 // conflict with any legal switch index. 266 #define NullTableIndex -1 267 268 class SwitchRange : public StackObj { 269 // a range of integers coupled with a bci destination 270 jint _lo; // inclusive lower limit 271 jint _hi; // inclusive upper limit 272 int _dest; 273 int _table_index; // index into method data table 274 275 public: 276 jint lo() const { return _lo; } 277 jint hi() const { return _hi; } 278 int dest() const { return _dest; } 279 int table_index() const { return _table_index; } 280 bool is_singleton() const { return _lo == _hi; } 281 282 void setRange(jint lo, jint hi, int dest, int table_index) { 283 assert(lo <= hi, "must be a non-empty range"); 284 _lo = lo, _hi = hi; _dest = dest; _table_index = table_index; 285 } 286 bool adjoinRange(jint lo, jint hi, int dest, int table_index) { 287 assert(lo <= hi, "must be a non-empty range"); 288 if (lo == _hi+1 && dest == _dest && table_index == _table_index) { 289 _hi = hi; 290 return true; 291 } 292 return false; 293 } 294 295 void set (jint value, int dest, int table_index) { 296 setRange(value, value, dest, table_index); 297 } 298 bool adjoin(jint value, int dest, int table_index) { 299 return adjoinRange(value, value, dest, table_index); 300 } 301 302 void print() { 303 if (is_singleton()) 304 tty->print(" {%d}=>%d", lo(), dest()); 305 else if (lo() == min_jint) 306 tty->print(" {..%d}=>%d", hi(), dest()); 307 else if (hi() == max_jint) 308 tty->print(" {%d..}=>%d", lo(), dest()); 309 else 310 tty->print(" {%d..%d}=>%d", lo(), hi(), dest()); 311 } 312 }; 313 314 315 //-------------------------------do_tableswitch-------------------------------- 316 void Parse::do_tableswitch() { 317 Node* lookup = pop(); 318 319 // Get information about tableswitch 320 int default_dest = iter().get_dest_table(0); 321 int lo_index = iter().get_int_table(1); 322 int hi_index = iter().get_int_table(2); 323 int len = hi_index - lo_index + 1; 324 325 if (len < 1) { 326 // If this is a backward branch, add safepoint 327 maybe_add_safepoint(default_dest); 328 merge(default_dest); 329 return; 330 } 331 332 // generate decision tree, using trichotomy when possible 333 int rnum = len+2; 334 bool makes_backward_branch = false; 335 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 336 int rp = -1; 337 if (lo_index != min_jint) { 338 ranges[++rp].setRange(min_jint, lo_index-1, default_dest, NullTableIndex); 339 } 340 for (int j = 0; j < len; j++) { 341 jint match_int = lo_index+j; 342 int dest = iter().get_dest_table(j+3); 343 makes_backward_branch |= (dest <= bci()); 344 int table_index = method_data_update() ? j : NullTableIndex; 345 if (rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index)) { 346 ranges[++rp].set(match_int, dest, table_index); 347 } 348 } 349 jint highest = lo_index+(len-1); 350 assert(ranges[rp].hi() == highest, ""); 351 if (highest != max_jint 352 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex)) { 353 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex); 354 } 355 assert(rp < len+2, "not too many ranges"); 356 357 // Safepoint in case if backward branch observed 358 if( makes_backward_branch && UseLoopSafepoints ) 359 add_safepoint(); 360 361 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 362 } 363 364 365 //------------------------------do_lookupswitch-------------------------------- 366 void Parse::do_lookupswitch() { 367 Node *lookup = pop(); // lookup value 368 // Get information about lookupswitch 369 int default_dest = iter().get_dest_table(0); 370 int len = iter().get_int_table(1); 371 372 if (len < 1) { // If this is a backward branch, add safepoint 373 maybe_add_safepoint(default_dest); 374 merge(default_dest); 375 return; 376 } 377 378 // generate decision tree, using trichotomy when possible 379 jint* table = NEW_RESOURCE_ARRAY(jint, len*2); 380 { 381 for( int j = 0; j < len; j++ ) { 382 table[j+j+0] = iter().get_int_table(2+j+j); 383 table[j+j+1] = iter().get_dest_table(2+j+j+1); 384 } 385 qsort( table, len, 2*sizeof(table[0]), jint_cmp ); 386 } 387 388 int rnum = len*2+1; 389 bool makes_backward_branch = false; 390 SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum); 391 int rp = -1; 392 for( int j = 0; j < len; j++ ) { 393 jint match_int = table[j+j+0]; 394 int dest = table[j+j+1]; 395 int next_lo = rp < 0 ? min_jint : ranges[rp].hi()+1; 396 int table_index = method_data_update() ? j : NullTableIndex; 397 makes_backward_branch |= (dest <= bci()); 398 if( match_int != next_lo ) { 399 ranges[++rp].setRange(next_lo, match_int-1, default_dest, NullTableIndex); 400 } 401 if( rp < 0 || !ranges[rp].adjoin(match_int, dest, table_index) ) { 402 ranges[++rp].set(match_int, dest, table_index); 403 } 404 } 405 jint highest = table[2*(len-1)]; 406 assert(ranges[rp].hi() == highest, ""); 407 if( highest != max_jint 408 && !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, NullTableIndex) ) { 409 ranges[++rp].setRange(highest+1, max_jint, default_dest, NullTableIndex); 410 } 411 assert(rp < rnum, "not too many ranges"); 412 413 // Safepoint in case backward branch observed 414 if( makes_backward_branch && UseLoopSafepoints ) 415 add_safepoint(); 416 417 jump_switch_ranges(lookup, &ranges[0], &ranges[rp]); 418 } 419 420 //----------------------------create_jump_tables------------------------------- 421 bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) { 422 // Are jumptables enabled 423 if (!UseJumpTables) return false; 424 425 // Are jumptables supported 426 if (!Matcher::has_match_rule(Op_Jump)) return false; 427 428 // Don't make jump table if profiling 429 if (method_data_update()) return false; 430 431 // Decide if a guard is needed to lop off big ranges at either (or 432 // both) end(s) of the input set. We'll call this the default target 433 // even though we can't be sure that it is the true "default". 434 435 bool needs_guard = false; 436 int default_dest; 437 int64_t total_outlier_size = 0; 438 int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1; 439 int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1; 440 441 if (lo->dest() == hi->dest()) { 442 total_outlier_size = hi_size + lo_size; 443 default_dest = lo->dest(); 444 } else if (lo_size > hi_size) { 445 total_outlier_size = lo_size; 446 default_dest = lo->dest(); 447 } else { 448 total_outlier_size = hi_size; 449 default_dest = hi->dest(); 450 } 451 452 // If a guard test will eliminate very sparse end ranges, then 453 // it is worth the cost of an extra jump. 454 if (total_outlier_size > (MaxJumpTableSparseness * 4)) { 455 needs_guard = true; 456 if (default_dest == lo->dest()) lo++; 457 if (default_dest == hi->dest()) hi--; 458 } 459 460 // Find the total number of cases and ranges 461 int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1; 462 int num_range = hi - lo + 1; 463 464 // Don't create table if: too large, too small, or too sparse. 465 if (num_cases < MinJumpTableSize || num_cases > MaxJumpTableSize) 466 return false; 467 if (num_cases > (MaxJumpTableSparseness * num_range)) 468 return false; 469 470 // Normalize table lookups to zero 471 int lowval = lo->lo(); 472 key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) ); 473 474 // Generate a guard to protect against input keyvals that aren't 475 // in the switch domain. 476 if (needs_guard) { 477 Node* size = _gvn.intcon(num_cases); 478 Node* cmp = _gvn.transform( new CmpUNode(key_val, size) ); 479 Node* tst = _gvn.transform( new BoolNode(cmp, BoolTest::ge) ); 480 IfNode* iff = create_and_map_if( control(), tst, PROB_FAIR, COUNT_UNKNOWN); 481 jump_if_true_fork(iff, default_dest, NullTableIndex); 482 } 483 484 // Create an ideal node JumpTable that has projections 485 // of all possible ranges for a switch statement 486 // The key_val input must be converted to a pointer offset and scaled. 487 // Compare Parse::array_addressing above. 488 489 // Clean the 32-bit int into a real 64-bit offset. 490 // Otherwise, the jint value 0 might turn into an offset of 0x0800000000. 491 const TypeInt* ikeytype = TypeInt::make(0, num_cases, Type::WidenMin); 492 // Make I2L conversion control dependent to prevent it from 493 // floating above the range check during loop optimizations. 494 key_val = C->conv_I2X_index(&_gvn, key_val, ikeytype, control()); 495 496 // Shift the value by wordsize so we have an index into the table, rather 497 // than a switch value 498 Node *shiftWord = _gvn.MakeConX(wordSize); 499 key_val = _gvn.transform( new MulXNode( key_val, shiftWord)); 500 501 // Create the JumpNode 502 Node* jtn = _gvn.transform( new JumpNode(control(), key_val, num_cases) ); 503 504 // These are the switch destinations hanging off the jumpnode 505 int i = 0; 506 for (SwitchRange* r = lo; r <= hi; r++) { 507 for (int64_t j = r->lo(); j <= r->hi(); j++, i++) { 508 Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval))); 509 { 510 PreserveJVMState pjvms(this); 511 set_control(input); 512 jump_if_always_fork(r->dest(), r->table_index()); 513 } 514 } 515 } 516 assert(i == num_cases, "miscount of cases"); 517 stop_and_kill_map(); // no more uses for this JVMS 518 return true; 519 } 520 521 //----------------------------jump_switch_ranges------------------------------- 522 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) { 523 Block* switch_block = block(); 524 525 if (switch_depth == 0) { 526 // Do special processing for the top-level call. 527 assert(lo->lo() == min_jint, "initial range must exhaust Type::INT"); 528 assert(hi->hi() == max_jint, "initial range must exhaust Type::INT"); 529 530 // Decrement pred-numbers for the unique set of nodes. 531 #ifdef ASSERT 532 // Ensure that the block's successors are a (duplicate-free) set. 533 int successors_counted = 0; // block occurrences in [hi..lo] 534 int unique_successors = switch_block->num_successors(); 535 for (int i = 0; i < unique_successors; i++) { 536 Block* target = switch_block->successor_at(i); 537 538 // Check that the set of successors is the same in both places. 539 int successors_found = 0; 540 for (SwitchRange* p = lo; p <= hi; p++) { 541 if (p->dest() == target->start()) successors_found++; 542 } 543 assert(successors_found > 0, "successor must be known"); 544 successors_counted += successors_found; 545 } 546 assert(successors_counted == (hi-lo)+1, "no unexpected successors"); 547 #endif 548 549 // Maybe prune the inputs, based on the type of key_val. 550 jint min_val = min_jint; 551 jint max_val = max_jint; 552 const TypeInt* ti = key_val->bottom_type()->isa_int(); 553 if (ti != NULL) { 554 min_val = ti->_lo; 555 max_val = ti->_hi; 556 assert(min_val <= max_val, "invalid int type"); 557 } 558 while (lo->hi() < min_val) lo++; 559 if (lo->lo() < min_val) lo->setRange(min_val, lo->hi(), lo->dest(), lo->table_index()); 560 while (hi->lo() > max_val) hi--; 561 if (hi->hi() > max_val) hi->setRange(hi->lo(), max_val, hi->dest(), hi->table_index()); 562 } 563 564 #ifndef PRODUCT 565 if (switch_depth == 0) { 566 _max_switch_depth = 0; 567 _est_switch_depth = log2_intptr((hi-lo+1)-1)+1; 568 } 569 #endif 570 571 assert(lo <= hi, "must be a non-empty set of ranges"); 572 if (lo == hi) { 573 jump_if_always_fork(lo->dest(), lo->table_index()); 574 } else { 575 assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges"); 576 assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges"); 577 578 if (create_jump_tables(key_val, lo, hi)) return; 579 580 int nr = hi - lo + 1; 581 582 SwitchRange* mid = lo + nr/2; 583 // if there is an easy choice, pivot at a singleton: 584 if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton()) mid--; 585 586 assert(lo < mid && mid <= hi, "good pivot choice"); 587 assert(nr != 2 || mid == hi, "should pick higher of 2"); 588 assert(nr != 3 || mid == hi-1, "should pick middle of 3"); 589 590 Node *test_val = _gvn.intcon(mid->lo()); 591 592 if (mid->is_singleton()) { 593 IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne); 594 jump_if_false_fork(iff_ne, mid->dest(), mid->table_index()); 595 596 // Special Case: If there are exactly three ranges, and the high 597 // and low range each go to the same place, omit the "gt" test, 598 // since it will not discriminate anything. 599 bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest()); 600 if (eq_test_only) { 601 assert(mid == hi-1, ""); 602 } 603 604 // if there is a higher range, test for it and process it: 605 if (mid < hi && !eq_test_only) { 606 // two comparisons of same values--should enable 1 test for 2 branches 607 // Use BoolTest::le instead of BoolTest::gt 608 IfNode *iff_le = jump_if_fork_int(key_val, test_val, BoolTest::le); 609 Node *iftrue = _gvn.transform( new IfTrueNode(iff_le) ); 610 Node *iffalse = _gvn.transform( new IfFalseNode(iff_le) ); 611 { PreserveJVMState pjvms(this); 612 set_control(iffalse); 613 jump_switch_ranges(key_val, mid+1, hi, switch_depth+1); 614 } 615 set_control(iftrue); 616 } 617 618 } else { 619 // mid is a range, not a singleton, so treat mid..hi as a unit 620 IfNode *iff_ge = jump_if_fork_int(key_val, test_val, BoolTest::ge); 621 622 // if there is a higher range, test for it and process it: 623 if (mid == hi) { 624 jump_if_true_fork(iff_ge, mid->dest(), mid->table_index()); 625 } else { 626 Node *iftrue = _gvn.transform( new IfTrueNode(iff_ge) ); 627 Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) ); 628 { PreserveJVMState pjvms(this); 629 set_control(iftrue); 630 jump_switch_ranges(key_val, mid, hi, switch_depth+1); 631 } 632 set_control(iffalse); 633 } 634 } 635 636 // in any case, process the lower range 637 jump_switch_ranges(key_val, lo, mid-1, switch_depth+1); 638 } 639 640 // Decrease pred_count for each successor after all is done. 641 if (switch_depth == 0) { 642 int unique_successors = switch_block->num_successors(); 643 for (int i = 0; i < unique_successors; i++) { 644 Block* target = switch_block->successor_at(i); 645 // Throw away the pre-allocated path for each unique successor. 646 target->next_path_num(); 647 } 648 } 649 650 #ifndef PRODUCT 651 _max_switch_depth = MAX2(switch_depth, _max_switch_depth); 652 if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) { 653 SwitchRange* r; 654 int nsing = 0; 655 for( r = lo; r <= hi; r++ ) { 656 if( r->is_singleton() ) nsing++; 657 } 658 tty->print(">>> "); 659 _method->print_short_name(); 660 tty->print_cr(" switch decision tree"); 661 tty->print_cr(" %d ranges (%d singletons), max_depth=%d, est_depth=%d", 662 (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth); 663 if (_max_switch_depth > _est_switch_depth) { 664 tty->print_cr("******** BAD SWITCH DEPTH ********"); 665 } 666 tty->print(" "); 667 for( r = lo; r <= hi; r++ ) { 668 r->print(); 669 } 670 tty->cr(); 671 } 672 #endif 673 } 674 675 void Parse::modf() { 676 Node *f2 = pop(); 677 Node *f1 = pop(); 678 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::modf_Type(), 679 CAST_FROM_FN_PTR(address, SharedRuntime::frem), 680 "frem", NULL, //no memory effects 681 f1, f2); 682 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 683 684 push(res); 685 } 686 687 void Parse::modd() { 688 Node *d2 = pop_pair(); 689 Node *d1 = pop_pair(); 690 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::Math_DD_D_Type(), 691 CAST_FROM_FN_PTR(address, SharedRuntime::drem), 692 "drem", NULL, //no memory effects 693 d1, top(), d2, top()); 694 Node* res_d = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 695 696 #ifdef ASSERT 697 Node* res_top = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 1)); 698 assert(res_top == top(), "second value must be top"); 699 #endif 700 701 push_pair(res_d); 702 } 703 704 void Parse::l2f() { 705 Node* f2 = pop(); 706 Node* f1 = pop(); 707 Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(), 708 CAST_FROM_FN_PTR(address, SharedRuntime::l2f), 709 "l2f", NULL, //no memory effects 710 f1, f2); 711 Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0)); 712 713 push(res); 714 } 715 716 void Parse::do_irem() { 717 // Must keep both values on the expression-stack during null-check 718 zero_check_int(peek()); 719 // Compile-time detect of null-exception? 720 if (stopped()) return; 721 722 Node* b = pop(); 723 Node* a = pop(); 724 725 const Type *t = _gvn.type(b); 726 if (t != Type::TOP) { 727 const TypeInt *ti = t->is_int(); 728 if (ti->is_con()) { 729 int divisor = ti->get_con(); 730 // check for positive power of 2 731 if (divisor > 0 && 732 (divisor & ~(divisor-1)) == divisor) { 733 // yes ! 734 Node *mask = _gvn.intcon((divisor - 1)); 735 // Sigh, must handle negative dividends 736 Node *zero = _gvn.intcon(0); 737 IfNode *ifff = jump_if_fork_int(a, zero, BoolTest::lt); 738 Node *iff = _gvn.transform( new IfFalseNode(ifff) ); 739 Node *ift = _gvn.transform( new IfTrueNode (ifff) ); 740 Node *reg = jump_if_join(ift, iff); 741 Node *phi = PhiNode::make(reg, NULL, TypeInt::INT); 742 // Negative path; negate/and/negate 743 Node *neg = _gvn.transform( new SubINode(zero, a) ); 744 Node *andn= _gvn.transform( new AndINode(neg, mask) ); 745 Node *negn= _gvn.transform( new SubINode(zero, andn) ); 746 phi->init_req(1, negn); 747 // Fast positive case 748 Node *andx = _gvn.transform( new AndINode(a, mask) ); 749 phi->init_req(2, andx); 750 // Push the merge 751 push( _gvn.transform(phi) ); 752 return; 753 } 754 } 755 } 756 // Default case 757 push( _gvn.transform( new ModINode(control(),a,b) ) ); 758 } 759 760 // Handle jsr and jsr_w bytecode 761 void Parse::do_jsr() { 762 assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode"); 763 764 // Store information about current state, tagged with new _jsr_bci 765 int return_bci = iter().next_bci(); 766 int jsr_bci = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest(); 767 768 // Update method data 769 profile_taken_branch(jsr_bci); 770 771 // The way we do things now, there is only one successor block 772 // for the jsr, because the target code is cloned by ciTypeFlow. 773 Block* target = successor_for_bci(jsr_bci); 774 775 // What got pushed? 776 const Type* ret_addr = target->peek(); 777 assert(ret_addr->singleton(), "must be a constant (cloned jsr body)"); 778 779 // Effect on jsr on stack 780 push(_gvn.makecon(ret_addr)); 781 782 // Flow to the jsr. 783 merge(jsr_bci); 784 } 785 786 // Handle ret bytecode 787 void Parse::do_ret() { 788 // Find to whom we return. 789 assert(block()->num_successors() == 1, "a ret can only go one place now"); 790 Block* target = block()->successor_at(0); 791 assert(!target->is_ready(), "our arrival must be expected"); 792 profile_ret(target->flow()->start()); 793 int pnum = target->next_path_num(); 794 merge_common(target, pnum); 795 } 796 797 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) { 798 if (btest != BoolTest::eq && btest != BoolTest::ne) { 799 // Only ::eq and ::ne are supported for profile injection. 800 return false; 801 } 802 if (test->is_Cmp() && 803 test->in(1)->Opcode() == Op_ProfileBoolean) { 804 ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1); 805 int false_cnt = profile->false_count(); 806 int true_cnt = profile->true_count(); 807 808 // Counts matching depends on the actual test operation (::eq or ::ne). 809 // No need to scale the counts because profile injection was designed 810 // to feed exact counts into VM. 811 taken = (btest == BoolTest::eq) ? false_cnt : true_cnt; 812 not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt; 813 814 profile->consume(); 815 return true; 816 } 817 return false; 818 } 819 //--------------------------dynamic_branch_prediction-------------------------- 820 // Try to gather dynamic branch prediction behavior. Return a probability 821 // of the branch being taken and set the "cnt" field. Returns a -1.0 822 // if we need to use static prediction for some reason. 823 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) { 824 ResourceMark rm; 825 826 cnt = COUNT_UNKNOWN; 827 828 int taken = 0; 829 int not_taken = 0; 830 831 bool use_mdo = !has_injected_profile(btest, test, taken, not_taken); 832 833 if (use_mdo) { 834 // Use MethodData information if it is available 835 // FIXME: free the ProfileData structure 836 ciMethodData* methodData = method()->method_data(); 837 if (!methodData->is_mature()) return PROB_UNKNOWN; 838 ciProfileData* data = methodData->bci_to_data(bci()); 839 if (data == NULL) { 840 return PROB_UNKNOWN; 841 } 842 if (!data->is_JumpData()) return PROB_UNKNOWN; 843 844 // get taken and not taken values 845 taken = data->as_JumpData()->taken(); 846 not_taken = 0; 847 if (data->is_BranchData()) { 848 not_taken = data->as_BranchData()->not_taken(); 849 } 850 851 // scale the counts to be commensurate with invocation counts: 852 taken = method()->scale_count(taken); 853 not_taken = method()->scale_count(not_taken); 854 } 855 856 // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful. 857 // We also check that individual counters are positive first, otherwise the sum can become positive. 858 if (taken < 0 || not_taken < 0 || taken + not_taken < 40) { 859 if (C->log() != NULL) { 860 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken); 861 } 862 return PROB_UNKNOWN; 863 } 864 865 // Compute frequency that we arrive here 866 float sum = taken + not_taken; 867 // Adjust, if this block is a cloned private block but the 868 // Jump counts are shared. Taken the private counts for 869 // just this path instead of the shared counts. 870 if( block()->count() > 0 ) 871 sum = block()->count(); 872 cnt = sum / FreqCountInvocations; 873 874 // Pin probability to sane limits 875 float prob; 876 if( !taken ) 877 prob = (0+PROB_MIN) / 2; 878 else if( !not_taken ) 879 prob = (1+PROB_MAX) / 2; 880 else { // Compute probability of true path 881 prob = (float)taken / (float)(taken + not_taken); 882 if (prob > PROB_MAX) prob = PROB_MAX; 883 if (prob < PROB_MIN) prob = PROB_MIN; 884 } 885 886 assert((cnt > 0.0f) && (prob > 0.0f), 887 "Bad frequency assignment in if"); 888 889 if (C->log() != NULL) { 890 const char* prob_str = NULL; 891 if (prob >= PROB_MAX) prob_str = (prob == PROB_MAX) ? "max" : "always"; 892 if (prob <= PROB_MIN) prob_str = (prob == PROB_MIN) ? "min" : "never"; 893 char prob_str_buf[30]; 894 if (prob_str == NULL) { 895 sprintf(prob_str_buf, "%g", prob); 896 prob_str = prob_str_buf; 897 } 898 C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'", 899 iter().get_dest(), taken, not_taken, cnt, prob_str); 900 } 901 return prob; 902 } 903 904 //-----------------------------branch_prediction------------------------------- 905 float Parse::branch_prediction(float& cnt, 906 BoolTest::mask btest, 907 int target_bci, 908 Node* test) { 909 float prob = dynamic_branch_prediction(cnt, btest, test); 910 // If prob is unknown, switch to static prediction 911 if (prob != PROB_UNKNOWN) return prob; 912 913 prob = PROB_FAIR; // Set default value 914 if (btest == BoolTest::eq) // Exactly equal test? 915 prob = PROB_STATIC_INFREQUENT; // Assume its relatively infrequent 916 else if (btest == BoolTest::ne) 917 prob = PROB_STATIC_FREQUENT; // Assume its relatively frequent 918 919 // If this is a conditional test guarding a backwards branch, 920 // assume its a loop-back edge. Make it a likely taken branch. 921 if (target_bci < bci()) { 922 if (is_osr_parse()) { // Could be a hot OSR'd loop; force deopt 923 // Since it's an OSR, we probably have profile data, but since 924 // branch_prediction returned PROB_UNKNOWN, the counts are too small. 925 // Let's make a special check here for completely zero counts. 926 ciMethodData* methodData = method()->method_data(); 927 if (!methodData->is_empty()) { 928 ciProfileData* data = methodData->bci_to_data(bci()); 929 // Only stop for truly zero counts, which mean an unknown part 930 // of the OSR-ed method, and we want to deopt to gather more stats. 931 // If you have ANY counts, then this loop is simply 'cold' relative 932 // to the OSR loop. 933 if (data == NULL || 934 (data->as_BranchData()->taken() + data->as_BranchData()->not_taken() == 0)) { 935 // This is the only way to return PROB_UNKNOWN: 936 return PROB_UNKNOWN; 937 } 938 } 939 } 940 prob = PROB_STATIC_FREQUENT; // Likely to take backwards branch 941 } 942 943 assert(prob != PROB_UNKNOWN, "must have some guess at this point"); 944 return prob; 945 } 946 947 // The magic constants are chosen so as to match the output of 948 // branch_prediction() when the profile reports a zero taken count. 949 // It is important to distinguish zero counts unambiguously, because 950 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce 951 // very small but nonzero probabilities, which if confused with zero 952 // counts would keep the program recompiling indefinitely. 953 bool Parse::seems_never_taken(float prob) const { 954 return prob < PROB_MIN; 955 } 956 957 // True if the comparison seems to be the kind that will not change its 958 // statistics from true to false. See comments in adjust_map_after_if. 959 // This question is only asked along paths which are already 960 // classifed as untaken (by seems_never_taken), so really, 961 // if a path is never taken, its controlling comparison is 962 // already acting in a stable fashion. If the comparison 963 // seems stable, we will put an expensive uncommon trap 964 // on the untaken path. 965 bool Parse::seems_stable_comparison() const { 966 if (C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if)) { 967 return false; 968 } 969 return true; 970 } 971 972 //-------------------------------repush_if_args-------------------------------- 973 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 974 inline int Parse::repush_if_args() { 975 if (PrintOpto && WizardMode) { 976 tty->print("defending against excessive implicit null exceptions on %s @%d in ", 977 Bytecodes::name(iter().cur_bc()), iter().cur_bci()); 978 method()->print_name(); tty->cr(); 979 } 980 int bc_depth = - Bytecodes::depth(iter().cur_bc()); 981 assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches"); 982 DEBUG_ONLY(sync_jvms()); // argument(n) requires a synced jvms 983 assert(argument(0) != NULL, "must exist"); 984 assert(bc_depth == 1 || argument(1) != NULL, "two must exist"); 985 inc_sp(bc_depth); 986 return bc_depth; 987 } 988 989 //----------------------------------do_ifnull---------------------------------- 990 void Parse::do_ifnull(BoolTest::mask btest, Node *c) { 991 int target_bci = iter().get_dest(); 992 993 Block* branch_block = successor_for_bci(target_bci); 994 Block* next_block = successor_for_bci(iter().next_bci()); 995 996 float cnt; 997 float prob = branch_prediction(cnt, btest, target_bci, c); 998 if (prob == PROB_UNKNOWN) { 999 // (An earlier version of do_ifnull omitted this trap for OSR methods.) 1000 if (PrintOpto && Verbose) { 1001 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1002 } 1003 repush_if_args(); // to gather stats on loop 1004 // We need to mark this branch as taken so that if we recompile we will 1005 // see that it is possible. In the tiered system the interpreter doesn't 1006 // do profiling and by the time we get to the lower tier from the interpreter 1007 // the path may be cold again. Make sure it doesn't look untaken 1008 profile_taken_branch(target_bci, !ProfileInterpreter); 1009 uncommon_trap(Deoptimization::Reason_unreached, 1010 Deoptimization::Action_reinterpret, 1011 NULL, "cold"); 1012 if (C->eliminate_boxing()) { 1013 // Mark the successor blocks as parsed 1014 branch_block->next_path_num(); 1015 next_block->next_path_num(); 1016 } 1017 return; 1018 } 1019 1020 NOT_PRODUCT(explicit_null_checks_inserted++); 1021 1022 // Generate real control flow 1023 Node *tst = _gvn.transform( new BoolNode( c, btest ) ); 1024 1025 // Sanity check the probability value 1026 assert(prob > 0.0f,"Bad probability in Parser"); 1027 // Need xform to put node in hash table 1028 IfNode *iff = create_and_xform_if( control(), tst, prob, cnt ); 1029 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1030 // True branch 1031 { PreserveJVMState pjvms(this); 1032 Node* iftrue = _gvn.transform( new IfTrueNode (iff) ); 1033 set_control(iftrue); 1034 1035 if (stopped()) { // Path is dead? 1036 NOT_PRODUCT(explicit_null_checks_elided++); 1037 if (C->eliminate_boxing()) { 1038 // Mark the successor block as parsed 1039 branch_block->next_path_num(); 1040 } 1041 } else { // Path is live. 1042 // Update method data 1043 profile_taken_branch(target_bci); 1044 adjust_map_after_if(btest, c, prob, branch_block, next_block); 1045 if (!stopped()) { 1046 merge(target_bci); 1047 } 1048 } 1049 } 1050 1051 // False branch 1052 Node* iffalse = _gvn.transform( new IfFalseNode(iff) ); 1053 set_control(iffalse); 1054 1055 if (stopped()) { // Path is dead? 1056 NOT_PRODUCT(explicit_null_checks_elided++); 1057 if (C->eliminate_boxing()) { 1058 // Mark the successor block as parsed 1059 next_block->next_path_num(); 1060 } 1061 } else { // Path is live. 1062 // Update method data 1063 profile_not_taken_branch(); 1064 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, 1065 next_block, branch_block); 1066 } 1067 } 1068 1069 //------------------------------------do_if------------------------------------ 1070 void Parse::do_if(BoolTest::mask btest, Node* c) { 1071 int target_bci = iter().get_dest(); 1072 1073 Block* branch_block = successor_for_bci(target_bci); 1074 Block* next_block = successor_for_bci(iter().next_bci()); 1075 1076 float cnt; 1077 float prob = branch_prediction(cnt, btest, target_bci, c); 1078 float untaken_prob = 1.0 - prob; 1079 1080 if (prob == PROB_UNKNOWN) { 1081 if (PrintOpto && Verbose) { 1082 tty->print_cr("Never-taken edge stops compilation at bci %d", bci()); 1083 } 1084 repush_if_args(); // to gather stats on loop 1085 // We need to mark this branch as taken so that if we recompile we will 1086 // see that it is possible. In the tiered system the interpreter doesn't 1087 // do profiling and by the time we get to the lower tier from the interpreter 1088 // the path may be cold again. Make sure it doesn't look untaken 1089 profile_taken_branch(target_bci, !ProfileInterpreter); 1090 uncommon_trap(Deoptimization::Reason_unreached, 1091 Deoptimization::Action_reinterpret, 1092 NULL, "cold"); 1093 if (C->eliminate_boxing()) { 1094 // Mark the successor blocks as parsed 1095 branch_block->next_path_num(); 1096 next_block->next_path_num(); 1097 } 1098 return; 1099 } 1100 1101 // Sanity check the probability value 1102 assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser"); 1103 1104 bool taken_if_true = true; 1105 // Convert BoolTest to canonical form: 1106 if (!BoolTest(btest).is_canonical()) { 1107 btest = BoolTest(btest).negate(); 1108 taken_if_true = false; 1109 // prob is NOT updated here; it remains the probability of the taken 1110 // path (as opposed to the prob of the path guarded by an 'IfTrueNode'). 1111 } 1112 assert(btest != BoolTest::eq, "!= is the only canonical exact test"); 1113 1114 Node* tst0 = new BoolNode(c, btest); 1115 Node* tst = _gvn.transform(tst0); 1116 BoolTest::mask taken_btest = BoolTest::illegal; 1117 BoolTest::mask untaken_btest = BoolTest::illegal; 1118 1119 if (tst->is_Bool()) { 1120 // Refresh c from the transformed bool node, since it may be 1121 // simpler than the original c. Also re-canonicalize btest. 1122 // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)). 1123 // That can arise from statements like: if (x instanceof C) ... 1124 if (tst != tst0) { 1125 // Canonicalize one more time since transform can change it. 1126 btest = tst->as_Bool()->_test._test; 1127 if (!BoolTest(btest).is_canonical()) { 1128 // Reverse edges one more time... 1129 tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) ); 1130 btest = tst->as_Bool()->_test._test; 1131 assert(BoolTest(btest).is_canonical(), "sanity"); 1132 taken_if_true = !taken_if_true; 1133 } 1134 c = tst->in(1); 1135 } 1136 BoolTest::mask neg_btest = BoolTest(btest).negate(); 1137 taken_btest = taken_if_true ? btest : neg_btest; 1138 untaken_btest = taken_if_true ? neg_btest : btest; 1139 } 1140 1141 // Generate real control flow 1142 float true_prob = (taken_if_true ? prob : untaken_prob); 1143 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt); 1144 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser"); 1145 Node* taken_branch = new IfTrueNode(iff); 1146 Node* untaken_branch = new IfFalseNode(iff); 1147 if (!taken_if_true) { // Finish conversion to canonical form 1148 Node* tmp = taken_branch; 1149 taken_branch = untaken_branch; 1150 untaken_branch = tmp; 1151 } 1152 1153 // Branch is taken: 1154 { PreserveJVMState pjvms(this); 1155 taken_branch = _gvn.transform(taken_branch); 1156 set_control(taken_branch); 1157 1158 if (stopped()) { 1159 if (C->eliminate_boxing()) { 1160 // Mark the successor block as parsed 1161 branch_block->next_path_num(); 1162 } 1163 } else { 1164 // Update method data 1165 profile_taken_branch(target_bci); 1166 adjust_map_after_if(taken_btest, c, prob, branch_block, next_block); 1167 if (!stopped()) { 1168 merge(target_bci); 1169 } 1170 } 1171 } 1172 1173 untaken_branch = _gvn.transform(untaken_branch); 1174 set_control(untaken_branch); 1175 1176 // Branch not taken. 1177 if (stopped()) { 1178 if (C->eliminate_boxing()) { 1179 // Mark the successor block as parsed 1180 next_block->next_path_num(); 1181 } 1182 } else { 1183 // Update method data 1184 profile_not_taken_branch(); 1185 adjust_map_after_if(untaken_btest, c, untaken_prob, 1186 next_block, branch_block); 1187 } 1188 } 1189 1190 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const { 1191 // Don't want to speculate on uncommon traps when running with -Xcomp 1192 if (!UseInterpreter) { 1193 return false; 1194 } 1195 return (seems_never_taken(prob) && seems_stable_comparison()); 1196 } 1197 1198 //----------------------------adjust_map_after_if------------------------------ 1199 // Adjust the JVM state to reflect the result of taking this path. 1200 // Basically, it means inspecting the CmpNode controlling this 1201 // branch, seeing how it constrains a tested value, and then 1202 // deciding if it's worth our while to encode this constraint 1203 // as graph nodes in the current abstract interpretation map. 1204 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, 1205 Block* path, Block* other_path) { 1206 if (stopped() || !c->is_Cmp() || btest == BoolTest::illegal) 1207 return; // nothing to do 1208 1209 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 1210 1211 if (path_is_suitable_for_uncommon_trap(prob)) { 1212 repush_if_args(); 1213 uncommon_trap(Deoptimization::Reason_unstable_if, 1214 Deoptimization::Action_reinterpret, 1215 NULL, 1216 (is_fallthrough ? "taken always" : "taken never")); 1217 return; 1218 } 1219 1220 Node* val = c->in(1); 1221 Node* con = c->in(2); 1222 const Type* tcon = _gvn.type(con); 1223 const Type* tval = _gvn.type(val); 1224 bool have_con = tcon->singleton(); 1225 if (tval->singleton()) { 1226 if (!have_con) { 1227 // Swap, so constant is in con. 1228 con = val; 1229 tcon = tval; 1230 val = c->in(2); 1231 tval = _gvn.type(val); 1232 btest = BoolTest(btest).commute(); 1233 have_con = true; 1234 } else { 1235 // Do we have two constants? Then leave well enough alone. 1236 have_con = false; 1237 } 1238 } 1239 if (!have_con) // remaining adjustments need a con 1240 return; 1241 1242 sharpen_type_after_if(btest, con, tcon, val, tval); 1243 } 1244 1245 1246 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) { 1247 Node* ldk; 1248 if (n->is_DecodeNKlass()) { 1249 if (n->in(1)->Opcode() != Op_LoadNKlass) { 1250 return NULL; 1251 } else { 1252 ldk = n->in(1); 1253 } 1254 } else if (n->Opcode() != Op_LoadKlass) { 1255 return NULL; 1256 } else { 1257 ldk = n; 1258 } 1259 assert(ldk != NULL && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node"); 1260 1261 Node* adr = ldk->in(MemNode::Address); 1262 intptr_t off = 0; 1263 Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off); 1264 if (obj == NULL || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass? 1265 return NULL; 1266 const TypePtr* tp = gvn->type(obj)->is_ptr(); 1267 if (tp == NULL || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr? 1268 return NULL; 1269 1270 return obj; 1271 } 1272 1273 void Parse::sharpen_type_after_if(BoolTest::mask btest, 1274 Node* con, const Type* tcon, 1275 Node* val, const Type* tval) { 1276 // Look for opportunities to sharpen the type of a node 1277 // whose klass is compared with a constant klass. 1278 if (btest == BoolTest::eq && tcon->isa_klassptr()) { 1279 Node* obj = extract_obj_from_klass_load(&_gvn, val); 1280 const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type(); 1281 if (obj != NULL && (con_type->isa_instptr() || con_type->isa_aryptr())) { 1282 // Found: 1283 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq]) 1284 // or the narrowOop equivalent. 1285 const Type* obj_type = _gvn.type(obj); 1286 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr(); 1287 if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type && 1288 tboth->higher_equal(obj_type)) { 1289 // obj has to be of the exact type Foo if the CmpP succeeds. 1290 int obj_in_map = map()->find_edge(obj); 1291 JVMState* jvms = this->jvms(); 1292 if (obj_in_map >= 0 && 1293 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) { 1294 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth); 1295 const Type* tcc = ccast->as_Type()->type(); 1296 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve"); 1297 // Delay transform() call to allow recovery of pre-cast value 1298 // at the control merge. 1299 _gvn.set_type_bottom(ccast); 1300 record_for_igvn(ccast); 1301 // Here's the payoff. 1302 replace_in_map(obj, ccast); 1303 } 1304 } 1305 } 1306 } 1307 1308 int val_in_map = map()->find_edge(val); 1309 if (val_in_map < 0) return; // replace_in_map would be useless 1310 { 1311 JVMState* jvms = this->jvms(); 1312 if (!(jvms->is_loc(val_in_map) || 1313 jvms->is_stk(val_in_map))) 1314 return; // again, it would be useless 1315 } 1316 1317 // Check for a comparison to a constant, and "know" that the compared 1318 // value is constrained on this path. 1319 assert(tcon->singleton(), ""); 1320 ConstraintCastNode* ccast = NULL; 1321 Node* cast = NULL; 1322 1323 switch (btest) { 1324 case BoolTest::eq: // Constant test? 1325 { 1326 const Type* tboth = tcon->join_speculative(tval); 1327 if (tboth == tval) break; // Nothing to gain. 1328 if (tcon->isa_int()) { 1329 ccast = new CastIINode(val, tboth); 1330 } else if (tcon == TypePtr::NULL_PTR) { 1331 // Cast to null, but keep the pointer identity temporarily live. 1332 ccast = new CastPPNode(val, tboth); 1333 } else { 1334 const TypeF* tf = tcon->isa_float_constant(); 1335 const TypeD* td = tcon->isa_double_constant(); 1336 // Exclude tests vs float/double 0 as these could be 1337 // either +0 or -0. Just because you are equal to +0 1338 // doesn't mean you ARE +0! 1339 // Note, following code also replaces Long and Oop values. 1340 if ((!tf || tf->_f != 0.0) && 1341 (!td || td->_d != 0.0)) 1342 cast = con; // Replace non-constant val by con. 1343 } 1344 } 1345 break; 1346 1347 case BoolTest::ne: 1348 if (tcon == TypePtr::NULL_PTR) { 1349 cast = cast_not_null(val, false); 1350 } 1351 break; 1352 1353 default: 1354 // (At this point we could record int range types with CastII.) 1355 break; 1356 } 1357 1358 if (ccast != NULL) { 1359 const Type* tcc = ccast->as_Type()->type(); 1360 assert(tcc != tval && tcc->higher_equal(tval), "must improve"); 1361 // Delay transform() call to allow recovery of pre-cast value 1362 // at the control merge. 1363 ccast->set_req(0, control()); 1364 _gvn.set_type_bottom(ccast); 1365 record_for_igvn(ccast); 1366 cast = ccast; 1367 } 1368 1369 if (cast != NULL) { // Here's the payoff. 1370 replace_in_map(val, cast); 1371 } 1372 } 1373 1374 /** 1375 * Use speculative type to optimize CmpP node: if comparison is 1376 * against the low level class, cast the object to the speculative 1377 * type if any. CmpP should then go away. 1378 * 1379 * @param c expected CmpP node 1380 * @return result of CmpP on object casted to speculative type 1381 * 1382 */ 1383 Node* Parse::optimize_cmp_with_klass(Node* c) { 1384 // If this is transformed by the _gvn to a comparison with the low 1385 // level klass then we may be able to use speculation 1386 if (c->Opcode() == Op_CmpP && 1387 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) && 1388 c->in(2)->is_Con()) { 1389 Node* load_klass = NULL; 1390 Node* decode = NULL; 1391 if (c->in(1)->Opcode() == Op_DecodeNKlass) { 1392 decode = c->in(1); 1393 load_klass = c->in(1)->in(1); 1394 } else { 1395 load_klass = c->in(1); 1396 } 1397 if (load_klass->in(2)->is_AddP()) { 1398 Node* addp = load_klass->in(2); 1399 Node* obj = addp->in(AddPNode::Address); 1400 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); 1401 if (obj_type->speculative_type_not_null() != NULL) { 1402 ciKlass* k = obj_type->speculative_type(); 1403 inc_sp(2); 1404 obj = maybe_cast_profiled_obj(obj, k); 1405 dec_sp(2); 1406 // Make the CmpP use the casted obj 1407 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset)); 1408 load_klass = load_klass->clone(); 1409 load_klass->set_req(2, addp); 1410 load_klass = _gvn.transform(load_klass); 1411 if (decode != NULL) { 1412 decode = decode->clone(); 1413 decode->set_req(1, load_klass); 1414 load_klass = _gvn.transform(decode); 1415 } 1416 c = c->clone(); 1417 c->set_req(1, load_klass); 1418 c = _gvn.transform(c); 1419 } 1420 } 1421 } 1422 return c; 1423 } 1424 1425 //------------------------------do_one_bytecode-------------------------------- 1426 // Parse this bytecode, and alter the Parsers JVM->Node mapping 1427 void Parse::do_one_bytecode() { 1428 Node *a, *b, *c, *d; // Handy temps 1429 BoolTest::mask btest; 1430 int i; 1431 1432 assert(!has_exceptions(), "bytecode entry state must be clear of throws"); 1433 1434 if (C->check_node_count(NodeLimitFudgeFactor * 5, 1435 "out of nodes parsing method")) { 1436 return; 1437 } 1438 1439 #ifdef ASSERT 1440 // for setting breakpoints 1441 if (TraceOptoParse) { 1442 tty->print(" @"); 1443 dump_bci(bci()); 1444 tty->cr(); 1445 } 1446 #endif 1447 1448 switch (bc()) { 1449 case Bytecodes::_nop: 1450 // do nothing 1451 break; 1452 case Bytecodes::_lconst_0: 1453 push_pair(longcon(0)); 1454 break; 1455 1456 case Bytecodes::_lconst_1: 1457 push_pair(longcon(1)); 1458 break; 1459 1460 case Bytecodes::_fconst_0: 1461 push(zerocon(T_FLOAT)); 1462 break; 1463 1464 case Bytecodes::_fconst_1: 1465 push(makecon(TypeF::ONE)); 1466 break; 1467 1468 case Bytecodes::_fconst_2: 1469 push(makecon(TypeF::make(2.0f))); 1470 break; 1471 1472 case Bytecodes::_dconst_0: 1473 push_pair(zerocon(T_DOUBLE)); 1474 break; 1475 1476 case Bytecodes::_dconst_1: 1477 push_pair(makecon(TypeD::ONE)); 1478 break; 1479 1480 case Bytecodes::_iconst_m1:push(intcon(-1)); break; 1481 case Bytecodes::_iconst_0: push(intcon( 0)); break; 1482 case Bytecodes::_iconst_1: push(intcon( 1)); break; 1483 case Bytecodes::_iconst_2: push(intcon( 2)); break; 1484 case Bytecodes::_iconst_3: push(intcon( 3)); break; 1485 case Bytecodes::_iconst_4: push(intcon( 4)); break; 1486 case Bytecodes::_iconst_5: push(intcon( 5)); break; 1487 case Bytecodes::_bipush: push(intcon(iter().get_constant_u1())); break; 1488 case Bytecodes::_sipush: push(intcon(iter().get_constant_u2())); break; 1489 case Bytecodes::_aconst_null: push(null()); break; 1490 case Bytecodes::_ldc: 1491 case Bytecodes::_ldc_w: 1492 case Bytecodes::_ldc2_w: 1493 // If the constant is unresolved, run this BC once in the interpreter. 1494 { 1495 ciConstant constant = iter().get_constant(); 1496 if (constant.basic_type() == T_OBJECT && 1497 !constant.as_object()->is_loaded()) { 1498 int index = iter().get_constant_pool_index(); 1499 constantTag tag = iter().get_constant_pool_tag(index); 1500 uncommon_trap(Deoptimization::make_trap_request 1501 (Deoptimization::Reason_unloaded, 1502 Deoptimization::Action_reinterpret, 1503 index), 1504 NULL, tag.internal_name()); 1505 break; 1506 } 1507 assert(constant.basic_type() != T_OBJECT || constant.as_object()->is_instance(), 1508 "must be java_mirror of klass"); 1509 const Type* con_type = Type::make_from_constant(constant); 1510 if (con_type != NULL) { 1511 push_node(con_type->basic_type(), makecon(con_type)); 1512 } 1513 } 1514 1515 break; 1516 1517 case Bytecodes::_aload_0: 1518 push( local(0) ); 1519 break; 1520 case Bytecodes::_aload_1: 1521 push( local(1) ); 1522 break; 1523 case Bytecodes::_aload_2: 1524 push( local(2) ); 1525 break; 1526 case Bytecodes::_aload_3: 1527 push( local(3) ); 1528 break; 1529 case Bytecodes::_aload: 1530 case Bytecodes::_vload: 1531 push( local(iter().get_index()) ); 1532 break; 1533 1534 case Bytecodes::_fload_0: 1535 case Bytecodes::_iload_0: 1536 push( local(0) ); 1537 break; 1538 case Bytecodes::_fload_1: 1539 case Bytecodes::_iload_1: 1540 push( local(1) ); 1541 break; 1542 case Bytecodes::_fload_2: 1543 case Bytecodes::_iload_2: 1544 push( local(2) ); 1545 break; 1546 case Bytecodes::_fload_3: 1547 case Bytecodes::_iload_3: 1548 push( local(3) ); 1549 break; 1550 case Bytecodes::_fload: 1551 case Bytecodes::_iload: 1552 push( local(iter().get_index()) ); 1553 break; 1554 case Bytecodes::_lload_0: 1555 push_pair_local( 0 ); 1556 break; 1557 case Bytecodes::_lload_1: 1558 push_pair_local( 1 ); 1559 break; 1560 case Bytecodes::_lload_2: 1561 push_pair_local( 2 ); 1562 break; 1563 case Bytecodes::_lload_3: 1564 push_pair_local( 3 ); 1565 break; 1566 case Bytecodes::_lload: 1567 push_pair_local( iter().get_index() ); 1568 break; 1569 1570 case Bytecodes::_dload_0: 1571 push_pair_local(0); 1572 break; 1573 case Bytecodes::_dload_1: 1574 push_pair_local(1); 1575 break; 1576 case Bytecodes::_dload_2: 1577 push_pair_local(2); 1578 break; 1579 case Bytecodes::_dload_3: 1580 push_pair_local(3); 1581 break; 1582 case Bytecodes::_dload: 1583 push_pair_local(iter().get_index()); 1584 break; 1585 case Bytecodes::_fstore_0: 1586 case Bytecodes::_istore_0: 1587 case Bytecodes::_astore_0: 1588 set_local( 0, pop() ); 1589 break; 1590 case Bytecodes::_fstore_1: 1591 case Bytecodes::_istore_1: 1592 case Bytecodes::_astore_1: 1593 set_local( 1, pop() ); 1594 break; 1595 case Bytecodes::_fstore_2: 1596 case Bytecodes::_istore_2: 1597 case Bytecodes::_astore_2: 1598 set_local( 2, pop() ); 1599 break; 1600 case Bytecodes::_fstore_3: 1601 case Bytecodes::_istore_3: 1602 case Bytecodes::_astore_3: 1603 set_local( 3, pop() ); 1604 break; 1605 case Bytecodes::_fstore: 1606 case Bytecodes::_istore: 1607 case Bytecodes::_astore: 1608 case Bytecodes::_vstore: 1609 set_local( iter().get_index(), pop() ); 1610 break; 1611 // long stores 1612 case Bytecodes::_lstore_0: 1613 set_pair_local( 0, pop_pair() ); 1614 break; 1615 case Bytecodes::_lstore_1: 1616 set_pair_local( 1, pop_pair() ); 1617 break; 1618 case Bytecodes::_lstore_2: 1619 set_pair_local( 2, pop_pair() ); 1620 break; 1621 case Bytecodes::_lstore_3: 1622 set_pair_local( 3, pop_pair() ); 1623 break; 1624 case Bytecodes::_lstore: 1625 set_pair_local( iter().get_index(), pop_pair() ); 1626 break; 1627 1628 // double stores 1629 case Bytecodes::_dstore_0: 1630 set_pair_local( 0, dstore_rounding(pop_pair()) ); 1631 break; 1632 case Bytecodes::_dstore_1: 1633 set_pair_local( 1, dstore_rounding(pop_pair()) ); 1634 break; 1635 case Bytecodes::_dstore_2: 1636 set_pair_local( 2, dstore_rounding(pop_pair()) ); 1637 break; 1638 case Bytecodes::_dstore_3: 1639 set_pair_local( 3, dstore_rounding(pop_pair()) ); 1640 break; 1641 case Bytecodes::_dstore: 1642 set_pair_local( iter().get_index(), dstore_rounding(pop_pair()) ); 1643 break; 1644 1645 case Bytecodes::_pop: dec_sp(1); break; 1646 case Bytecodes::_pop2: dec_sp(2); break; 1647 case Bytecodes::_swap: 1648 a = pop(); 1649 b = pop(); 1650 push(a); 1651 push(b); 1652 break; 1653 case Bytecodes::_dup: 1654 a = pop(); 1655 push(a); 1656 push(a); 1657 break; 1658 case Bytecodes::_dup_x1: 1659 a = pop(); 1660 b = pop(); 1661 push( a ); 1662 push( b ); 1663 push( a ); 1664 break; 1665 case Bytecodes::_dup_x2: 1666 a = pop(); 1667 b = pop(); 1668 c = pop(); 1669 push( a ); 1670 push( c ); 1671 push( b ); 1672 push( a ); 1673 break; 1674 case Bytecodes::_dup2: 1675 a = pop(); 1676 b = pop(); 1677 push( b ); 1678 push( a ); 1679 push( b ); 1680 push( a ); 1681 break; 1682 1683 case Bytecodes::_dup2_x1: 1684 // before: .. c, b, a 1685 // after: .. b, a, c, b, a 1686 // not tested 1687 a = pop(); 1688 b = pop(); 1689 c = pop(); 1690 push( b ); 1691 push( a ); 1692 push( c ); 1693 push( b ); 1694 push( a ); 1695 break; 1696 case Bytecodes::_dup2_x2: 1697 // before: .. d, c, b, a 1698 // after: .. b, a, d, c, b, a 1699 // not tested 1700 a = pop(); 1701 b = pop(); 1702 c = pop(); 1703 d = pop(); 1704 push( b ); 1705 push( a ); 1706 push( d ); 1707 push( c ); 1708 push( b ); 1709 push( a ); 1710 break; 1711 1712 case Bytecodes::_arraylength: { 1713 // Must do null-check with value on expression stack 1714 Node *ary = null_check(peek(), T_ARRAY); 1715 // Compile-time detect of null-exception? 1716 if (stopped()) return; 1717 a = pop(); 1718 push(load_array_length(a)); 1719 break; 1720 } 1721 1722 case Bytecodes::_baload: array_load(T_BYTE); break; 1723 case Bytecodes::_caload: array_load(T_CHAR); break; 1724 case Bytecodes::_iaload: array_load(T_INT); break; 1725 case Bytecodes::_saload: array_load(T_SHORT); break; 1726 case Bytecodes::_faload: array_load(T_FLOAT); break; 1727 case Bytecodes::_vaload: array_load(T_VALUETYPE); break; 1728 case Bytecodes::_aaload: array_load(T_OBJECT); break; 1729 case Bytecodes::_laload: { 1730 a = array_addressing(T_LONG, 0); 1731 if (stopped()) return; // guaranteed null or range check 1732 dec_sp(2); // Pop array and index 1733 push_pair(make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS, MemNode::unordered)); 1734 break; 1735 } 1736 case Bytecodes::_daload: { 1737 a = array_addressing(T_DOUBLE, 0); 1738 if (stopped()) return; // guaranteed null or range check 1739 dec_sp(2); // Pop array and index 1740 push_pair(make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered)); 1741 break; 1742 } 1743 case Bytecodes::_bastore: array_store(T_BYTE); break; 1744 case Bytecodes::_castore: array_store(T_CHAR); break; 1745 case Bytecodes::_iastore: array_store(T_INT); break; 1746 case Bytecodes::_sastore: array_store(T_SHORT); break; 1747 case Bytecodes::_fastore: array_store(T_FLOAT); break; 1748 case Bytecodes::_vastore: { 1749 d = array_addressing(T_OBJECT, 1); 1750 if (stopped()) return; // guaranteed null or range check 1751 // TODO fix this 1752 // array_store_check(); 1753 c = pop(); // Oop to store 1754 b = pop(); // index (already used) 1755 a = pop(); // the array itself 1756 const TypeAryPtr* arytype = _gvn.type(a)->is_aryptr(); 1757 const Type* elemtype = arytype->elem(); 1758 1759 if (elemtype->isa_valuetype()) { 1760 c->as_ValueType()->store(this, a, d); 1761 break; 1762 } 1763 1764 const TypeAryPtr* adr_type = TypeAryPtr::OOPS; 1765 Node* oop = c->as_ValueType()->store_to_memory(this); 1766 Node* store = store_oop_to_array(control(), a, d, adr_type, oop, elemtype->make_oopptr(), T_OBJECT, 1767 StoreNode::release_if_reference(T_OBJECT)); 1768 break; 1769 } 1770 case Bytecodes::_aastore: { 1771 d = array_addressing(T_OBJECT, 1); 1772 if (stopped()) return; // guaranteed null or range check 1773 array_store_check(); 1774 c = pop(); // Oop to store 1775 b = pop(); // index (already used) 1776 a = pop(); // the array itself 1777 const TypeOopPtr* elemtype = _gvn.type(a)->is_aryptr()->elem()->make_oopptr(); 1778 const TypeAryPtr* adr_type = TypeAryPtr::OOPS; 1779 Node* store = store_oop_to_array(control(), a, d, adr_type, c, elemtype, T_OBJECT, 1780 StoreNode::release_if_reference(T_OBJECT)); 1781 break; 1782 } 1783 case Bytecodes::_lastore: { 1784 a = array_addressing(T_LONG, 2); 1785 if (stopped()) return; // guaranteed null or range check 1786 c = pop_pair(); 1787 dec_sp(2); // Pop array and index 1788 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS, MemNode::unordered); 1789 break; 1790 } 1791 case Bytecodes::_dastore: { 1792 a = array_addressing(T_DOUBLE, 2); 1793 if (stopped()) return; // guaranteed null or range check 1794 c = pop_pair(); 1795 dec_sp(2); // Pop array and index 1796 c = dstore_rounding(c); 1797 store_to_memory(control(), a, c, T_DOUBLE, TypeAryPtr::DOUBLES, MemNode::unordered); 1798 break; 1799 } 1800 1801 case Bytecodes::_getfield: 1802 do_getfield(); 1803 break; 1804 1805 case Bytecodes::_vgetfield: 1806 do_vgetfield(); 1807 break; 1808 1809 case Bytecodes::_getstatic: 1810 do_getstatic(); 1811 break; 1812 1813 case Bytecodes::_putfield: 1814 do_putfield(); 1815 break; 1816 1817 case Bytecodes::_putstatic: 1818 do_putstatic(); 1819 break; 1820 1821 case Bytecodes::_irem: 1822 do_irem(); 1823 break; 1824 case Bytecodes::_idiv: 1825 // Must keep both values on the expression-stack during null-check 1826 zero_check_int(peek()); 1827 // Compile-time detect of null-exception? 1828 if (stopped()) return; 1829 b = pop(); 1830 a = pop(); 1831 push( _gvn.transform( new DivINode(control(),a,b) ) ); 1832 break; 1833 case Bytecodes::_imul: 1834 b = pop(); a = pop(); 1835 push( _gvn.transform( new MulINode(a,b) ) ); 1836 break; 1837 case Bytecodes::_iadd: 1838 b = pop(); a = pop(); 1839 push( _gvn.transform( new AddINode(a,b) ) ); 1840 break; 1841 case Bytecodes::_ineg: 1842 a = pop(); 1843 push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) ); 1844 break; 1845 case Bytecodes::_isub: 1846 b = pop(); a = pop(); 1847 push( _gvn.transform( new SubINode(a,b) ) ); 1848 break; 1849 case Bytecodes::_iand: 1850 b = pop(); a = pop(); 1851 push( _gvn.transform( new AndINode(a,b) ) ); 1852 break; 1853 case Bytecodes::_ior: 1854 b = pop(); a = pop(); 1855 push( _gvn.transform( new OrINode(a,b) ) ); 1856 break; 1857 case Bytecodes::_ixor: 1858 b = pop(); a = pop(); 1859 push( _gvn.transform( new XorINode(a,b) ) ); 1860 break; 1861 case Bytecodes::_ishl: 1862 b = pop(); a = pop(); 1863 push( _gvn.transform( new LShiftINode(a,b) ) ); 1864 break; 1865 case Bytecodes::_ishr: 1866 b = pop(); a = pop(); 1867 push( _gvn.transform( new RShiftINode(a,b) ) ); 1868 break; 1869 case Bytecodes::_iushr: 1870 b = pop(); a = pop(); 1871 push( _gvn.transform( new URShiftINode(a,b) ) ); 1872 break; 1873 1874 case Bytecodes::_fneg: 1875 a = pop(); 1876 b = _gvn.transform(new NegFNode (a)); 1877 push(b); 1878 break; 1879 1880 case Bytecodes::_fsub: 1881 b = pop(); 1882 a = pop(); 1883 c = _gvn.transform( new SubFNode(a,b) ); 1884 d = precision_rounding(c); 1885 push( d ); 1886 break; 1887 1888 case Bytecodes::_fadd: 1889 b = pop(); 1890 a = pop(); 1891 c = _gvn.transform( new AddFNode(a,b) ); 1892 d = precision_rounding(c); 1893 push( d ); 1894 break; 1895 1896 case Bytecodes::_fmul: 1897 b = pop(); 1898 a = pop(); 1899 c = _gvn.transform( new MulFNode(a,b) ); 1900 d = precision_rounding(c); 1901 push( d ); 1902 break; 1903 1904 case Bytecodes::_fdiv: 1905 b = pop(); 1906 a = pop(); 1907 c = _gvn.transform( new DivFNode(0,a,b) ); 1908 d = precision_rounding(c); 1909 push( d ); 1910 break; 1911 1912 case Bytecodes::_frem: 1913 if (Matcher::has_match_rule(Op_ModF)) { 1914 // Generate a ModF node. 1915 b = pop(); 1916 a = pop(); 1917 c = _gvn.transform( new ModFNode(0,a,b) ); 1918 d = precision_rounding(c); 1919 push( d ); 1920 } 1921 else { 1922 // Generate a call. 1923 modf(); 1924 } 1925 break; 1926 1927 case Bytecodes::_fcmpl: 1928 b = pop(); 1929 a = pop(); 1930 c = _gvn.transform( new CmpF3Node( a, b)); 1931 push(c); 1932 break; 1933 case Bytecodes::_fcmpg: 1934 b = pop(); 1935 a = pop(); 1936 1937 // Same as fcmpl but need to flip the unordered case. Swap the inputs, 1938 // which negates the result sign except for unordered. Flip the unordered 1939 // as well by using CmpF3 which implements unordered-lesser instead of 1940 // unordered-greater semantics. Finally, commute the result bits. Result 1941 // is same as using a CmpF3Greater except we did it with CmpF3 alone. 1942 c = _gvn.transform( new CmpF3Node( b, a)); 1943 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 1944 push(c); 1945 break; 1946 1947 case Bytecodes::_f2i: 1948 a = pop(); 1949 push(_gvn.transform(new ConvF2INode(a))); 1950 break; 1951 1952 case Bytecodes::_d2i: 1953 a = pop_pair(); 1954 b = _gvn.transform(new ConvD2INode(a)); 1955 push( b ); 1956 break; 1957 1958 case Bytecodes::_f2d: 1959 a = pop(); 1960 b = _gvn.transform( new ConvF2DNode(a)); 1961 push_pair( b ); 1962 break; 1963 1964 case Bytecodes::_d2f: 1965 a = pop_pair(); 1966 b = _gvn.transform( new ConvD2FNode(a)); 1967 // This breaks _227_mtrt (speed & correctness) and _222_mpegaudio (speed) 1968 //b = _gvn.transform(new RoundFloatNode(0, b) ); 1969 push( b ); 1970 break; 1971 1972 case Bytecodes::_l2f: 1973 if (Matcher::convL2FSupported()) { 1974 a = pop_pair(); 1975 b = _gvn.transform( new ConvL2FNode(a)); 1976 // For i486.ad, FILD doesn't restrict precision to 24 or 53 bits. 1977 // Rather than storing the result into an FP register then pushing 1978 // out to memory to round, the machine instruction that implements 1979 // ConvL2D is responsible for rounding. 1980 // c = precision_rounding(b); 1981 c = _gvn.transform(b); 1982 push(c); 1983 } else { 1984 l2f(); 1985 } 1986 break; 1987 1988 case Bytecodes::_l2d: 1989 a = pop_pair(); 1990 b = _gvn.transform( new ConvL2DNode(a)); 1991 // For i486.ad, rounding is always necessary (see _l2f above). 1992 // c = dprecision_rounding(b); 1993 c = _gvn.transform(b); 1994 push_pair(c); 1995 break; 1996 1997 case Bytecodes::_f2l: 1998 a = pop(); 1999 b = _gvn.transform( new ConvF2LNode(a)); 2000 push_pair(b); 2001 break; 2002 2003 case Bytecodes::_d2l: 2004 a = pop_pair(); 2005 b = _gvn.transform( new ConvD2LNode(a)); 2006 push_pair(b); 2007 break; 2008 2009 case Bytecodes::_dsub: 2010 b = pop_pair(); 2011 a = pop_pair(); 2012 c = _gvn.transform( new SubDNode(a,b) ); 2013 d = dprecision_rounding(c); 2014 push_pair( d ); 2015 break; 2016 2017 case Bytecodes::_dadd: 2018 b = pop_pair(); 2019 a = pop_pair(); 2020 c = _gvn.transform( new AddDNode(a,b) ); 2021 d = dprecision_rounding(c); 2022 push_pair( d ); 2023 break; 2024 2025 case Bytecodes::_dmul: 2026 b = pop_pair(); 2027 a = pop_pair(); 2028 c = _gvn.transform( new MulDNode(a,b) ); 2029 d = dprecision_rounding(c); 2030 push_pair( d ); 2031 break; 2032 2033 case Bytecodes::_ddiv: 2034 b = pop_pair(); 2035 a = pop_pair(); 2036 c = _gvn.transform( new DivDNode(0,a,b) ); 2037 d = dprecision_rounding(c); 2038 push_pair( d ); 2039 break; 2040 2041 case Bytecodes::_dneg: 2042 a = pop_pair(); 2043 b = _gvn.transform(new NegDNode (a)); 2044 push_pair(b); 2045 break; 2046 2047 case Bytecodes::_drem: 2048 if (Matcher::has_match_rule(Op_ModD)) { 2049 // Generate a ModD node. 2050 b = pop_pair(); 2051 a = pop_pair(); 2052 // a % b 2053 2054 c = _gvn.transform( new ModDNode(0,a,b) ); 2055 d = dprecision_rounding(c); 2056 push_pair( d ); 2057 } 2058 else { 2059 // Generate a call. 2060 modd(); 2061 } 2062 break; 2063 2064 case Bytecodes::_dcmpl: 2065 b = pop_pair(); 2066 a = pop_pair(); 2067 c = _gvn.transform( new CmpD3Node( a, b)); 2068 push(c); 2069 break; 2070 2071 case Bytecodes::_dcmpg: 2072 b = pop_pair(); 2073 a = pop_pair(); 2074 // Same as dcmpl but need to flip the unordered case. 2075 // Commute the inputs, which negates the result sign except for unordered. 2076 // Flip the unordered as well by using CmpD3 which implements 2077 // unordered-lesser instead of unordered-greater semantics. 2078 // Finally, negate the result bits. Result is same as using a 2079 // CmpD3Greater except we did it with CmpD3 alone. 2080 c = _gvn.transform( new CmpD3Node( b, a)); 2081 c = _gvn.transform( new SubINode(_gvn.intcon(0),c) ); 2082 push(c); 2083 break; 2084 2085 2086 // Note for longs -> lo word is on TOS, hi word is on TOS - 1 2087 case Bytecodes::_land: 2088 b = pop_pair(); 2089 a = pop_pair(); 2090 c = _gvn.transform( new AndLNode(a,b) ); 2091 push_pair(c); 2092 break; 2093 case Bytecodes::_lor: 2094 b = pop_pair(); 2095 a = pop_pair(); 2096 c = _gvn.transform( new OrLNode(a,b) ); 2097 push_pair(c); 2098 break; 2099 case Bytecodes::_lxor: 2100 b = pop_pair(); 2101 a = pop_pair(); 2102 c = _gvn.transform( new XorLNode(a,b) ); 2103 push_pair(c); 2104 break; 2105 2106 case Bytecodes::_lshl: 2107 b = pop(); // the shift count 2108 a = pop_pair(); // value to be shifted 2109 c = _gvn.transform( new LShiftLNode(a,b) ); 2110 push_pair(c); 2111 break; 2112 case Bytecodes::_lshr: 2113 b = pop(); // the shift count 2114 a = pop_pair(); // value to be shifted 2115 c = _gvn.transform( new RShiftLNode(a,b) ); 2116 push_pair(c); 2117 break; 2118 case Bytecodes::_lushr: 2119 b = pop(); // the shift count 2120 a = pop_pair(); // value to be shifted 2121 c = _gvn.transform( new URShiftLNode(a,b) ); 2122 push_pair(c); 2123 break; 2124 case Bytecodes::_lmul: 2125 b = pop_pair(); 2126 a = pop_pair(); 2127 c = _gvn.transform( new MulLNode(a,b) ); 2128 push_pair(c); 2129 break; 2130 2131 case Bytecodes::_lrem: 2132 // Must keep both values on the expression-stack during null-check 2133 assert(peek(0) == top(), "long word order"); 2134 zero_check_long(peek(1)); 2135 // Compile-time detect of null-exception? 2136 if (stopped()) return; 2137 b = pop_pair(); 2138 a = pop_pair(); 2139 c = _gvn.transform( new ModLNode(control(),a,b) ); 2140 push_pair(c); 2141 break; 2142 2143 case Bytecodes::_ldiv: 2144 // Must keep both values on the expression-stack during null-check 2145 assert(peek(0) == top(), "long word order"); 2146 zero_check_long(peek(1)); 2147 // Compile-time detect of null-exception? 2148 if (stopped()) return; 2149 b = pop_pair(); 2150 a = pop_pair(); 2151 c = _gvn.transform( new DivLNode(control(),a,b) ); 2152 push_pair(c); 2153 break; 2154 2155 case Bytecodes::_ladd: 2156 b = pop_pair(); 2157 a = pop_pair(); 2158 c = _gvn.transform( new AddLNode(a,b) ); 2159 push_pair(c); 2160 break; 2161 case Bytecodes::_lsub: 2162 b = pop_pair(); 2163 a = pop_pair(); 2164 c = _gvn.transform( new SubLNode(a,b) ); 2165 push_pair(c); 2166 break; 2167 case Bytecodes::_lcmp: 2168 // Safepoints are now inserted _before_ branches. The long-compare 2169 // bytecode painfully produces a 3-way value (-1,0,+1) which requires a 2170 // slew of control flow. These are usually followed by a CmpI vs zero and 2171 // a branch; this pattern then optimizes to the obvious long-compare and 2172 // branch. However, if the branch is backwards there's a Safepoint 2173 // inserted. The inserted Safepoint captures the JVM state at the 2174 // pre-branch point, i.e. it captures the 3-way value. Thus if a 2175 // long-compare is used to control a loop the debug info will force 2176 // computation of the 3-way value, even though the generated code uses a 2177 // long-compare and branch. We try to rectify the situation by inserting 2178 // a SafePoint here and have it dominate and kill the safepoint added at a 2179 // following backwards branch. At this point the JVM state merely holds 2 2180 // longs but not the 3-way value. 2181 if( UseLoopSafepoints ) { 2182 switch( iter().next_bc() ) { 2183 case Bytecodes::_ifgt: 2184 case Bytecodes::_iflt: 2185 case Bytecodes::_ifge: 2186 case Bytecodes::_ifle: 2187 case Bytecodes::_ifne: 2188 case Bytecodes::_ifeq: 2189 // If this is a backwards branch in the bytecodes, add Safepoint 2190 maybe_add_safepoint(iter().next_get_dest()); 2191 } 2192 } 2193 b = pop_pair(); 2194 a = pop_pair(); 2195 c = _gvn.transform( new CmpL3Node( a, b )); 2196 push(c); 2197 break; 2198 2199 case Bytecodes::_lneg: 2200 a = pop_pair(); 2201 b = _gvn.transform( new SubLNode(longcon(0),a)); 2202 push_pair(b); 2203 break; 2204 case Bytecodes::_l2i: 2205 a = pop_pair(); 2206 push( _gvn.transform( new ConvL2INode(a))); 2207 break; 2208 case Bytecodes::_i2l: 2209 a = pop(); 2210 b = _gvn.transform( new ConvI2LNode(a)); 2211 push_pair(b); 2212 break; 2213 case Bytecodes::_i2b: 2214 // Sign extend 2215 a = pop(); 2216 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(24)) ); 2217 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(24)) ); 2218 push( a ); 2219 break; 2220 case Bytecodes::_i2s: 2221 a = pop(); 2222 a = _gvn.transform( new LShiftINode(a,_gvn.intcon(16)) ); 2223 a = _gvn.transform( new RShiftINode(a,_gvn.intcon(16)) ); 2224 push( a ); 2225 break; 2226 case Bytecodes::_i2c: 2227 a = pop(); 2228 push( _gvn.transform( new AndINode(a,_gvn.intcon(0xFFFF)) ) ); 2229 break; 2230 2231 case Bytecodes::_i2f: 2232 a = pop(); 2233 b = _gvn.transform( new ConvI2FNode(a) ) ; 2234 c = precision_rounding(b); 2235 push (b); 2236 break; 2237 2238 case Bytecodes::_i2d: 2239 a = pop(); 2240 b = _gvn.transform( new ConvI2DNode(a)); 2241 push_pair(b); 2242 break; 2243 2244 case Bytecodes::_iinc: // Increment local 2245 i = iter().get_index(); // Get local index 2246 set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) ); 2247 break; 2248 2249 // Exit points of synchronized methods must have an unlock node 2250 case Bytecodes::_return: 2251 return_current(NULL); 2252 break; 2253 2254 case Bytecodes::_ireturn: 2255 case Bytecodes::_areturn: 2256 case Bytecodes::_vreturn: 2257 case Bytecodes::_freturn: 2258 return_current(pop()); 2259 break; 2260 case Bytecodes::_lreturn: 2261 return_current(pop_pair()); 2262 break; 2263 case Bytecodes::_dreturn: 2264 return_current(pop_pair()); 2265 break; 2266 2267 case Bytecodes::_athrow: 2268 // null exception oop throws NULL pointer exception 2269 null_check(peek()); 2270 if (stopped()) return; 2271 // Hook the thrown exception directly to subsequent handlers. 2272 if (BailoutToInterpreterForThrows) { 2273 // Keep method interpreted from now on. 2274 uncommon_trap(Deoptimization::Reason_unhandled, 2275 Deoptimization::Action_make_not_compilable); 2276 return; 2277 } 2278 if (env()->jvmti_can_post_on_exceptions()) { 2279 // check if we must post exception events, take uncommon trap if so (with must_throw = false) 2280 uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false); 2281 } 2282 // Here if either can_post_on_exceptions or should_post_on_exceptions is false 2283 add_exception_state(make_exception_state(peek())); 2284 break; 2285 2286 case Bytecodes::_goto: // fall through 2287 case Bytecodes::_goto_w: { 2288 int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest(); 2289 2290 // If this is a backwards branch in the bytecodes, add Safepoint 2291 maybe_add_safepoint(target_bci); 2292 2293 // Update method data 2294 profile_taken_branch(target_bci); 2295 2296 // Merge the current control into the target basic block 2297 merge(target_bci); 2298 2299 // See if we can get some profile data and hand it off to the next block 2300 Block *target_block = block()->successor_for_bci(target_bci); 2301 if (target_block->pred_count() != 1) break; 2302 ciMethodData* methodData = method()->method_data(); 2303 if (!methodData->is_mature()) break; 2304 ciProfileData* data = methodData->bci_to_data(bci()); 2305 assert( data->is_JumpData(), "" ); 2306 int taken = ((ciJumpData*)data)->taken(); 2307 taken = method()->scale_count(taken); 2308 target_block->set_count(taken); 2309 break; 2310 } 2311 2312 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null; 2313 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null; 2314 handle_if_null: 2315 // If this is a backwards branch in the bytecodes, add Safepoint 2316 maybe_add_safepoint(iter().get_dest()); 2317 a = null(); 2318 b = pop(); 2319 if (!_gvn.type(b)->speculative_maybe_null() && 2320 !too_many_traps(Deoptimization::Reason_speculate_null_check)) { 2321 inc_sp(1); 2322 Node* null_ctl = top(); 2323 b = null_check_oop(b, &null_ctl, true, true, true); 2324 assert(null_ctl->is_top(), "no null control here"); 2325 dec_sp(1); 2326 } 2327 c = _gvn.transform( new CmpPNode(b, a) ); 2328 do_ifnull(btest, c); 2329 break; 2330 2331 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp; 2332 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp; 2333 handle_if_acmp: 2334 // If this is a backwards branch in the bytecodes, add Safepoint 2335 maybe_add_safepoint(iter().get_dest()); 2336 a = pop(); 2337 b = pop(); 2338 c = _gvn.transform( new CmpPNode(b, a) ); 2339 c = optimize_cmp_with_klass(c); 2340 do_if(btest, c); 2341 break; 2342 2343 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx; 2344 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx; 2345 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx; 2346 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx; 2347 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx; 2348 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx; 2349 handle_ifxx: 2350 // If this is a backwards branch in the bytecodes, add Safepoint 2351 maybe_add_safepoint(iter().get_dest()); 2352 a = _gvn.intcon(0); 2353 b = pop(); 2354 c = _gvn.transform( new CmpINode(b, a) ); 2355 do_if(btest, c); 2356 break; 2357 2358 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp; 2359 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp; 2360 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp; 2361 case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp; 2362 case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp; 2363 case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp; 2364 handle_if_icmp: 2365 // If this is a backwards branch in the bytecodes, add Safepoint 2366 maybe_add_safepoint(iter().get_dest()); 2367 a = pop(); 2368 b = pop(); 2369 c = _gvn.transform( new CmpINode( b, a ) ); 2370 do_if(btest, c); 2371 break; 2372 2373 case Bytecodes::_tableswitch: 2374 do_tableswitch(); 2375 break; 2376 2377 case Bytecodes::_lookupswitch: 2378 do_lookupswitch(); 2379 break; 2380 2381 case Bytecodes::_invokestatic: 2382 case Bytecodes::_invokedynamic: 2383 case Bytecodes::_invokespecial: 2384 case Bytecodes::_invokevirtual: 2385 case Bytecodes::_invokeinterface: 2386 do_call(); 2387 break; 2388 case Bytecodes::_checkcast: 2389 do_checkcast(); 2390 break; 2391 case Bytecodes::_instanceof: 2392 do_instanceof(); 2393 break; 2394 case Bytecodes::_anewarray: 2395 do_newarray(); 2396 break; 2397 case Bytecodes::_newarray: 2398 do_newarray((BasicType)iter().get_index()); 2399 break; 2400 case Bytecodes::_multianewarray: 2401 do_multianewarray(); 2402 break; 2403 case Bytecodes::_new: 2404 do_new(); 2405 break; 2406 case Bytecodes::_vdefault: 2407 do_vdefault(); 2408 break; 2409 case Bytecodes::_vwithfield: 2410 do_vwithfield(); 2411 break; 2412 2413 case Bytecodes::_jsr: 2414 case Bytecodes::_jsr_w: 2415 do_jsr(); 2416 break; 2417 2418 case Bytecodes::_ret: 2419 do_ret(); 2420 break; 2421 2422 2423 case Bytecodes::_monitorenter: 2424 do_monitor_enter(); 2425 break; 2426 2427 case Bytecodes::_monitorexit: 2428 do_monitor_exit(); 2429 break; 2430 2431 case Bytecodes::_vunbox: 2432 do_vunbox(); 2433 break; 2434 2435 case Bytecodes::_vbox: 2436 do_vbox(); 2437 break; 2438 2439 case Bytecodes::_breakpoint: 2440 // Breakpoint set concurrently to compile 2441 // %%% use an uncommon trap? 2442 C->record_failure("breakpoint in method"); 2443 return; 2444 2445 default: 2446 #ifndef PRODUCT 2447 map()->dump(99); 2448 #endif 2449 tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()) ); 2450 ShouldNotReachHere(); 2451 } 2452 2453 #ifndef PRODUCT 2454 IdealGraphPrinter *printer = C->printer(); 2455 if (printer && printer->should_print(1)) { 2456 char buffer[256]; 2457 sprintf(buffer, "Bytecode %d: %s", bci(), Bytecodes::name(bc())); 2458 bool old = printer->traverse_outs(); 2459 printer->set_traverse_outs(true); 2460 printer->print_method(buffer, 4); 2461 printer->set_traverse_outs(old); 2462 } 2463 #endif 2464 }