/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/superword.hpp"

//=============================================================================
//--------------------------is_cloop_ind_var-----------------------------------
// Determine if a node is a counted loop induction variable.
// NOTE: The method is declared in "node.hpp".
bool Node::is_cloop_ind_var() const {
  return (is_Phi() && !as_Phi()->is_copy() &&
          as_Phi()->region()->is_CountedLoop() &&
          as_Phi()->region()->as_CountedLoop()->phi() == this);
}

//=============================================================================
//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void LoopNode::dump_spec(outputStream *st) const {
  if (is_inner_loop()) st->print( "inner " );
  if (is_partial_peel_loop()) st->print( "partial_peel " );
  if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
}
#endif

//------------------------------is_valid_counted_loop-------------------------
bool LoopNode::is_valid_counted_loop() const {
  if (is_CountedLoop()) {
    CountedLoopNode* l = as_CountedLoop();
    CountedLoopEndNode* le = l->loopexit_or_null();
    if (le != NULL &&
        le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
      Node* phi = l->phi();
      Node* exit = le->proj_out_or_null(0 /* false */);
      if (exit != NULL && exit->Opcode() == Op_IfFalse &&
          phi != NULL && phi->is_Phi() &&
          phi->in(LoopNode::LoopBackControl) == l->incr() &&
          le->loopnode() == l && le->stride_is_con()) {
        return true;
      }
    }
  }
  return false;
}

//------------------------------get_early_ctrl---------------------------------
// Compute earliest legal control
Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
  assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
  uint i;
  Node *early;
  if (n->in(0) && !n->is_expensive()) {
    early = n->in(0);
    if (!early->is_CFG())       // Might be a non-CFG multi-def
      early = get_ctrl(early);  // So treat input as a straight data input
    i = 1;
  } else {
    early = get_ctrl(n->in(1));
    i = 2;
  }
  uint e_d = dom_depth(early);
  assert( early, "" );
  for (; i < n->req(); i++) {
    Node *cin = get_ctrl(n->in(i));
    assert( cin, "" );
    // Keep deepest dominator depth
    uint c_d = dom_depth(cin);
    if (c_d > e_d) {           // Deeper guy?
      early = cin;             // Keep deepest found so far
      e_d = c_d;
    } else if (c_d == e_d &&   // Same depth?
               early != cin) { // If not equal, must use slower algorithm
      // If same depth but not equal, one _must_ dominate the other
      // and we want the deeper (i.e., dominated) guy.
      Node *n1 = early;
      Node *n2 = cin;
      while (1) {
        n1 = idom(n1);          // Walk up until break cycle
        n2 = idom(n2);
        if (n1 == cin ||        // Walked early up to cin
            dom_depth(n2) < c_d)
          break;                // early is deeper; keep him
        if (n2 == early ||      // Walked cin up to early
            dom_depth(n1) < c_d) {
          early = cin;          // cin is deeper; keep him
          break;
        }
      }
      e_d = dom_depth(early);   // Reset depth register cache
    }
  }

  // Return earliest legal location
  assert(early == find_non_split_ctrl(early), "unexpected early control");

  if (n->is_expensive() && !_verify_only && !_verify_me) {
    assert(n->in(0), "should have control input");
    early = get_early_ctrl_for_expensive(n, early);
  }

  return early;
}

//------------------------------get_early_ctrl_for_expensive---------------------------------
// Move node up the dominator tree as high as legal while still beneficial
Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
  assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
  assert(OptimizeExpensiveOps, "optimization off?");

  Node* ctl = n->in(0);
  assert(ctl->is_CFG(), "expensive input 0 must be cfg");
  uint min_dom_depth = dom_depth(earliest);
#ifdef ASSERT
  if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
    dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
    assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
  }
#endif
  if (dom_depth(ctl) < min_dom_depth) {
    return earliest;
  }

  while (1) {
    Node *next = ctl;
    // Moving the node out of a loop on the projection of an If
    // confuses loop predication. So once we hit a loop in an If branch
    // that doesn't branch to an UNC, we stop. The code that processes
    // expensive nodes will notice the loop and skip over it to try to
    // move the node further up.
    if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
      if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
        break;
      }
      next = idom(ctl->in(1)->in(0));
    } else if (ctl->is_Proj()) {
      // We only move it up along a projection if the projection is
      // the single control projection for its parent: same code path,
      // if it's an If with UNC or fallthrough of a call.
      Node* parent_ctl = ctl->in(0);
      if (parent_ctl == NULL) {
        break;
      } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
        next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
      } else if (parent_ctl->is_If()) {
        if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
          break;
        }
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      } else if (ctl->is_CatchProj()) {
        if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
          break;
        }
        assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
        next = parent_ctl->in(0)->in(0)->in(0);
      } else {
        // Check if parent control has a single projection (this
        // control is the only possible successor of the parent
        // control). If so, we can try to move the node above the
        // parent control.
        int nb_ctl_proj = 0;
        for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
          Node *p = parent_ctl->fast_out(i);
          if (p->is_Proj() && p->is_CFG()) {
            nb_ctl_proj++;
            if (nb_ctl_proj > 1) {
              break;
            }
          }
        }

        if (nb_ctl_proj > 1) {
          break;
        }
        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      }
    } else {
      next = idom(ctl);
    }
    if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
      break;
    }
    ctl = next;
  }

  if (ctl != n->in(0)) {
    _igvn.replace_input_of(n, 0, ctl);
    _igvn.hash_insert(n);
  }

  return ctl;
}


//------------------------------set_early_ctrl---------------------------------
// Set earliest legal control
void PhaseIdealLoop::set_early_ctrl( Node *n ) {
  Node *early = get_early_ctrl(n);

  // Record earliest legal location
  set_ctrl(n, early);
}

//------------------------------set_subtree_ctrl-------------------------------
// set missing _ctrl entries on new nodes
void PhaseIdealLoop::set_subtree_ctrl( Node *n ) {
  // Already set? Get out.
  if( _nodes[n->_idx] ) return;
  // Recursively set _nodes array to indicate where the Node goes
  uint i;
  for( i = 0; i < n->req(); ++i ) {
    Node *m = n->in(i);
    if( m && m != C->root() )
      set_subtree_ctrl( m );
  }

  // Fixup self
  set_early_ctrl( n );
}

// Create a skeleton strip mined outer loop: a Loop head before the
// inner strip mined loop, a safepoint and an exit condition guarded
// by an opaque node after the inner strip mined loop with a backedge
// to the loop head. The inner strip mined loop is left as it is. Only
// once loop optimizations are over, do we adjust the inner loop exit
// condition to limit its number of iterations, set the outer loop
// exit condition and add Phis to the outer loop head. Some loop
// optimizations that operate on the inner strip mined loop need to be
// aware of the outer strip mined loop: loop unswitching needs to
// clone the outer loop as well as the inner, unrolling needs to only
// clone the inner loop etc. No optimizations need to change the outer
// strip mined loop as it is only a skeleton.
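//
// Sketch of the resulting nest (illustrative only; the exact shape is
// what verify_strip_mined() below checks, and the SafePoint is wired in
// later by is_counted_loop()):
//
//        entry
//          |
//   OuterStripMinedLoop <-----------------------------+
//          |                                          |
//   CountedLoop (inner, strip mined) <------+         |
//          | ...                            |         |
//   CountedLoopEnd ------- IfTrue ----------+         |
//          |                                          |
//      IfFalse (inner exit)                           |
//          |                                          |
//      SafePoint                                      |
//          |                                          |
//   OuterStripMinedLoopEnd ------- IfTrue (backedge) -+
//          |
//      IfFalse (outer exit)
//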
IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
                                                             IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                                             Node*& entry_control, Node*& iffalse) {
  Node* outer_test = _igvn.intcon(0);
  set_ctrl(outer_test, C->root());
  Node *orig = iffalse;
  iffalse = iffalse->clone();
  _igvn.register_new_node_with_optimizer(iffalse);
  set_idom(iffalse, idom(orig), dom_depth(orig));

  IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
  Node *outer_ift = new IfTrueNode (outer_le);
  Node* outer_iff = orig;
  _igvn.replace_input_of(outer_iff, 0, outer_le);

  LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
  entry_control = outer_l;

  IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
  IdealLoopTree* parent = loop->_parent;
  IdealLoopTree* sibling = parent->_child;
  if (sibling == loop) {
    parent->_child = outer_ilt;
  } else {
    while (sibling->_next != loop) {
      sibling = sibling->_next;
    }
    sibling->_next = outer_ilt;
  }
  outer_ilt->_next = loop->_next;
  outer_ilt->_parent = parent;
  outer_ilt->_child = loop;
  outer_ilt->_nest = loop->_nest;
  loop->_parent = outer_ilt;
  loop->_next = NULL;
  loop->_nest++;

  set_loop(iffalse, outer_ilt);
  register_control(outer_le, outer_ilt, iffalse);
  register_control(outer_ift, outer_ilt, outer_le);
  set_idom(outer_iff, outer_le, dom_depth(outer_le));
  _igvn.register_new_node_with_optimizer(outer_l);
  set_loop(outer_l, outer_ilt);
  set_idom(outer_l, init_control, dom_depth(init_control)+1);

  return outer_ilt;
}

void PhaseIdealLoop::insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol) {
  Node* new_predicate_proj = create_new_if_for_predicate(limit_check_proj, NULL,
                                                         Deoptimization::Reason_loop_limit_check,
                                                         Op_If);
  Node* iff = new_predicate_proj->in(0);
  assert(iff->Opcode() == Op_If, "bad graph shape");
  Node* conv = iff->in(1);
  assert(conv->Opcode() == Op_Conv2B, "bad graph shape");
  Node* opaq = conv->in(1);
  assert(opaq->Opcode() == Op_Opaque1, "bad graph shape");
  cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
  bol = _igvn.register_new_node_with_optimizer(bol);
  set_subtree_ctrl(bol);
  _igvn.replace_input_of(iff, 1, bol);

#ifndef PRODUCT
  // report that the loop limit check has actually been generated
  // for this loop
  if (TraceLoopLimitCheck) {
    tty->print_cr("Counted Loop Limit Check generated:");
    debug_only( bol->dump(2); )
  }
#endif
}

//------------------------------is_counted_loop--------------------------------
bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) {
  PhaseGVN *gvn = &_igvn;

  // Counted loop head must be a good RegionNode with only 3 not NULL
  // control input edges: Self, Entry, LoopBack.
  if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) {
    return false;
  }
  Node *init_control = x->in(LoopNode::EntryControl);
  Node *back_control = x->in(LoopNode::LoopBackControl);
  if (init_control == NULL || back_control == NULL)   // Partially dead
    return false;
  // Must also check for TOP when looking for a dead loop
  if (init_control->is_top() || back_control->is_top())
    return false;

  // Allow funny placement of Safepoint
  if (back_control->Opcode() == Op_SafePoint) {
    if (LoopStripMiningIter != 0) {
      // Leaving the safepoint on the backedge and creating a
      // CountedLoop will confuse optimizations. We can't move the
      // safepoint around because its jvm state wouldn't match a new
      // location. Give up on that loop.
      return false;
    }
    back_control = back_control->in(TypeFunc::Control);
  }

  // Controlling test for loop
  Node *iftrue = back_control;
  uint iftrue_op = iftrue->Opcode();
  if (iftrue_op != Op_IfTrue &&
      iftrue_op != Op_IfFalse)
    // I have a weird back-control. Probably the loop-exit test is in
    // the middle of the loop and I am looking at some trailing control-flow
    // merge point. To fix this I would have to partially peel the loop.
    return false; // Obscure back-control

  // Get boolean guarding loop-back test
  Node *iff = iftrue->in(0);
  if (get_loop(iff) != loop || !iff->in(1)->is_Bool())
    return false;
  BoolNode *test = iff->in(1)->as_Bool();
  BoolTest::mask bt = test->_test._test;
  float cl_prob = iff->as_If()->_prob;
  if (iftrue_op == Op_IfFalse) {
    bt = BoolTest(bt).negate();
    cl_prob = 1.0 - cl_prob;
  }
  // Get backedge compare
  Node *cmp = test->in(1);
  int cmp_op = cmp->Opcode();
  if (cmp_op != Op_CmpI)
    return false;                // Avoid pointer & float compares

  // Find the trip-counter increment & limit. Limit must be loop invariant.
  Node *incr  = cmp->in(1);
  Node *limit = cmp->in(2);

  // ---------
  // need 'loop()' test to tell if limit is loop invariant
  // ---------

  if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
    Node *tmp = incr;            // Then reverse order into the CmpI
    incr = limit;
    limit = tmp;
    bt = BoolTest(bt).commute(); // And commute the exit test
  }
  if (is_member(loop, get_ctrl(limit))) // Limit must be loop-invariant
    return false;
  if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
    return false;

  Node* phi_incr = NULL;
  // Trip-counter increment must be commutative & associative.
  if (incr->Opcode() == Op_CastII) {
    incr = incr->in(1);
  }
  if (incr->is_Phi()) {
    if (incr->as_Phi()->region() != x || incr->req() != 3)
      return false; // Not simple trip counter expression
    phi_incr = incr;
    incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
    if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
      return false;
  }

  Node* trunc1 = NULL;
  Node* trunc2 = NULL;
  const TypeInt* iv_trunc_t = NULL;
  Node* orig_incr = incr;
  if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) {
    return false; // Funny increment opcode
  }
  assert(incr->Opcode() == Op_AddI, "wrong increment code");

  const TypeInt* limit_t = gvn->type(limit)->is_int();
  if (trunc1 != NULL) {
    // When there is a truncation, we must be sure that after the truncation
    // the trip counter will end up higher than the limit, otherwise we are looking
    // at an endless loop. Can happen with range checks.

    // Example:
    // int i = 0;
    // while (true) {
    //    sum += array[i];
    //    i++;
    //    i = i & 0x7fff;
    // }
    //
    // If the array is shorter than 0x8000 this exits through an AIOOB
    //  - Counted loop transformation is ok
    // If the array is longer then this is an endless loop
    //  - No transformation can be done.

    const TypeInt* incr_t = gvn->type(orig_incr)->is_int();
    if (limit_t->_hi > incr_t->_hi) {
      // if the limit can have a higher value than the increment (before the phi)
      return false;
    }
  }

  // Get merge point
  Node *xphi = incr->in(1);
  Node *stride = incr->in(2);
  if (!stride->is_Con()) {     // Oops, swap these
    if (!xphi->is_Con())       // Is the other guy a constant?
      return false;            // Nope, unknown stride, bail out
    Node *tmp = xphi;          // 'incr' is commutative, so ok to swap
    xphi = stride;
    stride = tmp;
  }
  if (xphi->Opcode() == Op_CastII) {
    xphi = xphi->in(1);
  }
  // Stride must be constant
  int stride_con = stride->get_int();
  if (stride_con == 0)
    return false; // missed some peephole opt

  if (!xphi->is_Phi())
    return false; // Too much math on the trip counter
  if (phi_incr != NULL && phi_incr != xphi)
    return false;
  PhiNode *phi = xphi->as_Phi();

  // Phi must be of loop header; backedge must wrap to increment
  if (phi->region() != x)
    return false;
  if ((trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) ||
      (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) {
    return false;
  }
  Node *init_trip = phi->in(LoopNode::EntryControl);

  // If iv trunc type is smaller than int, check for possible wrap.
  if (!TypeInt::INT->higher_equal(iv_trunc_t)) {
    assert(trunc1 != NULL, "must have found some truncation");

    // Get a better type for the phi (filtered thru if's)
    const TypeInt* phi_ft = filtered_type(phi);

    // Can iv take on a value that will wrap?
    //
    // Ensure iv's limit is not within "stride" of the wrap value.
    //
    // Example for "short" type
    // Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
    // If the stride is +10, then the last value of the induction
    // variable before the increment (phi_ft->_hi) must be
    // <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
    // ensure no truncation occurs after the increment.

    if (stride_con > 0) {
      if (iv_trunc_t->_hi - phi_ft->_hi < stride_con ||
          iv_trunc_t->_lo > phi_ft->_lo) {
        return false;  // truncation may occur
      }
    } else if (stride_con < 0) {
      if (iv_trunc_t->_lo - phi_ft->_lo > stride_con ||
          iv_trunc_t->_hi < phi_ft->_hi) {
        return false;  // truncation may occur
      }
    }
    // No possibility of wrap so truncation can be discarded
    // Promote iv type to Int
  } else {
    assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int");
  }

  // If the condition is inverted and we will be rolling
  // through MININT to MAXINT, then bail out.
  if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
      // Odd stride
      (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
      // Count down loop rolls through MAXINT
      ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
      // Count up loop rolls through MININT
      ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
    return false; // Bail out
  }

  const TypeInt* init_t = gvn->type(init_trip)->is_int();

  if (stride_con > 0) {
    jlong init_p = (jlong)init_t->_lo + stride_con;
    if (init_p > (jlong)max_jint || init_p > (jlong)limit_t->_hi)
      return false; // cyclic loop or this loop trips only once
  } else {
    jlong init_p = (jlong)init_t->_hi + stride_con;
    if (init_p < (jlong)min_jint || init_p < (jlong)limit_t->_lo)
      return false; // cyclic loop or this loop trips only once
  }

  if (phi_incr != NULL && bt != BoolTest::ne) {
    // check if there is a possibility of IV overflowing after the first increment
    if (stride_con > 0) {
      if (init_t->_hi > max_jint - stride_con) {
        return false;
      }
    } else {
      if (init_t->_lo < min_jint - stride_con) {
        return false;
      }
    }
  }

  // =================================================
  // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
  //
  assert(x->Opcode() == Op_Loop, "regular loops only");
  C->print_method(PHASE_BEFORE_CLOOPS, 3);

  Node *hook = new Node(6);

  // ===================================================
  // Generate loop limit check to avoid integer overflow
  // in cases like next (cyclic loops):
  //
  //   for (i=0; i <= max_jint; i++) {}
  //   for (i=0; i <  max_jint; i+=2) {}
  //
  //
  // Limit check predicate depends on the loop test:
  //
  //   for(;i != limit; i++)       --> limit <= (max_jint)
  //   for(;i <  limit; i+=stride) --> limit <= (max_jint - stride + 1)
  //   for(;i <= limit; i+=stride) --> limit <= (max_jint - stride    )
  //

  // Check if limit is excluded to do more precise int overflow check.
  bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
  int stride_m  = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));

  // If compare points directly to the phi we need to adjust
  // the compare so that it points to the incr. Limit has
  // to be adjusted to keep trip count the same and the
  // adjusted limit should be checked for int overflow.
  if (phi_incr != NULL) {
    stride_m  += stride_con;
  }

  if (limit->is_Con()) {
    int limit_con = limit->get_int();
    if ((stride_con > 0 && limit_con > (max_jint - stride_m)) ||
        (stride_con < 0 && limit_con < (min_jint - stride_m))) {
      // Bailout: it could be integer overflow.
      return false;
    }
  } else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) ||
             (stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) {
      // Limit's type may satisfy the condition, for example,
      // when it is an array length.
  } else {
    // Generate loop's limit check.
    // Loop limit check predicate should be near the loop.
    ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
    if (!limit_check_proj) {
      // The limit check predicate is not generated if this method trapped here before.
#ifdef ASSERT
      if (TraceLoopLimitCheck) {
        tty->print("missing loop limit check:");
        loop->dump_head();
        x->dump(1);
      }
#endif
      return false;
    }

    IfNode* check_iff = limit_check_proj->in(0)->as_If();

    if (!is_dominator(get_ctrl(limit), check_iff->in(0))) {
      return false;
    }

    Node* cmp_limit;
    Node* bol;

    if (stride_con > 0) {
      cmp_limit = new CmpINode(limit, _igvn.intcon(max_jint - stride_m));
      bol = new BoolNode(cmp_limit, BoolTest::le);
    } else {
      cmp_limit = new CmpINode(limit, _igvn.intcon(min_jint - stride_m));
      bol = new BoolNode(cmp_limit, BoolTest::ge);
    }

    insert_loop_limit_check(limit_check_proj, cmp_limit, bol);
  }

  // Now we need to canonicalize loop condition.
  if (bt == BoolTest::ne) {
    assert(stride_con == 1 || stride_con == -1, "simple increment only");
    if (stride_con > 0 && init_t->_hi < limit_t->_lo) {
      // 'ne' can be replaced with 'lt' only when init < limit.
      bt = BoolTest::lt;
    } else if (stride_con < 0 && init_t->_lo > limit_t->_hi) {
      // 'ne' can be replaced with 'gt' only when init > limit.
      bt = BoolTest::gt;
    } else {
      ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
      if (!limit_check_proj) {
        // The limit check predicate is not generated if this method trapped here before.
#ifdef ASSERT
        if (TraceLoopLimitCheck) {
          tty->print("missing loop limit check:");
          loop->dump_head();
          x->dump(1);
        }
#endif
        return false;
      }
      IfNode* check_iff = limit_check_proj->in(0)->as_If();

      if (!is_dominator(get_ctrl(limit), check_iff->in(0)) ||
          !is_dominator(get_ctrl(init_trip), check_iff->in(0))) {
        return false;
      }

      Node* cmp_limit;
      Node* bol;

      if (stride_con > 0) {
        cmp_limit = new CmpINode(init_trip, limit);
        bol = new BoolNode(cmp_limit, BoolTest::lt);
      } else {
        cmp_limit = new CmpINode(init_trip, limit);
        bol = new BoolNode(cmp_limit, BoolTest::gt);
      }

      insert_loop_limit_check(limit_check_proj, cmp_limit, bol);

      if (stride_con > 0) {
        // 'ne' can be replaced with 'lt' only when init < limit.
        bt = BoolTest::lt;
      } else if (stride_con < 0) {
        // 'ne' can be replaced with 'gt' only when init > limit.
        bt = BoolTest::gt;
      }
    }
  }

  if (phi_incr != NULL) {
    // If compare points directly to the phi we need to adjust
    // the compare so that it points to the incr. Limit has
    // to be adjusted to keep trip count the same and we
    // should avoid int overflow.
    //
    //  i = init; do {} while(i++ < limit);
    // is converted to
    //  i = init; do {} while(++i < limit+1);
    //
    limit = gvn->transform(new AddINode(limit, stride));
  }

  if (incl_limit) {
    // The limit check guarantees that 'limit <= (max_jint - stride)' so
    // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
    //
    Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1);
    limit = gvn->transform(new AddINode(limit, one));
    if (bt == BoolTest::le)
      bt = BoolTest::lt;
    else if (bt == BoolTest::ge)
      bt = BoolTest::gt;
    else
      ShouldNotReachHere();
  }
  set_subtree_ctrl( limit );

  if (LoopStripMiningIter == 0) {
    // Check for SafePoint on backedge and remove
    Node *sfpt = x->in(LoopNode::LoopBackControl);
    if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
      lazy_replace( sfpt, iftrue );
      if (loop->_safepts != NULL) {
        loop->_safepts->yank(sfpt);
      }
      loop->_tail = iftrue;
    }
  }

  // Build a canonical trip test.
  // Clone code, as old values may be in use.
  incr = incr->clone();
  incr->set_req(1,phi);
  incr->set_req(2,stride);
  incr = _igvn.register_new_node_with_optimizer(incr);
  set_early_ctrl( incr );
  _igvn.rehash_node_delayed(phi);
  phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );

  // If phi type is more restrictive than Int, raise to
  // Int to prevent (almost) infinite recursion in igvn
  // which can only handle integer types for constants or minint..maxint.
  if (!TypeInt::INT->higher_equal(phi->bottom_type())) {
    Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT);
    nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
    nphi = _igvn.register_new_node_with_optimizer(nphi);
    set_ctrl(nphi, get_ctrl(phi));
    _igvn.replace_node(phi, nphi);
    phi = nphi->as_Phi();
  }
  cmp = cmp->clone();
  cmp->set_req(1,incr);
  cmp->set_req(2,limit);
  cmp = _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, iff->in(0));

  test = test->clone()->as_Bool();
  (*(BoolTest*)&test->_test)._test = bt;
  test->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(test);
  set_ctrl(test, iff->in(0));

  // Replace the old IfNode with a new LoopEndNode
  Node *lex = _igvn.register_new_node_with_optimizer(new CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt ));
  IfNode *le = lex->as_If();
  uint dd = dom_depth(iff);
  set_idom(le, le->in(0), dd); // Update dominance for loop exit
  set_loop(le, loop);

  // Get the loop-exit control
  Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));

  // Need to swap loop-exit and loop-back control?
  if (iftrue_op == Op_IfFalse) {
    Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le));
    Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le));

    loop->_tail = back_control = ift2;
    set_loop(ift2, loop);
    set_loop(iff2, get_loop(iffalse));

    // Lazy update of 'get_ctrl' mechanism.
    lazy_replace(iffalse, iff2);
    lazy_replace(iftrue,  ift2);

    // Swap names
    iffalse = iff2;
    iftrue  = ift2;
  } else {
    _igvn.rehash_node_delayed(iffalse);
    _igvn.rehash_node_delayed(iftrue);
    iffalse->set_req_X( 0, le, &_igvn );
    iftrue ->set_req_X( 0, le, &_igvn );
  }

  set_idom(iftrue,  le, dd+1);
  set_idom(iffalse, le, dd+1);
  assert(iff->outcnt() == 0, "should be dead now");
  lazy_replace( iff, le ); // fix 'get_ctrl'

  Node *sfpt2 = le->in(0);

  Node* entry_control = init_control;
  bool strip_mine_loop = LoopStripMiningIter > 1 && loop->_child == NULL &&
    sfpt2->Opcode() == Op_SafePoint && !loop->_has_call;
  IdealLoopTree* outer_ilt = NULL;
  if (strip_mine_loop) {
    outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop,
                                              cl_prob, le->_fcnt, entry_control,
                                              iffalse);
  }

  // Now setup a new CountedLoopNode to replace the existing LoopNode
  CountedLoopNode *l = new CountedLoopNode(entry_control, back_control);
  l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
  // The following assert is approximately true, and defines the intention
  // of can_be_counted_loop. It fails, however, because phase->type
  // is not yet initialized for this loop and its parts.
  //assert(l->can_be_counted_loop(this), "sanity");
  _igvn.register_new_node_with_optimizer(l);
  set_loop(l, loop);
  loop->_head = l;
  // Fix all data nodes placed at the old loop head.
  // Uses the lazy-update mechanism of 'get_ctrl'.
  lazy_replace( x, l );
  set_idom(l, entry_control, dom_depth(entry_control) + 1);

  if (LoopStripMiningIter == 0 || strip_mine_loop) {
    // Check for immediately preceding SafePoint and remove
    if (sfpt2->Opcode() == Op_SafePoint && (LoopStripMiningIter != 0 || is_deleteable_safept(sfpt2))) {
      if (strip_mine_loop) {
        Node* outer_le = outer_ilt->_tail->in(0);
        Node* sfpt = sfpt2->clone();
        sfpt->set_req(0, iffalse);
        outer_le->set_req(0, sfpt);
        register_control(sfpt, outer_ilt, iffalse);
        set_idom(outer_le, sfpt, dom_depth(sfpt));
      }
      lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
      if (loop->_safepts != NULL) {
        loop->_safepts->yank(sfpt2);
      }
    }
  }

  // Free up intermediate goo
  _igvn.remove_dead_node(hook);

#ifdef ASSERT
  assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
  assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
#endif
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Counted ");
    loop->dump_head();
  }
#endif

  C->print_method(PHASE_AFTER_CLOOPS, 3);

  // Capture bounds of the loop in the induction variable Phi before
  // subsequent transformation (iteration splitting) obscures the
  // bounds
  l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));

  if (strip_mine_loop) {
    l->mark_strip_mined();
    l->verify_strip_mined(1);
    outer_ilt->_head->as_Loop()->verify_strip_mined(1);
    loop = outer_ilt;
  }

  return true;
}

//----------------------exact_limit-------------------------------------------
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
  assert(loop->_head->is_CountedLoop(), "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_valid_counted_loop(), "");

  if (ABS(cl->stride_con()) == 1 ||
      cl->limit()->Opcode() == Op_LoopLimit) {
    // Old code has exact limit (it could be incorrect in case of int overflow).
    // Loop limit is exact with stride == 1. And loop may already have exact limit.
    return cl->limit();
  }
  Node *limit = NULL;
#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
#endif
  if (cl->has_exact_trip_count()) {
    // Simple case: loop has constant boundaries.
    // Use jlongs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong  init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    julong trip_cnt = cl->trip_count();
    jlong final_con = init_con + trip_cnt*stride_con;
    int final_int = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    limit = _igvn.intcon(final_int);
  } else {
    // Create new LoopLimit node to get exact limit (final iv value).
    limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
    register_new_node(limit, cl->in(LoopNode::EntryControl));
  }
  assert(limit != NULL, "sanity");
  return limit;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
    phase->C->set_major_progress();
  }
  return RegionNode::Ideal(phase, can_reshape);
}

void LoopNode::verify_strip_mined(int expect_skeleton) const {
#ifdef ASSERT
  const OuterStripMinedLoopNode* outer = NULL;
  const CountedLoopNode* inner = NULL;
  if (is_strip_mined()) {
    assert(is_CountedLoop(), "no Loop should be marked strip mined");
    inner = as_CountedLoop();
    outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
  } else if (is_OuterStripMinedLoop()) {
    outer = this->as_OuterStripMinedLoop();
    inner = outer->unique_ctrl_out()->as_CountedLoop();
    assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
  }
  if (inner != NULL || outer != NULL) {
    assert(inner != NULL && outer != NULL, "missing loop in strip mined nest");
    Node* outer_tail = outer->in(LoopNode::LoopBackControl);
    Node* outer_le = outer_tail->in(0);
    assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
    Node* sfpt = outer_le->in(0);
    assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
    Node* inner_out = sfpt->in(0);
    if (inner_out->outcnt() != 1) {
      ResourceMark rm;
      Unique_Node_List wq;

      for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
        Node* u = inner_out->fast_out(i);
        if (u == sfpt) {
          continue;
        }
        wq.clear();
        wq.push(u);
        bool found_sfpt = false;
        for (uint next = 0; next < wq.size() && !found_sfpt; next++) {
          Node* n = wq.at(next);
          for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) {
            Node* u = n->fast_out(i);
            if (u == sfpt) {
              found_sfpt = true;
            }
            if (!u->is_CFG()) {
              wq.push(u);
            }
          }
        }
        assert(found_sfpt, "no node in loop that's not input to safepoint");
      }
    }

    CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
    assert(cle == inner->loopexit_or_null(), "mismatch");
    bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
    if (has_skeleton) {
      assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
      assert(outer->outcnt() == 2, "only phis");
    } else {
      assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
      uint phis = 0;
      for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
        Node* u = inner->fast_out(i);
        if (u->is_Phi()) {
          phis++;
        }
      }
      for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
        Node* u = outer->fast_out(i);
        assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
      }
      uint stores = 0;
      for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
        Node* u = inner_out->fast_out(i);
        if (u->is_Store()) {
          stores++;
        }
      }
      assert(outer->outcnt() >= phis + 2 && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
    }
    assert(sfpt->outcnt() == 1, "no data node");
    assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
  }
#endif
}

//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return RegionNode::Ideal(phase, can_reshape);
}

//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void CountedLoopNode::dump_spec(outputStream *st) const {
  LoopNode::dump_spec(st);
  if (stride_is_con()) {
    st->print("stride: %d ",stride_con());
  }
  if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
  if (is_main_loop()) st->print("main of N%d", _idx);
  if (is_post_loop()) st->print("post of N%d", _main_idx);
  if (is_strip_mined()) st->print(" strip mined");
}
#endif

//=============================================================================
int CountedLoopEndNode::stride_con() const {
  return stride()->bottom_type()->is_int()->get_con();
}

//=============================================================================
//------------------------------Value-----------------------------------------
const Type* LoopLimitNode::Value(PhaseGVN* phase) const {
  const Type* init_t   = phase->type(in(Init));
  const Type* limit_t  = phase->type(in(Limit));
  const Type* stride_t = phase->type(in(Stride));
  // Either input is TOP ==> the result is TOP
  if (init_t   == Type::TOP) return Type::TOP;
  if (limit_t  == Type::TOP) return Type::TOP;
  if (stride_t == Type::TOP) return Type::TOP;

  int stride_con = stride_t->is_int()->get_con();
  if (stride_con == 1)
    return NULL;  // Identity

  if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
    // Use jlongs to avoid integer overflow.
    jlong init_con   = init_t->is_int()->get_con();
    jlong limit_con  = limit_t->is_int()->get_con();
    int  stride_m    = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    jlong final_con  = init_con + stride_con*trip_count;
    int final_int    = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    return TypeInt::make(final_int);
  }

  return bottom_type(); // TypeInt::INT
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (phase->type(in(Init))   == Type::TOP ||
      phase->type(in(Limit))  == Type::TOP ||
      phase->type(in(Stride)) == Type::TOP)
    return NULL;  // Dead

  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1)
    return NULL;  // Identity

  if (in(Init)->is_Con() && in(Limit)->is_Con())
    return NULL;  // Value

  // Delay following optimizations until all loop optimizations
  // done to keep Ideal graph simple.
  if (!can_reshape || phase->C->major_progress())
    return NULL;

  const TypeInt* init_t  = phase->type(in(Init) )->is_int();
  const TypeInt* limit_t = phase->type(in(Limit))->is_int();
  int stride_p;
  jlong lim, ini;
  julong max;
  if (stride_con > 0) {
    stride_p = stride_con;
    lim = limit_t->_hi;
    ini = init_t->_lo;
    max = (julong)max_jint;
  } else {
    stride_p = -stride_con;
    lim = init_t->_hi;
    ini = limit_t->_lo;
    max = (julong)min_jint;
  }
  julong range = lim - ini + stride_p;
  if (range <= max) {
    // Convert to integer expression if it does not overflow.
    Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
    Node *range = phase->transform(new SubINode(in(Limit), in(Init)));
    Node *bias  = phase->transform(new AddINode(range, stride_m));
    Node *trip  = phase->transform(new DivINode(0, bias, in(Stride)));
    Node *span  = phase->transform(new MulINode(trip, in(Stride)));
    return new AddINode(span, in(Init)); // exact limit
  }

  if (is_power_of_2(stride_p) ||                // divisor is 2^n
      !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
    // Convert to long expression to avoid integer overflow
    // and let igvn optimizer convert this division.
    //
    Node*   init   = phase->transform( new ConvI2LNode(in(Init)));
    Node*  limit   = phase->transform( new ConvI2LNode(in(Limit)));
    Node* stride   = phase->longcon(stride_con);
    Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));

    Node *range = phase->transform(new SubLNode(limit, init));
    Node *bias  = phase->transform(new AddLNode(range, stride_m));
    Node *span;
    if (stride_con > 0 && is_power_of_2(stride_p)) {
      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for division. Zero trip guard should
      // guarantee that init < limit but sometimes the guard is missing and
      // we can get a situation when init > limit. Note, for the empty loop
      // optimization zero trip guard is generated explicitly which leaves
      // only RCE predicate where exact limit is used and the predicate
      // will simply fail forcing recompilation.
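      //
      // Small worked example (illustrative only): with stride_con == 4
      // (stride_p == 4, a power of two), init == 0 and limit == 10,
      // range = 10 - 0 = 10, bias = 10 + (4 - 1) = 13, and
      // bias & -4 == 12, i.e. the span is rounded down to a multiple of
      // the stride, so the exact limit becomes init + 12 == 12 without
      // emitting a division.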
      Node* neg_stride = phase->longcon(-stride_con);
      span = phase->transform(new AndLNode(bias, neg_stride));
    } else {
      Node *trip  = phase->transform(new DivLNode(0, bias, stride));
      span = phase->transform(new MulLNode(trip, stride));
    }
    // Convert back to int
    Node *span_int = phase->transform(new ConvL2INode(span));
    return new AddINode(span_int, in(Init)); // exact limit
  }

  return NULL;    // No progress
}

//------------------------------Identity---------------------------------------
// If stride == 1 return limit node.
Node* LoopLimitNode::Identity(PhaseGVN* phase) {
  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1 || stride_con == -1)
    return in(Limit);
  return this;
}

//=============================================================================
//----------------------match_incr_with_optional_truncation--------------------
// Match increment with optional truncation:
// CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
// Return NULL for failure. Success returns the increment node.
Node* CountedLoopNode::match_incr_with_optional_truncation(
                      Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) {
  // Quick cutouts:
  if (expr == NULL || expr->req() != 3)  return NULL;

  Node *t1 = NULL;
  Node *t2 = NULL;
  const TypeInt* trunc_t = TypeInt::INT;
  Node* n1 = expr;
  int   n1op = n1->Opcode();

  // Try to strip (n1 & M) or (n1 << N >> N) from n1.
  if (n1op == Op_AndI &&
      n1->in(2)->is_Con() &&
      n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
    // %%% This check should match any mask of 2**K-1.
    t1 = n1;
    n1 = t1->in(1);
    n1op = n1->Opcode();
    trunc_t = TypeInt::CHAR;
  } else if (n1op == Op_RShiftI &&
             n1->in(1) != NULL &&
             n1->in(1)->Opcode() == Op_LShiftI &&
             n1->in(2) == n1->in(1)->in(2) &&
             n1->in(2)->is_Con()) {
    jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
    // %%% This check should match any shift in [1..31].
    if (shift == 16 || shift == 8) {
      t1 = n1;
      t2 = t1->in(1);
      n1 = t2->in(1);
      n1op = n1->Opcode();
      if (shift == 16) {
        trunc_t = TypeInt::SHORT;
      } else if (shift == 8) {
        trunc_t = TypeInt::BYTE;
      }
    }
  }

  // If (maybe after stripping) it is an AddI, we won:
  if (n1op == Op_AddI) {
    *trunc1 = t1;
    *trunc2 = t2;
    *trunc_type = trunc_t;
    return n1;
  }

  // failed
  return NULL;
}

LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
  if (is_strip_mined()) {
    verify_strip_mined(expect_skeleton);
    return in(EntryControl)->as_Loop();
  }
  return this;
}

OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
  assert(is_strip_mined(), "not a strip mined loop");
  Node* c = in(EntryControl);
  if (c == NULL || c->is_top() || !c->is_OuterStripMinedLoop()) {
    return NULL;
  }
  return c->as_OuterStripMinedLoop();
}

IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
  Node* c = in(LoopBackControl);
  if (c == NULL || c->is_top()) {
    return NULL;
  }
  return c->as_IfTrue();
}

IfTrueNode* CountedLoopNode::outer_loop_tail() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_loop_tail();
}

OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
  IfTrueNode* proj = outer_loop_tail();
  if (proj == NULL) {
    return NULL;
  }
  Node* c = proj->in(0);
  if (c == NULL || c->is_top() || c->outcnt() != 2) {
    return NULL;
  }
  return c->as_OuterStripMinedLoopEnd();
}

OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_loop_end();
}

IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
  IfNode* le = outer_loop_end();
  if (le == NULL) {
    return NULL;
  }
  Node* c = le->proj_out_or_null(false);
  if (c == NULL) {
    return NULL;
  }
  return c->as_IfFalse();
}

IfFalseNode* CountedLoopNode::outer_loop_exit() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_loop_exit();
}

SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
  IfNode* le = outer_loop_end();
  if (le == NULL) {
    return NULL;
  }
  Node* c = le->in(0);
  if (c == NULL || c->is_top()) {
    return NULL;
  }
  assert(c->Opcode() == Op_SafePoint, "broken outer loop");
  return c->as_SafePoint();
}

SafePointNode* CountedLoopNode::outer_safepoint() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_safepoint();
}

Node* CountedLoopNode::skip_predicates_from_entry(Node* ctrl) {
  while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If() &&
         ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->outcnt() == 1 &&
         ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->unique_out()->Opcode() == Op_Halt) {
    ctrl = ctrl->in(0)->in(0);
  }

  return ctrl;
}

Node* CountedLoopNode::skip_predicates() {
  if (is_main_loop()) {
    Node* ctrl = skip_strip_mined()->in(LoopNode::EntryControl);

    return skip_predicates_from_entry(ctrl);
  }
  return in(LoopNode::EntryControl);
}

void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
  // Look for the outer & inner strip mined loop, reduce number of
  // iterations of the inner loop, set exit condition of outer loop,
  // construct required phi nodes for outer loop.
  CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
  assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
  Node* inner_iv_phi = inner_cl->phi();
  if (inner_iv_phi == NULL) {
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }
  CountedLoopEndNode* inner_cle = inner_cl->loopexit();

  int stride = inner_cl->stride_con();
  jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS(stride);
  int scaled_iters = (int)scaled_iters_long;
  int short_scaled_iters = LoopStripMiningIterShortLoop* ABS(stride);
  const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
  jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
  assert(iter_estimate > 0, "broken");
  if ((jlong)scaled_iters != scaled_iters_long || iter_estimate <= short_scaled_iters) {
    // Remove outer loop and safepoint (too few iterations)
    Node* outer_sfpt = outer_safepoint();
    Node* outer_out = outer_loop_exit();
    igvn->replace_node(outer_out, outer_sfpt->in(0));
    igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
    inner_cl->clear_strip_mined();
    return;
  }
  if (iter_estimate <= scaled_iters_long) {
    // We would only go through one iteration of
    // the outer loop: drop the outer loop but
    // keep the safepoint so we don't run for
    // too long without a safepoint
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }

  Node* cle_tail = inner_cle->proj_out(true);
  ResourceMark rm;
  Node_List old_new;
  if (cle_tail->outcnt() > 1) {
    // Look for nodes on backedge of inner loop and clone them
    Unique_Node_List backedge_nodes;
    for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
      Node* u = cle_tail->fast_out(i);
      if (u != inner_cl) {
        assert(!u->is_CFG(), "control flow on the backedge?");
        backedge_nodes.push(u);
      }
    }
    uint last = igvn->C->unique();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node* n = backedge_nodes.at(next);
      old_new.map(n->_idx, n->clone());
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        assert(!u->is_CFG(), "broken");
        if (u->_idx >= last) {
          continue;
        }
        if (!u->is_Phi()) {
          backedge_nodes.push(u);
        } else {
          assert(u->in(0) == inner_cl, "strange phi on the backedge");
        }
      }
    }
    // Put the clones on the outer loop backedge
    Node* le_tail = outer_loop_tail();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node *n = old_new[backedge_nodes.at(next)->_idx];
      for (uint i = 1; i < n->req(); i++) {
        if (n->in(i) != NULL && old_new[n->in(i)->_idx] != NULL) {
          n->set_req(i, old_new[n->in(i)->_idx]);
        }
      }
      if (n->in(0) != NULL && n->in(0) == cle_tail) {
        n->set_req(0, le_tail);
      }
      igvn->register_new_node_with_optimizer(n);
    }
  }

  Node* iv_phi = NULL;
  // Make a clone of each phi in the inner loop
  // for the outer loop
  for (uint i = 0; i < inner_cl->outcnt(); i++) {
    Node* u = inner_cl->raw_out(i);
    if (u->is_Phi()) {
      assert(u->in(0) == inner_cl, "inconsistent");
      Node* phi = u->clone();
      phi->set_req(0, this);
      Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
      if (be != NULL) {
        phi->set_req(LoopNode::LoopBackControl, be);
      }
      phi = igvn->transform(phi);
      igvn->replace_input_of(u, LoopNode::EntryControl, phi);
      if (u == inner_iv_phi) {
        iv_phi = phi;
      }
    }
  }
  Node* cle_out = inner_cle->proj_out(false);
  if (cle_out->outcnt() > 1) {
    // Look for chains of stores that were sunk
    // out of the inner loop and are in the outer loop
    for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
      Node* u = cle_out->fast_out(i);
      if (u->is_Store()) {
        Node* first = u;
        for(;;) {
          Node* next = first->in(MemNode::Memory);
          if (!next->is_Store() || next->in(0) != cle_out) {
            break;
          }
          first = next;
        }
        Node* last = u;
        for(;;) {
          Node* next = NULL;
          for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
            Node* uu = last->fast_out(j);
            if (uu->is_Store() && uu->in(0) == cle_out) {
              assert(next == NULL, "only one in the outer loop");
              next = uu;
            }
          }
          if (next == NULL) {
            break;
          }
          last = next;
        }
        Node* phi = NULL;
        for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
          Node* uu = fast_out(j);
          if (uu->is_Phi()) {
            Node* be = uu->in(LoopNode::LoopBackControl);
            if (be->is_Store() && old_new[be->_idx] != NULL) {
              assert(false, "store on the backedge + sunk stores: unsupported");
              // drop outer loop
              IfNode* outer_le = outer_loop_end();
              Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
              igvn->replace_node(outer_le, iff);
              inner_cl->clear_strip_mined();
              return;
            }
            if (be == last || be == first->in(MemNode::Memory)) {
              assert(phi == NULL, "only one phi");
              phi = uu;
            }
          }
        }
#ifdef ASSERT
        for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
          Node* uu = fast_out(j);
          if (uu->is_Phi() && uu->bottom_type() == Type::MEMORY) {
            if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
              assert(phi == uu, "what's that phi?");
            } else if (uu->adr_type() == TypePtr::BOTTOM) {
              Node* n = uu->in(LoopNode::LoopBackControl);
              uint limit = igvn->C->live_nodes();
              uint i = 0;
              while (n != uu) {
                i++;
                assert(i < limit, "infinite loop");
                if (n->is_Proj()) {
                  n = n->in(0);
                } else if (n->is_SafePoint() || n->is_MemBar()) {
                  n = n->in(TypeFunc::Memory);
                } else if (n->is_Phi()) {
                  n = n->in(1);
                } else if (n->is_MergeMem()) {
                  n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
                } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
                  n = n->in(MemNode::Memory);
                } else {
                  n->dump();
                  ShouldNotReachHere();
                }
              }
            }
          }
        }
#endif
        if (phi == NULL) {
          // If an entire chain of stores was sunk, the
          // inner loop has no phi for that memory
          // slice, create one for the outer loop
          phi = PhiNode::make(this, first->in(MemNode::Memory), Type::MEMORY,
                              igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
          phi->set_req(LoopNode::LoopBackControl, last);
          phi = igvn->transform(phi);
          igvn->replace_input_of(first, MemNode::Memory, phi);
        } else {
          // Or fix the outer loop's memory state to include
          // that chain of stores.
          Node* be = phi->in(LoopNode::LoopBackControl);
          assert(!(be->is_Store() && old_new[be->_idx] != NULL), "store on the backedge + sunk stores: unsupported");
          if (be == first->in(MemNode::Memory)) {
            if (be == phi->in(LoopNode::LoopBackControl)) {
              igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
            } else {
              igvn->replace_input_of(be, MemNode::Memory, last);
            }
          } else {
#ifdef ASSERT
            if (be == phi->in(LoopNode::LoopBackControl)) {
              assert(phi->in(LoopNode::LoopBackControl) == last, "");
            } else {
              assert(be->in(MemNode::Memory) == last, "");
            }
#endif
          }
        }
      }
    }
  }

  if (iv_phi != NULL) {
    // Now adjust the inner loop's exit condition
    Node* limit = inner_cl->limit();
    Node* sub = NULL;
    if (stride > 0) {
      sub = igvn->transform(new SubINode(limit, iv_phi));
    } else {
      sub = igvn->transform(new SubINode(iv_phi, limit));
    }
    Node* min = igvn->transform(new MinINode(sub, igvn->intcon(scaled_iters)));
    Node* new_limit = NULL;
    if (stride > 0) {
      new_limit = igvn->transform(new AddINode(min, iv_phi));
    } else {
      new_limit = igvn->transform(new SubINode(iv_phi, min));
    }
    Node* inner_cmp = inner_cle->cmp_node();
    Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
    Node* outer_bol = inner_bol;
    // cmp node for inner loop may be shared
    inner_cmp = inner_cmp->clone();
    inner_cmp->set_req(2, new_limit);
    inner_bol = inner_bol->clone();
    inner_bol->set_req(1, igvn->transform(inner_cmp));
    igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
    // Set the outer loop's exit condition too
    igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
  } else {
    assert(false, "should be able to adjust outer loop");
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
  }
}

const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
  if (!in(0)) return Type::TOP;
  if (phase->type(in(0)) == Type::TOP)
    return Type::TOP;

  return TypeTuple::IFBOTH;
}

Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  return NULL;
}

//------------------------------filtered_type--------------------------------
// Return a type based on condition control flow
// A successful return will be a type that is restricted due
// to a series of dominating if-tests, such as:
//    if (i < 10) {
//       if (i > 0) {
//          here: "i" type is [1..10)
//       }
//    }
// or a control flow merge
//    if (i < 10) {
//      do {
//        phi( , ) -- at top of loop type is [min_int..10)
//        i = ?
1641 // } while ( i < 10) 1642 // 1643 const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { 1644 assert(n && n->bottom_type()->is_int(), "must be int"); 1645 const TypeInt* filtered_t = NULL; 1646 if (!n->is_Phi()) { 1647 assert(n_ctrl != NULL || n_ctrl == C->top(), "valid control"); 1648 filtered_t = filtered_type_from_dominators(n, n_ctrl); 1649 1650 } else { 1651 Node* phi = n->as_Phi(); 1652 Node* region = phi->in(0); 1653 assert(n_ctrl == NULL || n_ctrl == region, "ctrl parameter must be region"); 1654 if (region && region != C->top()) { 1655 for (uint i = 1; i < phi->req(); i++) { 1656 Node* val = phi->in(i); 1657 Node* use_c = region->in(i); 1658 const TypeInt* val_t = filtered_type_from_dominators(val, use_c); 1659 if (val_t != NULL) { 1660 if (filtered_t == NULL) { 1661 filtered_t = val_t; 1662 } else { 1663 filtered_t = filtered_t->meet(val_t)->is_int(); 1664 } 1665 } 1666 } 1667 } 1668 } 1669 const TypeInt* n_t = _igvn.type(n)->is_int(); 1670 if (filtered_t != NULL) { 1671 n_t = n_t->join(filtered_t)->is_int(); 1672 } 1673 return n_t; 1674 } 1675 1676 1677 //------------------------------filtered_type_from_dominators-------------------------------- 1678 // Return a possibly more restrictive type for val based on condition control flow of dominators 1679 const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) { 1680 if (val->is_Con()) { 1681 return val->bottom_type()->is_int(); 1682 } 1683 uint if_limit = 10; // Max number of dominating if's visited 1684 const TypeInt* rtn_t = NULL; 1685 1686 if (use_ctrl && use_ctrl != C->top()) { 1687 Node* val_ctrl = get_ctrl(val); 1688 uint val_dom_depth = dom_depth(val_ctrl); 1689 Node* pred = use_ctrl; 1690 uint if_cnt = 0; 1691 while (if_cnt < if_limit) { 1692 if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) { 1693 if_cnt++; 1694 const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred); 1695 if (if_t != NULL) { 1696 if (rtn_t == NULL) { 1697 rtn_t = if_t; 1698 } else { 1699 rtn_t = rtn_t->join(if_t)->is_int(); 1700 } 1701 } 1702 } 1703 pred = idom(pred); 1704 if (pred == NULL || pred == C->top()) { 1705 break; 1706 } 1707 // Stop if going beyond definition block of val 1708 if (dom_depth(pred) < val_dom_depth) { 1709 break; 1710 } 1711 } 1712 } 1713 return rtn_t; 1714 } 1715 1716 1717 //------------------------------dump_spec-------------------------------------- 1718 // Dump special per-node info 1719 #ifndef PRODUCT 1720 void CountedLoopEndNode::dump_spec(outputStream *st) const { 1721 if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) { 1722 BoolTest bt( test_trip()); // Added this for g++. 1723 1724 st->print("["); 1725 bt.dump_on(st); 1726 st->print("]"); 1727 } 1728 st->print(" "); 1729 IfNode::dump_spec(st); 1730 } 1731 #endif 1732 1733 //============================================================================= 1734 //------------------------------is_member-------------------------------------- 1735 // Is 'l' a member of 'this'? 1736 bool IdealLoopTree::is_member(const IdealLoopTree *l) const { 1737 while( l->_nest > _nest ) l = l->_parent; 1738 return l == this; 1739 } 1740 1741 //------------------------------set_nest--------------------------------------- 1742 // Set loop tree nesting depth. Accumulate _has_call bits. 
1743 int IdealLoopTree::set_nest( uint depth ) { 1744 _nest = depth; 1745 int bits = _has_call; 1746 if( _child ) bits |= _child->set_nest(depth+1); 1747 if( bits ) _has_call = 1; 1748 if( _next ) bits |= _next ->set_nest(depth ); 1749 return bits; 1750 } 1751 1752 //------------------------------split_fall_in---------------------------------- 1753 // Split out multiple fall-in edges from the loop header. Move them to a 1754 // private RegionNode before the loop. This becomes the loop landing pad. 1755 void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) { 1756 PhaseIterGVN &igvn = phase->_igvn; 1757 uint i; 1758 1759 // Make a new RegionNode to be the landing pad. 1760 Node *landing_pad = new RegionNode( fall_in_cnt+1 ); 1761 phase->set_loop(landing_pad,_parent); 1762 // Gather all the fall-in control paths into the landing pad 1763 uint icnt = fall_in_cnt; 1764 uint oreq = _head->req(); 1765 for( i = oreq-1; i>0; i-- ) 1766 if( !phase->is_member( this, _head->in(i) ) ) 1767 landing_pad->set_req(icnt--,_head->in(i)); 1768 1769 // Peel off PhiNode edges as well 1770 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { 1771 Node *oj = _head->fast_out(j); 1772 if( oj->is_Phi() ) { 1773 PhiNode* old_phi = oj->as_Phi(); 1774 assert( old_phi->region() == _head, "" ); 1775 igvn.hash_delete(old_phi); // Yank from hash before hacking edges 1776 Node *p = PhiNode::make_blank(landing_pad, old_phi); 1777 uint icnt = fall_in_cnt; 1778 for( i = oreq-1; i>0; i-- ) { 1779 if( !phase->is_member( this, _head->in(i) ) ) { 1780 p->init_req(icnt--, old_phi->in(i)); 1781 // Go ahead and clean out old edges from old phi 1782 old_phi->del_req(i); 1783 } 1784 } 1785 // Search for CSE's here, because ZKM.jar does a lot of 1786 // loop hackery and we need to be a little incremental 1787 // with the CSE to avoid O(N^2) node blow-up. 1788 Node *p2 = igvn.hash_find_insert(p); // Look for a CSE 1789 if( p2 ) { // Found CSE 1790 p->destruct(); // Recover useless new node 1791 p = p2; // Use old node 1792 } else { 1793 igvn.register_new_node_with_optimizer(p, old_phi); 1794 } 1795 // Make old Phi refer to new Phi. 1796 old_phi->add_req(p); 1797 // Check for the special case of making the old phi useless and 1798 // disappear it. In JavaGrande I have a case where this useless 1799 // Phi is the loop limit and prevents recognizing a CountedLoop 1800 // which in turn prevents removing an empty loop. 1801 Node *id_old_phi = igvn.apply_identity(old_phi); 1802 if( id_old_phi != old_phi ) { // Found a simple identity? 1803 // Note that I cannot call 'replace_node' here, because 1804 // that will yank the edge from old_phi to the Region and 1805 // I'm mid-iteration over the Region's uses. 
1806 for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) { 1807 Node* use = old_phi->last_out(i); 1808 igvn.rehash_node_delayed(use); 1809 uint uses_found = 0; 1810 for (uint j = 0; j < use->len(); j++) { 1811 if (use->in(j) == old_phi) { 1812 if (j < use->req()) use->set_req (j, id_old_phi); 1813 else use->set_prec(j, id_old_phi); 1814 uses_found++; 1815 } 1816 } 1817 i -= uses_found; // we deleted 1 or more copies of this edge 1818 } 1819 } 1820 igvn._worklist.push(old_phi); 1821 } 1822 } 1823 // Finally clean out the fall-in edges from the RegionNode 1824 for( i = oreq-1; i>0; i-- ) { 1825 if( !phase->is_member( this, _head->in(i) ) ) { 1826 _head->del_req(i); 1827 } 1828 } 1829 igvn.rehash_node_delayed(_head); 1830 // Transform landing pad 1831 igvn.register_new_node_with_optimizer(landing_pad, _head); 1832 // Insert landing pad into the header 1833 _head->add_req(landing_pad); 1834 } 1835 1836 //------------------------------split_outer_loop------------------------------- 1837 // Split out the outermost loop from this shared header. 1838 void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) { 1839 PhaseIterGVN &igvn = phase->_igvn; 1840 1841 // Find index of outermost loop; it should also be my tail. 1842 uint outer_idx = 1; 1843 while( _head->in(outer_idx) != _tail ) outer_idx++; 1844 1845 // Make a LoopNode for the outermost loop. 1846 Node *ctl = _head->in(LoopNode::EntryControl); 1847 Node *outer = new LoopNode( ctl, _head->in(outer_idx) ); 1848 outer = igvn.register_new_node_with_optimizer(outer, _head); 1849 phase->set_created_loop_node(); 1850 1851 // Outermost loop falls into '_head' loop 1852 _head->set_req(LoopNode::EntryControl, outer); 1853 _head->del_req(outer_idx); 1854 // Split all the Phis up between '_head' loop and 'outer' loop. 1855 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { 1856 Node *out = _head->fast_out(j); 1857 if( out->is_Phi() ) { 1858 PhiNode *old_phi = out->as_Phi(); 1859 assert( old_phi->region() == _head, "" ); 1860 Node *phi = PhiNode::make_blank(outer, old_phi); 1861 phi->init_req(LoopNode::EntryControl, old_phi->in(LoopNode::EntryControl)); 1862 phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx)); 1863 phi = igvn.register_new_node_with_optimizer(phi, old_phi); 1864 // Make old Phi point to new Phi on the fall-in path 1865 igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi); 1866 old_phi->del_req(outer_idx); 1867 } 1868 } 1869 1870 // Use the new loop head instead of the old shared one 1871 _head = outer; 1872 phase->set_loop(_head, this); 1873 } 1874 1875 //------------------------------fix_parent------------------------------------- 1876 static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) { 1877 loop->_parent = parent; 1878 if( loop->_child ) fix_parent( loop->_child, loop ); 1879 if( loop->_next ) fix_parent( loop->_next , parent ); 1880 } 1881 1882 //------------------------------estimate_path_freq----------------------------- 1883 static float estimate_path_freq( Node *n ) { 1884 // Try to extract some path frequency info 1885 IfNode *iff; 1886 for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests 1887 uint nop = n->Opcode(); 1888 if( nop == Op_SafePoint ) { // Skip any safepoint 1889 n = n->in(0); 1890 continue; 1891 } 1892 if( nop == Op_CatchProj ) { // Get count from a prior call 1893 // Assume call does not always throw exceptions: means the call-site 1894 // count is also the frequency of the fall-through path. 
1895 assert( n->is_CatchProj(), "" ); 1896 if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index ) 1897 return 0.0f; // Assume call exception path is rare 1898 Node *call = n->in(0)->in(0)->in(0); 1899 assert( call->is_Call(), "expect a call here" ); 1900 const JVMState *jvms = ((CallNode*)call)->jvms(); 1901 ciMethodData* methodData = jvms->method()->method_data(); 1902 if (!methodData->is_mature()) return 0.0f; // No call-site data 1903 ciProfileData* data = methodData->bci_to_data(jvms->bci()); 1904 if ((data == NULL) || !data->is_CounterData()) { 1905 // no call profile available, try call's control input 1906 n = n->in(0); 1907 continue; 1908 } 1909 return data->as_CounterData()->count()/FreqCountInvocations; 1910 } 1911 // See if there's a gating IF test 1912 Node *n_c = n->in(0); 1913 if( !n_c->is_If() ) break; // No estimate available 1914 iff = n_c->as_If(); 1915 if( iff->_fcnt != COUNT_UNKNOWN ) // Have a valid count? 1916 // Compute how much count comes on this path 1917 return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt; 1918 // Have no count info. Skip dull uncommon-trap like branches. 1919 if( (nop == Op_IfTrue && iff->_prob < PROB_LIKELY_MAG(5)) || 1920 (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) ) 1921 break; 1922 // Skip through never-taken branch; look for a real loop exit. 1923 n = iff->in(0); 1924 } 1925 return 0.0f; // No estimate available 1926 } 1927 1928 //------------------------------merge_many_backedges--------------------------- 1929 // Merge all the backedges from the shared header into a private Region. 1930 // Feed that region as the one backedge to this loop. 1931 void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) { 1932 uint i; 1933 1934 // Scan for the top 2 hottest backedges 1935 float hotcnt = 0.0f; 1936 float warmcnt = 0.0f; 1937 uint hot_idx = 0; 1938 // Loop starts at 2 because slot 1 is the fall-in path 1939 for( i = 2; i < _head->req(); i++ ) { 1940 float cnt = estimate_path_freq(_head->in(i)); 1941 if( cnt > hotcnt ) { // Grab hottest path 1942 warmcnt = hotcnt; 1943 hotcnt = cnt; 1944 hot_idx = i; 1945 } else if( cnt > warmcnt ) { // And 2nd hottest path 1946 warmcnt = cnt; 1947 } 1948 } 1949 1950 // See if the hottest backedge is worthy of being an inner loop 1951 // by being much hotter than the next hottest backedge. 1952 if( hotcnt <= 0.0001 || 1953 hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge 1954 1955 // Peel out the backedges into a private merge point; peel 1956 // them all except optionally hot_idx. 
1957 PhaseIterGVN &igvn = phase->_igvn; 1958 1959 Node *hot_tail = NULL; 1960 // Make a Region for the merge point 1961 Node *r = new RegionNode(1); 1962 for( i = 2; i < _head->req(); i++ ) { 1963 if( i != hot_idx ) 1964 r->add_req( _head->in(i) ); 1965 else hot_tail = _head->in(i); 1966 } 1967 igvn.register_new_node_with_optimizer(r, _head); 1968 // Plug region into end of loop _head, followed by hot_tail 1969 while( _head->req() > 3 ) _head->del_req( _head->req()-1 ); 1970 igvn.replace_input_of(_head, 2, r); 1971 if( hot_idx ) _head->add_req(hot_tail); 1972 1973 // Split all the Phis up between '_head' loop and the Region 'r' 1974 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { 1975 Node *out = _head->fast_out(j); 1976 if( out->is_Phi() ) { 1977 PhiNode* n = out->as_Phi(); 1978 igvn.hash_delete(n); // Delete from hash before hacking edges 1979 Node *hot_phi = NULL; 1980 Node *phi = new PhiNode(r, n->type(), n->adr_type()); 1981 // Check all inputs for the ones to peel out 1982 uint j = 1; 1983 for( uint i = 2; i < n->req(); i++ ) { 1984 if( i != hot_idx ) 1985 phi->set_req( j++, n->in(i) ); 1986 else hot_phi = n->in(i); 1987 } 1988 // Register the phi but do not transform until whole place transforms 1989 igvn.register_new_node_with_optimizer(phi, n); 1990 // Add the merge phi to the old Phi 1991 while( n->req() > 3 ) n->del_req( n->req()-1 ); 1992 igvn.replace_input_of(n, 2, phi); 1993 if( hot_idx ) n->add_req(hot_phi); 1994 } 1995 } 1996 1997 1998 // Insert a new IdealLoopTree inserted below me. Turn it into a clone 1999 // of self loop tree. Turn self into a loop headed by _head and with 2000 // tail being the new merge point. 2001 IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail ); 2002 phase->set_loop(_tail,ilt); // Adjust tail 2003 _tail = r; // Self's tail is new merge point 2004 phase->set_loop(r,this); 2005 ilt->_child = _child; // New guy has my children 2006 _child = ilt; // Self has new guy as only child 2007 ilt->_parent = this; // new guy has self for parent 2008 ilt->_nest = _nest; // Same nesting depth (for now) 2009 2010 // Starting with 'ilt', look for child loop trees using the same shared 2011 // header. Flatten these out; they will no longer be loops in the end. 2012 IdealLoopTree **pilt = &_child; 2013 while( ilt ) { 2014 if( ilt->_head == _head ) { 2015 uint i; 2016 for( i = 2; i < _head->req(); i++ ) 2017 if( _head->in(i) == ilt->_tail ) 2018 break; // Still a loop 2019 if( i == _head->req() ) { // No longer a loop 2020 // Flatten ilt. Hang ilt's "_next" list from the end of 2021 // ilt's '_child' list. Move the ilt's _child up to replace ilt. 2022 IdealLoopTree **cp = &ilt->_child; 2023 while( *cp ) cp = &(*cp)->_next; // Find end of child list 2024 *cp = ilt->_next; // Hang next list at end of child list 2025 *pilt = ilt->_child; // Move child up to replace ilt 2026 ilt->_head = NULL; // Flag as a loop UNIONED into parent 2027 ilt = ilt->_child; // Repeat using new ilt 2028 continue; // do not advance over ilt->_child 2029 } 2030 assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" ); 2031 phase->set_loop(_head,ilt); 2032 } 2033 pilt = &ilt->_child; // Advance to next 2034 ilt = *pilt; 2035 } 2036 2037 if( _child ) fix_parent( _child, this ); 2038 } 2039 2040 //------------------------------beautify_loops--------------------------------- 2041 // Split shared headers and insert loop landing pads. 2042 // Insert a LoopNode to replace the RegionNode. 
2043 // Return TRUE if loop tree is structurally changed. 2044 bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) { 2045 bool result = false; 2046 // Cache parts in locals for easy access 2047 PhaseIterGVN &igvn = phase->_igvn; 2048 2049 igvn.hash_delete(_head); // Yank from hash before hacking edges 2050 2051 // Check for multiple fall-in paths. Peel off a landing pad if need be. 2052 int fall_in_cnt = 0; 2053 for( uint i = 1; i < _head->req(); i++ ) 2054 if( !phase->is_member( this, _head->in(i) ) ) 2055 fall_in_cnt++; 2056 assert( fall_in_cnt, "at least 1 fall-in path" ); 2057 if( fall_in_cnt > 1 ) // Need a loop landing pad to merge fall-ins 2058 split_fall_in( phase, fall_in_cnt ); 2059 2060 // Swap inputs to the _head and all Phis to move the fall-in edge to 2061 // the left. 2062 fall_in_cnt = 1; 2063 while( phase->is_member( this, _head->in(fall_in_cnt) ) ) 2064 fall_in_cnt++; 2065 if( fall_in_cnt > 1 ) { 2066 // Since I am just swapping inputs I do not need to update def-use info 2067 Node *tmp = _head->in(1); 2068 igvn.rehash_node_delayed(_head); 2069 _head->set_req( 1, _head->in(fall_in_cnt) ); 2070 _head->set_req( fall_in_cnt, tmp ); 2071 // Swap also all Phis 2072 for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) { 2073 Node* phi = _head->fast_out(i); 2074 if( phi->is_Phi() ) { 2075 igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges 2076 tmp = phi->in(1); 2077 phi->set_req( 1, phi->in(fall_in_cnt) ); 2078 phi->set_req( fall_in_cnt, tmp ); 2079 } 2080 } 2081 } 2082 assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" ); 2083 assert( phase->is_member( this, _head->in(2) ), "right edge is loop" ); 2084 2085 // If I am a shared header (multiple backedges), peel off the many 2086 // backedges into a private merge point and use the merge point as 2087 // the one true backedge. 2088 if( _head->req() > 3 ) { 2089 // Merge the many backedges into a single backedge but leave 2090 // the hottest backedge as separate edge for the following peel. 2091 merge_many_backedges( phase ); 2092 result = true; 2093 } 2094 2095 // If I still have a separate hot backedge, peel off my own loop. 2096 // I had better be the outermost loop. 2097 if (_head->req() > 3 && !_irreducible) { 2098 split_outer_loop( phase ); 2099 result = true; 2100 2101 } else if (!_head->is_Loop() && !_irreducible) { 2102 // Make a new LoopNode to replace the old loop head 2103 Node *l = new LoopNode( _head->in(1), _head->in(2) ); 2104 l = igvn.register_new_node_with_optimizer(l, _head); 2105 phase->set_created_loop_node(); 2106 // Go ahead and replace _head 2107 phase->_igvn.replace_node( _head, l ); 2108 _head = l; 2109 phase->set_loop(_head, this); 2110 } 2111 2112 // Now recursively beautify nested loops 2113 if( _child ) result |= _child->beautify_loops( phase ); 2114 if( _next ) result |= _next ->beautify_loops( phase ); 2115 return result; 2116 } 2117 2118 //------------------------------allpaths_check_safepts---------------------------- 2119 // Allpaths backwards scan from loop tail, terminating each path at first safepoint 2120 // encountered. Helper for check_safepts.
2121 void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) { 2122 assert(stack.size() == 0, "empty stack"); 2123 stack.push(_tail); 2124 visited.clear(); 2125 visited.set(_tail->_idx); 2126 while (stack.size() > 0) { 2127 Node* n = stack.pop(); 2128 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { 2129 // Terminate this path 2130 } else if (n->Opcode() == Op_SafePoint) { 2131 if (_phase->get_loop(n) != this) { 2132 if (_required_safept == NULL) _required_safept = new Node_List(); 2133 _required_safept->push(n); // save the one closest to the tail 2134 } 2135 // Terminate this path 2136 } else { 2137 uint start = n->is_Region() ? 1 : 0; 2138 uint end = n->is_Region() && !n->is_Loop() ? n->req() : start + 1; 2139 for (uint i = start; i < end; i++) { 2140 Node* in = n->in(i); 2141 assert(in->is_CFG(), "must be"); 2142 if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) { 2143 stack.push(in); 2144 } 2145 } 2146 } 2147 } 2148 } 2149 2150 //------------------------------check_safepts---------------------------- 2151 // Given dominators, try to find loops with calls that must always be 2152 // executed (call dominates loop tail). These loops do not need non-call 2153 // safepoints (ncsfpt). 2154 // 2155 // A complication is that a safepoint in an inner loop may be needed 2156 // by an outer loop. In the following, the inner loop sees it has a 2157 // call (block 3) on every path from the head (block 2) to the 2158 // backedge (arc 3->2). So it deletes the ncsfpt (non-call safepoint) 2159 // in block 2, _but_ this leaves the outer loop without a safepoint. 2160 // 2161 // entry 0 2162 // | 2163 // v 2164 // outer 1,2 +->1 2165 // | | 2166 // | v 2167 // | 2<---+ ncsfpt in 2 2168 // |_/|\ | 2169 // | v | 2170 // inner 2,3 / 3 | call in 3 2171 // / | | 2172 // v +--+ 2173 // exit 4 2174 // 2175 // 2176 // For each loop, this method creates a list (_required_safept) of the ncsfpt 2177 // nodes that must be protected. When an ncsfpt may be deleted, it 2178 // is first looked for in the lists for the outer loops of the current loop. 2179 // 2180 // The insights into the problem: 2181 // A) counted loops are okay 2182 // B) innermost loops are okay (only an inner loop can delete 2183 // an ncsfpt needed by an outer loop) 2184 // C) a loop is immune from an inner loop deleting a safepoint 2185 // if the loop has a call on the idom-path 2186 // D) a loop is also immune if it has an ncsfpt (non-call safepoint) on the 2187 // idom-path that is not in a nested loop 2188 // E) otherwise, an ncsfpt on the idom-path that is nested in an inner 2189 // loop needs to be prevented from deletion by an inner loop 2190 // 2191 // There are two analyses: 2192 // 1) The first, and cheaper one, scans the loop body from 2193 // tail to head following the idom (immediate dominator) 2194 // chain, looking for the cases (C,D,E) above. 2195 // Since inner loops are scanned before outer loops, there is summary 2196 // information about inner loops. Inner loops can be skipped over 2197 // when the tail of an inner loop is encountered. 2198 // 2199 // 2) The second, invoked if the first fails to find a call or ncsfpt on 2200 // the idom path (which is rare), scans all predecessor control paths 2201 // from the tail to the head, terminating a path when a call or sfpt 2202 // is encountered, to find the ncsfpt's that are closest to the tail.
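//
// As a rough sketch only (paraphrasing the code below, not a separate
// implementation), analysis 1 is an idom walk of roughly this shape:
//
//   for (Node* n = tail(); n != _head; n = idom(n)) {
//     call that guarantees a safepoint   -> case C: done, no ncsfpt needed
//     ncsfpt belonging to this loop      -> case D: done, keep it
//     ncsfpt belonging to a nested loop  -> case E: remember the one closest to the tail
//     tail of an inner loop              -> reuse that loop's summary, jump to its head
//   }
//
// In the diagram above, the outer loop [1,2] finds neither a call nor a
// local ncsfpt on its idom-path; the ncsfpt in block 2 belongs to the inner
// loop [2,3], so it is recorded on the outer loop's _required_safept list
// and is thereby protected when the inner loop prunes its safepoints.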
2203 // 2204 void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { 2205 // Bottom up traversal 2206 IdealLoopTree* ch = _child; 2207 if (_child) _child->check_safepts(visited, stack); 2208 if (_next) _next ->check_safepts(visited, stack); 2209 2210 if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) { 2211 bool has_call = false; // call on dom-path 2212 bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth 2213 Node* nonlocal_ncsfpt = NULL; // ncsfpt on dom-path at a deeper depth 2214 // Scan the dom-path nodes from tail to head 2215 for (Node* n = tail(); n != _head; n = _phase->idom(n)) { 2216 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { 2217 has_call = true; 2218 _has_sfpt = 1; // Then no need for a safept! 2219 break; 2220 } else if (n->Opcode() == Op_SafePoint) { 2221 if (_phase->get_loop(n) == this) { 2222 has_local_ncsfpt = true; 2223 break; 2224 } 2225 if (nonlocal_ncsfpt == NULL) { 2226 nonlocal_ncsfpt = n; // save the one closest to the tail 2227 } 2228 } else { 2229 IdealLoopTree* nlpt = _phase->get_loop(n); 2230 if (this != nlpt) { 2231 // If at an inner loop tail, see if the inner loop has already 2232 // recorded seeing a call on the dom-path (and stop.) If not, 2233 // jump to the head of the inner loop. 2234 assert(is_member(nlpt), "nested loop"); 2235 Node* tail = nlpt->_tail; 2236 if (tail->in(0)->is_If()) tail = tail->in(0); 2237 if (n == tail) { 2238 // If inner loop has call on dom-path, so does outer loop 2239 if (nlpt->_has_sfpt) { 2240 has_call = true; 2241 _has_sfpt = 1; 2242 break; 2243 } 2244 // Skip to head of inner loop 2245 assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head"); 2246 n = nlpt->_head; 2247 } 2248 } 2249 } 2250 } 2251 // Record safept's that this loop needs preserved when an 2252 // inner loop attempts to delete it's safepoints. 2253 if (_child != NULL && !has_call && !has_local_ncsfpt) { 2254 if (nonlocal_ncsfpt != NULL) { 2255 if (_required_safept == NULL) _required_safept = new Node_List(); 2256 _required_safept->push(nonlocal_ncsfpt); 2257 } else { 2258 // Failed to find a suitable safept on the dom-path. Now use 2259 // an all paths walk from tail to head, looking for safepoints to preserve. 2260 allpaths_check_safepts(visited, stack); 2261 } 2262 } 2263 } 2264 } 2265 2266 //---------------------------is_deleteable_safept---------------------------- 2267 // Is safept not required by an outer loop? 2268 bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) { 2269 assert(sfpt->Opcode() == Op_SafePoint, ""); 2270 IdealLoopTree* lp = get_loop(sfpt)->_parent; 2271 while (lp != NULL) { 2272 Node_List* sfpts = lp->_required_safept; 2273 if (sfpts != NULL) { 2274 for (uint i = 0; i < sfpts->size(); i++) { 2275 if (sfpt == sfpts->at(i)) 2276 return false; 2277 } 2278 } 2279 lp = lp->_parent; 2280 } 2281 return true; 2282 } 2283 2284 //---------------------------replace_parallel_iv------------------------------- 2285 // Replace parallel induction variable (parallel to trip counter) 2286 void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { 2287 assert(loop->_head->is_CountedLoop(), ""); 2288 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 2289 if (!cl->is_valid_counted_loop()) 2290 return; // skip malformed counted loop 2291 Node *incr = cl->incr(); 2292 if (incr == NULL) 2293 return; // Dead loop? 
2294 Node *init = cl->init_trip(); 2295 Node *phi = cl->phi(); 2296 int stride_con = cl->stride_con(); 2297 2298 // Visit all children, looking for Phis 2299 for (DUIterator i = cl->outs(); cl->has_out(i); i++) { 2300 Node *out = cl->out(i); 2301 // Look for other phis (secondary IVs). Skip dead ones 2302 if (!out->is_Phi() || out == phi || !has_node(out)) 2303 continue; 2304 PhiNode* phi2 = out->as_Phi(); 2305 Node *incr2 = phi2->in( LoopNode::LoopBackControl ); 2306 // Look for induction variables of the form: X += constant 2307 if (phi2->region() != loop->_head || 2308 incr2->req() != 3 || 2309 incr2->in(1) != phi2 || 2310 incr2 == incr || 2311 incr2->Opcode() != Op_AddI || 2312 !incr2->in(2)->is_Con()) 2313 continue; 2314 2315 // Check for parallel induction variable (parallel to trip counter) 2316 // via an affine function. In particular, count-down loops with 2317 // count-up array indices are common. We only RCE references off 2318 // the trip-counter, so we need to convert all these to trip-counter 2319 // expressions. 2320 Node *init2 = phi2->in( LoopNode::EntryControl ); 2321 int stride_con2 = incr2->in(2)->get_int(); 2322 2323 // The ratio of the two strides cannot be represented as an int 2324 // if stride_con2 is min_int and stride_con is -1. 2325 if (stride_con2 == min_jint && stride_con == -1) { 2326 continue; 2327 } 2328 2329 // The general case here gets a little tricky. We want to find the 2330 // GCD of all possible parallel IV's and make a new IV using this 2331 // GCD for the loop. Then all possible IVs are simple multiples of 2332 // the GCD. In practice, this will cover very few extra loops. 2333 // Instead we require 'stride_con2' to be a multiple of 'stride_con', 2334 // where +/-1 is the common case, but other integer multiples are 2335 // also easy to handle. 2336 int ratio_con = stride_con2/stride_con; 2337 2338 if ((ratio_con * stride_con) == stride_con2) { // Check for exact 2339 #ifndef PRODUCT 2340 if (TraceLoopOpts) { 2341 tty->print("Parallel IV: %d ", phi2->_idx); 2342 loop->dump_head(); 2343 } 2344 #endif 2345 // Convert to using the trip counter. The parallel induction 2346 // variable differs from the trip counter by a loop-invariant 2347 // amount, the difference between their respective initial values. 2348 // It is scaled by the 'ratio_con'. 2349 Node* ratio = _igvn.intcon(ratio_con); 2350 set_ctrl(ratio, C->root()); 2351 Node* ratio_init = new MulINode(init, ratio); 2352 _igvn.register_new_node_with_optimizer(ratio_init, init); 2353 set_early_ctrl(ratio_init); 2354 Node* diff = new SubINode(init2, ratio_init); 2355 _igvn.register_new_node_with_optimizer(diff, init2); 2356 set_early_ctrl(diff); 2357 Node* ratio_idx = new MulINode(phi, ratio); 2358 _igvn.register_new_node_with_optimizer(ratio_idx, phi); 2359 set_ctrl(ratio_idx, cl); 2360 Node* add = new AddINode(ratio_idx, diff); 2361 _igvn.register_new_node_with_optimizer(add); 2362 set_ctrl(add, cl); 2363 _igvn.replace_node( phi2, add ); 2364 // Sometimes an induction variable is unused 2365 if (add->outcnt() == 0) { 2366 _igvn.remove_dead_node(add); 2367 } 2368 --i; // deleted this phi; rescan starting with next position 2369 continue; 2370 } 2371 } 2372 } 2373 2374 void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) { 2375 Node* keep = NULL; 2376 if (keep_one) { 2377 // Look for a safepoint on the idom-path. 
2378 for (Node* i = tail(); i != _head; i = phase->idom(i)) { 2379 if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) { 2380 keep = i; 2381 break; // Found one 2382 } 2383 } 2384 } 2385 2386 // Don't remove any safepoints if it is requested to keep a single safepoint and 2387 // no safepoint was found on idom-path. It is not safe to remove any safepoint 2388 // in this case since there's no safepoint dominating all paths in the loop body. 2389 bool prune = !keep_one || keep != NULL; 2390 2391 // Delete other safepoints in this loop. 2392 Node_List* sfpts = _safepts; 2393 if (prune && sfpts != NULL) { 2394 assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint"); 2395 for (uint i = 0; i < sfpts->size(); i++) { 2396 Node* n = sfpts->at(i); 2397 assert(phase->get_loop(n) == this, ""); 2398 if (n != keep && phase->is_deleteable_safept(n)) { 2399 phase->lazy_replace(n, n->in(TypeFunc::Control)); 2400 } 2401 } 2402 } 2403 } 2404 2405 //------------------------------counted_loop----------------------------------- 2406 // Convert to counted loops where possible 2407 void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { 2408 2409 // For grins, set the inner-loop flag here 2410 if (!_child) { 2411 if (_head->is_Loop()) _head->as_Loop()->set_inner_loop(); 2412 } 2413 2414 IdealLoopTree* loop = this; 2415 if (_head->is_CountedLoop() || 2416 phase->is_counted_loop(_head, loop)) { 2417 2418 if (LoopStripMiningIter == 0 || (LoopStripMiningIter > 1 && _child == NULL)) { 2419 // Indicate we do not need a safepoint here 2420 _has_sfpt = 1; 2421 } 2422 2423 // Remove safepoints 2424 bool keep_one_sfpt = !(_has_call || _has_sfpt); 2425 remove_safepoints(phase, keep_one_sfpt); 2426 2427 // Look for induction variables 2428 phase->replace_parallel_iv(this); 2429 2430 } else if (_parent != NULL && !_irreducible) { 2431 // Not a counted loop. Keep one safepoint. 2432 bool keep_one_sfpt = true; 2433 remove_safepoints(phase, keep_one_sfpt); 2434 } 2435 2436 // Recursively 2437 assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?"); 2438 assert(loop->_child != this || (loop->_child->_child == NULL && loop->_child->_next == NULL), "would miss some loops"); 2439 if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase); 2440 if (loop->_next) loop->_next ->counted_loop(phase); 2441 } 2442 2443 2444 // The Estimated Loop Clone Size: 2445 // CloneFactor * (~112% * BodySize + BC) + CC + FanOutTerm, 2446 // where BC and CC are totally ad-hoc/magic "body" and "clone" constants, 2447 // respectively, used to ensure that the node usage estimates made are on the 2448 // safe side, for the most part. The FanOutTerm is an attempt to estimate the 2449 // possible additional/excessive nodes generated due to data and control flow 2450 // merging, for edges reaching outside the loop. 
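//
// Worked example (illustrative only, using the constants in the body below):
// for BodySize = _body.size() = 40 and CloneFactor = 2, the slack term is
// (40 + 7) / 8 = 5, so sz = 45 (~112% of the body) and the estimate is
// 2 * (45 + 13) + 17 = 133 nodes, plus whatever est_loop_flow_merge_sz()
// adds for control and data edges that leave the loop.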
2451 uint IdealLoopTree::est_loop_clone_sz(uint factor) const { 2452 2453 precond(0 < factor && factor < 16); 2454 2455 uint const bc = 13; 2456 uint const cc = 17; 2457 uint const sz = _body.size() + (_body.size() + 7) / 8; 2458 uint estimate = factor * (sz + bc) + cc; 2459 2460 assert((estimate - cc) / factor == sz + bc, "overflow"); 2461 2462 return estimate + est_loop_flow_merge_sz(); 2463 } 2464 2465 // The Estimated Loop (full-) Unroll Size: 2466 // UnrollFactor * (~106% * BodySize) + CC + FanOutTerm, 2467 // where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that 2468 // node usage estimates made are on the safe side, for the most part. This is 2469 // a "light" version of the loop clone size calculation (above), based on the 2470 // assumption that most of the loop-construct overhead will be unraveled when 2471 // (fully) unrolled. Defined for unroll factors larger or equal to one (>=1), 2472 // including an overflow check and returning UINT_MAX in case of an overflow. 2473 uint IdealLoopTree::est_loop_unroll_sz(uint factor) const { 2474 2475 precond(factor > 0); 2476 2477 // Take into account that after unroll conjoined heads and tails will fold. 2478 uint const b0 = _body.size() - EMPTY_LOOP_SIZE; 2479 uint const cc = 7; 2480 uint const sz = b0 + (b0 + 15) / 16; 2481 uint estimate = factor * sz + cc; 2482 2483 if ((estimate - cc) / factor != sz) { 2484 return UINT_MAX; 2485 } 2486 2487 return estimate + est_loop_flow_merge_sz(); 2488 } 2489 2490 // Estimate the growth effect (in nodes) of merging control and data flow when 2491 // cloning a loop body, based on the amount of control and data flow reaching 2492 // outside of the (current) loop body. 2493 uint IdealLoopTree::est_loop_flow_merge_sz() const { 2494 2495 uint ctrl_edge_out_cnt = 0; 2496 uint data_edge_out_cnt = 0; 2497 2498 for (uint i = 0; i < _body.size(); i++) { 2499 Node* node = _body.at(i); 2500 uint outcnt = node->outcnt(); 2501 2502 for (uint k = 0; k < outcnt; k++) { 2503 Node* out = node->raw_out(k); 2504 2505 if (out->is_CFG()) { 2506 if (!is_member(_phase->get_loop(out))) { 2507 ctrl_edge_out_cnt++; 2508 } 2509 } else { 2510 Node* ctrl = _phase->get_ctrl(out); 2511 assert(ctrl->is_CFG(), "must be"); 2512 if (!is_member(_phase->get_loop(ctrl))) { 2513 data_edge_out_cnt++; 2514 } 2515 } 2516 } 2517 } 2518 // Use data and control count (x2.0) in estimate iff both are > 0. This is 2519 // a rather pessimistic estimate for the most part, in particular for some 2520 // complex loops, but still not enough to capture all loops. 2521 if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) { 2522 return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt); 2523 } 2524 return 0; 2525 } 2526 2527 #ifndef PRODUCT 2528 //------------------------------dump_head-------------------------------------- 2529 // Dump 1 liner for loop header info 2530 void IdealLoopTree::dump_head() const { 2531 tty->sp(2 * _nest); 2532 tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx); 2533 if (_irreducible) tty->print(" IRREDUCIBLE"); 2534 Node* entry = _head->is_Loop() ? 
_head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl); 2535 Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); 2536 if (predicate != NULL ) { 2537 tty->print(" limit_check"); 2538 entry = PhaseIdealLoop::skip_loop_predicates(entry); 2539 } 2540 if (UseLoopPredicate) { 2541 entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); 2542 if (entry != NULL) { 2543 tty->print(" predicated"); 2544 entry = PhaseIdealLoop::skip_loop_predicates(entry); 2545 } 2546 } 2547 if (UseProfiledLoopPredicate) { 2548 entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); 2549 if (entry != NULL) { 2550 tty->print(" profile_predicated"); 2551 } 2552 } 2553 if (_head->is_CountedLoop()) { 2554 CountedLoopNode *cl = _head->as_CountedLoop(); 2555 tty->print(" counted"); 2556 2557 Node* init_n = cl->init_trip(); 2558 if (init_n != NULL && init_n->is_Con()) 2559 tty->print(" [%d,", cl->init_trip()->get_int()); 2560 else 2561 tty->print(" [int,"); 2562 Node* limit_n = cl->limit(); 2563 if (limit_n != NULL && limit_n->is_Con()) 2564 tty->print("%d),", cl->limit()->get_int()); 2565 else 2566 tty->print("int),"); 2567 int stride_con = cl->stride_con(); 2568 if (stride_con > 0) tty->print("+"); 2569 tty->print("%d", stride_con); 2570 2571 tty->print(" (%0.f iters) ", cl->profile_trip_cnt()); 2572 2573 if (cl->is_pre_loop ()) tty->print(" pre" ); 2574 if (cl->is_main_loop()) tty->print(" main"); 2575 if (cl->is_post_loop()) tty->print(" post"); 2576 if (cl->is_vectorized_loop()) tty->print(" vector"); 2577 if (cl->range_checks_present()) tty->print(" rc "); 2578 if (cl->is_multiversioned()) tty->print(" multi "); 2579 } 2580 if (_has_call) tty->print(" has_call"); 2581 if (_has_sfpt) tty->print(" has_sfpt"); 2582 if (_rce_candidate) tty->print(" rce"); 2583 if (_safepts != NULL && _safepts->size() > 0) { 2584 tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }"); 2585 } 2586 if (_required_safept != NULL && _required_safept->size() > 0) { 2587 tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }"); 2588 } 2589 if (Verbose) { 2590 tty->print(" body={"); _body.dump_simple(); tty->print(" }"); 2591 } 2592 if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) { 2593 tty->print(" strip_mined"); 2594 } 2595 tty->cr(); 2596 } 2597 2598 //------------------------------dump------------------------------------------- 2599 // Dump loops by loop tree 2600 void IdealLoopTree::dump() const { 2601 dump_head(); 2602 if (_child) _child->dump(); 2603 if (_next) _next ->dump(); 2604 } 2605 2606 #endif 2607 2608 static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) { 2609 if (loop == root) { 2610 if (loop->_child != NULL) { 2611 log->begin_head("loop_tree"); 2612 log->end_head(); 2613 if( loop->_child ) log_loop_tree(root, loop->_child, log); 2614 log->tail("loop_tree"); 2615 assert(loop->_next == NULL, "what?"); 2616 } 2617 } else { 2618 Node* head = loop->_head; 2619 log->begin_head("loop"); 2620 log->print(" idx='%d' ", head->_idx); 2621 if (loop->_irreducible) log->print("irreducible='1' "); 2622 if (head->is_Loop()) { 2623 if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' "); 2624 if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' "); 2625 } 2626 if (head->is_CountedLoop()) { 2627 CountedLoopNode* cl = head->as_CountedLoop(); 2628 if 
(cl->is_pre_loop()) log->print("pre_loop='%d' ", cl->main_idx()); 2629 if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx); 2630 if (cl->is_post_loop()) log->print("post_loop='%d' ", cl->main_idx()); 2631 } 2632 log->end_head(); 2633 if( loop->_child ) log_loop_tree(root, loop->_child, log); 2634 log->tail("loop"); 2635 if( loop->_next ) log_loop_tree(root, loop->_next, log); 2636 } 2637 } 2638 2639 //---------------------collect_potentially_useful_predicates----------------------- 2640 // Helper function to collect potentially useful predicates to prevent them from 2641 // being eliminated by PhaseIdealLoop::eliminate_useless_predicates 2642 void PhaseIdealLoop::collect_potentially_useful_predicates( 2643 IdealLoopTree * loop, Unique_Node_List &useful_predicates) { 2644 if (loop->_child) { // child 2645 collect_potentially_useful_predicates(loop->_child, useful_predicates); 2646 } 2647 2648 // self (only loops that we can apply loop predication may use their predicates) 2649 if (loop->_head->is_Loop() && 2650 !loop->_irreducible && 2651 !loop->tail()->is_top()) { 2652 LoopNode* lpn = loop->_head->as_Loop(); 2653 Node* entry = lpn->in(LoopNode::EntryControl); 2654 Node* predicate_proj = find_predicate(entry); // loop_limit_check first 2655 if (predicate_proj != NULL ) { // right pattern that can be used by loop predication 2656 assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be"); 2657 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one 2658 entry = skip_loop_predicates(entry); 2659 } 2660 predicate_proj = find_predicate(entry); // Predicate 2661 if (predicate_proj != NULL ) { 2662 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one 2663 entry = skip_loop_predicates(entry); 2664 } 2665 if (UseProfiledLoopPredicate) { 2666 predicate_proj = find_predicate(entry); // Predicate 2667 if (predicate_proj != NULL ) { 2668 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one 2669 } 2670 } 2671 } 2672 2673 if (loop->_next) { // sibling 2674 collect_potentially_useful_predicates(loop->_next, useful_predicates); 2675 } 2676 } 2677 2678 //------------------------eliminate_useless_predicates----------------------------- 2679 // Eliminate all inserted predicates that could not be used by loop predication. 2680 // Note: this will also eliminate the loop limit check predicate, since it also uses 2681 // an Opaque1 node (see Parse::add_predicate()). 2682 void PhaseIdealLoop::eliminate_useless_predicates() { 2683 if (C->predicate_count() == 0) 2684 return; // no predicate left 2685 2686 Unique_Node_List useful_predicates; // to store useful predicates 2687 if (C->has_loops()) { 2688 collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates); 2689 } 2690 2691 for (int i = C->predicate_count(); i > 0; i--) { 2692 Node * n = C->predicate_opaque1_node(i-1); 2693 assert(n->Opcode() == Op_Opaque1, "must be"); 2694 if (!useful_predicates.member(n)) { // not in the useful list 2695 _igvn.replace_node(n, n->in(1)); 2696 } 2697 } 2698 } 2699 2700 //------------------------process_expensive_nodes----------------------------- 2701 // Expensive nodes have their control input set to prevent the GVN 2702 // from commoning them, which could force the resulting node onto 2703 // a more frequent path. Use CFG information here to change the 2704 // control inputs so that some expensive nodes can be commoned while 2705 // still not being executed more frequently.
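//
// Illustrative sketch of the effect (not additional code): two identical
// expensive nodes pinned under the two projections of the same If,
//
//          If
//         /  \
//     proj1  proj2
//       |      |
//      exp1   exp2     (same expensive computation on both branches)
//
// are both re-pinned to idom(If). With equal control inputs, igvn can then
// common them into a single node, and since every execution already took
// one of the two branches, that node does not run more often than before.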
2706 bool PhaseIdealLoop::process_expensive_nodes() { 2707 assert(OptimizeExpensiveOps, "optimization off?"); 2708 2709 // Sort nodes to bring similar nodes together 2710 C->sort_expensive_nodes(); 2711 2712 bool progress = false; 2713 2714 for (int i = 0; i < C->expensive_count(); ) { 2715 Node* n = C->expensive_node(i); 2716 int start = i; 2717 // Find nodes similar to n 2718 i++; 2719 for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++); 2720 int end = i; 2721 // And compare them two by two 2722 for (int j = start; j < end; j++) { 2723 Node* n1 = C->expensive_node(j); 2724 if (is_node_unreachable(n1)) { 2725 continue; 2726 } 2727 for (int k = j+1; k < end; k++) { 2728 Node* n2 = C->expensive_node(k); 2729 if (is_node_unreachable(n2)) { 2730 continue; 2731 } 2732 2733 assert(n1 != n2, "should be pair of nodes"); 2734 2735 Node* c1 = n1->in(0); 2736 Node* c2 = n2->in(0); 2737 2738 Node* parent_c1 = c1; 2739 Node* parent_c2 = c2; 2740 2741 // The call to get_early_ctrl_for_expensive() moves the 2742 // expensive nodes up but stops at loops that are in a if 2743 // branch. See whether we can exit the loop and move above the 2744 // If. 2745 if (c1->is_Loop()) { 2746 parent_c1 = c1->in(1); 2747 } 2748 if (c2->is_Loop()) { 2749 parent_c2 = c2->in(1); 2750 } 2751 2752 if (parent_c1 == parent_c2) { 2753 _igvn._worklist.push(n1); 2754 _igvn._worklist.push(n2); 2755 continue; 2756 } 2757 2758 // Look for identical expensive node up the dominator chain. 2759 if (is_dominator(c1, c2)) { 2760 c2 = c1; 2761 } else if (is_dominator(c2, c1)) { 2762 c1 = c2; 2763 } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() && 2764 parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) { 2765 // Both branches have the same expensive node so move it up 2766 // before the if. 2767 c1 = c2 = idom(parent_c1->in(0)); 2768 } 2769 // Do the actual moves 2770 if (n1->in(0) != c1) { 2771 _igvn.hash_delete(n1); 2772 n1->set_req(0, c1); 2773 _igvn.hash_insert(n1); 2774 _igvn._worklist.push(n1); 2775 progress = true; 2776 } 2777 if (n2->in(0) != c2) { 2778 _igvn.hash_delete(n2); 2779 n2->set_req(0, c2); 2780 _igvn.hash_insert(n2); 2781 _igvn._worklist.push(n2); 2782 progress = true; 2783 } 2784 } 2785 } 2786 } 2787 2788 return progress; 2789 } 2790 2791 2792 //============================================================================= 2793 //----------------------------build_and_optimize------------------------------- 2794 // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to 2795 // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups. 2796 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) { 2797 bool do_split_ifs = (mode == LoopOptsDefault); 2798 bool skip_loop_opts = (mode == LoopOptsNone); 2799 2800 int old_progress = C->major_progress(); 2801 uint orig_worklist_size = _igvn._worklist.size(); 2802 2803 // Reset major-progress flag for the driver's heuristics 2804 C->clear_major_progress(); 2805 2806 #ifndef PRODUCT 2807 // Capture for later assert 2808 uint unique = C->unique(); 2809 _loop_invokes++; 2810 _loop_work += unique; 2811 #endif 2812 2813 // True if the method has at least 1 irreducible loop 2814 _has_irreducible_loops = false; 2815 2816 _created_loop_node = false; 2817 2818 Arena *a = Thread::current()->resource_area(); 2819 VectorSet visited(a); 2820 // Pre-grow the mapping from Nodes to IdealLoopTrees. 
2821 _nodes.map(C->unique(), NULL); 2822 memset(_nodes.adr(), 0, wordSize * C->unique()); 2823 2824 // Pre-build the top-level outermost loop tree entry 2825 _ltree_root = new IdealLoopTree( this, C->root(), C->root() ); 2826 // Do not need a safepoint at the top level 2827 _ltree_root->_has_sfpt = 1; 2828 2829 // Initialize Dominators. 2830 // Checked in clone_loop_predicate() during beautify_loops(). 2831 _idom_size = 0; 2832 _idom = NULL; 2833 _dom_depth = NULL; 2834 _dom_stk = NULL; 2835 2836 // Empty pre-order array 2837 allocate_preorders(); 2838 2839 // Build a loop tree on the fly. Build a mapping from CFG nodes to 2840 // IdealLoopTree entries. Data nodes are NOT walked. 2841 build_loop_tree(); 2842 // Check for bailout, and return 2843 if (C->failing()) { 2844 return; 2845 } 2846 2847 // No loops after all 2848 if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false); 2849 2850 // There should always be an outer loop containing the Root and Return nodes. 2851 // If not, we have a degenerate empty program. Bail out in this case. 2852 if (!has_node(C->root())) { 2853 if (!_verify_only) { 2854 C->clear_major_progress(); 2855 C->record_method_not_compilable("empty program detected during loop optimization"); 2856 } 2857 return; 2858 } 2859 2860 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2861 // Nothing to do, so get out 2862 bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only && 2863 !bs->is_gc_specific_loop_opts_pass(mode); 2864 bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn); 2865 bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(mode); 2866 if (stop_early && !do_expensive_nodes) { 2867 _igvn.optimize(); // Cleanup NeverBranches 2868 return; 2869 } 2870 2871 // Set loop nesting depth 2872 _ltree_root->set_nest( 0 ); 2873 2874 // Split shared headers and insert loop landing pads. 2875 // Do not bother doing this on the Root loop of course. 2876 if( !_verify_me && !_verify_only && _ltree_root->_child ) { 2877 C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3); 2878 if( _ltree_root->_child->beautify_loops( this ) ) { 2879 // Re-build loop tree! 2880 _ltree_root->_child = NULL; 2881 _nodes.clear(); 2882 reallocate_preorders(); 2883 build_loop_tree(); 2884 // Check for bailout, and return 2885 if (C->failing()) { 2886 return; 2887 } 2888 // Reset loop nesting depth 2889 _ltree_root->set_nest( 0 ); 2890 2891 C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3); 2892 } 2893 } 2894 2895 // Build Dominators for elision of NULL checks & loop finding. 2896 // Since nodes do not have a slot for immediate dominator, make 2897 // a persistent side array for that info indexed on node->_idx. 2898 _idom_size = C->unique(); 2899 _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size ); 2900 _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size ); 2901 _dom_stk = NULL; // Allocated on demand in recompute_dom_depth 2902 memset( _dom_depth, 0, _idom_size * sizeof(uint) ); 2903 2904 Dominators(); 2905 2906 if (!_verify_only) { 2907 // As a side effect, Dominators removed any unreachable CFG paths 2908 // into RegionNodes. It doesn't do this test against Root, so 2909 // we do it here. 2910 for( uint i = 1; i < C->root()->req(); i++ ) { 2911 if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root? 
2912 _igvn.delete_input_of(C->root(), i); 2913 i--; // Rerun same iteration on compressed edges 2914 } 2915 } 2916 2917 // Given dominators, try to find inner loops with calls that must 2918 // always be executed (call dominates loop tail). These loops do 2919 // not need a separate safepoint. 2920 Node_List cisstack(a); 2921 _ltree_root->check_safepts(visited, cisstack); 2922 } 2923 2924 // Walk the DATA nodes and place into loops. Find earliest control 2925 // node. For CFG nodes, the _nodes array starts out and remains 2926 // holding the associated IdealLoopTree pointer. For DATA nodes, the 2927 // _nodes array holds the earliest legal controlling CFG node. 2928 2929 // Allocate stack with enough space to avoid frequent realloc 2930 int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats 2931 Node_Stack nstack( a, stack_size ); 2932 2933 visited.clear(); 2934 Node_List worklist(a); 2935 // Don't need C->root() on worklist since 2936 // it will be processed among C->top() inputs 2937 worklist.push(C->top()); 2938 visited.set(C->top()->_idx); // Set C->top() as visited now 2939 build_loop_early( visited, worklist, nstack ); 2940 2941 // Given early legal placement, try finding counted loops. This placement 2942 // is good enough to discover most loop invariants. 2943 if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) { 2944 _ltree_root->counted_loop( this ); 2945 } 2946 2947 // Find latest loop placement. Find ideal loop placement. 2948 visited.clear(); 2949 init_dom_lca_tags(); 2950 // Need C->root() on worklist when processing outs 2951 worklist.push(C->root()); 2952 NOT_PRODUCT( C->verify_graph_edges(); ) 2953 worklist.push(C->top()); 2954 build_loop_late( visited, worklist, nstack ); 2955 2956 if (_verify_only) { 2957 C->restore_major_progress(old_progress); 2958 assert(C->unique() == unique, "verification mode made Nodes? ? ?"); 2959 assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything"); 2960 return; 2961 } 2962 2963 // clear out the dead code after build_loop_late 2964 while (_deadlist.size()) { 2965 _igvn.remove_globally_dead_node(_deadlist.pop()); 2966 } 2967 2968 if (stop_early) { 2969 assert(do_expensive_nodes, "why are we here?"); 2970 if (process_expensive_nodes()) { 2971 // If we made some progress when processing expensive nodes then 2972 // the IGVN may modify the graph in a way that will allow us to 2973 // make some more progress: we need to try processing expensive 2974 // nodes again. 2975 C->set_major_progress(); 2976 } 2977 _igvn.optimize(); 2978 return; 2979 } 2980 2981 // Some parser-inserted loop predicates could never be used by loop 2982 // predication or they were moved away from loop during some optimizations. 2983 // For example, peeling. Eliminate them before next loop optimizations. 2984 eliminate_useless_predicates(); 2985 2986 #ifndef PRODUCT 2987 C->verify_graph_edges(); 2988 if (_verify_me) { // Nested verify pass? 2989 // Check to see if the verify mode is broken 2990 assert(C->unique() == unique, "non-optimize mode made Nodes? ? 
?"); 2991 return; 2992 } 2993 if (VerifyLoopOptimizations) verify(); 2994 if (TraceLoopOpts && C->has_loops()) { 2995 _ltree_root->dump(); 2996 } 2997 #endif 2998 2999 if (skip_loop_opts) { 3000 // restore major progress flag 3001 C->restore_major_progress(old_progress); 3002 3003 // Cleanup any modified bits 3004 _igvn.optimize(); 3005 3006 if (C->log() != NULL) { 3007 log_loop_tree(_ltree_root, _ltree_root, C->log()); 3008 } 3009 return; 3010 } 3011 3012 if (bs->optimize_loops(this, mode, visited, nstack, worklist)) { 3013 _igvn.optimize(); 3014 if (C->log() != NULL) { 3015 log_loop_tree(_ltree_root, _ltree_root, C->log()); 3016 } 3017 return; 3018 } 3019 3020 if (ReassociateInvariants) { 3021 // Reassociate invariants and prep for split_thru_phi 3022 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 3023 IdealLoopTree* lpt = iter.current(); 3024 bool is_counted = lpt->is_counted(); 3025 if (!is_counted || !lpt->is_innermost()) continue; 3026 3027 // check for vectorized loops, any reassociation of invariants was already done 3028 if (is_counted && lpt->_head->as_CountedLoop()->is_unroll_only()) { 3029 continue; 3030 } else { 3031 AutoNodeBudget node_budget(this); 3032 lpt->reassociate_invariants(this); 3033 } 3034 // Because RCE opportunities can be masked by split_thru_phi, 3035 // look for RCE candidates and inhibit split_thru_phi 3036 // on just their loop-phi's for this pass of loop opts 3037 if (SplitIfBlocks && do_split_ifs) { 3038 AutoNodeBudget node_budget(this, AutoNodeBudget::NO_BUDGET_CHECK); 3039 if (lpt->policy_range_check(this)) { 3040 lpt->_rce_candidate = 1; // = true 3041 } 3042 } 3043 } 3044 } 3045 3046 // Check for aggressive application of split-if and other transforms 3047 // that require basic-block info (like cloning through Phi's) 3048 if( SplitIfBlocks && do_split_ifs ) { 3049 visited.clear(); 3050 split_if_with_blocks( visited, nstack); 3051 NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); ); 3052 } 3053 3054 if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) { 3055 C->set_major_progress(); 3056 } 3057 3058 // Perform loop predication before iteration splitting 3059 if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) { 3060 _ltree_root->_child->loop_predication(this); 3061 } 3062 3063 if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) { 3064 if (do_intrinsify_fill()) { 3065 C->set_major_progress(); 3066 } 3067 } 3068 3069 // Perform iteration-splitting on inner loops. Split iterations to avoid 3070 // range checks or one-shot null checks. 3071 3072 // If split-if's didn't hack the graph too bad (no CFG changes) 3073 // then do loop opts. 3074 if (C->has_loops() && !C->major_progress()) { 3075 memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) ); 3076 _ltree_root->_child->iteration_split( this, worklist ); 3077 // No verify after peeling! GCM has hoisted code out of the loop. 3078 // After peeling, the hoisted code could sink inside the peeled area. 3079 // The peeling code does not try to recompute the best location for 3080 // all the code before the peeled area, so the verify pass will always 3081 // complain about it. 3082 } 3083 // Do verify graph edges in any case 3084 NOT_PRODUCT( C->verify_graph_edges(); ); 3085 3086 if (!do_split_ifs) { 3087 // We saw major progress in Split-If to get here. We forced a 3088 // pass with unrolling and not split-if, however more split-if's 3089 // might make progress. 
If the unrolling didn't make progress 3090 // then the major-progress flag got cleared and we won't try 3091 // another round of Split-If. In particular the ever-common 3092 // instance-of/check-cast pattern requires at least 2 rounds of 3093 // Split-If to clear out. 3094 C->set_major_progress(); 3095 } 3096 3097 // Repeat loop optimizations if new loops were seen 3098 if (created_loop_node()) { 3099 C->set_major_progress(); 3100 } 3101 3102 // Keep loop predicates and perform optimizations with them 3103 // until no more loop optimizations could be done. 3104 // After that switch predicates off and do more loop optimizations. 3105 if (!C->major_progress() && (C->predicate_count() > 0)) { 3106 C->cleanup_loop_predicates(_igvn); 3107 if (TraceLoopOpts) { 3108 tty->print_cr("PredicatesOff"); 3109 } 3110 C->set_major_progress(); 3111 } 3112 3113 // Convert scalar to superword operations at the end of all loop opts. 3114 if (UseSuperWord && C->has_loops() && !C->major_progress()) { 3115 // SuperWord transform 3116 SuperWord sw(this); 3117 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 3118 IdealLoopTree* lpt = iter.current(); 3119 if (lpt->is_counted()) { 3120 CountedLoopNode *cl = lpt->_head->as_CountedLoop(); 3121 3122 if (PostLoopMultiversioning && cl->is_rce_post_loop() && !cl->is_vectorized_loop()) { 3123 // Check that the rce'd post loop is encountered first, multiversion after all 3124 // major main loop optimization are concluded 3125 if (!C->major_progress()) { 3126 IdealLoopTree *lpt_next = lpt->_next; 3127 if (lpt_next && lpt_next->is_counted()) { 3128 CountedLoopNode *cl = lpt_next->_head->as_CountedLoop(); 3129 has_range_checks(lpt_next); 3130 if (cl->is_post_loop() && cl->range_checks_present()) { 3131 if (!cl->is_multiversioned()) { 3132 if (multi_version_post_loops(lpt, lpt_next) == false) { 3133 // Cause the rce loop to be optimized away if we fail 3134 cl->mark_is_multiversioned(); 3135 cl->set_slp_max_unroll(0); 3136 poison_rce_post_loop(lpt); 3137 } 3138 } 3139 } 3140 } 3141 sw.transform_loop(lpt, true); 3142 } 3143 } else if (cl->is_main_loop()) { 3144 sw.transform_loop(lpt, true); 3145 } 3146 } 3147 } 3148 } 3149 3150 // Cleanup any modified bits 3151 _igvn.optimize(); 3152 3153 // disable assert until issue with split_flow_path is resolved (6742111) 3154 // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(), 3155 // "shouldn't introduce irreducible loops"); 3156 3157 if (C->log() != NULL) { 3158 log_loop_tree(_ltree_root, _ltree_root, C->log()); 3159 } 3160 } 3161 3162 #ifndef PRODUCT 3163 //------------------------------print_statistics------------------------------- 3164 int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes 3165 int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique 3166 void PhaseIdealLoop::print_statistics() { 3167 tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d", _loop_invokes, _loop_work); 3168 } 3169 3170 //------------------------------verify----------------------------------------- 3171 // Build a verify-only PhaseIdealLoop, and see that it agrees with me. 
3172 static int fail; // debug only, so its multi-thread dont care 3173 void PhaseIdealLoop::verify() const { 3174 int old_progress = C->major_progress(); 3175 ResourceMark rm; 3176 PhaseIdealLoop loop_verify( _igvn, this ); 3177 VectorSet visited(Thread::current()->resource_area()); 3178 3179 fail = 0; 3180 verify_compare( C->root(), &loop_verify, visited ); 3181 assert( fail == 0, "verify loops failed" ); 3182 // Verify loop structure is the same 3183 _ltree_root->verify_tree(loop_verify._ltree_root, NULL); 3184 // Reset major-progress. It was cleared by creating a verify version of 3185 // PhaseIdealLoop. 3186 C->restore_major_progress(old_progress); 3187 } 3188 3189 //------------------------------verify_compare--------------------------------- 3190 // Make sure me and the given PhaseIdealLoop agree on key data structures 3191 void PhaseIdealLoop::verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const { 3192 if( !n ) return; 3193 if( visited.test_set( n->_idx ) ) return; 3194 if( !_nodes[n->_idx] ) { // Unreachable 3195 assert( !loop_verify->_nodes[n->_idx], "both should be unreachable" ); 3196 return; 3197 } 3198 3199 uint i; 3200 for( i = 0; i < n->req(); i++ ) 3201 verify_compare( n->in(i), loop_verify, visited ); 3202 3203 // Check the '_nodes' block/loop structure 3204 i = n->_idx; 3205 if( has_ctrl(n) ) { // We have control; verify has loop or ctrl 3206 if( _nodes[i] != loop_verify->_nodes[i] && 3207 get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) { 3208 tty->print("Mismatched control setting for: "); 3209 n->dump(); 3210 if( fail++ > 10 ) return; 3211 Node *c = get_ctrl_no_update(n); 3212 tty->print("We have it as: "); 3213 if( c->in(0) ) c->dump(); 3214 else tty->print_cr("N%d",c->_idx); 3215 tty->print("Verify thinks: "); 3216 if( loop_verify->has_ctrl(n) ) 3217 loop_verify->get_ctrl_no_update(n)->dump(); 3218 else 3219 loop_verify->get_loop_idx(n)->dump(); 3220 tty->cr(); 3221 } 3222 } else { // We have a loop 3223 IdealLoopTree *us = get_loop_idx(n); 3224 if( loop_verify->has_ctrl(n) ) { 3225 tty->print("Mismatched loop setting for: "); 3226 n->dump(); 3227 if( fail++ > 10 ) return; 3228 tty->print("We have it as: "); 3229 us->dump(); 3230 tty->print("Verify thinks: "); 3231 loop_verify->get_ctrl_no_update(n)->dump(); 3232 tty->cr(); 3233 } else if (!C->major_progress()) { 3234 // Loop selection can be messed up if we did a major progress 3235 // operation, like split-if. Do not verify in that case. 
3236 IdealLoopTree *them = loop_verify->get_loop_idx(n); 3237 if( us->_head != them->_head || us->_tail != them->_tail ) { 3238 tty->print("Unequals loops for: "); 3239 n->dump(); 3240 if( fail++ > 10 ) return; 3241 tty->print("We have it as: "); 3242 us->dump(); 3243 tty->print("Verify thinks: "); 3244 them->dump(); 3245 tty->cr(); 3246 } 3247 } 3248 } 3249 3250 // Check for immediate dominators being equal 3251 if( i >= _idom_size ) { 3252 if( !n->is_CFG() ) return; 3253 tty->print("CFG Node with no idom: "); 3254 n->dump(); 3255 return; 3256 } 3257 if( !n->is_CFG() ) return; 3258 if( n == C->root() ) return; // No IDOM here 3259 3260 assert(n->_idx == i, "sanity"); 3261 Node *id = idom_no_update(n); 3262 if( id != loop_verify->idom_no_update(n) ) { 3263 tty->print("Unequals idoms for: "); 3264 n->dump(); 3265 if( fail++ > 10 ) return; 3266 tty->print("We have it as: "); 3267 id->dump(); 3268 tty->print("Verify thinks: "); 3269 loop_verify->idom_no_update(n)->dump(); 3270 tty->cr(); 3271 } 3272 3273 } 3274 3275 //------------------------------verify_tree------------------------------------ 3276 // Verify that tree structures match. Because the CFG can change, siblings 3277 // within the loop tree can be reordered. We attempt to deal with that by 3278 // reordering the verify's loop tree if possible. 3279 void IdealLoopTree::verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const { 3280 assert( _parent == parent, "Badly formed loop tree" ); 3281 3282 // Siblings not in same order? Attempt to re-order. 3283 if( _head != loop->_head ) { 3284 // Find _next pointer to update 3285 IdealLoopTree **pp = &loop->_parent->_child; 3286 while( *pp != loop ) 3287 pp = &((*pp)->_next); 3288 // Find proper sibling to be next 3289 IdealLoopTree **nn = &loop->_next; 3290 while( (*nn) && (*nn)->_head != _head ) 3291 nn = &((*nn)->_next); 3292 3293 // Check for no match. 3294 if( !(*nn) ) { 3295 // Annoyingly, irreducible loops can pick different headers 3296 // after a major_progress operation, so the rest of the loop 3297 // tree cannot be matched. 3298 if (_irreducible && Compile::current()->major_progress()) return; 3299 assert( 0, "failed to match loop tree" ); 3300 } 3301 3302 // Move (*nn) to (*pp) 3303 IdealLoopTree *hit = *nn; 3304 *nn = hit->_next; 3305 hit->_next = loop; 3306 *pp = loop; 3307 loop = hit; 3308 // Now try again to verify 3309 } 3310 3311 assert( _head == loop->_head , "mismatched loop head" ); 3312 Node *tail = _tail; // Inline a non-updating version of 3313 while( !tail->in(0) ) // the 'tail()' call. 
3314 tail = tail->in(1); 3315 assert( tail == loop->_tail, "mismatched loop tail" ); 3316 3317 // Counted loops that are guarded should be able to find their guards 3318 if( _head->is_CountedLoop() && _head->as_CountedLoop()->is_main_loop() ) { 3319 CountedLoopNode *cl = _head->as_CountedLoop(); 3320 Node *init = cl->init_trip(); 3321 Node *ctrl = cl->in(LoopNode::EntryControl); 3322 assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); 3323 Node *iff = ctrl->in(0); 3324 assert( iff->Opcode() == Op_If, "" ); 3325 Node *bol = iff->in(1); 3326 assert( bol->Opcode() == Op_Bool, "" ); 3327 Node *cmp = bol->in(1); 3328 assert( cmp->Opcode() == Op_CmpI, "" ); 3329 Node *add = cmp->in(1); 3330 Node *opaq; 3331 if( add->Opcode() == Op_Opaque1 ) { 3332 opaq = add; 3333 } else { 3334 assert( add->Opcode() == Op_AddI || add->Opcode() == Op_ConI , "" ); 3335 assert( add == init, "" ); 3336 opaq = cmp->in(2); 3337 } 3338 assert( opaq->Opcode() == Op_Opaque1, "" ); 3339 3340 } 3341 3342 if (_child != NULL) _child->verify_tree(loop->_child, this); 3343 if (_next != NULL) _next ->verify_tree(loop->_next, parent); 3344 // Innermost loops need to verify loop bodies, 3345 // but only if no 'major_progress' 3346 int fail = 0; 3347 if (!Compile::current()->major_progress() && _child == NULL) { 3348 for( uint i = 0; i < _body.size(); i++ ) { 3349 Node *n = _body.at(i); 3350 if (n->outcnt() == 0) continue; // Ignore dead 3351 uint j; 3352 for( j = 0; j < loop->_body.size(); j++ ) 3353 if( loop->_body.at(j) == n ) 3354 break; 3355 if( j == loop->_body.size() ) { // Not found in loop body 3356 // Last ditch effort to avoid assertion: Its possible that we 3357 // have some users (so outcnt not zero) but are still dead. 3358 // Try to find from root. 3359 if (Compile::current()->root()->find(n->_idx)) { 3360 fail++; 3361 tty->print("We have that verify does not: "); 3362 n->dump(); 3363 } 3364 } 3365 } 3366 for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) { 3367 Node *n = loop->_body.at(i2); 3368 if (n->outcnt() == 0) continue; // Ignore dead 3369 uint j; 3370 for( j = 0; j < _body.size(); j++ ) 3371 if( _body.at(j) == n ) 3372 break; 3373 if( j == _body.size() ) { // Not found in loop body 3374 // Last ditch effort to avoid assertion: Its possible that we 3375 // have some users (so outcnt not zero) but are still dead. 3376 // Try to find from root. 3377 if (Compile::current()->root()->find(n->_idx)) { 3378 fail++; 3379 tty->print("Verify has that we do not: "); 3380 n->dump(); 3381 } 3382 } 3383 } 3384 assert( !fail, "loop body mismatch" ); 3385 } 3386 } 3387 3388 #endif 3389 3390 //------------------------------set_idom--------------------------------------- 3391 void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) { 3392 uint idx = d->_idx; 3393 if (idx >= _idom_size) { 3394 uint newsize = next_power_of_2(idx); 3395 _idom = REALLOC_RESOURCE_ARRAY( Node*, _idom,_idom_size,newsize); 3396 _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize); 3397 memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) ); 3398 _idom_size = newsize; 3399 } 3400 _idom[idx] = n; 3401 _dom_depth[idx] = dom_depth; 3402 } 3403 3404 //------------------------------recompute_dom_depth--------------------------------------- 3405 // The dominator tree is constructed with only parent pointers. 3406 // This recomputes the depth in the tree by first tagging all 3407 // nodes as "no depth yet" marker. 
The next pass then runs up 3408 // the dom tree from each node marked "no depth yet", and computes 3409 // the depth on the way back down. 3410 void PhaseIdealLoop::recompute_dom_depth() { 3411 uint no_depth_marker = C->unique(); 3412 uint i; 3413 // Initialize depth to "no depth yet" and realize all lazy updates 3414 for (i = 0; i < _idom_size; i++) { 3415 // Only indices with a _dom_depth has a Node* or NULL (otherwise uninitalized). 3416 if (_dom_depth[i] > 0 && _idom[i] != NULL) { 3417 _dom_depth[i] = no_depth_marker; 3418 3419 // heal _idom if it has a fwd mapping in _nodes 3420 if (_idom[i]->in(0) == NULL) { 3421 idom(i); 3422 } 3423 } 3424 } 3425 if (_dom_stk == NULL) { 3426 uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size. 3427 if (init_size < 10) init_size = 10; 3428 _dom_stk = new GrowableArray<uint>(init_size); 3429 } 3430 // Compute new depth for each node. 3431 for (i = 0; i < _idom_size; i++) { 3432 uint j = i; 3433 // Run up the dom tree to find a node with a depth 3434 while (_dom_depth[j] == no_depth_marker) { 3435 _dom_stk->push(j); 3436 j = _idom[j]->_idx; 3437 } 3438 // Compute the depth on the way back down this tree branch 3439 uint dd = _dom_depth[j] + 1; 3440 while (_dom_stk->length() > 0) { 3441 uint j = _dom_stk->pop(); 3442 _dom_depth[j] = dd; 3443 dd++; 3444 } 3445 } 3446 } 3447 3448 //------------------------------sort------------------------------------------- 3449 // Insert 'loop' into the existing loop tree. 'innermost' is a leaf of the 3450 // loop tree, not the root. 3451 IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) { 3452 if( !innermost ) return loop; // New innermost loop 3453 3454 int loop_preorder = get_preorder(loop->_head); // Cache pre-order number 3455 assert( loop_preorder, "not yet post-walked loop" ); 3456 IdealLoopTree **pp = &innermost; // Pointer to previous next-pointer 3457 IdealLoopTree *l = *pp; // Do I go before or after 'l'? 3458 3459 // Insert at start of list 3460 while( l ) { // Insertion sort based on pre-order 3461 if( l == loop ) return innermost; // Already on list! 3462 int l_preorder = get_preorder(l->_head); // Cache pre-order number 3463 assert( l_preorder, "not yet post-walked l" ); 3464 // Check header pre-order number to figure proper nesting 3465 if( loop_preorder > l_preorder ) 3466 break; // End of insertion 3467 // If headers tie (e.g., shared headers) check tail pre-order numbers. 3468 // Since I split shared headers, you'd think this could not happen. 3469 // BUT: I must first do the preorder numbering before I can discover I 3470 // have shared headers, so the split headers all get the same preorder 3471 // number as the RegionNode they split from. 3472 if( loop_preorder == l_preorder && 3473 get_preorder(loop->_tail) < get_preorder(l->_tail) ) 3474 break; // Also check for shared headers (same pre#) 3475 pp = &l->_parent; // Chain up list 3476 l = *pp; 3477 } 3478 // Link into list 3479 // Point predecessor to me 3480 *pp = loop; 3481 // Point me to successor 3482 IdealLoopTree *p = loop->_parent; 3483 loop->_parent = l; // Point me to successor 3484 if( p ) sort( p, innermost ); // Insert my parents into list as well 3485 return innermost; 3486 } 3487 3488 //------------------------------build_loop_tree-------------------------------- 3489 // I use a modified Vick/Tarjan algorithm. I need pre- and a post- visit 3490 // bits. 
The _nodes[] array is mapped by Node index and holds a NULL for 3491 // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the 3492 // tightest enclosing IdealLoopTree for post-walked. 3493 // 3494 // During my forward walk I do a short 1-layer lookahead to see if I can find 3495 // a loop backedge that doesn't have any work on the backedge. This 3496 // helps me construct nested loops with shared headers better. 3497 // 3498 // Once I've done the forward recursion, I do the post-work. For each child 3499 // I check to see if there is a backedge. Backedges define a loop! I 3500 // insert an IdealLoopTree at the target of the backedge. 3501 // 3502 // During the post-work I also check to see if I have several children 3503 // belonging to different loops. If so, then this Node is a decision point 3504 // where control flow can choose to change loop nests. It is at this 3505 // decision point where I can figure out how loops are nested. At this 3506 // time I can properly order the different loop nests from my children. 3507 // Note that there may not be any backedges at the decision point! 3508 // 3509 // Since the decision point can be far removed from the backedges, I can't 3510 // order my loops at the time I discover them. Thus at the decision point 3511 // I need to inspect loop header pre-order numbers to properly nest my 3512 // loops. This means I need to sort my children's loops by pre-order. 3513 // The sort is of size number-of-control-children, which generally limits 3514 // it to size 2 (i.e., I just choose between my 2 target loops). 3515 void PhaseIdealLoop::build_loop_tree() { 3516 // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc 3517 GrowableArray <Node *> bltstack(C->live_nodes() >> 1); 3518 Node *n = C->root(); 3519 bltstack.push(n); 3520 int pre_order = 1; 3521 int stack_size; 3522 3523 while ( ( stack_size = bltstack.length() ) != 0 ) { 3524 n = bltstack.top(); // Leave node on stack 3525 if ( !is_visited(n) ) { 3526 // ---- Pre-pass Work ---- 3527 // Pre-walked but not post-walked nodes need a pre_order number. 3528 3529 set_preorder_visited( n, pre_order ); // set as visited 3530 3531 // ---- Scan over children ---- 3532 // Scan first over control projections that lead to loop headers. 3533 // This helps us find inner-to-outer loops with shared headers better. 3534 3535 // Scan children's children for loop headers. 3536 for ( int i = n->outcnt() - 1; i >= 0; --i ) { 3537 Node* m = n->raw_out(i); // Child 3538 if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children 3539 // Scan over children's children to find loop 3540 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { 3541 Node* l = m->fast_out(j); 3542 if( is_visited(l) && // Been visited? 3543 !is_postvisited(l) && // But not post-visited 3544 get_preorder(l) < pre_order ) { // And smaller pre-order 3545 // Found! Scan the DFS down this path before doing other paths 3546 bltstack.push(m); 3547 break; 3548 } 3549 } 3550 } 3551 } 3552 pre_order++; 3553 } 3554 else if ( !is_postvisited(n) ) { 3555 // Note: build_loop_tree_impl() adds out edges on rare occasions, 3556 // such as com.sun.rsasign.am::a. 3557 // For the non-recursive version, first process the current children. 3558 // On next iteration, check if additional children were added.
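// (Informal note.)  The node is intentionally left on the stack here: it is
// only popped once a later pass over its outputs pushes no new unvisited CFG
// children, i.e. when bltstack.length() still equals stack_size below.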
3559 for ( int k = n->outcnt() - 1; k >= 0; --k ) { 3560 Node* u = n->raw_out(k); 3561 if ( u->is_CFG() && !is_visited(u) ) { 3562 bltstack.push(u); 3563 } 3564 } 3565 if ( bltstack.length() == stack_size ) { 3566 // There were no additional children, post visit node now 3567 (void)bltstack.pop(); // Remove node from stack 3568 pre_order = build_loop_tree_impl( n, pre_order ); 3569 // Check for bailout 3570 if (C->failing()) { 3571 return; 3572 } 3573 // Check to grow _preorders[] array for the case when 3574 // build_loop_tree_impl() adds new nodes. 3575 check_grow_preorders(); 3576 } 3577 } 3578 else { 3579 (void)bltstack.pop(); // Remove post-visited node from stack 3580 } 3581 } 3582 } 3583 3584 //------------------------------build_loop_tree_impl--------------------------- 3585 int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { 3586 // ---- Post-pass Work ---- 3587 // Pre-walked but not post-walked nodes need a pre_order number. 3588 3589 // Tightest enclosing loop for this Node 3590 IdealLoopTree *innermost = NULL; 3591 3592 // For all children, see if any edge is a backedge. If so, make a loop 3593 // for it. Then find the tightest enclosing loop for the self Node. 3594 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3595 Node* m = n->fast_out(i); // Child 3596 if( n == m ) continue; // Ignore control self-cycles 3597 if( !m->is_CFG() ) continue;// Ignore non-CFG edges 3598 3599 IdealLoopTree *l; // Child's loop 3600 if( !is_postvisited(m) ) { // Child visited but not post-visited? 3601 // Found a backedge 3602 assert( get_preorder(m) < pre_order, "should be backedge" ); 3603 // Check for the RootNode, which is already a LoopNode and is allowed 3604 // to have multiple "backedges". 3605 if( m == C->root()) { // Found the root? 3606 l = _ltree_root; // Root is the outermost LoopNode 3607 } else { // Else found a nested loop 3608 // Insert a LoopNode to mark this loop. 3609 l = new IdealLoopTree(this, m, n); 3610 } // End of Else found a nested loop 3611 if( !has_loop(m) ) // If 'm' does not already have a loop set 3612 set_loop(m, l); // Set loop header to loop now 3613 3614 } else { // Else not a nested loop 3615 if( !_nodes[m->_idx] ) continue; // Dead code has no loop 3616 l = get_loop(m); // Get previously determined loop 3617 // If successor is header of a loop (nest), move up-loop till it 3618 // is a member of some outer enclosing loop. Since there are no 3619 // shared headers (I've split them already) I only need to go up 3620 // at most 1 level. 3621 while( l && l->_head == m ) // Successor heads loop? 3622 l = l->_parent; // Move up 1 for me 3623 // If this loop is not properly parented, then this loop 3624 // has no exit path out, i.e., it's an infinite loop. 3625 if( !l ) { 3626 // Make loop "reachable" from root so the CFG is reachable. Basically 3627 // insert a bogus loop exit that is never taken. 'm', the loop head, 3628 // points to 'n', one (of possibly many) fall-in paths. There may be 3629 // many backedges as well. 3630 3631 // Here I set the loop to be the root loop. I could have, after 3632 // inserting a bogus loop exit, restarted the recursion and found my 3633 // new loop exit. This would make the infinite loop a first-class 3634 // loop and it would then get properly optimized. What's the use of 3635 // optimizing an infinite loop? 3636 l = _ltree_root; // Oops, found infinite loop 3637 3638 if (!_verify_only) { 3639 // Insert the NeverBranch between 'm' and its control user.
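// (A sketch of the shape built below, informal.)  The taken projection of
// the new NeverBranch replaces 'm' as the input of m's old control user,
// while the never-taken projection feeds a Halt that is added as an input
// of Root:
//
//   m --> NeverBranch --> CProj#0 --> (old control user of m)
//                     \-> CProj#1 --> Halt --> Root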
3640 NeverBranchNode *iff = new NeverBranchNode( m ); 3641 _igvn.register_new_node_with_optimizer(iff); 3642 set_loop(iff, l); 3643 Node *if_t = new CProjNode( iff, 0 ); 3644 _igvn.register_new_node_with_optimizer(if_t); 3645 set_loop(if_t, l); 3646 3647 Node* cfg = NULL; // Find the One True Control User of m 3648 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { 3649 Node* x = m->fast_out(j); 3650 if (x->is_CFG() && x != m && x != iff) 3651 { cfg = x; break; } 3652 } 3653 assert(cfg != NULL, "must find the control user of m"); 3654 uint k = 0; // Probably cfg->in(0) 3655 while( cfg->in(k) != m ) k++; // But check in case cfg is a Region 3656 cfg->set_req( k, if_t ); // Now point to NeverBranch 3657 _igvn._worklist.push(cfg); 3658 3659 // Now create the never-taken loop exit 3660 Node *if_f = new CProjNode( iff, 1 ); 3661 _igvn.register_new_node_with_optimizer(if_f); 3662 set_loop(if_f, l); 3663 // Find frame ptr for Halt. Relies on the optimizer 3664 // V-N'ing. Easier and quicker than searching through 3665 // the program structure. 3666 Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr ); 3667 _igvn.register_new_node_with_optimizer(frame); 3668 // Halt & Catch Fire 3669 Node* halt = new HaltNode(if_f, frame, "never-taken loop exit reached"); 3670 _igvn.register_new_node_with_optimizer(halt); 3671 set_loop(halt, l); 3672 C->root()->add_req(halt); 3673 } 3674 set_loop(C->root(), _ltree_root); 3675 } 3676 } 3677 // Weeny check for irreducible. This child was already visited (this 3678 // IS the post-work phase). Is this child's loop header post-visited 3679 // as well? If so, then I found another entry into the loop. 3680 if (!_verify_only) { 3681 while( is_postvisited(l->_head) ) { 3682 // found irreducible 3683 l->_irreducible = 1; // = true 3684 l = l->_parent; 3685 _has_irreducible_loops = true; 3686 // Check for bad CFG here to prevent a crash, and bail out of the compile 3687 if (l == NULL) { 3688 C->record_method_not_compilable("unhandled CFG detected during loop optimization"); 3689 return pre_order; 3690 } 3691 } 3692 C->set_has_irreducible_loop(_has_irreducible_loops); 3693 } 3694 3695 // This Node might be a decision point for loops. It is only if 3696 // its children belong to several different loops. The sort call 3697 // does a trivial amount of work if there is only 1 child or all 3698 // children belong to the same loop. If, however, the children 3699 // belong to different loops, the sort call will properly set the 3700 // _parent pointers to show how the loops nest. 3701 // 3702 // In any case, it returns the tightest enclosing loop. 3703 innermost = sort( l, innermost ); 3704 } 3705 3706 // Def-use info will have some dead stuff; dead stuff will have no 3707 // loop decided on. 3708 3709 // Am I a loop header? If so fix up my parent's child and next ptrs. 3710 if( innermost && innermost->_head == n ) { 3711 assert( get_loop(n) == innermost, "" ); 3712 IdealLoopTree *p = innermost->_parent; 3713 IdealLoopTree *l = innermost; 3714 while( p && l->_head == n ) { 3715 l->_next = p->_child; // Put self on parent's 'next child' list 3716 p->_child = l; // Make self the first child of parent 3717 l = p; // Now walk up the parent chain 3718 p = l->_parent; 3719 } 3720 } else { 3721 // Note that it is possible for a LoopNode to reach here, if the 3722 // backedge has been made unreachable (hence the LoopNode no longer 3723 // denotes a Loop, and will eventually be removed). 3724 3725 // Record tightest enclosing loop for self. Mark as post-visited.
3726 set_loop(n, innermost); 3727 // Also record has_call flag early on 3728 if( innermost ) { 3729 if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) { 3730 // Do not count uncommon calls 3731 if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) { 3732 Node *iff = n->in(0)->in(0); 3733 // Do not allow any calls in vectorized loops. 3734 if( UseSuperWord || !iff->is_If() || 3735 (n->in(0)->Opcode() == Op_IfFalse && 3736 (1.0 - iff->as_If()->_prob) >= 0.01) || 3737 (iff->as_If()->_prob >= 0.01) ) 3738 innermost->_has_call = 1; 3739 } 3740 } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) { 3741 // Disable loop optimizations if the loop has a scalar replaceable 3742 // allocation. This disabling may cause a potential performance loss 3743 // if the allocation is not eliminated for some reason. 3744 innermost->_allow_optimizations = false; 3745 innermost->_has_call = 1; // = true 3746 } else if (n->Opcode() == Op_SafePoint) { 3747 // Record all safepoints in this loop. 3748 if (innermost->_safepts == NULL) innermost->_safepts = new Node_List(); 3749 innermost->_safepts->push(n); 3750 } 3751 } 3752 } 3753 3754 // Flag as post-visited now 3755 set_postvisited(n); 3756 return pre_order; 3757 } 3758 3759 3760 //------------------------------build_loop_early------------------------------- 3761 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 3762 // First pass computes the earliest controlling node possible. This is the 3763 // controlling input with the deepest dominating depth. 3764 void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { 3765 while (worklist.size() != 0) { 3766 // Use local variables nstack_top_n & nstack_top_i to cache values 3767 // on nstack's top. 3768 Node *nstack_top_n = worklist.pop(); 3769 uint nstack_top_i = 0; 3770 //while_nstack_nonempty: 3771 while (true) { 3772 // Get parent node and next input's index from stack's top. 3773 Node *n = nstack_top_n; 3774 uint i = nstack_top_i; 3775 uint cnt = n->req(); // Count of inputs 3776 if (i == 0) { // Pre-process the node. 3777 if( has_node(n) && // Have either loop or control already? 3778 !has_ctrl(n) ) { // Have loop picked out already? 3779 // During "merge_many_backedges" we fold up several nested loops 3780 // into a single loop. This makes the members of the original 3781 // loop bodies point to dead loops; they need to move up 3782 // to the new UNION'd larger loop. I set the _head field of these 3783 // dead loops to NULL and the _parent field points to the owning 3784 // loop. Shades of the UNION-FIND algorithm. 3785 IdealLoopTree *ilt; 3786 while( !(ilt = get_loop(n))->_head ) { 3787 // Normally I would use a set_loop here. But in this one special 3788 // case, it is legal (and expected) to change what loop a Node 3789 // belongs to. 3790 _nodes.map(n->_idx, (Node*)(ilt->_parent) ); 3791 } 3792 // Remove safepoints ONLY if I've already seen I don't need one. 3793 //
3793 // (the old code here would yank a 2nd safepoint after seeing a 3794 // first one, even though the 1st did not dominate in the loop body 3795 // and thus could be avoided indefinitely) 3796 if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint && 3797 is_deleteable_safept(n)) { 3798 Node *in = n->in(TypeFunc::Control); 3799 lazy_replace(n,in); // Pull safepoint now 3800 if (ilt->_safepts != NULL) { 3801 ilt->_safepts->yank(n); 3802 } 3803 // Carry on with the recursion "as if" we are walking 3804 // only the control input 3805 if( !visited.test_set( in->_idx ) ) { 3806 worklist.push(in); // Visit this guy later, using worklist 3807 } 3808 // Get next node from nstack: 3809 // - skip n's inputs processing by setting i > cnt; 3810 // - we also will not call set_early_ctrl(n) since 3811 // has_node(n) == true (see the condition above). 3812 i = cnt + 1; 3813 } 3814 } 3815 } // if (i == 0) 3816 3817 // Visit all inputs 3818 bool done = true; // Assume all n's inputs will be processed 3819 while (i < cnt) { 3820 Node *in = n->in(i); 3821 ++i; 3822 if (in == NULL) continue; 3823 if (in->pinned() && !in->is_CFG()) 3824 set_ctrl(in, in->in(0)); 3825 int is_visited = visited.test_set( in->_idx ); 3826 if (!has_node(in)) { // No controlling input yet? 3827 assert( !in->is_CFG(), "CFG Node with no controlling input?" ); 3828 assert( !is_visited, "visit only once" ); 3829 nstack.push(n, i); // Save parent node and next input's index. 3830 nstack_top_n = in; // Process current input now. 3831 nstack_top_i = 0; 3832 done = false; // Not all n's inputs processed. 3833 break; // continue while_nstack_nonempty; 3834 } else if (!is_visited) { 3835 // This guy has a location picked out for him, but has not yet 3836 // been visited. Happens to all CFG nodes, for instance. 3837 // Visit him using the worklist instead of recursion, to break 3838 // cycles. Since he has a location already we do not need to 3839 // find his location before proceeding with the current Node. 3840 worklist.push(in); // Visit this guy later, using worklist 3841 } 3842 } 3843 if (done) { 3844 // All of n's inputs have been processed, complete post-processing. 3845 3846 // Compute earliest point this Node can go. 3847 // CFG, Phi, pinned nodes already know their controlling input. 3848 if (!has_node(n)) { 3849 // Record earliest legal location 3850 set_early_ctrl( n ); 3851 } 3852 if (nstack.is_empty()) { 3853 // Finished all nodes on stack. 3854 // Process next node on the worklist. 3855 break; 3856 } 3857 // Get saved parent node and next input's index. 3858 nstack_top_n = nstack.node(); 3859 nstack_top_i = nstack.index(); 3860 nstack.pop(); 3861 } 3862 } // while (true) 3863 } 3864 } 3865 3866 //------------------------------dom_lca_internal-------------------------------- 3867 // Pair-wise LCA 3868 Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const { 3869 if( !n1 ) return n2; // Handle NULL original LCA 3870 assert( n1->is_CFG(), "" ); 3871 assert( n2->is_CFG(), "" ); 3872 // find LCA of all uses 3873 uint d1 = dom_depth(n1); 3874 uint d2 = dom_depth(n2); 3875 while (n1 != n2) { 3876 if (d1 > d2) { 3877 n1 = idom(n1); 3878 d1 = dom_depth(n1); 3879 } else if (d1 < d2) { 3880 n2 = idom(n2); 3881 d2 = dom_depth(n2); 3882 } else { 3883 // Here d1 == d2. Due to edits of the dominator-tree, sections 3884 // of the tree might have the same depth. These sections have 3885 // to be searched more carefully. 3886 3887 // Scan up all the n1's with equal depth, looking for n2. 
3888 Node *t1 = idom(n1); 3889 while (dom_depth(t1) == d1) { 3890 if (t1 == n2) return n2; 3891 t1 = idom(t1); 3892 } 3893 // Scan up all the n2's with equal depth, looking for n1. 3894 Node *t2 = idom(n2); 3895 while (dom_depth(t2) == d2) { 3896 if (t2 == n1) return n1; 3897 t2 = idom(t2); 3898 } 3899 // Move up to a new dominator-depth value as well as up the dom-tree. 3900 n1 = t1; 3901 n2 = t2; 3902 d1 = dom_depth(n1); 3903 d2 = dom_depth(n2); 3904 } 3905 } 3906 return n1; 3907 } 3908 3909 //------------------------------compute_idom----------------------------------- 3910 // Locally compute IDOM using dom_lca call. Correct only if the incoming 3911 // IDOMs are correct. 3912 Node *PhaseIdealLoop::compute_idom( Node *region ) const { 3913 assert( region->is_Region(), "" ); 3914 Node *LCA = NULL; 3915 for( uint i = 1; i < region->req(); i++ ) { 3916 if( region->in(i) != C->top() ) 3917 LCA = dom_lca( LCA, region->in(i) ); 3918 } 3919 return LCA; 3920 } 3921 3922 bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) { 3923 bool had_error = false; 3924 #ifdef ASSERT 3925 if (early != C->root()) { 3926 // Make sure that there's a dominance path from LCA to early 3927 Node* d = LCA; 3928 while (d != early) { 3929 if (d == C->root()) { 3930 dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA); 3931 tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx); 3932 had_error = true; 3933 break; 3934 } 3935 d = idom(d); 3936 } 3937 } 3938 #endif 3939 return had_error; 3940 } 3941 3942 3943 Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) { 3944 // Compute LCA over list of uses 3945 bool had_error = false; 3946 Node *LCA = NULL; 3947 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) { 3948 Node* c = n->fast_out(i); 3949 if (_nodes[c->_idx] == NULL) 3950 continue; // Skip the occasional dead node 3951 if( c->is_Phi() ) { // For Phis, we must land above on the path 3952 for( uint j=1; j<c->req(); j++ ) {// For all inputs 3953 if( c->in(j) == n ) { // Found matching input? 3954 Node *use = c->in(0)->in(j); 3955 if (_verify_only && use->is_top()) continue; 3956 LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); 3957 if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; 3958 } 3959 } 3960 } else { 3961 // For CFG data-users, use is in the block just prior 3962 Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0); 3963 LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); 3964 if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; 3965 } 3966 } 3967 assert(!had_error, "bad dominance"); 3968 return LCA; 3969 } 3970 3971 // Check the shape of the graph at the loop entry. In some cases, 3972 // the shape of the graph does not match the shape outlined below. 3973 // That is caused by the Opaque1 node "protecting" the shape of 3974 // the graph being removed by, for example, the IGVN performed 3975 // in PhaseIdealLoop::build_and_optimize(). 3976 // 3977 // After the Opaque1 node has been removed, optimizations (e.g., split-if, 3978 // loop unswitching, and IGVN, or a combination of them) can freely change 3979 // the graph's shape. As a result, the graph shape outlined below cannot 3980 // be guaranteed anymore. 
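// (A sketch of the entry shape accepted below; this is an informal reading
// of the checks, not an authoritative diagram.)  Walking up from the loop:
//
//   CountedLoop (main or post)
//     --> skip_predicates() is an IfTrue/IfFalse (typically the zero-trip guard)
//       --> its If (iffm)
//         --> Bool (bolzm)
//           --> Cmp (cmpzm), with at least one operand still an Opaque1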
3981 bool PhaseIdealLoop::is_canonical_loop_entry(CountedLoopNode* cl) { 3982 if (!cl->is_main_loop() && !cl->is_post_loop()) { 3983 return false; 3984 } 3985 Node* ctrl = cl->skip_predicates(); 3986 3987 if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) { 3988 return false; 3989 } 3990 Node* iffm = ctrl->in(0); 3991 if (iffm == NULL || !iffm->is_If()) { 3992 return false; 3993 } 3994 Node* bolzm = iffm->in(1); 3995 if (bolzm == NULL || !bolzm->is_Bool()) { 3996 return false; 3997 } 3998 Node* cmpzm = bolzm->in(1); 3999 if (cmpzm == NULL || !cmpzm->is_Cmp()) { 4000 return false; 4001 } 4002 // compares can get conditionally flipped 4003 bool found_opaque = false; 4004 for (uint i = 1; i < cmpzm->req(); i++) { 4005 Node* opnd = cmpzm->in(i); 4006 if (opnd && opnd->Opcode() == Op_Opaque1) { 4007 found_opaque = true; 4008 break; 4009 } 4010 } 4011 if (!found_opaque) { 4012 return false; 4013 } 4014 return true; 4015 } 4016 4017 //------------------------------get_late_ctrl---------------------------------- 4018 // Compute latest legal control. 4019 Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) { 4020 assert(early != NULL, "early control should not be NULL"); 4021 4022 Node* LCA = compute_lca_of_uses(n, early); 4023 #ifdef ASSERT 4024 if (LCA == C->root() && LCA != early) { 4025 // def doesn't dominate uses so print some useful debugging output 4026 compute_lca_of_uses(n, early, true); 4027 } 4028 #endif 4029 4030 // if this is a load, check for anti-dependent stores 4031 // We use a conservative algorithm to identify potential interfering 4032 // instructions and for rescheduling the load. The users of the memory 4033 // input of this load are examined. Any use which is not a load and is 4034 // dominated by early is considered a potentially interfering store. 4035 // This can produce false positives. 4036 if (n->is_Load() && LCA != early) { 4037 int load_alias_idx = C->get_alias_index(n->adr_type()); 4038 if (C->alias_type(load_alias_idx)->is_rewritable()) { 4039 4040 Node_List worklist; 4041 4042 Node *mem = n->in(MemNode::Memory); 4043 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { 4044 Node* s = mem->fast_out(i); 4045 worklist.push(s); 4046 } 4047 while(worklist.size() != 0 && LCA != early) { 4048 Node* s = worklist.pop(); 4049 if (s->is_Load() || s->Opcode() == Op_SafePoint || 4050 (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) { 4051 continue; 4052 } else if (s->is_MergeMem()) { 4053 for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) { 4054 Node* s1 = s->fast_out(i); 4055 worklist.push(s1); 4056 } 4057 } else { 4058 Node *sctrl = has_ctrl(s) ? 
get_ctrl(s) : s->in(0); 4059 assert(sctrl != NULL || s->outcnt() == 0, "must have control"); 4060 if (sctrl != NULL && !sctrl->is_top() && C->can_alias(s->adr_type(), load_alias_idx) && is_dominator(early, sctrl)) { 4061 LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n); 4062 } 4063 } 4064 } 4065 } 4066 } 4067 4068 assert(LCA == find_non_split_ctrl(LCA), "unexpected late control"); 4069 return LCA; 4070 } 4071 4072 // true if CFG node d dominates CFG node n 4073 bool PhaseIdealLoop::is_dominator(Node *d, Node *n) { 4074 if (d == n) 4075 return true; 4076 assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes"); 4077 uint dd = dom_depth(d); 4078 while (dom_depth(n) >= dd) { 4079 if (n == d) 4080 return true; 4081 n = idom(n); 4082 } 4083 return false; 4084 } 4085 4086 //------------------------------dom_lca_for_get_late_ctrl_internal------------- 4087 // Pair-wise LCA with tags. 4088 // Tag each index with the node 'tag' currently being processed 4089 // before advancing up the dominator chain using idom(). 4090 // Later calls that find a match to 'tag' know that this path has already 4091 // been considered in the current LCA (which is input 'n1' by convention). 4092 // Since get_late_ctrl() is only called once for each node, the tag array 4093 // does not need to be cleared between calls to get_late_ctrl(). 4094 // Algorithm trades a larger constant factor for better asymptotic behavior 4095 // 4096 Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal( Node *n1, Node *n2, Node *tag ) { 4097 uint d1 = dom_depth(n1); 4098 uint d2 = dom_depth(n2); 4099 4100 do { 4101 if (d1 > d2) { 4102 // current lca is deeper than n2 4103 _dom_lca_tags.map(n1->_idx, tag); 4104 n1 = idom(n1); 4105 d1 = dom_depth(n1); 4106 } else if (d1 < d2) { 4107 // n2 is deeper than current lca 4108 Node *memo = _dom_lca_tags[n2->_idx]; 4109 if( memo == tag ) { 4110 return n1; // Return the current LCA 4111 } 4112 _dom_lca_tags.map(n2->_idx, tag); 4113 n2 = idom(n2); 4114 d2 = dom_depth(n2); 4115 } else { 4116 // Here d1 == d2. Due to edits of the dominator-tree, sections 4117 // of the tree might have the same depth. These sections have 4118 // to be searched more carefully. 4119 4120 // Scan up all the n1's with equal depth, looking for n2. 4121 _dom_lca_tags.map(n1->_idx, tag); 4122 Node *t1 = idom(n1); 4123 while (dom_depth(t1) == d1) { 4124 if (t1 == n2) return n2; 4125 _dom_lca_tags.map(t1->_idx, tag); 4126 t1 = idom(t1); 4127 } 4128 // Scan up all the n2's with equal depth, looking for n1. 4129 _dom_lca_tags.map(n2->_idx, tag); 4130 Node *t2 = idom(n2); 4131 while (dom_depth(t2) == d2) { 4132 if (t2 == n1) return n1; 4133 _dom_lca_tags.map(t2->_idx, tag); 4134 t2 = idom(t2); 4135 } 4136 // Move up to a new dominator-depth value as well as up the dom-tree. 4137 n1 = t1; 4138 n2 = t2; 4139 d1 = dom_depth(n1); 4140 d2 = dom_depth(n2); 4141 } 4142 } while (n1 != n2); 4143 return n1; 4144 } 4145 4146 //------------------------------init_dom_lca_tags------------------------------ 4147 // Tag could be a node's integer index, 32bits instead of 64bits in some cases 4148 // Intended use does not involve any growth for the array, so it could 4149 // be of fixed size. 
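// (Informal note on the code below.)  Mapping index 'limit' up front is a
// sizing trick: it grows _dom_lca_tags to cover every node index currently
// in use, so the tag updates done while computing LCAs should not need to
// grow the array; the debug-only loop merely checks that every entry starts
// out NULL.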
4150 void PhaseIdealLoop::init_dom_lca_tags() { 4151 uint limit = C->unique() + 1; 4152 _dom_lca_tags.map( limit, NULL ); 4153 #ifdef ASSERT 4154 for( uint i = 0; i < limit; ++i ) { 4155 assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer"); 4156 } 4157 #endif // ASSERT 4158 } 4159 4160 //------------------------------clear_dom_lca_tags------------------------------ 4161 // Tag could be a node's integer index, 32bits instead of 64bits in some cases 4162 // Intended use does not involve any growth for the array, so it could 4163 // be of fixed size. 4164 void PhaseIdealLoop::clear_dom_lca_tags() { 4165 uint limit = C->unique() + 1; 4166 _dom_lca_tags.map( limit, NULL ); 4167 _dom_lca_tags.clear(); 4168 #ifdef ASSERT 4169 for( uint i = 0; i < limit; ++i ) { 4170 assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer"); 4171 } 4172 #endif // ASSERT 4173 } 4174 4175 //------------------------------build_loop_late-------------------------------- 4176 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 4177 // Second pass finds latest legal placement, and ideal loop placement. 4178 void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { 4179 while (worklist.size() != 0) { 4180 Node *n = worklist.pop(); 4181 // Only visit once 4182 if (visited.test_set(n->_idx)) continue; 4183 uint cnt = n->outcnt(); 4184 uint i = 0; 4185 while (true) { 4186 assert( _nodes[n->_idx], "no dead nodes" ); 4187 // Visit all children 4188 if (i < cnt) { 4189 Node* use = n->raw_out(i); 4190 ++i; 4191 // Check for dead uses. Aggressively prune such junk. It might be 4192 // dead in the global sense, but still have local uses so I cannot 4193 // easily call 'remove_dead_node'. 4194 if( _nodes[use->_idx] != NULL || use->is_top() ) { // Not dead? 4195 // Due to cycles, we might not hit the same fixed point in the verify 4196 // pass as we do in the regular pass. Instead, visit such phis as 4197 // simple uses of the loop head. 4198 if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) { 4199 if( !visited.test(use->_idx) ) 4200 worklist.push(use); 4201 } else if( !visited.test_set(use->_idx) ) { 4202 nstack.push(n, i); // Save parent and next use's index. 4203 n = use; // Process all children of current use. 4204 cnt = use->outcnt(); 4205 i = 0; 4206 } 4207 } else { 4208 // Do not visit around the backedge of loops via data edges. 4209 // push dead code onto a worklist 4210 _deadlist.push(use); 4211 } 4212 } else { 4213 // All of n's children have been processed, complete post-processing. 4214 build_loop_late_post(n); 4215 if (nstack.is_empty()) { 4216 // Finished all nodes on stack. 4217 // Process next node on the worklist. 4218 break; 4219 } 4220 // Get saved parent node and next use's index. Visit the rest of uses. 4221 n = nstack.node(); 4222 cnt = n->outcnt(); 4223 i = nstack.index(); 4224 nstack.pop(); 4225 } 4226 } 4227 } 4228 } 4229 4230 // Verify that no data node is scheduled in the outer loop of a strip 4231 // mined loop. 
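// (Informal summary of the debug-only check below.)  Starting from the outer
// strip-mined loop's safepoint, walk the data inputs whose control is inside
// that outer loop; the node 'n' being placed there must be found on this
// walk (i.e. it must feed the safepoint), otherwise ShouldNotReachHere()
// fires.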
4232 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) { 4233 #ifdef ASSERT 4234 if (get_loop(least)->_nest == 0) { 4235 return; 4236 } 4237 IdealLoopTree* loop = get_loop(least); 4238 Node* head = loop->_head; 4239 if (head->is_OuterStripMinedLoop() && 4240 // Verification can't be applied to fully built strip mined loops 4241 head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) { 4242 Node* sfpt = head->as_Loop()->outer_safepoint(); 4243 ResourceMark rm; 4244 Unique_Node_List wq; 4245 wq.push(sfpt); 4246 for (uint i = 0; i < wq.size(); i++) { 4247 Node *m = wq.at(i); 4248 for (uint i = 1; i < m->req(); i++) { 4249 Node* nn = m->in(i); 4250 if (nn == n) { 4251 return; 4252 } 4253 if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) { 4254 wq.push(nn); 4255 } 4256 } 4257 } 4258 ShouldNotReachHere(); 4259 } 4260 #endif 4261 } 4262 4263 4264 //------------------------------build_loop_late_post--------------------------- 4265 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 4266 // Second pass finds latest legal placement, and ideal loop placement. 4267 void PhaseIdealLoop::build_loop_late_post(Node *n) { 4268 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 4269 4270 if (bs->build_loop_late_post(this, n)) { 4271 return; 4272 } 4273 4274 build_loop_late_post_work(n, true); 4275 } 4276 4277 void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { 4278 4279 if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) { 4280 _igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops. 4281 } 4282 4283 #ifdef ASSERT 4284 if (_verify_only && !n->is_CFG()) { 4285 // Check def-use domination. 4286 compute_lca_of_uses(n, get_ctrl(n), true /* verify */); 4287 } 4288 #endif 4289 4290 // CFG and pinned nodes already handled 4291 if( n->in(0) ) { 4292 if( n->in(0)->is_top() ) return; // Dead? 4293 4294 // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads 4295 // _must_ be pinned (they have to observe their control edge of course). 4296 // Unlike Stores (which modify an unallocable resource, the memory 4297 // state), Mods/Loads can float around. So free them up. 4298 switch( n->Opcode() ) { 4299 case Op_DivI: 4300 case Op_DivF: 4301 case Op_DivD: 4302 case Op_ModI: 4303 case Op_ModF: 4304 case Op_ModD: 4305 case Op_LoadB: // Same with Loads; they can sink 4306 case Op_LoadUB: // during loop optimizations. 4307 case Op_LoadUS: 4308 case Op_LoadD: 4309 case Op_LoadF: 4310 case Op_LoadI: 4311 case Op_LoadKlass: 4312 case Op_LoadNKlass: 4313 case Op_LoadL: 4314 case Op_LoadS: 4315 case Op_LoadP: 4316 case Op_LoadN: 4317 case Op_LoadRange: 4318 case Op_LoadD_unaligned: 4319 case Op_LoadL_unaligned: 4320 case Op_StrComp: // Does a bunch of load-like effects 4321 case Op_StrEquals: 4322 case Op_StrIndexOf: 4323 case Op_StrIndexOfChar: 4324 case Op_AryEq: 4325 case Op_HasNegatives: 4326 pinned = false; 4327 } 4328 if( pinned ) { 4329 IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); 4330 if( !chosen_loop->_child ) // Inner loop? 4331 chosen_loop->_body.push(n); // Collect inner loops 4332 return; 4333 } 4334 } else { // No slot zero 4335 if( n->is_CFG() ) { // CFG with no slot 0 is dead 4336 _nodes.map(n->_idx,0); // No block setting, it's globally dead 4337 return; 4338 } 4339 assert(!n->is_CFG() || n->outcnt() == 0, ""); 4340 } 4341 4342 // Do I have a "safe range" I can select over? 
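// (Informal note on the code below.)  The legal range for 'n' runs along the
// idom chain from the LCA of its uses up to 'early'; the walk that follows
// starts at the LCA and climbs toward 'early', remembering the position with
// the shallowest loop nest ('least') as the preferred placement.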
4343 Node *early = get_ctrl(n);// Early location already computed 4344 4345 // Compute latest point this Node can go 4346 Node *LCA = get_late_ctrl( n, early ); 4347 // LCA is NULL due to uses being dead 4348 if( LCA == NULL ) { 4349 #ifdef ASSERT 4350 for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) { 4351 assert( _nodes[n->out(i1)->_idx] == NULL, "all uses must also be dead"); 4352 } 4353 #endif 4354 _nodes.map(n->_idx, 0); // This node is useless 4355 _deadlist.push(n); 4356 return; 4357 } 4358 assert(LCA != NULL && !LCA->is_top(), "no dead nodes"); 4359 4360 Node *legal = LCA; // Walk 'legal' up the IDOM chain 4361 Node *least = legal; // Best legal position so far 4362 while( early != legal ) { // While not at earliest legal 4363 #ifdef ASSERT 4364 if (legal->is_Start() && !early->is_Root()) { 4365 // Bad graph. Print idom path and fail. 4366 dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA); 4367 assert(false, "Bad graph detected in build_loop_late"); 4368 } 4369 #endif 4370 // Find least loop nesting depth 4371 legal = idom(legal); // Bump up the IDOM tree 4372 // Check for lower nesting depth 4373 if( get_loop(legal)->_nest < get_loop(least)->_nest ) 4374 least = legal; 4375 } 4376 assert(early == legal || legal != C->root(), "bad dominance of inputs"); 4377 4378 // Try not to place code on a loop entry projection 4379 // which can inhibit range check elimination. 4380 if (least != early) { 4381 Node* ctrl_out = least->unique_ctrl_out(); 4382 if (ctrl_out && ctrl_out->is_Loop() && 4383 least == ctrl_out->in(LoopNode::EntryControl)) { 4384 // Move the node above predicates as far up as possible so a 4385 // following pass of loop predication doesn't hoist a predicate 4386 // that depends on it above that node. 4387 Node* new_ctrl = least; 4388 for (;;) { 4389 if (!new_ctrl->is_Proj()) { 4390 break; 4391 } 4392 CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); 4393 if (call == NULL) { 4394 break; 4395 } 4396 int req = call->uncommon_trap_request(); 4397 Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); 4398 if (trap_reason != Deoptimization::Reason_loop_limit_check && 4399 trap_reason != Deoptimization::Reason_predicate && 4400 trap_reason != Deoptimization::Reason_profile_predicate) { 4401 break; 4402 } 4403 Node* c = new_ctrl->in(0)->in(0); 4404 if (is_dominator(c, early) && c != early) { 4405 break; 4406 } 4407 new_ctrl = c; 4408 } 4409 least = new_ctrl; 4410 } 4411 } 4412 4413 #ifdef ASSERT 4414 // If verifying, verify that 'verify_me' has a legal location 4415 // and choose it as our location. 4416 if( _verify_me ) { 4417 Node *v_ctrl = _verify_me->get_ctrl_no_update(n); 4418 Node *legal = LCA; 4419 while( early != legal ) { // While not at earliest legal 4420 if( legal == v_ctrl ) break; // Check for prior good location 4421 legal = idom(legal) ;// Bump up the IDOM tree 4422 } 4423 // Check for prior good location 4424 if( legal == v_ctrl ) least = legal; // Keep prior if found 4425 } 4426 #endif 4427 4428 // Assign discovered "here or above" point 4429 least = find_non_split_ctrl(least); 4430 verify_strip_mined_scheduling(n, least); 4431 set_ctrl(n, least); 4432 4433 // Collect inner loop bodies 4434 IdealLoopTree *chosen_loop = get_loop(least); 4435 if( !chosen_loop->_child ) // Inner loop? 
4436 chosen_loop->_body.push(n);// Collect inner loops 4437 } 4438 4439 #ifdef ASSERT 4440 void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) { 4441 tty->print_cr("%s", msg); 4442 tty->print("n: "); n->dump(); 4443 tty->print("early(n): "); early->dump(); 4444 if (n->in(0) != NULL && !n->in(0)->is_top() && 4445 n->in(0) != early && !n->in(0)->is_Root()) { 4446 tty->print("n->in(0): "); n->in(0)->dump(); 4447 } 4448 for (uint i = 1; i < n->req(); i++) { 4449 Node* in1 = n->in(i); 4450 if (in1 != NULL && in1 != n && !in1->is_top()) { 4451 tty->print("n->in(%d): ", i); in1->dump(); 4452 Node* in1_early = get_ctrl(in1); 4453 tty->print("early(n->in(%d)): ", i); in1_early->dump(); 4454 if (in1->in(0) != NULL && !in1->in(0)->is_top() && 4455 in1->in(0) != in1_early && !in1->in(0)->is_Root()) { 4456 tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump(); 4457 } 4458 for (uint j = 1; j < in1->req(); j++) { 4459 Node* in2 = in1->in(j); 4460 if (in2 != NULL && in2 != n && in2 != in1 && !in2->is_top()) { 4461 tty->print("n->in(%d)->in(%d): ", i, j); in2->dump(); 4462 Node* in2_early = get_ctrl(in2); 4463 tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump(); 4464 if (in2->in(0) != NULL && !in2->in(0)->is_top() && 4465 in2->in(0) != in2_early && !in2->in(0)->is_Root()) { 4466 tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump(); 4467 } 4468 } 4469 } 4470 } 4471 } 4472 tty->cr(); 4473 tty->print("LCA(n): "); LCA->dump(); 4474 for (uint i = 0; i < n->outcnt(); i++) { 4475 Node* u1 = n->raw_out(i); 4476 if (u1 == n) 4477 continue; 4478 tty->print("n->out(%d): ", i); u1->dump(); 4479 if (u1->is_CFG()) { 4480 for (uint j = 0; j < u1->outcnt(); j++) { 4481 Node* u2 = u1->raw_out(j); 4482 if (u2 != u1 && u2 != n && u2->is_CFG()) { 4483 tty->print("n->out(%d)->out(%d): ", i, j); u2->dump(); 4484 } 4485 } 4486 } else { 4487 Node* u1_later = get_ctrl(u1); 4488 tty->print("later(n->out(%d)): ", i); u1_later->dump(); 4489 if (u1->in(0) != NULL && !u1->in(0)->is_top() && 4490 u1->in(0) != u1_later && !u1->in(0)->is_Root()) { 4491 tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump(); 4492 } 4493 for (uint j = 0; j < u1->outcnt(); j++) { 4494 Node* u2 = u1->raw_out(j); 4495 if (u2 == n || u2 == u1) 4496 continue; 4497 tty->print("n->out(%d)->out(%d): ", i, j); u2->dump(); 4498 if (!u2->is_CFG()) { 4499 Node* u2_later = get_ctrl(u2); 4500 tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump(); 4501 if (u2->in(0) != NULL && !u2->in(0)->is_top() && 4502 u2->in(0) != u2_later && !u2->in(0)->is_Root()) { 4503 tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump(); 4504 } 4505 } 4506 } 4507 } 4508 } 4509 tty->cr(); 4510 int ct = 0; 4511 Node *dbg_legal = LCA; 4512 while(!dbg_legal->is_Start() && ct < 100) { 4513 tty->print("idom[%d] ",ct); dbg_legal->dump(); 4514 ct++; 4515 dbg_legal = idom(dbg_legal); 4516 } 4517 tty->cr(); 4518 } 4519 #endif 4520 4521 #ifndef PRODUCT 4522 //------------------------------dump------------------------------------------- 4523 void PhaseIdealLoop::dump() const { 4524 ResourceMark rm; 4525 Arena* arena = Thread::current()->resource_area(); 4526 Node_Stack stack(arena, C->live_nodes() >> 2); 4527 Node_List rpo_list; 4528 VectorSet visited(arena); 4529 visited.set(C->top()->_idx); 4530 rpo(C->root(), stack, visited, rpo_list); 4531 // Dump root loop indexed by last element in PO order 4532 dump(_ltree_root, rpo_list.size(), rpo_list); 4533 } 4534 4535 void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List 
&rpo_list) const { 4536 loop->dump_head(); 4537 4538 // Now scan for CFG nodes in the same loop 4539 for (uint j = idx; j > 0; j--) { 4540 Node* n = rpo_list[j-1]; 4541 if (!_nodes[n->_idx]) // Skip dead nodes 4542 continue; 4543 4544 if (get_loop(n) != loop) { // Wrong loop nest 4545 if (get_loop(n)->_head == n && // Found nested loop? 4546 get_loop(n)->_parent == loop) 4547 dump(get_loop(n), rpo_list.size(), rpo_list); // Print it nested-ly 4548 continue; 4549 } 4550 4551 // Dump controlling node 4552 tty->sp(2 * loop->_nest); 4553 tty->print("C"); 4554 if (n == C->root()) { 4555 n->dump(); 4556 } else { 4557 Node* cached_idom = idom_no_update(n); 4558 Node* computed_idom = n->in(0); 4559 if (n->is_Region()) { 4560 computed_idom = compute_idom(n); 4561 // computed_idom() will return n->in(0) when idom(n) is an IfNode (or 4562 // any MultiBranch ctrl node), so apply a similar transform to 4563 // the cached idom returned from idom_no_update. 4564 cached_idom = find_non_split_ctrl(cached_idom); 4565 } 4566 tty->print(" ID:%d", computed_idom->_idx); 4567 n->dump(); 4568 if (cached_idom != computed_idom) { 4569 tty->print_cr("*** BROKEN IDOM! Computed as: %d, cached as: %d", 4570 computed_idom->_idx, cached_idom->_idx); 4571 } 4572 } 4573 // Dump nodes it controls 4574 for (uint k = 0; k < _nodes.Size(); k++) { 4575 // (k < C->unique() && get_ctrl(find(k)) == n) 4576 if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) { 4577 Node* m = C->root()->find(k); 4578 if (m && m->outcnt() > 0) { 4579 if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) { 4580 tty->print_cr("*** BROKEN CTRL ACCESSOR! _nodes[k] is %p, ctrl is %p", 4581 _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL); 4582 } 4583 tty->sp(2 * loop->_nest + 1); 4584 m->dump(); 4585 } 4586 } 4587 } 4588 } 4589 } 4590 #endif 4591 4592 // Collect a R-P-O for the whole CFG. 4593 // Result list is in post-order (scan backwards for RPO) 4594 void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const { 4595 stk.push(start, 0); 4596 visited.set(start->_idx); 4597 4598 while (stk.is_nonempty()) { 4599 Node* m = stk.node(); 4600 uint idx = stk.index(); 4601 if (idx < m->outcnt()) { 4602 stk.set_index(idx + 1); 4603 Node* n = m->raw_out(idx); 4604 if (n->is_CFG() && !visited.test_set(n->_idx)) { 4605 stk.push(n, 0); 4606 } 4607 } else { 4608 rpo_list.push(m); 4609 stk.pop(); 4610 } 4611 } 4612 } 4613 4614 4615 //============================================================================= 4616 //------------------------------LoopTreeIterator------------------------------- 4617 4618 // Advance to next loop tree using a preorder, left-to-right traversal. 4619 void LoopTreeIterator::next() { 4620 assert(!done(), "must not be done."); 4621 if (_curnt->_child != NULL) { 4622 _curnt = _curnt->_child; 4623 } else if (_curnt->_next != NULL) { 4624 _curnt = _curnt->_next; 4625 } else { 4626 while (_curnt != _root && _curnt->_next == NULL) { 4627 _curnt = _curnt->_parent; 4628 } 4629 if (_curnt == _root) { 4630 _curnt = NULL; 4631 assert(done(), "must be done."); 4632 } else { 4633 assert(_curnt->_next != NULL, "must be more to do"); 4634 _curnt = _curnt->_next; 4635 } 4636 } 4637 } --- EOF ---