/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/ciMethodData.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/superword.hpp"

//=============================================================================
//--------------------------is_cloop_ind_var-----------------------------------
// Determine if a node is a counted loop induction variable.
// NOTE: The method is declared in "node.hpp".
bool Node::is_cloop_ind_var() const {
  return (is_Phi() && !as_Phi()->is_copy() &&
          as_Phi()->region()->is_CountedLoop() &&
          as_Phi()->region()->as_CountedLoop()->phi() == this);
}

//=============================================================================
//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void LoopNode::dump_spec(outputStream *st) const {
  if (is_inner_loop()) st->print( "inner " );
  if (is_partial_peel_loop()) st->print( "partial_peel " );
  if (partial_peel_has_failed()) st->print( "partial_peel_failed " );
}
#endif

//------------------------------is_valid_counted_loop-------------------------
bool LoopNode::is_valid_counted_loop() const {
  if (is_CountedLoop()) {
    CountedLoopNode* l = as_CountedLoop();
    CountedLoopEndNode* le = l->loopexit_or_null();
    if (le != NULL &&
        le->proj_out_or_null(1 /* true */) == l->in(LoopNode::LoopBackControl)) {
      Node* phi = l->phi();
      Node* exit = le->proj_out_or_null(0 /* false */);
      if (exit != NULL && exit->Opcode() == Op_IfFalse &&
          phi != NULL && phi->is_Phi() &&
          phi->in(LoopNode::LoopBackControl) == l->incr() &&
          le->loopnode() == l && le->stride_is_con()) {
        return true;
      }
    }
  }
  return false;
}

//------------------------------get_early_ctrl---------------------------------
// Compute earliest legal control
Node *PhaseIdealLoop::get_early_ctrl( Node *n ) {
  assert( !n->is_Phi() && !n->is_CFG(), "this code only handles data nodes" );
  uint i;
  Node *early;
  if (n->in(0) && !n->is_expensive()) {
    early = n->in(0);
    if (!early->is_CFG())      // Might be a non-CFG multi-def
      early = get_ctrl(early); // So treat input as a straight data input
    i = 1;
  } else {
    early = get_ctrl(n->in(1));
    i = 2;
  }
  uint e_d = dom_depth(early);
  assert( early, "" );
  for (; i < n->req(); i++) {
    Node *cin = get_ctrl(n->in(i));
    assert( cin, "" );
    // Keep deepest dominator depth
    uint c_d = dom_depth(cin);
    if (c_d > e_d) {           // Deeper guy?
      early = cin;             // Keep deepest found so far
      e_d = c_d;
    } else if (c_d == e_d &&   // Same depth?
               early != cin) { // If not equal, must use slower algorithm
      // If same depth but not equal, one _must_ dominate the other
      // and we want the deeper (i.e., dominated) guy.
      Node *n1 = early;
      Node *n2 = cin;
      while (1) {
        n1 = idom(n1);          // Walk up until break cycle
        n2 = idom(n2);
        if (n1 == cin ||        // Walked early up to cin
            dom_depth(n2) < c_d)
          break;                // early is deeper; keep him
        if (n2 == early ||      // Walked cin up to early
            dom_depth(n1) < c_d) {
          early = cin;          // cin is deeper; keep him
          break;
        }
      }
      e_d = dom_depth(early);   // Reset depth register cache
    }
  }

  // Return earliest legal location
  assert(early == find_non_split_ctrl(early), "unexpected early control");

  if (n->is_expensive() && !_verify_only && !_verify_me) {
    assert(n->in(0), "should have control input");
    early = get_early_ctrl_for_expensive(n, early);
  }

  return early;
}

//------------------------------get_early_ctrl_for_expensive---------------------------------
// Move node up the dominator tree as high as legal while still beneficial
Node *PhaseIdealLoop::get_early_ctrl_for_expensive(Node *n, Node* earliest) {
  assert(n->in(0) && n->is_expensive(), "expensive node with control input here");
  assert(OptimizeExpensiveOps, "optimization off?");

  Node* ctl = n->in(0);
  assert(ctl->is_CFG(), "expensive input 0 must be cfg");
  uint min_dom_depth = dom_depth(earliest);
#ifdef ASSERT
  if (!is_dominator(ctl, earliest) && !is_dominator(earliest, ctl)) {
    dump_bad_graph("Bad graph detected in get_early_ctrl_for_expensive", n, earliest, ctl);
    assert(false, "Bad graph detected in get_early_ctrl_for_expensive");
  }
#endif
  if (dom_depth(ctl) < min_dom_depth) {
    return earliest;
  }

  while (1) {
    Node *next = ctl;
    // Moving the node out of a loop on the projection of an If
    // confuses loop predication. So once we hit a Loop in an If branch
    // that doesn't branch to an UNC, we stop. The code that processes
    // expensive nodes will notice the loop and skip over it to try to
    // move the node further up.
    if (ctl->is_CountedLoop() && ctl->in(1) != NULL && ctl->in(1)->in(0) != NULL && ctl->in(1)->in(0)->is_If()) {
      if (!ctl->in(1)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
        break;
      }
      next = idom(ctl->in(1)->in(0));
    } else if (ctl->is_Proj()) {
      // We only move it up along a projection if the projection is
      // the single control projection for its parent: same code path,
      // if it's an If with UNC or fallthrough of a call.
      Node* parent_ctl = ctl->in(0);
      if (parent_ctl == NULL) {
        break;
      } else if (parent_ctl->is_CountedLoopEnd() && parent_ctl->as_CountedLoopEnd()->loopnode() != NULL) {
        next = parent_ctl->as_CountedLoopEnd()->loopnode()->init_control();
      } else if (parent_ctl->is_If()) {
        if (!ctl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none)) {
          break;
        }
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      } else if (ctl->is_CatchProj()) {
        if (ctl->as_Proj()->_con != CatchProjNode::fall_through_index) {
          break;
        }
        assert(parent_ctl->in(0)->in(0)->is_Call(), "strange graph");
        next = parent_ctl->in(0)->in(0)->in(0);
      } else {
        // Check if parent control has a single projection (this
        // control is the only possible successor of the parent
        // control). If so, we can try to move the node above the
        // parent control.
        int nb_ctl_proj = 0;
        for (DUIterator_Fast imax, i = parent_ctl->fast_outs(imax); i < imax; i++) {
          Node *p = parent_ctl->fast_out(i);
          if (p->is_Proj() && p->is_CFG()) {
            nb_ctl_proj++;
            if (nb_ctl_proj > 1) {
              break;
            }
          }
        }

        if (nb_ctl_proj > 1) {
          break;
        }
        assert(parent_ctl->is_Start() || parent_ctl->is_MemBar() || parent_ctl->is_Call() ||
               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(parent_ctl), "unexpected node");
        assert(idom(ctl) == parent_ctl, "strange");
        next = idom(parent_ctl);
      }
    } else {
      next = idom(ctl);
    }
    if (next->is_Root() || next->is_Start() || dom_depth(next) < min_dom_depth) {
      break;
    }
    ctl = next;
  }

  if (ctl != n->in(0)) {
    _igvn.replace_input_of(n, 0, ctl);
    _igvn.hash_insert(n);
  }

  return ctl;
}


//------------------------------set_early_ctrl---------------------------------
// Set earliest legal control
void PhaseIdealLoop::set_early_ctrl( Node *n ) {
  Node *early = get_early_ctrl(n);

  // Record earliest legal location
  set_ctrl(n, early);
}

//------------------------------set_subtree_ctrl-------------------------------
// set missing _ctrl entries on new nodes
void PhaseIdealLoop::set_subtree_ctrl( Node *n ) {
  // Already set? Get out.
  if( _nodes[n->_idx] ) return;
  // Recursively set _nodes array to indicate where the Node goes
  uint i;
  for( i = 0; i < n->req(); ++i ) {
    Node *m = n->in(i);
    if( m && m != C->root() )
      set_subtree_ctrl( m );
  }

  // Fixup self
  set_early_ctrl( n );
}

// Create a skeleton strip mined outer loop: a Loop head before the
// inner strip mined loop, a safepoint and an exit condition guarded
// by an opaque node after the inner strip mined loop with a backedge
// to the loop head. The inner strip mined loop is left as it is. Only
// once loop optimizations are over, do we adjust the inner loop exit
// condition to limit its number of iterations, set the outer loop
// exit condition and add Phis to the outer loop head. Some loop
// optimizations that operate on the inner strip mined loop need to be
// aware of the outer strip mined loop: loop unswitching needs to
// clone the outer loop as well as the inner, unrolling needs to only
// clone the inner loop etc. No optimizations need to change the outer
// strip mined loop as it is only a skeleton.
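//
// Rough shape of the strip mined nest after construction (an illustrative
// sketch only, based on the description above and on the code in
// create_outer_strip_mined_loop() and LoopNode::verify_strip_mined()):
//
//              entry
//                |
//      OuterStripMinedLoop  <-------------------------+
//                |                                     |
//       CountedLoop (inner, strip mined)               |
//               ...                                    |
//        inner loop exit projection                    |
//                |                                     |
//            SafePoint                                 |
//                |                                     |
//      OuterStripMinedLoopEnd                          |
//            /        \                                |
//        IfFalse     IfTrue (backedge) ----------------+
//      (outer exit)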
IdealLoopTree* PhaseIdealLoop::create_outer_strip_mined_loop(BoolNode *test, Node *cmp, Node *init_control,
                                                             IdealLoopTree* loop, float cl_prob, float le_fcnt,
                                                             Node*& entry_control, Node*& iffalse) {
  Node* outer_test = _igvn.intcon(0);
  set_ctrl(outer_test, C->root());
  Node *orig = iffalse;
  iffalse = iffalse->clone();
  _igvn.register_new_node_with_optimizer(iffalse);
  set_idom(iffalse, idom(orig), dom_depth(orig));

  IfNode *outer_le = new OuterStripMinedLoopEndNode(iffalse, outer_test, cl_prob, le_fcnt);
  Node *outer_ift = new IfTrueNode (outer_le);
  Node* outer_iff = orig;
  _igvn.replace_input_of(outer_iff, 0, outer_le);

  LoopNode *outer_l = new OuterStripMinedLoopNode(C, init_control, outer_ift);
  entry_control = outer_l;

  IdealLoopTree* outer_ilt = new IdealLoopTree(this, outer_l, outer_ift);
  IdealLoopTree* parent = loop->_parent;
  IdealLoopTree* sibling = parent->_child;
  if (sibling == loop) {
    parent->_child = outer_ilt;
  } else {
    while (sibling->_next != loop) {
      sibling = sibling->_next;
    }
    sibling->_next = outer_ilt;
  }
  outer_ilt->_next = loop->_next;
  outer_ilt->_parent = parent;
  outer_ilt->_child = loop;
  outer_ilt->_nest = loop->_nest;
  loop->_parent = outer_ilt;
  loop->_next = NULL;
  loop->_nest++;

  set_loop(iffalse, outer_ilt);
  register_control(outer_le, outer_ilt, iffalse);
  register_control(outer_ift, outer_ilt, outer_le);
  set_idom(outer_iff, outer_le, dom_depth(outer_le));
  _igvn.register_new_node_with_optimizer(outer_l);
  set_loop(outer_l, outer_ilt);
  set_idom(outer_l, init_control, dom_depth(init_control)+1);

  return outer_ilt;
}

void PhaseIdealLoop::insert_loop_limit_check(ProjNode* limit_check_proj, Node* cmp_limit, Node* bol) {
  Node* new_predicate_proj = create_new_if_for_predicate(limit_check_proj, NULL,
                                                         Deoptimization::Reason_loop_limit_check,
                                                         Op_If);
  Node* iff = new_predicate_proj->in(0);
  assert(iff->Opcode() == Op_If, "bad graph shape");
  Node* conv = iff->in(1);
  assert(conv->Opcode() == Op_Conv2B, "bad graph shape");
  Node* opaq = conv->in(1);
  assert(opaq->Opcode() == Op_Opaque1, "bad graph shape");
  cmp_limit = _igvn.register_new_node_with_optimizer(cmp_limit);
  bol = _igvn.register_new_node_with_optimizer(bol);
  set_subtree_ctrl(bol);
  _igvn.replace_input_of(iff, 1, bol);

#ifndef PRODUCT
  // report that the loop predication has been actually performed
  // for this loop
  if (TraceLoopLimitCheck) {
    tty->print_cr("Counted Loop Limit Check generated:");
    debug_only( bol->dump(2); )
  }
#endif
}

//------------------------------is_counted_loop--------------------------------
bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*& loop) {
  PhaseGVN *gvn = &_igvn;

  // Counted loop head must be a good RegionNode with only 3 non-NULL
  // control input edges: Self, Entry, LoopBack.
  if (x->in(LoopNode::Self) == NULL || x->req() != 3 || loop->_irreducible) {
    return false;
  }
  Node *init_control = x->in(LoopNode::EntryControl);
  Node *back_control = x->in(LoopNode::LoopBackControl);
  if (init_control == NULL || back_control == NULL)   // Partially dead
    return false;
  // Must also check for TOP when looking for a dead loop
  if (init_control->is_top() || back_control->is_top())
    return false;

  // Allow funny placement of Safepoint
  if (back_control->Opcode() == Op_SafePoint) {
    if (LoopStripMiningIter != 0) {
      // Leaving the safepoint on the backedge and creating a
      // CountedLoop will confuse optimizations. We can't move the
      // safepoint around because its jvm state wouldn't match a new
      // location. Give up on that loop.
      return false;
    }
    back_control = back_control->in(TypeFunc::Control);
  }

  // Controlling test for loop
  Node *iftrue = back_control;
  uint iftrue_op = iftrue->Opcode();
  if (iftrue_op != Op_IfTrue &&
      iftrue_op != Op_IfFalse)
    // I have a weird back-control.  Probably the loop-exit test is in
    // the middle of the loop and I am looking at some trailing control-flow
    // merge point.  To fix this I would have to partially peel the loop.
    return false; // Obscure back-control

  // Get boolean guarding loop-back test
  Node *iff = iftrue->in(0);
  if (get_loop(iff) != loop || !iff->in(1)->is_Bool())
    return false;
  BoolNode *test = iff->in(1)->as_Bool();
  BoolTest::mask bt = test->_test._test;
  float cl_prob = iff->as_If()->_prob;
  if (iftrue_op == Op_IfFalse) {
    bt = BoolTest(bt).negate();
    cl_prob = 1.0 - cl_prob;
  }
  // Get backedge compare
  Node *cmp = test->in(1);
  int cmp_op = cmp->Opcode();
  if (cmp_op != Op_CmpI)
    return false;              // Avoid pointer & float compares

  // Find the trip-counter increment & limit.  Limit must be loop invariant.
  Node *incr  = cmp->in(1);
  Node *limit = cmp->in(2);

  // ---------
  // need 'loop()' test to tell if limit is loop invariant
  // ---------

  if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
    Node *tmp = incr;                     // Then reverse order into the CmpI
    incr = limit;
    limit = tmp;
    bt = BoolTest(bt).commute();          // And commute the exit test
  }
  if (is_member(loop, get_ctrl(limit)))   // Limit must be loop-invariant
    return false;
  if (!is_member(loop, get_ctrl(incr)))   // Trip counter must be loop-variant
    return false;

  Node* phi_incr = NULL;
  // Trip-counter increment must be commutative & associative.
  if (incr->Opcode() == Op_CastII) {
    incr = incr->in(1);
  }
  if (incr->is_Phi()) {
    if (incr->as_Phi()->region() != x || incr->req() != 3)
      return false; // Not simple trip counter expression
    phi_incr = incr;
    incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
    if (!is_member(loop, get_ctrl(incr))) // Trip counter must be loop-variant
      return false;
  }

  Node* trunc1 = NULL;
  Node* trunc2 = NULL;
  const TypeInt* iv_trunc_t = NULL;
  Node* orig_incr = incr;
  if (!(incr = CountedLoopNode::match_incr_with_optional_truncation(incr, &trunc1, &trunc2, &iv_trunc_t))) {
    return false; // Funny increment opcode
  }
  assert(incr->Opcode() == Op_AddI, "wrong increment code");

  const TypeInt* limit_t = gvn->type(limit)->is_int();
  if (trunc1 != NULL) {
    // When there is a truncation, we must be sure that after the truncation
    // the trip counter will end up higher than the limit, otherwise we are looking
    // at an endless loop. Can happen with range checks.

    // Example:
    // int i = 0;
    // while (true)
    //    sum += array[i];
    //    i++;
    //    i = i & 0x7fff;
    // }
    //
    // If the array is shorter than 0x8000 this exits through an AIOOB
    //  - Counted loop transformation is ok
    // If the array is longer, then this is an endless loop
    //  - No transformation can be done.

    const TypeInt* incr_t = gvn->type(orig_incr)->is_int();
    if (limit_t->_hi > incr_t->_hi) {
      // if the limit can have a higher value than the increment (before the phi)
      return false;
    }
  }

  // Get merge point
  Node *xphi = incr->in(1);
  Node *stride = incr->in(2);
  if (!stride->is_Con()) {     // Oops, swap these
    if (!xphi->is_Con())       // Is the other guy a constant?
      return false;            // Nope, unknown stride, bail out
    Node *tmp = xphi;          // 'incr' is commutative, so ok to swap
    xphi = stride;
    stride = tmp;
  }
  if (xphi->Opcode() == Op_CastII) {
    xphi = xphi->in(1);
  }
  // Stride must be constant
  int stride_con = stride->get_int();
  if (stride_con == 0)
    return false; // missed some peephole opt

  if (!xphi->is_Phi())
    return false; // Too much math on the trip counter
  if (phi_incr != NULL && phi_incr != xphi)
    return false;
  PhiNode *phi = xphi->as_Phi();

  // Phi must be of loop header; backedge must wrap to increment
  if (phi->region() != x)
    return false;
  if ((trunc1 == NULL && phi->in(LoopNode::LoopBackControl) != incr) ||
      (trunc1 != NULL && phi->in(LoopNode::LoopBackControl) != trunc1)) {
    return false;
  }
  Node *init_trip = phi->in(LoopNode::EntryControl);

  // If iv trunc type is smaller than int, check for possible wrap.
  if (!TypeInt::INT->higher_equal(iv_trunc_t)) {
    assert(trunc1 != NULL, "must have found some truncation");

    // Get a better type for the phi (filtered thru if's)
    const TypeInt* phi_ft = filtered_type(phi);

    // Can iv take on a value that will wrap?
    //
    // Ensure iv's limit is not within "stride" of the wrap value.
    //
    // Example for "short" type
    //    Truncation ensures value is in the range -32768..32767 (iv_trunc_t)
    //    If the stride is +10, then the last value of the induction
    //    variable before the increment (phi_ft->_hi) must be
    //    <= 32767 - 10 and (phi_ft->_lo) must be >= -32768 to
    //    ensure no truncation occurs after the increment.

    if (stride_con > 0) {
      if (iv_trunc_t->_hi - phi_ft->_hi < stride_con ||
          iv_trunc_t->_lo > phi_ft->_lo) {
        return false;  // truncation may occur
      }
    } else if (stride_con < 0) {
      if (iv_trunc_t->_lo - phi_ft->_lo > stride_con ||
          iv_trunc_t->_hi < phi_ft->_hi) {
        return false;  // truncation may occur
      }
    }
    // No possibility of wrap so truncation can be discarded
    // Promote iv type to Int
  } else {
    assert(trunc1 == NULL && trunc2 == NULL, "no truncation for int");
  }

  // If the condition is inverted and we will be rolling
  // through MININT to MAXINT, then bail out.
  if (bt == BoolTest::eq || // Bail out, but this loop trips at most twice!
      // Odd stride
      (bt == BoolTest::ne && stride_con != 1 && stride_con != -1) ||
      // Count down loop rolls through MAXINT
      ((bt == BoolTest::le || bt == BoolTest::lt) && stride_con < 0) ||
      // Count up loop rolls through MININT
      ((bt == BoolTest::ge || bt == BoolTest::gt) && stride_con > 0)) {
    return false; // Bail out
  }

  const TypeInt* init_t = gvn->type(init_trip)->is_int();

  if (stride_con > 0) {
    jlong init_p = (jlong)init_t->_lo + stride_con;
    if (init_p > (jlong)max_jint || init_p > (jlong)limit_t->_hi)
      return false; // cyclic loop or this loop trips only once
  } else {
    jlong init_p = (jlong)init_t->_hi + stride_con;
    if (init_p < (jlong)min_jint || init_p < (jlong)limit_t->_lo)
      return false; // cyclic loop or this loop trips only once
  }

  if (phi_incr != NULL && bt != BoolTest::ne) {
    // check if there is a possibility of IV overflowing after the first increment
    if (stride_con > 0) {
      if (init_t->_hi > max_jint - stride_con) {
        return false;
      }
    } else {
      if (init_t->_lo < min_jint - stride_con) {
        return false;
      }
    }
  }

  // =================================================
  // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
  //
  assert(x->Opcode() == Op_Loop, "regular loops only");
  C->print_method(PHASE_BEFORE_CLOOPS, 3);

  Node *hook = new Node(6);

  // ===================================================
  // Generate loop limit check to avoid integer overflow
  // in cases like next (cyclic loops):
  //
  //   for (i=0; i <= max_jint; i++) {}
  //   for (i=0; i <  max_jint; i+=2) {}
  //
  //
  // Limit check predicate depends on the loop test:
  //
  //   for(;i != limit; i++)       --> limit <= (max_jint)
  //   for(;i <  limit; i+=stride) --> limit <= (max_jint - stride + 1)
  //   for(;i <= limit; i+=stride) --> limit <= (max_jint - stride    )
  //

  // Check if limit is excluded to do more precise int overflow check.
  bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
  int stride_m  = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));

  // If compare points directly to the phi we need to adjust
  // the compare so that it points to the incr. The limit has
  // to be adjusted to keep the trip count the same and the
  // adjusted limit should be checked for int overflow.
  if (phi_incr != NULL) {
    stride_m  += stride_con;
  }

  if (limit->is_Con()) {
    int limit_con = limit->get_int();
    if ((stride_con > 0 && limit_con > (max_jint - stride_m)) ||
        (stride_con < 0 && limit_con < (min_jint - stride_m))) {
      // Bailout: it could be integer overflow.
      return false;
    }
  } else if ((stride_con > 0 && limit_t->_hi <= (max_jint - stride_m)) ||
             (stride_con < 0 && limit_t->_lo >= (min_jint - stride_m))) {
    // Limit's type may satisfy the condition, for example,
    // when it is an array length.
  } else {
    // Generate loop's limit check.
    // Loop limit check predicate should be near the loop.
    ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
    if (!limit_check_proj) {
      // The limit check predicate is not generated if this method trapped here before.
#ifdef ASSERT
      if (TraceLoopLimitCheck) {
        tty->print("missing loop limit check:");
        loop->dump_head();
        x->dump(1);
      }
#endif
      return false;
    }

    IfNode* check_iff = limit_check_proj->in(0)->as_If();

    if (!is_dominator(get_ctrl(limit), check_iff->in(0))) {
      return false;
    }

    Node* cmp_limit;
    Node* bol;

    if (stride_con > 0) {
      cmp_limit = new CmpINode(limit, _igvn.intcon(max_jint - stride_m));
      bol = new BoolNode(cmp_limit, BoolTest::le);
    } else {
      cmp_limit = new CmpINode(limit, _igvn.intcon(min_jint - stride_m));
      bol = new BoolNode(cmp_limit, BoolTest::ge);
    }

    insert_loop_limit_check(limit_check_proj, cmp_limit, bol);
  }

  // Now we need to canonicalize loop condition.
  if (bt == BoolTest::ne) {
    assert(stride_con == 1 || stride_con == -1, "simple increment only");
    if (stride_con > 0 && init_t->_hi < limit_t->_lo) {
      // 'ne' can be replaced with 'lt' only when init < limit.
      bt = BoolTest::lt;
    } else if (stride_con < 0 && init_t->_lo > limit_t->_hi) {
      // 'ne' can be replaced with 'gt' only when init > limit.
      bt = BoolTest::gt;
    } else {
      ProjNode *limit_check_proj = find_predicate_insertion_point(init_control, Deoptimization::Reason_loop_limit_check);
      if (!limit_check_proj) {
        // The limit check predicate is not generated if this method trapped here before.
#ifdef ASSERT
        if (TraceLoopLimitCheck) {
          tty->print("missing loop limit check:");
          loop->dump_head();
          x->dump(1);
        }
#endif
        return false;
      }
      IfNode* check_iff = limit_check_proj->in(0)->as_If();

      if (!is_dominator(get_ctrl(limit), check_iff->in(0)) ||
          !is_dominator(get_ctrl(init_trip), check_iff->in(0))) {
        return false;
      }

      Node* cmp_limit;
      Node* bol;

      if (stride_con > 0) {
        cmp_limit = new CmpINode(init_trip, limit);
        bol = new BoolNode(cmp_limit, BoolTest::lt);
      } else {
        cmp_limit = new CmpINode(init_trip, limit);
        bol = new BoolNode(cmp_limit, BoolTest::gt);
      }

      insert_loop_limit_check(limit_check_proj, cmp_limit, bol);

      if (stride_con > 0) {
        // 'ne' can be replaced with 'lt' only when init < limit.
        bt = BoolTest::lt;
      } else if (stride_con < 0) {
        // 'ne' can be replaced with 'gt' only when init > limit.
        bt = BoolTest::gt;
      }
    }
  }

  if (phi_incr != NULL) {
    // If compare points directly to the phi we need to adjust
    // the compare so that it points to the incr. The limit has
    // to be adjusted to keep the trip count the same and we
    // should avoid int overflow.
    //
    //   i = init; do {} while(i++ < limit);
    // is converted to
    //   i = init; do {} while(++i < limit+1);
    //
    limit = gvn->transform(new AddINode(limit, stride));
  }

  if (incl_limit) {
    // The limit check guarantees that 'limit <= (max_jint - stride)' so
    // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
    //
    Node* one = (stride_con > 0) ? gvn->intcon( 1) : gvn->intcon(-1);
    limit = gvn->transform(new AddINode(limit, one));
    if (bt == BoolTest::le)
      bt = BoolTest::lt;
    else if (bt == BoolTest::ge)
      bt = BoolTest::gt;
    else
      ShouldNotReachHere();
  }
  set_subtree_ctrl( limit );

  if (LoopStripMiningIter == 0) {
    // Check for SafePoint on backedge and remove
    Node *sfpt = x->in(LoopNode::LoopBackControl);
    if (sfpt->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt)) {
      lazy_replace( sfpt, iftrue );
      if (loop->_safepts != NULL) {
        loop->_safepts->yank(sfpt);
      }
      loop->_tail = iftrue;
    }
  }

  // Build a canonical trip test.
  // Clone code, as old values may be in use.
  incr = incr->clone();
  incr->set_req(1,phi);
  incr->set_req(2,stride);
  incr = _igvn.register_new_node_with_optimizer(incr);
  set_early_ctrl( incr );
  _igvn.rehash_node_delayed(phi);
  phi->set_req_X( LoopNode::LoopBackControl, incr, &_igvn );

  // If phi type is more restrictive than Int, raise to
  // Int to prevent (almost) infinite recursion in igvn
  // which can only handle integer types for constants or minint..maxint.
  if (!TypeInt::INT->higher_equal(phi->bottom_type())) {
    Node* nphi = PhiNode::make(phi->in(0), phi->in(LoopNode::EntryControl), TypeInt::INT);
    nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl));
    nphi = _igvn.register_new_node_with_optimizer(nphi);
    set_ctrl(nphi, get_ctrl(phi));
    _igvn.replace_node(phi, nphi);
    phi = nphi->as_Phi();
  }
  cmp = cmp->clone();
  cmp->set_req(1,incr);
  cmp->set_req(2,limit);
  cmp = _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, iff->in(0));

  test = test->clone()->as_Bool();
  (*(BoolTest*)&test->_test)._test = bt;
  test->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(test);
  set_ctrl(test, iff->in(0));

  // Replace the old IfNode with a new LoopEndNode
  Node *lex = _igvn.register_new_node_with_optimizer(new CountedLoopEndNode( iff->in(0), test, cl_prob, iff->as_If()->_fcnt ));
  IfNode *le = lex->as_If();
  uint dd = dom_depth(iff);
  set_idom(le, le->in(0), dd); // Update dominance for loop exit
  set_loop(le, loop);

  // Get the loop-exit control
  Node *iffalse = iff->as_If()->proj_out(!(iftrue_op == Op_IfTrue));

  // Need to swap loop-exit and loop-back control?
  if (iftrue_op == Op_IfFalse) {
    Node *ift2=_igvn.register_new_node_with_optimizer(new IfTrueNode (le));
    Node *iff2=_igvn.register_new_node_with_optimizer(new IfFalseNode(le));

    loop->_tail = back_control = ift2;
    set_loop(ift2, loop);
    set_loop(iff2, get_loop(iffalse));

    // Lazy update of 'get_ctrl' mechanism.
    lazy_replace(iffalse, iff2);
    lazy_replace(iftrue,  ift2);

    // Swap names
    iffalse = iff2;
    iftrue  = ift2;
  } else {
    _igvn.rehash_node_delayed(iffalse);
    _igvn.rehash_node_delayed(iftrue);
    iffalse->set_req_X( 0, le, &_igvn );
    iftrue ->set_req_X( 0, le, &_igvn );
  }

  set_idom(iftrue,  le, dd+1);
  set_idom(iffalse, le, dd+1);
  assert(iff->outcnt() == 0, "should be dead now");
  lazy_replace( iff, le ); // fix 'get_ctrl'

  Node *sfpt2 = le->in(0);

  Node* entry_control = init_control;
  bool strip_mine_loop = LoopStripMiningIter > 1 && loop->_child == NULL &&
    sfpt2->Opcode() == Op_SafePoint && !loop->_has_call;
  IdealLoopTree* outer_ilt = NULL;
  if (strip_mine_loop) {
    outer_ilt = create_outer_strip_mined_loop(test, cmp, init_control, loop,
                                              cl_prob, le->_fcnt, entry_control,
                                              iffalse);
  }

  // Now setup a new CountedLoopNode to replace the existing LoopNode
  CountedLoopNode *l = new CountedLoopNode(entry_control, back_control);
  l->set_unswitch_count(x->as_Loop()->unswitch_count()); // Preserve
  // The following assert is approximately true, and defines the intention
  // of can_be_counted_loop.  It fails, however, because phase->type
  // is not yet initialized for this loop and its parts.
  //assert(l->can_be_counted_loop(this), "sanity");
  _igvn.register_new_node_with_optimizer(l);
  set_loop(l, loop);
  loop->_head = l;
  // Fix all data nodes placed at the old loop head.
  // Uses the lazy-update mechanism of 'get_ctrl'.
  lazy_replace( x, l );
  set_idom(l, entry_control, dom_depth(entry_control) + 1);

  if (LoopStripMiningIter == 0 || strip_mine_loop) {
    // Check for immediately preceding SafePoint and remove
    if (sfpt2->Opcode() == Op_SafePoint && (LoopStripMiningIter != 0 || is_deleteable_safept(sfpt2))) {
      if (strip_mine_loop) {
        Node* outer_le = outer_ilt->_tail->in(0);
        Node* sfpt = sfpt2->clone();
        sfpt->set_req(0, iffalse);
        outer_le->set_req(0, sfpt);
        register_control(sfpt, outer_ilt, iffalse);
        set_idom(outer_le, sfpt, dom_depth(sfpt));
      }
      lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control));
      if (loop->_safepts != NULL) {
        loop->_safepts->yank(sfpt2);
      }
    }
  }

  // Free up intermediate goo
  _igvn.remove_dead_node(hook);

#ifdef ASSERT
  assert(l->is_valid_counted_loop(), "counted loop shape is messed up");
  assert(l == loop->_head && l->phi() == phi && l->loopexit_or_null() == lex, "" );
#endif
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Counted ");
    loop->dump_head();
  }
#endif

  C->print_method(PHASE_AFTER_CLOOPS, 3);

  // Capture bounds of the loop in the induction variable Phi before
  // subsequent transformation (iteration splitting) obscures the
  // bounds
  l->phi()->as_Phi()->set_type(l->phi()->Value(&_igvn));

  if (strip_mine_loop) {
    l->mark_strip_mined();
    l->verify_strip_mined(1);
    outer_ilt->_head->as_Loop()->verify_strip_mined(1);
    loop = outer_ilt;
  }

  return true;
}

//----------------------exact_limit-------------------------------------------
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
  assert(loop->_head->is_CountedLoop(), "");
  CountedLoopNode *cl = loop->_head->as_CountedLoop();
  assert(cl->is_valid_counted_loop(), "");

  if (ABS(cl->stride_con()) == 1 ||
      cl->limit()->Opcode() == Op_LoopLimit) {
    // Old code has exact limit (it could be incorrect in case of int overflow).
    // Loop limit is exact with stride == 1. And loop may already have exact limit.
    return cl->limit();
  }
  Node *limit = NULL;
#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
#endif
  if (cl->has_exact_trip_count()) {
    // Simple case: loop has constant boundaries.
    // Use jlongs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong  init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    julong trip_cnt = cl->trip_count();
    jlong final_con = init_con + trip_cnt*stride_con;
    int final_int = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    limit = _igvn.intcon(final_int);
  } else {
    // Create new LoopLimit node to get exact limit (final iv value).
    limit = new LoopLimitNode(C, cl->init_trip(), cl->limit(), cl->stride());
    register_new_node(limit, cl->in(LoopNode::EntryControl));
  }
  assert(limit != NULL, "sanity");
  return limit;
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *LoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (!can_be_counted_loop(phase) && !is_OuterStripMinedLoop()) {
    phase->C->set_major_progress();
  }
  return RegionNode::Ideal(phase, can_reshape);
}

#ifdef ASSERT
void LoopNode::verify_strip_mined(int expect_skeleton) const {
  if (!is_valid_counted_loop()) {
    return; // Skip malformed counted loop
  }
  const OuterStripMinedLoopNode* outer = NULL;
  const CountedLoopNode* inner = NULL;
  if (is_strip_mined()) {
    assert(is_CountedLoop(), "no Loop should be marked strip mined");
    inner = as_CountedLoop();
    outer = inner->in(LoopNode::EntryControl)->as_OuterStripMinedLoop();
  } else if (is_OuterStripMinedLoop()) {
    outer = this->as_OuterStripMinedLoop();
    inner = outer->unique_ctrl_out()->as_CountedLoop();
    assert(inner->is_valid_counted_loop(), "OuterStripMinedLoop should have been removed");
    assert(!is_strip_mined(), "outer loop shouldn't be marked strip mined");
  }
  if (inner != NULL || outer != NULL) {
    assert(inner != NULL && outer != NULL, "missing loop in strip mined nest");
    Node* outer_tail = outer->in(LoopNode::LoopBackControl);
    Node* outer_le = outer_tail->in(0);
    assert(outer_le->Opcode() == Op_OuterStripMinedLoopEnd, "tail of outer loop should be an If");
    Node* sfpt = outer_le->in(0);
    assert(sfpt->Opcode() == Op_SafePoint, "where's the safepoint?");
    Node* inner_out = sfpt->in(0);
    if (inner_out->outcnt() != 1) {
      ResourceMark rm;
      Unique_Node_List wq;

      for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
        Node* u = inner_out->fast_out(i);
        if (u == sfpt) {
          continue;
        }
        wq.clear();
        wq.push(u);
        bool found_sfpt = false;
        for (uint next = 0; next < wq.size() && !found_sfpt; next++) {
          Node* n = wq.at(next);
          for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !found_sfpt; i++) {
            Node* u = n->fast_out(i);
            if (u == sfpt) {
              found_sfpt = true;
            }
            if (!u->is_CFG()) {
              wq.push(u);
            }
          }
        }
        assert(found_sfpt, "no node in loop that's not input to safepoint");
      }
    }

    CountedLoopEndNode* cle = inner_out->in(0)->as_CountedLoopEnd();
    assert(cle == inner->loopexit_or_null(), "mismatch");
    bool has_skeleton = outer_le->in(1)->bottom_type()->singleton() && outer_le->in(1)->bottom_type()->is_int()->get_con() == 0;
    if (has_skeleton) {
      assert(expect_skeleton == 1 || expect_skeleton == -1, "unexpected skeleton node");
      assert(outer->outcnt() == 2, "only phis");
    } else {
      assert(expect_skeleton == 0 || expect_skeleton == -1, "no skeleton node?");
      uint phis = 0;
      for (DUIterator_Fast imax, i = inner->fast_outs(imax); i < imax; i++) {
        Node* u = inner->fast_out(i);
        if (u->is_Phi()) {
          phis++;
        }
      }
      for (DUIterator_Fast imax, i = outer->fast_outs(imax); i < imax; i++) {
        Node* u = outer->fast_out(i);
        assert(u == outer || u == inner || u->is_Phi(), "nothing between inner and outer loop");
      }
      uint stores = 0;
      for (DUIterator_Fast imax, i = inner_out->fast_outs(imax); i < imax; i++) {
        Node* u = inner_out->fast_out(i);
        if (u->is_Store()) {
          stores++;
        }
      }
      assert(outer->outcnt() >= phis + 2 && outer->outcnt() <= phis + 2 + stores + 1, "only phis");
    }
    assert(sfpt->outcnt() == 1, "no data node");
    assert(outer_tail->outcnt() == 1 || !has_skeleton, "no data node");
  }
}
#endif

//=============================================================================
//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
// Attempt to convert into a counted-loop.
Node *CountedLoopNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return RegionNode::Ideal(phase, can_reshape);
}

//------------------------------dump_spec--------------------------------------
// Dump special per-node info
#ifndef PRODUCT
void CountedLoopNode::dump_spec(outputStream *st) const {
  LoopNode::dump_spec(st);
  if (stride_is_con()) {
    st->print("stride: %d ",stride_con());
  }
  if (is_pre_loop ()) st->print("pre of N%d" , _main_idx);
  if (is_main_loop()) st->print("main of N%d", _idx);
  if (is_post_loop()) st->print("post of N%d", _main_idx);
  if (is_strip_mined()) st->print(" strip mined");
}
#endif

//=============================================================================
int CountedLoopEndNode::stride_con() const {
  return stride()->bottom_type()->is_int()->get_con();
}

//=============================================================================
//------------------------------Value-----------------------------------------
const Type* LoopLimitNode::Value(PhaseGVN* phase) const {
  const Type* init_t   = phase->type(in(Init));
  const Type* limit_t  = phase->type(in(Limit));
  const Type* stride_t = phase->type(in(Stride));
  // Either input is TOP ==> the result is TOP
  if (init_t   == Type::TOP) return Type::TOP;
  if (limit_t  == Type::TOP) return Type::TOP;
  if (stride_t == Type::TOP) return Type::TOP;

  int stride_con = stride_t->is_int()->get_con();
  if (stride_con == 1)
    return NULL;  // Identity

  if (init_t->is_int()->is_con() && limit_t->is_int()->is_con()) {
    // Use jlongs to avoid integer overflow.
    jlong init_con   = init_t->is_int()->get_con();
    jlong limit_con  = limit_t->is_int()->get_con();
    int  stride_m    = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    jlong final_con  = init_con + stride_con*trip_count;
    int final_int = (int)final_con;
    // The final value should be in integer range since the loop
    // is counted and the limit was checked for overflow.
    assert(final_con == (jlong)final_int, "final value should be integer");
    return TypeInt::make(final_int);
  }

  return bottom_type(); // TypeInt::INT
}

//------------------------------Ideal------------------------------------------
// Return a node which is more "ideal" than the current node.
Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (phase->type(in(Init))   == Type::TOP ||
      phase->type(in(Limit))  == Type::TOP ||
      phase->type(in(Stride)) == Type::TOP)
    return NULL;  // Dead

  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1)
    return NULL;  // Identity

  if (in(Init)->is_Con() && in(Limit)->is_Con())
    return NULL;  // Value

  // Delay following optimizations until all loop optimizations
  // done to keep Ideal graph simple.
  if (!can_reshape || phase->C->major_progress())
    return NULL;

  const TypeInt* init_t  = phase->type(in(Init) )->is_int();
  const TypeInt* limit_t = phase->type(in(Limit))->is_int();
  int stride_p;
  jlong lim, ini;
  julong max;
  if (stride_con > 0) {
    stride_p = stride_con;
    lim = limit_t->_hi;
    ini = init_t->_lo;
    max = (julong)max_jint;
  } else {
    stride_p = -stride_con;
    lim = init_t->_hi;
    ini = limit_t->_lo;
    max = (julong)min_jint;
  }
  julong range = lim - ini + stride_p;
  if (range <= max) {
    // Convert to integer expression if it does not overflow.
    Node* stride_m = phase->intcon(stride_con - (stride_con > 0 ? 1 : -1));
    Node *range = phase->transform(new SubINode(in(Limit), in(Init)));
    Node *bias  = phase->transform(new AddINode(range, stride_m));
    Node *trip  = phase->transform(new DivINode(0, bias, in(Stride)));
    Node *span  = phase->transform(new MulINode(trip, in(Stride)));
    return new AddINode(span, in(Init)); // exact limit
  }

  if (is_power_of_2(stride_p) ||                // divisor is 2^n
      !Matcher::has_match_rule(Op_LoopLimit)) { // or no specialized Mach node?
    // Convert to long expression to avoid integer overflow
    // and let igvn optimizer convert this division.
    //
    Node*   init   = phase->transform( new ConvI2LNode(in(Init)));
    Node*  limit   = phase->transform( new ConvI2LNode(in(Limit)));
    Node* stride   = phase->longcon(stride_con);
    Node* stride_m = phase->longcon(stride_con - (stride_con > 0 ? 1 : -1));

    Node *range = phase->transform(new SubLNode(limit, init));
    Node *bias  = phase->transform(new AddLNode(range, stride_m));
    Node *span;
    if (stride_con > 0 && is_power_of_2(stride_p)) {
      // bias >= 0 if stride > 0, so if stride is 2^n we can use &(-stride)
      // and avoid generating rounding for division. Zero trip guard should
      // guarantee that init < limit but sometimes the guard is missing and
      // we can get a situation when init > limit. Note, for the empty loop
      // optimization zero trip guard is generated explicitly which leaves
      // only RCE predicate where exact limit is used and the predicate
      // will simply fail forcing recompilation.
      Node* neg_stride = phase->longcon(-stride_con);
      span = phase->transform(new AndLNode(bias, neg_stride));
    } else {
      Node *trip = phase->transform(new DivLNode(0, bias, stride));
      span = phase->transform(new MulLNode(trip, stride));
    }
    // Convert back to int
    Node *span_int = phase->transform(new ConvL2INode(span));
    return new AddINode(span_int, in(Init)); // exact limit
  }

  return NULL;    // No progress
}

//------------------------------Identity---------------------------------------
// If stride == 1 return limit node.
Node* LoopLimitNode::Identity(PhaseGVN* phase) {
  int stride_con = phase->type(in(Stride))->is_int()->get_con();
  if (stride_con == 1 || stride_con == -1)
    return in(Limit);
  return this;
}

//=============================================================================
//----------------------match_incr_with_optional_truncation--------------------
// Match increment with optional truncation:
// CHAR: (i+1)&0x7fff, BYTE: ((i+1)<<8)>>8, or SHORT: ((i+1)<<16)>>16
// Return NULL for failure. Success returns the increment node.
Node* CountedLoopNode::match_incr_with_optional_truncation(
                       Node* expr, Node** trunc1, Node** trunc2, const TypeInt** trunc_type) {
  // Quick cutouts:
  if (expr == NULL || expr->req() != 3)  return NULL;

  Node *t1 = NULL;
  Node *t2 = NULL;
  const TypeInt* trunc_t = TypeInt::INT;
  Node* n1 = expr;
  int   n1op = n1->Opcode();

  // Try to strip (n1 & M) or (n1 << N >> N) from n1.
  if (n1op == Op_AndI &&
      n1->in(2)->is_Con() &&
      n1->in(2)->bottom_type()->is_int()->get_con() == 0x7fff) {
    // %%% This check should match any mask of 2**K-1.
    t1 = n1;
    n1 = t1->in(1);
    n1op = n1->Opcode();
    trunc_t = TypeInt::CHAR;
  } else if (n1op == Op_RShiftI &&
             n1->in(1) != NULL &&
             n1->in(1)->Opcode() == Op_LShiftI &&
             n1->in(2) == n1->in(1)->in(2) &&
             n1->in(2)->is_Con()) {
    jint shift = n1->in(2)->bottom_type()->is_int()->get_con();
    // %%% This check should match any shift in [1..31].
    if (shift == 16 || shift == 8) {
      t1 = n1;
      t2 = t1->in(1);
      n1 = t2->in(1);
      n1op = n1->Opcode();
      if (shift == 16) {
        trunc_t = TypeInt::SHORT;
      } else if (shift == 8) {
        trunc_t = TypeInt::BYTE;
      }
    }
  }

  // If (maybe after stripping) it is an AddI, we won:
  if (n1op == Op_AddI) {
    *trunc1 = t1;
    *trunc2 = t2;
    *trunc_type = trunc_t;
    return n1;
  }

  // failed
  return NULL;
}

LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
  if (is_strip_mined()) {
    verify_strip_mined(expect_skeleton);
    return in(EntryControl)->as_Loop();
  }
  return this;
}

OuterStripMinedLoopNode* CountedLoopNode::outer_loop() const {
  assert(is_strip_mined(), "not a strip mined loop");
  Node* c = in(EntryControl);
  if (c == NULL || c->is_top() || !c->is_OuterStripMinedLoop()) {
    return NULL;
  }
  return c->as_OuterStripMinedLoop();
}

IfTrueNode* OuterStripMinedLoopNode::outer_loop_tail() const {
  Node* c = in(LoopBackControl);
  if (c == NULL || c->is_top()) {
    return NULL;
  }
  return c->as_IfTrue();
}

IfTrueNode* CountedLoopNode::outer_loop_tail() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_loop_tail();
}

OuterStripMinedLoopEndNode* OuterStripMinedLoopNode::outer_loop_end() const {
  IfTrueNode* proj = outer_loop_tail();
  if (proj == NULL) {
    return NULL;
  }
  Node* c = proj->in(0);
  if (c == NULL || c->is_top() || c->outcnt() != 2) {
    return NULL;
  }
  return c->as_OuterStripMinedLoopEnd();
}

OuterStripMinedLoopEndNode* CountedLoopNode::outer_loop_end() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_loop_end();
}

IfFalseNode* OuterStripMinedLoopNode::outer_loop_exit() const {
  IfNode* le = outer_loop_end();
  if (le == NULL) {
    return NULL;
  }
  Node* c = le->proj_out_or_null(false);
  if (c == NULL) {
    return NULL;
  }
  return c->as_IfFalse();
}

IfFalseNode* CountedLoopNode::outer_loop_exit() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_loop_exit();
}

SafePointNode* OuterStripMinedLoopNode::outer_safepoint() const {
  IfNode* le = outer_loop_end();
  if (le == NULL) {
    return NULL;
  }
  Node* c = le->in(0);
  if (c == NULL || c->is_top()) {
    return NULL;
  }
  assert(c->Opcode() == Op_SafePoint, "broken outer loop");
  return c->as_SafePoint();
}

SafePointNode* CountedLoopNode::outer_safepoint() const {
  LoopNode* l = outer_loop();
  if (l == NULL) {
    return NULL;
  }
  return l->outer_safepoint();
}

Node* CountedLoopNode::skip_predicates_from_entry(Node* ctrl) {
  while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If() &&
         ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->outcnt() == 1 &&
         ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->unique_out()->Opcode() == Op_Halt) {
    ctrl = ctrl->in(0)->in(0);
  }

  return ctrl;
}

Node* CountedLoopNode::skip_predicates() {
  if (is_main_loop()) {
    Node* ctrl = skip_strip_mined()->in(LoopNode::EntryControl);

    return skip_predicates_from_entry(ctrl);
  }
  return in(LoopNode::EntryControl);
}

void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
  // Look for the outer & inner strip mined loop, reduce number of
  // iterations of the inner loop, set exit condition of outer loop,
  // construct required phi nodes for outer loop.
  CountedLoopNode* inner_cl = unique_ctrl_out()->as_CountedLoop();
  assert(inner_cl->is_strip_mined(), "inner loop should be strip mined");
  Node* inner_iv_phi = inner_cl->phi();
  if (inner_iv_phi == NULL) {
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }
  CountedLoopEndNode* inner_cle = inner_cl->loopexit();

  int stride = inner_cl->stride_con();
  jlong scaled_iters_long = ((jlong)LoopStripMiningIter) * ABS(stride);
  int scaled_iters = (int)scaled_iters_long;
  int short_scaled_iters = LoopStripMiningIterShortLoop* ABS(stride);
  const TypeInt* inner_iv_t = igvn->type(inner_iv_phi)->is_int();
  jlong iter_estimate = (jlong)inner_iv_t->_hi - (jlong)inner_iv_t->_lo;
  assert(iter_estimate > 0, "broken");
  if ((jlong)scaled_iters != scaled_iters_long || iter_estimate <= short_scaled_iters) {
    // Remove outer loop and safepoint (too few iterations)
    Node* outer_sfpt = outer_safepoint();
    Node* outer_out = outer_loop_exit();
    igvn->replace_node(outer_out, outer_sfpt->in(0));
    igvn->replace_input_of(outer_sfpt, 0, igvn->C->top());
    inner_cl->clear_strip_mined();
    return;
  }
  if (iter_estimate <= scaled_iters_long) {
    // We would only go through one iteration of
    // the outer loop: drop the outer loop but
    // keep the safepoint so we don't run for
    // too long without a safepoint
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
    return;
  }

  Node* cle_tail = inner_cle->proj_out(true);
  ResourceMark rm;
  Node_List old_new;
  if (cle_tail->outcnt() > 1) {
    // Look for nodes on backedge of inner loop and clone them
    Unique_Node_List backedge_nodes;
    for (DUIterator_Fast imax, i = cle_tail->fast_outs(imax); i < imax; i++) {
      Node* u = cle_tail->fast_out(i);
      if (u != inner_cl) {
        assert(!u->is_CFG(), "control flow on the backedge?");
        backedge_nodes.push(u);
      }
    }
    uint last = igvn->C->unique();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node* n = backedge_nodes.at(next);
      old_new.map(n->_idx, n->clone());
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        assert(!u->is_CFG(), "broken");
        if (u->_idx >= last) {
          continue;
        }
        if (!u->is_Phi()) {
          backedge_nodes.push(u);
        } else {
          assert(u->in(0) == inner_cl, "strange phi on the backedge");
        }
      }
    }
    // Put the clones on the outer loop backedge
    Node* le_tail = outer_loop_tail();
    for (uint next = 0; next < backedge_nodes.size(); next++) {
      Node *n = old_new[backedge_nodes.at(next)->_idx];
      for (uint i = 1; i < n->req(); i++) {
        if (n->in(i) != NULL && old_new[n->in(i)->_idx] != NULL) {
          n->set_req(i, old_new[n->in(i)->_idx]);
        }
      }
      if (n->in(0) != NULL && n->in(0) == cle_tail) {
        n->set_req(0, le_tail);
      }
      igvn->register_new_node_with_optimizer(n);
    }
  }

  Node* iv_phi = NULL;
  // Make a clone of each phi in the inner loop
  // for the outer loop
  for (uint i = 0; i < inner_cl->outcnt(); i++) {
    Node* u = inner_cl->raw_out(i);
    if (u->is_Phi()) {
      assert(u->in(0) == inner_cl, "inconsistent");
      Node* phi = u->clone();
      phi->set_req(0, this);
      Node* be = old_new[phi->in(LoopNode::LoopBackControl)->_idx];
      if (be != NULL) {
        phi->set_req(LoopNode::LoopBackControl, be);
      }
      phi = igvn->transform(phi);
      igvn->replace_input_of(u, LoopNode::EntryControl, phi);
      if (u == inner_iv_phi) {
        iv_phi = phi;
      }
    }
  }
  Node* cle_out = inner_cle->proj_out(false);
  if (cle_out->outcnt() > 1) {
    // Look for chains of stores that were sunk
    // out of the inner loop and are in the outer loop
    for (DUIterator_Fast imax, i = cle_out->fast_outs(imax); i < imax; i++) {
      Node* u = cle_out->fast_out(i);
      if (u->is_Store()) {
        Node* first = u;
        for (;;) {
          Node* next = first->in(MemNode::Memory);
          if (!next->is_Store() || next->in(0) != cle_out) {
            break;
          }
          first = next;
        }
        Node* last = u;
        for (;;) {
          Node* next = NULL;
          for (DUIterator_Fast jmax, j = last->fast_outs(jmax); j < jmax; j++) {
            Node* uu = last->fast_out(j);
            if (uu->is_Store() && uu->in(0) == cle_out) {
              assert(next == NULL, "only one in the outer loop");
              next = uu;
            }
          }
          if (next == NULL) {
            break;
          }
          last = next;
        }
        Node* phi = NULL;
        for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
          Node* uu = fast_out(j);
          if (uu->is_Phi()) {
            Node* be = uu->in(LoopNode::LoopBackControl);
            if (be->is_Store() && old_new[be->_idx] != NULL) {
              assert(false, "store on the backedge + sunk stores: unsupported");
              // drop outer loop
              IfNode* outer_le = outer_loop_end();
              Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
              igvn->replace_node(outer_le, iff);
              inner_cl->clear_strip_mined();
              return;
            }
            if (be == last || be == first->in(MemNode::Memory)) {
              assert(phi == NULL, "only one phi");
              phi = uu;
            }
          }
        }
#ifdef ASSERT
        for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
          Node* uu = fast_out(j);
          if (uu->is_Phi() && uu->bottom_type() == Type::MEMORY) {
            if (uu->adr_type() == igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type()))) {
              assert(phi == uu, "what's that phi?");
            } else if (uu->adr_type() == TypePtr::BOTTOM) {
              Node* n = uu->in(LoopNode::LoopBackControl);
              uint limit = igvn->C->live_nodes();
              uint i = 0;
              while (n != uu) {
                i++;
                assert(i < limit, "infinite loop");
                if (n->is_Proj()) {
                  n = n->in(0);
                } else if (n->is_SafePoint() || n->is_MemBar()) {
                  n = n->in(TypeFunc::Memory);
                } else if (n->is_Phi()) {
                  n = n->in(1);
                } else if (n->is_MergeMem()) {
                  n = n->as_MergeMem()->memory_at(igvn->C->get_alias_index(u->adr_type()));
                } else if (n->is_Store() || n->is_LoadStore() || n->is_ClearArray()) {
                  n = n->in(MemNode::Memory);
                } else {
                  n->dump();
                  ShouldNotReachHere();
                }
              }
            }
          }
        }
#endif
        if (phi == NULL) {
          // If an entire chain was sunk, the
          // inner loop has no phi for that memory
          // slice, create one for the outer loop
          phi = PhiNode::make(this, first->in(MemNode::Memory), Type::MEMORY,
                              igvn->C->get_adr_type(igvn->C->get_alias_index(u->adr_type())));
          phi->set_req(LoopNode::LoopBackControl, last);
          phi = igvn->transform(phi);
          igvn->replace_input_of(first, MemNode::Memory, phi);
        } else {
          // Or fix the outer loop phi to include
          // that chain of stores.
          Node* be = phi->in(LoopNode::LoopBackControl);
          assert(!(be->is_Store() && old_new[be->_idx] != NULL), "store on the backedge + sunk stores: unsupported");
          if (be == first->in(MemNode::Memory)) {
            if (be == phi->in(LoopNode::LoopBackControl)) {
              igvn->replace_input_of(phi, LoopNode::LoopBackControl, last);
            } else {
              igvn->replace_input_of(be, MemNode::Memory, last);
            }
          } else {
#ifdef ASSERT
            if (be == phi->in(LoopNode::LoopBackControl)) {
              assert(phi->in(LoopNode::LoopBackControl) == last, "");
            } else {
              assert(be->in(MemNode::Memory) == last, "");
            }
#endif
          }
        }
      }
    }
  }

  if (iv_phi != NULL) {
    // Now adjust the inner loop's exit condition
    Node* limit = inner_cl->limit();
    Node* sub = NULL;
    if (stride > 0) {
      sub = igvn->transform(new SubINode(limit, iv_phi));
    } else {
      sub = igvn->transform(new SubINode(iv_phi, limit));
    }
    Node* min = igvn->transform(new MinINode(sub, igvn->intcon(scaled_iters)));
    Node* new_limit = NULL;
    if (stride > 0) {
      new_limit = igvn->transform(new AddINode(min, iv_phi));
    } else {
      new_limit = igvn->transform(new SubINode(iv_phi, min));
    }
    Node* inner_cmp = inner_cle->cmp_node();
    Node* inner_bol = inner_cle->in(CountedLoopEndNode::TestValue);
    Node* outer_bol = inner_bol;
    // cmp node for inner loop may be shared
    inner_cmp = inner_cmp->clone();
    inner_cmp->set_req(2, new_limit);
    inner_bol = inner_bol->clone();
    inner_bol->set_req(1, igvn->transform(inner_cmp));
    igvn->replace_input_of(inner_cle, CountedLoopEndNode::TestValue, igvn->transform(inner_bol));
    // Set the outer loop's exit condition too
    igvn->replace_input_of(outer_loop_end(), 1, outer_bol);
  } else {
    assert(false, "should be able to adjust outer loop");
    IfNode* outer_le = outer_loop_end();
    Node* iff = igvn->transform(new IfNode(outer_le->in(0), outer_le->in(1), outer_le->_prob, outer_le->_fcnt));
    igvn->replace_node(outer_le, iff);
    inner_cl->clear_strip_mined();
  }
}

const Type* OuterStripMinedLoopEndNode::Value(PhaseGVN* phase) const {
  if (!in(0)) return Type::TOP;
  if (phase->type(in(0)) == Type::TOP)
    return Type::TOP;

  return TypeTuple::IFBOTH;
}

Node *OuterStripMinedLoopEndNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  return NULL;
}

//------------------------------filtered_type--------------------------------
// Return a type based on condition control flow
// A successful return will be a type that is restricted due
// to a series of dominating if-tests, such as:
//    if (i < 10) {
//       if (i > 0) {
//          here: "i" type is [1..10)
//       }
//    }
// or a control flow merge
//    if (i < 10) {
//       do {
//          phi( , ) -- at top of loop type is [min_int..10)
//          i = ?
1645 // } while ( i < 10) 1646 // 1647 const TypeInt* PhaseIdealLoop::filtered_type( Node *n, Node* n_ctrl) { 1648 assert(n && n->bottom_type()->is_int(), "must be int"); 1649 const TypeInt* filtered_t = NULL; 1650 if (!n->is_Phi()) { 1651 assert(n_ctrl != NULL || n_ctrl == C->top(), "valid control"); 1652 filtered_t = filtered_type_from_dominators(n, n_ctrl); 1653 1654 } else { 1655 Node* phi = n->as_Phi(); 1656 Node* region = phi->in(0); 1657 assert(n_ctrl == NULL || n_ctrl == region, "ctrl parameter must be region"); 1658 if (region && region != C->top()) { 1659 for (uint i = 1; i < phi->req(); i++) { 1660 Node* val = phi->in(i); 1661 Node* use_c = region->in(i); 1662 const TypeInt* val_t = filtered_type_from_dominators(val, use_c); 1663 if (val_t != NULL) { 1664 if (filtered_t == NULL) { 1665 filtered_t = val_t; 1666 } else { 1667 filtered_t = filtered_t->meet(val_t)->is_int(); 1668 } 1669 } 1670 } 1671 } 1672 } 1673 const TypeInt* n_t = _igvn.type(n)->is_int(); 1674 if (filtered_t != NULL) { 1675 n_t = n_t->join(filtered_t)->is_int(); 1676 } 1677 return n_t; 1678 } 1679 1680 1681 //------------------------------filtered_type_from_dominators-------------------------------- 1682 // Return a possibly more restrictive type for val based on condition control flow of dominators 1683 const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *use_ctrl) { 1684 if (val->is_Con()) { 1685 return val->bottom_type()->is_int(); 1686 } 1687 uint if_limit = 10; // Max number of dominating if's visited 1688 const TypeInt* rtn_t = NULL; 1689 1690 if (use_ctrl && use_ctrl != C->top()) { 1691 Node* val_ctrl = get_ctrl(val); 1692 uint val_dom_depth = dom_depth(val_ctrl); 1693 Node* pred = use_ctrl; 1694 uint if_cnt = 0; 1695 while (if_cnt < if_limit) { 1696 if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) { 1697 if_cnt++; 1698 const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred); 1699 if (if_t != NULL) { 1700 if (rtn_t == NULL) { 1701 rtn_t = if_t; 1702 } else { 1703 rtn_t = rtn_t->join(if_t)->is_int(); 1704 } 1705 } 1706 } 1707 pred = idom(pred); 1708 if (pred == NULL || pred == C->top()) { 1709 break; 1710 } 1711 // Stop if going beyond definition block of val 1712 if (dom_depth(pred) < val_dom_depth) { 1713 break; 1714 } 1715 } 1716 } 1717 return rtn_t; 1718 } 1719 1720 1721 //------------------------------dump_spec-------------------------------------- 1722 // Dump special per-node info 1723 #ifndef PRODUCT 1724 void CountedLoopEndNode::dump_spec(outputStream *st) const { 1725 if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) { 1726 BoolTest bt( test_trip()); // Added this for g++. 1727 1728 st->print("["); 1729 bt.dump_on(st); 1730 st->print("]"); 1731 } 1732 st->print(" "); 1733 IfNode::dump_spec(st); 1734 } 1735 #endif 1736 1737 //============================================================================= 1738 //------------------------------is_member-------------------------------------- 1739 // Is 'l' a member of 'this'? 1740 bool IdealLoopTree::is_member(const IdealLoopTree *l) const { 1741 while( l->_nest > _nest ) l = l->_parent; 1742 return l == this; 1743 } 1744 1745 //------------------------------set_nest--------------------------------------- 1746 // Set loop tree nesting depth. Accumulate _has_call bits. 
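// For example, _ltree_root->set_nest(0) leaves the root at depth 0, its children at
// depth 1, grandchildren at depth 2, and so on. A call anywhere below a loop sets
// that loop's _has_call; the combined bit is also returned so enclosing loops see it.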
1747 int IdealLoopTree::set_nest( uint depth ) { 1748 _nest = depth; 1749 int bits = _has_call; 1750 if( _child ) bits |= _child->set_nest(depth+1); 1751 if( bits ) _has_call = 1; 1752 if( _next ) bits |= _next ->set_nest(depth ); 1753 return bits; 1754 } 1755 1756 //------------------------------split_fall_in---------------------------------- 1757 // Split out multiple fall-in edges from the loop header. Move them to a 1758 // private RegionNode before the loop. This becomes the loop landing pad. 1759 void IdealLoopTree::split_fall_in( PhaseIdealLoop *phase, int fall_in_cnt ) { 1760 PhaseIterGVN &igvn = phase->_igvn; 1761 uint i; 1762 1763 // Make a new RegionNode to be the landing pad. 1764 Node *landing_pad = new RegionNode( fall_in_cnt+1 ); 1765 phase->set_loop(landing_pad,_parent); 1766 // Gather all the fall-in control paths into the landing pad 1767 uint icnt = fall_in_cnt; 1768 uint oreq = _head->req(); 1769 for( i = oreq-1; i>0; i-- ) 1770 if( !phase->is_member( this, _head->in(i) ) ) 1771 landing_pad->set_req(icnt--,_head->in(i)); 1772 1773 // Peel off PhiNode edges as well 1774 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { 1775 Node *oj = _head->fast_out(j); 1776 if( oj->is_Phi() ) { 1777 PhiNode* old_phi = oj->as_Phi(); 1778 assert( old_phi->region() == _head, "" ); 1779 igvn.hash_delete(old_phi); // Yank from hash before hacking edges 1780 Node *p = PhiNode::make_blank(landing_pad, old_phi); 1781 uint icnt = fall_in_cnt; 1782 for( i = oreq-1; i>0; i-- ) { 1783 if( !phase->is_member( this, _head->in(i) ) ) { 1784 p->init_req(icnt--, old_phi->in(i)); 1785 // Go ahead and clean out old edges from old phi 1786 old_phi->del_req(i); 1787 } 1788 } 1789 // Search for CSE's here, because ZKM.jar does a lot of 1790 // loop hackery and we need to be a little incremental 1791 // with the CSE to avoid O(N^2) node blow-up. 1792 Node *p2 = igvn.hash_find_insert(p); // Look for a CSE 1793 if( p2 ) { // Found CSE 1794 p->destruct(); // Recover useless new node 1795 p = p2; // Use old node 1796 } else { 1797 igvn.register_new_node_with_optimizer(p, old_phi); 1798 } 1799 // Make old Phi refer to new Phi. 1800 old_phi->add_req(p); 1801 // Check for the special case of making the old phi useless and 1802 // disappear it. In JavaGrande I have a case where this useless 1803 // Phi is the loop limit and prevents recognizing a CountedLoop 1804 // which in turn prevents removing an empty loop. 1805 Node *id_old_phi = igvn.apply_identity(old_phi); 1806 if( id_old_phi != old_phi ) { // Found a simple identity? 1807 // Note that I cannot call 'replace_node' here, because 1808 // that will yank the edge from old_phi to the Region and 1809 // I'm mid-iteration over the Region's uses. 
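// Instead, the loop below edits each user of old_phi in place (with a delayed
// rehash) and steps the iterator back by however many edges were rewritten.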
1810 for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) { 1811 Node* use = old_phi->last_out(i); 1812 igvn.rehash_node_delayed(use); 1813 uint uses_found = 0; 1814 for (uint j = 0; j < use->len(); j++) { 1815 if (use->in(j) == old_phi) { 1816 if (j < use->req()) use->set_req (j, id_old_phi); 1817 else use->set_prec(j, id_old_phi); 1818 uses_found++; 1819 } 1820 } 1821 i -= uses_found; // we deleted 1 or more copies of this edge 1822 } 1823 } 1824 igvn._worklist.push(old_phi); 1825 } 1826 } 1827 // Finally clean out the fall-in edges from the RegionNode 1828 for( i = oreq-1; i>0; i-- ) { 1829 if( !phase->is_member( this, _head->in(i) ) ) { 1830 _head->del_req(i); 1831 } 1832 } 1833 igvn.rehash_node_delayed(_head); 1834 // Transform landing pad 1835 igvn.register_new_node_with_optimizer(landing_pad, _head); 1836 // Insert landing pad into the header 1837 _head->add_req(landing_pad); 1838 } 1839 1840 //------------------------------split_outer_loop------------------------------- 1841 // Split out the outermost loop from this shared header. 1842 void IdealLoopTree::split_outer_loop( PhaseIdealLoop *phase ) { 1843 PhaseIterGVN &igvn = phase->_igvn; 1844 1845 // Find index of outermost loop; it should also be my tail. 1846 uint outer_idx = 1; 1847 while( _head->in(outer_idx) != _tail ) outer_idx++; 1848 1849 // Make a LoopNode for the outermost loop. 1850 Node *ctl = _head->in(LoopNode::EntryControl); 1851 Node *outer = new LoopNode( ctl, _head->in(outer_idx) ); 1852 outer = igvn.register_new_node_with_optimizer(outer, _head); 1853 phase->set_created_loop_node(); 1854 1855 // Outermost loop falls into '_head' loop 1856 _head->set_req(LoopNode::EntryControl, outer); 1857 _head->del_req(outer_idx); 1858 // Split all the Phis up between '_head' loop and 'outer' loop. 1859 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { 1860 Node *out = _head->fast_out(j); 1861 if( out->is_Phi() ) { 1862 PhiNode *old_phi = out->as_Phi(); 1863 assert( old_phi->region() == _head, "" ); 1864 Node *phi = PhiNode::make_blank(outer, old_phi); 1865 phi->init_req(LoopNode::EntryControl, old_phi->in(LoopNode::EntryControl)); 1866 phi->init_req(LoopNode::LoopBackControl, old_phi->in(outer_idx)); 1867 phi = igvn.register_new_node_with_optimizer(phi, old_phi); 1868 // Make old Phi point to new Phi on the fall-in path 1869 igvn.replace_input_of(old_phi, LoopNode::EntryControl, phi); 1870 old_phi->del_req(outer_idx); 1871 } 1872 } 1873 1874 // Use the new loop head instead of the old shared one 1875 _head = outer; 1876 phase->set_loop(_head, this); 1877 } 1878 1879 //------------------------------fix_parent------------------------------------- 1880 static void fix_parent( IdealLoopTree *loop, IdealLoopTree *parent ) { 1881 loop->_parent = parent; 1882 if( loop->_child ) fix_parent( loop->_child, loop ); 1883 if( loop->_next ) fix_parent( loop->_next , parent ); 1884 } 1885 1886 //------------------------------estimate_path_freq----------------------------- 1887 static float estimate_path_freq( Node *n ) { 1888 // Try to extract some path frequency info 1889 IfNode *iff; 1890 for( int i = 0; i < 50; i++ ) { // Skip through a bunch of uncommon tests 1891 uint nop = n->Opcode(); 1892 if( nop == Op_SafePoint ) { // Skip any safepoint 1893 n = n->in(0); 1894 continue; 1895 } 1896 if( nop == Op_CatchProj ) { // Get count from a prior call 1897 // Assume call does not always throw exceptions: means the call-site 1898 // count is also the frequency of the fall-through path. 
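// For example, the fall-through CatchProj of a call with mature profile data
// contributes count()/FreqCountInvocations; exception projections and immature
// profiles are treated as frequency 0.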
1899 assert( n->is_CatchProj(), "" ); 1900 if( ((CatchProjNode*)n)->_con != CatchProjNode::fall_through_index ) 1901 return 0.0f; // Assume call exception path is rare 1902 Node *call = n->in(0)->in(0)->in(0); 1903 assert( call->is_Call(), "expect a call here" ); 1904 const JVMState *jvms = ((CallNode*)call)->jvms(); 1905 ciMethodData* methodData = jvms->method()->method_data(); 1906 if (!methodData->is_mature()) return 0.0f; // No call-site data 1907 ciProfileData* data = methodData->bci_to_data(jvms->bci()); 1908 if ((data == NULL) || !data->is_CounterData()) { 1909 // no call profile available, try call's control input 1910 n = n->in(0); 1911 continue; 1912 } 1913 return data->as_CounterData()->count()/FreqCountInvocations; 1914 } 1915 // See if there's a gating IF test 1916 Node *n_c = n->in(0); 1917 if( !n_c->is_If() ) break; // No estimate available 1918 iff = n_c->as_If(); 1919 if( iff->_fcnt != COUNT_UNKNOWN ) // Have a valid count? 1920 // Compute how much count comes on this path 1921 return ((nop == Op_IfTrue) ? iff->_prob : 1.0f - iff->_prob) * iff->_fcnt; 1922 // Have no count info. Skip dull uncommon-trap like branches. 1923 if( (nop == Op_IfTrue && iff->_prob < PROB_LIKELY_MAG(5)) || 1924 (nop == Op_IfFalse && iff->_prob > PROB_UNLIKELY_MAG(5)) ) 1925 break; 1926 // Skip through never-taken branch; look for a real loop exit. 1927 n = iff->in(0); 1928 } 1929 return 0.0f; // No estimate available 1930 } 1931 1932 //------------------------------merge_many_backedges--------------------------- 1933 // Merge all the backedges from the shared header into a private Region. 1934 // Feed that region as the one backedge to this loop. 1935 void IdealLoopTree::merge_many_backedges( PhaseIdealLoop *phase ) { 1936 uint i; 1937 1938 // Scan for the top 2 hottest backedges 1939 float hotcnt = 0.0f; 1940 float warmcnt = 0.0f; 1941 uint hot_idx = 0; 1942 // Loop starts at 2 because slot 1 is the fall-in path 1943 for( i = 2; i < _head->req(); i++ ) { 1944 float cnt = estimate_path_freq(_head->in(i)); 1945 if( cnt > hotcnt ) { // Grab hottest path 1946 warmcnt = hotcnt; 1947 hotcnt = cnt; 1948 hot_idx = i; 1949 } else if( cnt > warmcnt ) { // And 2nd hottest path 1950 warmcnt = cnt; 1951 } 1952 } 1953 1954 // See if the hottest backedge is worthy of being an inner loop 1955 // by being much hotter than the next hottest backedge. 1956 if( hotcnt <= 0.0001 || 1957 hotcnt < 2.0*warmcnt ) hot_idx = 0;// No hot backedge 1958 1959 // Peel out the backedges into a private merge point; peel 1960 // them all except optionally hot_idx. 
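// For example, if the head has backedges in slots 2..4 and slot 3 is much hotter
// than the others, the result is: in(1) the fall-in, in(2) a new Region merging the
// cold backedges from slots 2 and 4, and in(3) the hot backedge, left in place for
// the split_outer_loop() call that follows in beautify_loops().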
1961 PhaseIterGVN &igvn = phase->_igvn; 1962 1963 Node *hot_tail = NULL; 1964 // Make a Region for the merge point 1965 Node *r = new RegionNode(1); 1966 for( i = 2; i < _head->req(); i++ ) { 1967 if( i != hot_idx ) 1968 r->add_req( _head->in(i) ); 1969 else hot_tail = _head->in(i); 1970 } 1971 igvn.register_new_node_with_optimizer(r, _head); 1972 // Plug region into end of loop _head, followed by hot_tail 1973 while( _head->req() > 3 ) _head->del_req( _head->req()-1 ); 1974 igvn.replace_input_of(_head, 2, r); 1975 if( hot_idx ) _head->add_req(hot_tail); 1976 1977 // Split all the Phis up between '_head' loop and the Region 'r' 1978 for (DUIterator_Fast jmax, j = _head->fast_outs(jmax); j < jmax; j++) { 1979 Node *out = _head->fast_out(j); 1980 if( out->is_Phi() ) { 1981 PhiNode* n = out->as_Phi(); 1982 igvn.hash_delete(n); // Delete from hash before hacking edges 1983 Node *hot_phi = NULL; 1984 Node *phi = new PhiNode(r, n->type(), n->adr_type()); 1985 // Check all inputs for the ones to peel out 1986 uint j = 1; 1987 for( uint i = 2; i < n->req(); i++ ) { 1988 if( i != hot_idx ) 1989 phi->set_req( j++, n->in(i) ); 1990 else hot_phi = n->in(i); 1991 } 1992 // Register the phi but do not transform until whole place transforms 1993 igvn.register_new_node_with_optimizer(phi, n); 1994 // Add the merge phi to the old Phi 1995 while( n->req() > 3 ) n->del_req( n->req()-1 ); 1996 igvn.replace_input_of(n, 2, phi); 1997 if( hot_idx ) n->add_req(hot_phi); 1998 } 1999 } 2000 2001 2002 // Insert a new IdealLoopTree inserted below me. Turn it into a clone 2003 // of self loop tree. Turn self into a loop headed by _head and with 2004 // tail being the new merge point. 2005 IdealLoopTree *ilt = new IdealLoopTree( phase, _head, _tail ); 2006 phase->set_loop(_tail,ilt); // Adjust tail 2007 _tail = r; // Self's tail is new merge point 2008 phase->set_loop(r,this); 2009 ilt->_child = _child; // New guy has my children 2010 _child = ilt; // Self has new guy as only child 2011 ilt->_parent = this; // new guy has self for parent 2012 ilt->_nest = _nest; // Same nesting depth (for now) 2013 2014 // Starting with 'ilt', look for child loop trees using the same shared 2015 // header. Flatten these out; they will no longer be loops in the end. 2016 IdealLoopTree **pilt = &_child; 2017 while( ilt ) { 2018 if( ilt->_head == _head ) { 2019 uint i; 2020 for( i = 2; i < _head->req(); i++ ) 2021 if( _head->in(i) == ilt->_tail ) 2022 break; // Still a loop 2023 if( i == _head->req() ) { // No longer a loop 2024 // Flatten ilt. Hang ilt's "_next" list from the end of 2025 // ilt's '_child' list. Move the ilt's _child up to replace ilt. 2026 IdealLoopTree **cp = &ilt->_child; 2027 while( *cp ) cp = &(*cp)->_next; // Find end of child list 2028 *cp = ilt->_next; // Hang next list at end of child list 2029 *pilt = ilt->_child; // Move child up to replace ilt 2030 ilt->_head = NULL; // Flag as a loop UNIONED into parent 2031 ilt = ilt->_child; // Repeat using new ilt 2032 continue; // do not advance over ilt->_child 2033 } 2034 assert( ilt->_tail == hot_tail, "expected to only find the hot inner loop here" ); 2035 phase->set_loop(_head,ilt); 2036 } 2037 pilt = &ilt->_child; // Advance to next 2038 ilt = *pilt; 2039 } 2040 2041 if( _child ) fix_parent( _child, this ); 2042 } 2043 2044 //------------------------------beautify_loops--------------------------------- 2045 // Split shared headers and insert loop landing pads. 2046 // Insert a LoopNode to replace the RegionNode. 
2047 // Return TRUE if loop tree is structurally changed.
2048 bool IdealLoopTree::beautify_loops( PhaseIdealLoop *phase ) {
2049 bool result = false;
2050 // Cache parts in locals for easy access
2051 PhaseIterGVN &igvn = phase->_igvn;
2052 
2053 igvn.hash_delete(_head); // Yank from hash before hacking edges
2054 
2055 // Check for multiple fall-in paths. Peel off a landing pad if need be.
2056 int fall_in_cnt = 0;
2057 for( uint i = 1; i < _head->req(); i++ )
2058 if( !phase->is_member( this, _head->in(i) ) )
2059 fall_in_cnt++;
2060 assert( fall_in_cnt, "at least 1 fall-in path" );
2061 if( fall_in_cnt > 1 ) // Need a loop landing pad to merge fall-ins
2062 split_fall_in( phase, fall_in_cnt );
2063 
2064 // Swap inputs to the _head and all Phis to move the fall-in edge to
2065 // the left.
2066 fall_in_cnt = 1;
2067 while( phase->is_member( this, _head->in(fall_in_cnt) ) )
2068 fall_in_cnt++;
2069 if( fall_in_cnt > 1 ) {
2070 // Since I am just swapping inputs I do not need to update def-use info
2071 Node *tmp = _head->in(1);
2072 igvn.rehash_node_delayed(_head);
2073 _head->set_req( 1, _head->in(fall_in_cnt) );
2074 _head->set_req( fall_in_cnt, tmp );
2075 // Swap also all Phis
2076 for (DUIterator_Fast imax, i = _head->fast_outs(imax); i < imax; i++) {
2077 Node* phi = _head->fast_out(i);
2078 if( phi->is_Phi() ) {
2079 igvn.rehash_node_delayed(phi); // Yank from hash before hacking edges
2080 tmp = phi->in(1);
2081 phi->set_req( 1, phi->in(fall_in_cnt) );
2082 phi->set_req( fall_in_cnt, tmp );
2083 }
2084 }
2085 }
2086 assert( !phase->is_member( this, _head->in(1) ), "left edge is fall-in" );
2087 assert( phase->is_member( this, _head->in(2) ), "right edge is loop" );
2088 
2089 // If I am a shared header (multiple backedges), peel off the many
2090 // backedges into a private merge point and use the merge point as
2091 // the one true backedge.
2092 if( _head->req() > 3 ) {
2093 // Merge the many backedges into a single backedge but leave
2094 // the hottest backedge as separate edge for the following peel.
2095 merge_many_backedges( phase );
2096 result = true;
2097 }
2098 
2099 // If I have one hot backedge, peel off my own loop.
2100 // I better be the outermost loop.
2101 if (_head->req() > 3 && !_irreducible) {
2102 split_outer_loop( phase );
2103 result = true;
2104 
2105 } else if (!_head->is_Loop() && !_irreducible) {
2106 // Make a new LoopNode to replace the old loop head
2107 Node *l = new LoopNode( _head->in(1), _head->in(2) );
2108 l = igvn.register_new_node_with_optimizer(l, _head);
2109 phase->set_created_loop_node();
2110 // Go ahead and replace _head
2111 phase->_igvn.replace_node( _head, l );
2112 _head = l;
2113 phase->set_loop(_head, this);
2114 }
2115 
2116 // Now recursively beautify nested loops
2117 if( _child ) result |= _child->beautify_loops( phase );
2118 if( _next ) result |= _next ->beautify_loops( phase );
2119 return result;
2120 }
2121 
2122 //------------------------------allpaths_check_safepts----------------------------
2123 // All-paths backwards scan from the loop tail, terminating each path at the first
2124 // safepoint encountered. Helper for check_safepts.
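// For example, walking backwards from _tail, each path stops at the first
// safepointing call or SafePoint it reaches; a SafePoint that belongs to a deeper
// (inner) loop is recorded in _required_safept so that inner loop will not later
// delete it.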
2125 void IdealLoopTree::allpaths_check_safepts(VectorSet &visited, Node_List &stack) {
2126 assert(stack.size() == 0, "empty stack");
2127 stack.push(_tail);
2128 visited.clear();
2129 visited.set(_tail->_idx);
2130 while (stack.size() > 0) {
2131 Node* n = stack.pop();
2132 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) {
2133 // Terminate this path
2134 } else if (n->Opcode() == Op_SafePoint) {
2135 if (_phase->get_loop(n) != this) {
2136 if (_required_safept == NULL) _required_safept = new Node_List();
2137 _required_safept->push(n); // save the one closest to the tail
2138 }
2139 // Terminate this path
2140 } else {
2141 uint start = n->is_Region() ? 1 : 0;
2142 uint end = n->is_Region() && !n->is_Loop() ? n->req() : start + 1;
2143 for (uint i = start; i < end; i++) {
2144 Node* in = n->in(i);
2145 assert(in->is_CFG(), "must be");
2146 if (!visited.test_set(in->_idx) && is_member(_phase->get_loop(in))) {
2147 stack.push(in);
2148 }
2149 }
2150 }
2151 }
2152 }
2153 
2154 //------------------------------check_safepts----------------------------
2155 // Given dominators, try to find loops with calls that must always be
2156 // executed (call dominates loop tail). These loops do not need non-call
2157 // safepoints (ncsfpt).
2158 //
2159 // A complication is that a safepoint in an inner loop may be needed
2160 // by an outer loop. In the following, the inner loop sees it has a
2161 // call (block 3) on every path from the head (block 2) to the
2162 // backedge (arc 3->2). So it deletes the ncsfpt (non-call safepoint)
2163 // in block 2, _but_ this leaves the outer loop without a safepoint.
2164 //
2165 // entry 0
2166 // |
2167 // v
2168 // outer 1,2 +->1
2169 // | |
2170 // | v
2171 // | 2<---+ ncsfpt in 2
2172 // |_/|\ |
2173 // | v |
2174 // inner 2,3 / 3 | call in 3
2175 // / | |
2176 // v +--+
2177 // exit 4
2178 //
2179 //
2180 // For each loop, this method creates a list (_required_safept) of ncsfpt
2181 // nodes that must be protected. When an ncsfpt may be deleted, it
2182 // is first looked for in the lists for the outer loops of the current loop.
2183 //
2184 // The insights into the problem:
2185 // A) counted loops are okay
2186 // B) innermost loops are okay (only an inner loop can delete
2187 // an ncsfpt needed by an outer loop)
2188 // C) a loop is immune from an inner loop deleting a safepoint
2189 // if the loop has a call on the idom-path
2190 // D) a loop is also immune if it has an ncsfpt (non-call safepoint) on the
2191 // idom-path that is not in a nested loop
2192 // E) otherwise, an ncsfpt on the idom-path that is nested in an inner
2193 // loop needs to be prevented from deletion by an inner loop
2194 //
2195 // There are two analyses:
2196 // 1) The first, and cheaper one, scans the loop body from
2197 // tail to head following the idom (immediate dominator)
2198 // chain, looking for the cases (C,D,E) above.
2199 // Since inner loops are scanned before outer loops, there is summary
2200 // information about inner loops. Inner loops can be skipped over
2201 // when the tail of an inner loop is encountered.
2202 //
2203 // 2) The second, invoked if the first fails to find a call or ncsfpt on
2204 // the idom path (which is rare), scans all predecessor control paths
2205 // from the tail to the head, terminating a path when a call or sfpt
2206 // is encountered, to find the ncsfpt's that are closest to the tail.
2207 // 2208 void IdealLoopTree::check_safepts(VectorSet &visited, Node_List &stack) { 2209 // Bottom up traversal 2210 IdealLoopTree* ch = _child; 2211 if (_child) _child->check_safepts(visited, stack); 2212 if (_next) _next ->check_safepts(visited, stack); 2213 2214 if (!_head->is_CountedLoop() && !_has_sfpt && _parent != NULL && !_irreducible) { 2215 bool has_call = false; // call on dom-path 2216 bool has_local_ncsfpt = false; // ncsfpt on dom-path at this loop depth 2217 Node* nonlocal_ncsfpt = NULL; // ncsfpt on dom-path at a deeper depth 2218 // Scan the dom-path nodes from tail to head 2219 for (Node* n = tail(); n != _head; n = _phase->idom(n)) { 2220 if (n->is_Call() && n->as_Call()->guaranteed_safepoint()) { 2221 has_call = true; 2222 _has_sfpt = 1; // Then no need for a safept! 2223 break; 2224 } else if (n->Opcode() == Op_SafePoint) { 2225 if (_phase->get_loop(n) == this) { 2226 has_local_ncsfpt = true; 2227 break; 2228 } 2229 if (nonlocal_ncsfpt == NULL) { 2230 nonlocal_ncsfpt = n; // save the one closest to the tail 2231 } 2232 } else { 2233 IdealLoopTree* nlpt = _phase->get_loop(n); 2234 if (this != nlpt) { 2235 // If at an inner loop tail, see if the inner loop has already 2236 // recorded seeing a call on the dom-path (and stop.) If not, 2237 // jump to the head of the inner loop. 2238 assert(is_member(nlpt), "nested loop"); 2239 Node* tail = nlpt->_tail; 2240 if (tail->in(0)->is_If()) tail = tail->in(0); 2241 if (n == tail) { 2242 // If inner loop has call on dom-path, so does outer loop 2243 if (nlpt->_has_sfpt) { 2244 has_call = true; 2245 _has_sfpt = 1; 2246 break; 2247 } 2248 // Skip to head of inner loop 2249 assert(_phase->is_dominator(_head, nlpt->_head), "inner head dominated by outer head"); 2250 n = nlpt->_head; 2251 } 2252 } 2253 } 2254 } 2255 // Record safept's that this loop needs preserved when an 2256 // inner loop attempts to delete it's safepoints. 2257 if (_child != NULL && !has_call && !has_local_ncsfpt) { 2258 if (nonlocal_ncsfpt != NULL) { 2259 if (_required_safept == NULL) _required_safept = new Node_List(); 2260 _required_safept->push(nonlocal_ncsfpt); 2261 } else { 2262 // Failed to find a suitable safept on the dom-path. Now use 2263 // an all paths walk from tail to head, looking for safepoints to preserve. 2264 allpaths_check_safepts(visited, stack); 2265 } 2266 } 2267 } 2268 } 2269 2270 //---------------------------is_deleteable_safept---------------------------- 2271 // Is safept not required by an outer loop? 2272 bool PhaseIdealLoop::is_deleteable_safept(Node* sfpt) { 2273 assert(sfpt->Opcode() == Op_SafePoint, ""); 2274 IdealLoopTree* lp = get_loop(sfpt)->_parent; 2275 while (lp != NULL) { 2276 Node_List* sfpts = lp->_required_safept; 2277 if (sfpts != NULL) { 2278 for (uint i = 0; i < sfpts->size(); i++) { 2279 if (sfpt == sfpts->at(i)) 2280 return false; 2281 } 2282 } 2283 lp = lp->_parent; 2284 } 2285 return true; 2286 } 2287 2288 //---------------------------replace_parallel_iv------------------------------- 2289 // Replace parallel induction variable (parallel to trip counter) 2290 void PhaseIdealLoop::replace_parallel_iv(IdealLoopTree *loop) { 2291 assert(loop->_head->is_CountedLoop(), ""); 2292 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 2293 if (!cl->is_valid_counted_loop()) 2294 return; // skip malformed counted loop 2295 Node *incr = cl->incr(); 2296 if (incr == NULL) 2297 return; // Dead loop? 
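// A secondary IV phi2 stepping by stride_con2 per iteration can be expressed in
// terms of the trip counter as phi2 == phi * ratio + (init2 - init * ratio), where
// ratio = stride_con2 / stride_con must divide exactly. The loop below finds such
// phis and replaces them with that expression so that RCE only has to reason about
// the trip counter.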
2298 Node *init = cl->init_trip(); 2299 Node *phi = cl->phi(); 2300 int stride_con = cl->stride_con(); 2301 2302 // Visit all children, looking for Phis 2303 for (DUIterator i = cl->outs(); cl->has_out(i); i++) { 2304 Node *out = cl->out(i); 2305 // Look for other phis (secondary IVs). Skip dead ones 2306 if (!out->is_Phi() || out == phi || !has_node(out)) 2307 continue; 2308 PhiNode* phi2 = out->as_Phi(); 2309 Node *incr2 = phi2->in( LoopNode::LoopBackControl ); 2310 // Look for induction variables of the form: X += constant 2311 if (phi2->region() != loop->_head || 2312 incr2->req() != 3 || 2313 incr2->in(1) != phi2 || 2314 incr2 == incr || 2315 incr2->Opcode() != Op_AddI || 2316 !incr2->in(2)->is_Con()) 2317 continue; 2318 2319 // Check for parallel induction variable (parallel to trip counter) 2320 // via an affine function. In particular, count-down loops with 2321 // count-up array indices are common. We only RCE references off 2322 // the trip-counter, so we need to convert all these to trip-counter 2323 // expressions. 2324 Node *init2 = phi2->in( LoopNode::EntryControl ); 2325 int stride_con2 = incr2->in(2)->get_int(); 2326 2327 // The ratio of the two strides cannot be represented as an int 2328 // if stride_con2 is min_int and stride_con is -1. 2329 if (stride_con2 == min_jint && stride_con == -1) { 2330 continue; 2331 } 2332 2333 // The general case here gets a little tricky. We want to find the 2334 // GCD of all possible parallel IV's and make a new IV using this 2335 // GCD for the loop. Then all possible IVs are simple multiples of 2336 // the GCD. In practice, this will cover very few extra loops. 2337 // Instead we require 'stride_con2' to be a multiple of 'stride_con', 2338 // where +/-1 is the common case, but other integer multiples are 2339 // also easy to handle. 2340 int ratio_con = stride_con2/stride_con; 2341 2342 if ((ratio_con * stride_con) == stride_con2) { // Check for exact 2343 #ifndef PRODUCT 2344 if (TraceLoopOpts) { 2345 tty->print("Parallel IV: %d ", phi2->_idx); 2346 loop->dump_head(); 2347 } 2348 #endif 2349 // Convert to using the trip counter. The parallel induction 2350 // variable differs from the trip counter by a loop-invariant 2351 // amount, the difference between their respective initial values. 2352 // It is scaled by the 'ratio_con'. 2353 Node* ratio = _igvn.intcon(ratio_con); 2354 set_ctrl(ratio, C->root()); 2355 Node* ratio_init = new MulINode(init, ratio); 2356 _igvn.register_new_node_with_optimizer(ratio_init, init); 2357 set_early_ctrl(ratio_init); 2358 Node* diff = new SubINode(init2, ratio_init); 2359 _igvn.register_new_node_with_optimizer(diff, init2); 2360 set_early_ctrl(diff); 2361 Node* ratio_idx = new MulINode(phi, ratio); 2362 _igvn.register_new_node_with_optimizer(ratio_idx, phi); 2363 set_ctrl(ratio_idx, cl); 2364 Node* add = new AddINode(ratio_idx, diff); 2365 _igvn.register_new_node_with_optimizer(add); 2366 set_ctrl(add, cl); 2367 _igvn.replace_node( phi2, add ); 2368 // Sometimes an induction variable is unused 2369 if (add->outcnt() == 0) { 2370 _igvn.remove_dead_node(add); 2371 } 2372 --i; // deleted this phi; rescan starting with next position 2373 continue; 2374 } 2375 } 2376 } 2377 2378 void IdealLoopTree::remove_safepoints(PhaseIdealLoop* phase, bool keep_one) { 2379 Node* keep = NULL; 2380 if (keep_one) { 2381 // Look for a safepoint on the idom-path. 
2382 for (Node* i = tail(); i != _head; i = phase->idom(i)) { 2383 if (i->Opcode() == Op_SafePoint && phase->get_loop(i) == this) { 2384 keep = i; 2385 break; // Found one 2386 } 2387 } 2388 } 2389 2390 // Don't remove any safepoints if it is requested to keep a single safepoint and 2391 // no safepoint was found on idom-path. It is not safe to remove any safepoint 2392 // in this case since there's no safepoint dominating all paths in the loop body. 2393 bool prune = !keep_one || keep != NULL; 2394 2395 // Delete other safepoints in this loop. 2396 Node_List* sfpts = _safepts; 2397 if (prune && sfpts != NULL) { 2398 assert(keep == NULL || keep->Opcode() == Op_SafePoint, "not safepoint"); 2399 for (uint i = 0; i < sfpts->size(); i++) { 2400 Node* n = sfpts->at(i); 2401 assert(phase->get_loop(n) == this, ""); 2402 if (n != keep && phase->is_deleteable_safept(n)) { 2403 phase->lazy_replace(n, n->in(TypeFunc::Control)); 2404 } 2405 } 2406 } 2407 } 2408 2409 //------------------------------counted_loop----------------------------------- 2410 // Convert to counted loops where possible 2411 void IdealLoopTree::counted_loop( PhaseIdealLoop *phase ) { 2412 2413 // For grins, set the inner-loop flag here 2414 if (!_child) { 2415 if (_head->is_Loop()) _head->as_Loop()->set_inner_loop(); 2416 } 2417 2418 IdealLoopTree* loop = this; 2419 if (_head->is_CountedLoop() || 2420 phase->is_counted_loop(_head, loop)) { 2421 2422 if (LoopStripMiningIter == 0 || (LoopStripMiningIter > 1 && _child == NULL)) { 2423 // Indicate we do not need a safepoint here 2424 _has_sfpt = 1; 2425 } 2426 2427 // Remove safepoints 2428 bool keep_one_sfpt = !(_has_call || _has_sfpt); 2429 remove_safepoints(phase, keep_one_sfpt); 2430 2431 // Look for induction variables 2432 phase->replace_parallel_iv(this); 2433 2434 } else if (_parent != NULL && !_irreducible) { 2435 // Not a counted loop. Keep one safepoint. 2436 bool keep_one_sfpt = true; 2437 remove_safepoints(phase, keep_one_sfpt); 2438 } 2439 2440 // Recursively 2441 assert(loop->_child != this || (loop->_head->as_Loop()->is_OuterStripMinedLoop() && _head->as_CountedLoop()->is_strip_mined()), "what kind of loop was added?"); 2442 assert(loop->_child != this || (loop->_child->_child == NULL && loop->_child->_next == NULL), "would miss some loops"); 2443 if (loop->_child && loop->_child != this) loop->_child->counted_loop(phase); 2444 if (loop->_next) loop->_next ->counted_loop(phase); 2445 } 2446 2447 2448 // The Estimated Loop Clone Size: 2449 // CloneFactor * (~112% * BodySize + BC) + CC + FanOutTerm, 2450 // where BC and CC are totally ad-hoc/magic "body" and "clone" constants, 2451 // respectively, used to ensure that the node usage estimates made are on the 2452 // safe side, for the most part. The FanOutTerm is an attempt to estimate the 2453 // possible additional/excessive nodes generated due to data and control flow 2454 // merging, for edges reaching outside the loop. 
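// For example, a body of 100 nodes cloned with factor 2 gives
// sz = 100 + (100 + 7) / 8 = 113, so the estimate is 2 * (113 + 13) + 17 = 269
// nodes, before est_loop_flow_merge_sz() is added.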
2455 uint IdealLoopTree::est_loop_clone_sz(uint factor) const { 2456 2457 precond(0 < factor && factor < 16); 2458 2459 uint const bc = 13; 2460 uint const cc = 17; 2461 uint const sz = _body.size() + (_body.size() + 7) / 8; 2462 uint estimate = factor * (sz + bc) + cc; 2463 2464 assert((estimate - cc) / factor == sz + bc, "overflow"); 2465 2466 return estimate + est_loop_flow_merge_sz(); 2467 } 2468 2469 // The Estimated Loop (full-) Unroll Size: 2470 // UnrollFactor * (~106% * BodySize) + CC + FanOutTerm, 2471 // where CC is a (totally) ad-hoc/magic "clone" constant, used to ensure that 2472 // node usage estimates made are on the safe side, for the most part. This is 2473 // a "light" version of the loop clone size calculation (above), based on the 2474 // assumption that most of the loop-construct overhead will be unraveled when 2475 // (fully) unrolled. Defined for unroll factors larger or equal to one (>=1), 2476 // including an overflow check and returning UINT_MAX in case of an overflow. 2477 uint IdealLoopTree::est_loop_unroll_sz(uint factor) const { 2478 2479 precond(factor > 0); 2480 2481 // Take into account that after unroll conjoined heads and tails will fold. 2482 uint const b0 = _body.size() - EMPTY_LOOP_SIZE; 2483 uint const cc = 7; 2484 uint const sz = b0 + (b0 + 15) / 16; 2485 uint estimate = factor * sz + cc; 2486 2487 if ((estimate - cc) / factor != sz) { 2488 return UINT_MAX; 2489 } 2490 2491 return estimate + est_loop_flow_merge_sz(); 2492 } 2493 2494 // Estimate the growth effect (in nodes) of merging control and data flow when 2495 // cloning a loop body, based on the amount of control and data flow reaching 2496 // outside of the (current) loop body. 2497 uint IdealLoopTree::est_loop_flow_merge_sz() const { 2498 2499 uint ctrl_edge_out_cnt = 0; 2500 uint data_edge_out_cnt = 0; 2501 2502 for (uint i = 0; i < _body.size(); i++) { 2503 Node* node = _body.at(i); 2504 uint outcnt = node->outcnt(); 2505 2506 for (uint k = 0; k < outcnt; k++) { 2507 Node* out = node->raw_out(k); 2508 2509 if (out->is_CFG()) { 2510 if (!is_member(_phase->get_loop(out))) { 2511 ctrl_edge_out_cnt++; 2512 } 2513 } else { 2514 Node* ctrl = _phase->get_ctrl(out); 2515 assert(ctrl->is_CFG(), "must be"); 2516 if (!is_member(_phase->get_loop(ctrl))) { 2517 data_edge_out_cnt++; 2518 } 2519 } 2520 } 2521 } 2522 // Use data and control count (x2.0) in estimate iff both are > 0. This is 2523 // a rather pessimistic estimate for the most part, in particular for some 2524 // complex loops, but still not enough to capture all loops. 2525 if (ctrl_edge_out_cnt > 0 && data_edge_out_cnt > 0) { 2526 return 2 * (ctrl_edge_out_cnt + data_edge_out_cnt); 2527 } 2528 return 0; 2529 } 2530 2531 #ifndef PRODUCT 2532 //------------------------------dump_head-------------------------------------- 2533 // Dump 1 liner for loop header info 2534 void IdealLoopTree::dump_head() const { 2535 tty->sp(2 * _nest); 2536 tty->print("Loop: N%d/N%d ", _head->_idx, _tail->_idx); 2537 if (_irreducible) tty->print(" IRREDUCIBLE"); 2538 Node* entry = _head->is_Loop() ? 
_head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl); 2539 Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); 2540 if (predicate != NULL ) { 2541 tty->print(" limit_check"); 2542 entry = PhaseIdealLoop::skip_loop_predicates(entry); 2543 } 2544 if (UseLoopPredicate) { 2545 entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_predicate); 2546 if (entry != NULL) { 2547 tty->print(" predicated"); 2548 entry = PhaseIdealLoop::skip_loop_predicates(entry); 2549 } 2550 } 2551 if (UseProfiledLoopPredicate) { 2552 entry = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_profile_predicate); 2553 if (entry != NULL) { 2554 tty->print(" profile_predicated"); 2555 } 2556 } 2557 if (_head->is_CountedLoop()) { 2558 CountedLoopNode *cl = _head->as_CountedLoop(); 2559 tty->print(" counted"); 2560 2561 Node* init_n = cl->init_trip(); 2562 if (init_n != NULL && init_n->is_Con()) 2563 tty->print(" [%d,", cl->init_trip()->get_int()); 2564 else 2565 tty->print(" [int,"); 2566 Node* limit_n = cl->limit(); 2567 if (limit_n != NULL && limit_n->is_Con()) 2568 tty->print("%d),", cl->limit()->get_int()); 2569 else 2570 tty->print("int),"); 2571 int stride_con = cl->stride_con(); 2572 if (stride_con > 0) tty->print("+"); 2573 tty->print("%d", stride_con); 2574 2575 tty->print(" (%0.f iters) ", cl->profile_trip_cnt()); 2576 2577 if (cl->is_pre_loop ()) tty->print(" pre" ); 2578 if (cl->is_main_loop()) tty->print(" main"); 2579 if (cl->is_post_loop()) tty->print(" post"); 2580 if (cl->is_vectorized_loop()) tty->print(" vector"); 2581 if (cl->range_checks_present()) tty->print(" rc "); 2582 if (cl->is_multiversioned()) tty->print(" multi "); 2583 } 2584 if (_has_call) tty->print(" has_call"); 2585 if (_has_sfpt) tty->print(" has_sfpt"); 2586 if (_rce_candidate) tty->print(" rce"); 2587 if (_safepts != NULL && _safepts->size() > 0) { 2588 tty->print(" sfpts={"); _safepts->dump_simple(); tty->print(" }"); 2589 } 2590 if (_required_safept != NULL && _required_safept->size() > 0) { 2591 tty->print(" req={"); _required_safept->dump_simple(); tty->print(" }"); 2592 } 2593 if (Verbose) { 2594 tty->print(" body={"); _body.dump_simple(); tty->print(" }"); 2595 } 2596 if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) { 2597 tty->print(" strip_mined"); 2598 } 2599 tty->cr(); 2600 } 2601 2602 //------------------------------dump------------------------------------------- 2603 // Dump loops by loop tree 2604 void IdealLoopTree::dump() const { 2605 dump_head(); 2606 if (_child) _child->dump(); 2607 if (_next) _next ->dump(); 2608 } 2609 2610 #endif 2611 2612 static void log_loop_tree(IdealLoopTree* root, IdealLoopTree* loop, CompileLog* log) { 2613 if (loop == root) { 2614 if (loop->_child != NULL) { 2615 log->begin_head("loop_tree"); 2616 log->end_head(); 2617 if( loop->_child ) log_loop_tree(root, loop->_child, log); 2618 log->tail("loop_tree"); 2619 assert(loop->_next == NULL, "what?"); 2620 } 2621 } else { 2622 Node* head = loop->_head; 2623 log->begin_head("loop"); 2624 log->print(" idx='%d' ", head->_idx); 2625 if (loop->_irreducible) log->print("irreducible='1' "); 2626 if (head->is_Loop()) { 2627 if (head->as_Loop()->is_inner_loop()) log->print("inner_loop='1' "); 2628 if (head->as_Loop()->is_partial_peel_loop()) log->print("partial_peel_loop='1' "); 2629 } 2630 if (head->is_CountedLoop()) { 2631 CountedLoopNode* cl = head->as_CountedLoop(); 2632 if 
(cl->is_pre_loop()) log->print("pre_loop='%d' ", cl->main_idx());
2633 if (cl->is_main_loop()) log->print("main_loop='%d' ", cl->_idx);
2634 if (cl->is_post_loop()) log->print("post_loop='%d' ", cl->main_idx());
2635 }
2636 log->end_head();
2637 if( loop->_child ) log_loop_tree(root, loop->_child, log);
2638 log->tail("loop");
2639 if( loop->_next ) log_loop_tree(root, loop->_next, log);
2640 }
2641 }
2642 
2643 //---------------------collect_potentially_useful_predicates-----------------------
2644 // Helper function to collect potentially useful predicates to prevent them from
2645 // being eliminated by PhaseIdealLoop::eliminate_useless_predicates
2646 void PhaseIdealLoop::collect_potentially_useful_predicates(
2647 IdealLoopTree * loop, Unique_Node_List &useful_predicates) {
2648 if (loop->_child) { // child
2649 collect_potentially_useful_predicates(loop->_child, useful_predicates);
2650 }
2651 
2652 // self (only loops to which we can apply loop predication may use their predicates)
2653 if (loop->_head->is_Loop() &&
2654 !loop->_irreducible &&
2655 !loop->tail()->is_top()) {
2656 LoopNode* lpn = loop->_head->as_Loop();
2657 Node* entry = lpn->in(LoopNode::EntryControl);
2658 Node* predicate_proj = find_predicate(entry); // loop_limit_check first
2659 if (predicate_proj != NULL ) { // right pattern that can be used by loop predication
2660 assert(entry->in(0)->in(1)->in(1)->Opcode() == Op_Opaque1, "must be");
2661 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
2662 entry = skip_loop_predicates(entry);
2663 }
2664 predicate_proj = find_predicate(entry); // Predicate
2665 if (predicate_proj != NULL ) {
2666 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
2667 entry = skip_loop_predicates(entry);
2668 }
2669 if (UseProfiledLoopPredicate) {
2670 predicate_proj = find_predicate(entry); // Predicate
2671 if (predicate_proj != NULL ) {
2672 useful_predicates.push(entry->in(0)->in(1)->in(1)); // good one
2673 }
2674 }
2675 }
2676 
2677 if (loop->_next) { // sibling
2678 collect_potentially_useful_predicates(loop->_next, useful_predicates);
2679 }
2680 }
2681 
2682 //------------------------eliminate_useless_predicates-----------------------------
2683 // Eliminate all inserted predicates if they could not be used by loop predication.
2684 // Note: this also eliminates the loop limit check predicate, since it also uses
2685 // an Opaque1 node (see Parse::add_predicate()).
2686 void PhaseIdealLoop::eliminate_useless_predicates() {
2687 if (C->predicate_count() == 0)
2688 return; // no predicate left
2689 
2690 Unique_Node_List useful_predicates; // to store useful predicates
2691 if (C->has_loops()) {
2692 collect_potentially_useful_predicates(_ltree_root->_child, useful_predicates);
2693 }
2694 
2695 for (int i = C->predicate_count(); i > 0; i--) {
2696 Node * n = C->predicate_opaque1_node(i-1);
2697 assert(n->Opcode() == Op_Opaque1, "must be");
2698 if (!useful_predicates.member(n)) { // not in the useful list
2699 _igvn.replace_node(n, n->in(1));
2700 }
2701 }
2702 }
2703 
2704 //------------------------process_expensive_nodes-----------------------------
2705 // Expensive nodes have their control input set to prevent the GVN
2706 // from commoning them, which would force the resulting node onto
2707 // a more frequent path. Use CFG information here to change the
2708 // control inputs so that some expensive nodes can be commoned while
2709 // not being executed more frequently.
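// For example, two equivalent expensive nodes pinned under the true and false
// projections of the same If can both be re-pinned to the If's dominator; one of
// them executed on every pass through that If anyway, so commoning them there does
// not make the expensive operation run more often.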
2710 bool PhaseIdealLoop::process_expensive_nodes() { 2711 assert(OptimizeExpensiveOps, "optimization off?"); 2712 2713 // Sort nodes to bring similar nodes together 2714 C->sort_expensive_nodes(); 2715 2716 bool progress = false; 2717 2718 for (int i = 0; i < C->expensive_count(); ) { 2719 Node* n = C->expensive_node(i); 2720 int start = i; 2721 // Find nodes similar to n 2722 i++; 2723 for (; i < C->expensive_count() && Compile::cmp_expensive_nodes(n, C->expensive_node(i)) == 0; i++); 2724 int end = i; 2725 // And compare them two by two 2726 for (int j = start; j < end; j++) { 2727 Node* n1 = C->expensive_node(j); 2728 if (is_node_unreachable(n1)) { 2729 continue; 2730 } 2731 for (int k = j+1; k < end; k++) { 2732 Node* n2 = C->expensive_node(k); 2733 if (is_node_unreachable(n2)) { 2734 continue; 2735 } 2736 2737 assert(n1 != n2, "should be pair of nodes"); 2738 2739 Node* c1 = n1->in(0); 2740 Node* c2 = n2->in(0); 2741 2742 Node* parent_c1 = c1; 2743 Node* parent_c2 = c2; 2744 2745 // The call to get_early_ctrl_for_expensive() moves the 2746 // expensive nodes up but stops at loops that are in a if 2747 // branch. See whether we can exit the loop and move above the 2748 // If. 2749 if (c1->is_Loop()) { 2750 parent_c1 = c1->in(1); 2751 } 2752 if (c2->is_Loop()) { 2753 parent_c2 = c2->in(1); 2754 } 2755 2756 if (parent_c1 == parent_c2) { 2757 _igvn._worklist.push(n1); 2758 _igvn._worklist.push(n2); 2759 continue; 2760 } 2761 2762 // Look for identical expensive node up the dominator chain. 2763 if (is_dominator(c1, c2)) { 2764 c2 = c1; 2765 } else if (is_dominator(c2, c1)) { 2766 c1 = c2; 2767 } else if (parent_c1->is_Proj() && parent_c1->in(0)->is_If() && 2768 parent_c2->is_Proj() && parent_c1->in(0) == parent_c2->in(0)) { 2769 // Both branches have the same expensive node so move it up 2770 // before the if. 2771 c1 = c2 = idom(parent_c1->in(0)); 2772 } 2773 // Do the actual moves 2774 if (n1->in(0) != c1) { 2775 _igvn.hash_delete(n1); 2776 n1->set_req(0, c1); 2777 _igvn.hash_insert(n1); 2778 _igvn._worklist.push(n1); 2779 progress = true; 2780 } 2781 if (n2->in(0) != c2) { 2782 _igvn.hash_delete(n2); 2783 n2->set_req(0, c2); 2784 _igvn.hash_insert(n2); 2785 _igvn._worklist.push(n2); 2786 progress = true; 2787 } 2788 } 2789 } 2790 } 2791 2792 return progress; 2793 } 2794 2795 2796 //============================================================================= 2797 //----------------------------build_and_optimize------------------------------- 2798 // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to 2799 // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups. 2800 void PhaseIdealLoop::build_and_optimize(LoopOptsMode mode) { 2801 bool do_split_ifs = (mode == LoopOptsDefault); 2802 bool skip_loop_opts = (mode == LoopOptsNone); 2803 2804 int old_progress = C->major_progress(); 2805 uint orig_worklist_size = _igvn._worklist.size(); 2806 2807 // Reset major-progress flag for the driver's heuristics 2808 C->clear_major_progress(); 2809 2810 #ifndef PRODUCT 2811 // Capture for later assert 2812 uint unique = C->unique(); 2813 _loop_invokes++; 2814 _loop_work += unique; 2815 #endif 2816 2817 // True if the method has at least 1 irreducible loop 2818 _has_irreducible_loops = false; 2819 2820 _created_loop_node = false; 2821 2822 Arena *a = Thread::current()->resource_area(); 2823 VectorSet visited(a); 2824 // Pre-grow the mapping from Nodes to IdealLoopTrees. 
2825 _nodes.map(C->unique(), NULL); 2826 memset(_nodes.adr(), 0, wordSize * C->unique()); 2827 2828 // Pre-build the top-level outermost loop tree entry 2829 _ltree_root = new IdealLoopTree( this, C->root(), C->root() ); 2830 // Do not need a safepoint at the top level 2831 _ltree_root->_has_sfpt = 1; 2832 2833 // Initialize Dominators. 2834 // Checked in clone_loop_predicate() during beautify_loops(). 2835 _idom_size = 0; 2836 _idom = NULL; 2837 _dom_depth = NULL; 2838 _dom_stk = NULL; 2839 2840 // Empty pre-order array 2841 allocate_preorders(); 2842 2843 // Build a loop tree on the fly. Build a mapping from CFG nodes to 2844 // IdealLoopTree entries. Data nodes are NOT walked. 2845 build_loop_tree(); 2846 // Check for bailout, and return 2847 if (C->failing()) { 2848 return; 2849 } 2850 2851 // No loops after all 2852 if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false); 2853 2854 // There should always be an outer loop containing the Root and Return nodes. 2855 // If not, we have a degenerate empty program. Bail out in this case. 2856 if (!has_node(C->root())) { 2857 if (!_verify_only) { 2858 C->clear_major_progress(); 2859 C->record_method_not_compilable("empty program detected during loop optimization"); 2860 } 2861 return; 2862 } 2863 2864 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2865 // Nothing to do, so get out 2866 bool stop_early = !C->has_loops() && !skip_loop_opts && !do_split_ifs && !_verify_me && !_verify_only && 2867 !bs->is_gc_specific_loop_opts_pass(mode); 2868 bool do_expensive_nodes = C->should_optimize_expensive_nodes(_igvn); 2869 bool strip_mined_loops_expanded = bs->strip_mined_loops_expanded(mode); 2870 if (stop_early && !do_expensive_nodes) { 2871 _igvn.optimize(); // Cleanup NeverBranches 2872 return; 2873 } 2874 2875 // Set loop nesting depth 2876 _ltree_root->set_nest( 0 ); 2877 2878 // Split shared headers and insert loop landing pads. 2879 // Do not bother doing this on the Root loop of course. 2880 if( !_verify_me && !_verify_only && _ltree_root->_child ) { 2881 C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3); 2882 if( _ltree_root->_child->beautify_loops( this ) ) { 2883 // Re-build loop tree! 2884 _ltree_root->_child = NULL; 2885 _nodes.clear(); 2886 reallocate_preorders(); 2887 build_loop_tree(); 2888 // Check for bailout, and return 2889 if (C->failing()) { 2890 return; 2891 } 2892 // Reset loop nesting depth 2893 _ltree_root->set_nest( 0 ); 2894 2895 C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3); 2896 } 2897 } 2898 2899 // Build Dominators for elision of NULL checks & loop finding. 2900 // Since nodes do not have a slot for immediate dominator, make 2901 // a persistent side array for that info indexed on node->_idx. 2902 _idom_size = C->unique(); 2903 _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size ); 2904 _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size ); 2905 _dom_stk = NULL; // Allocated on demand in recompute_dom_depth 2906 memset( _dom_depth, 0, _idom_size * sizeof(uint) ); 2907 2908 Dominators(); 2909 2910 if (!_verify_only) { 2911 // As a side effect, Dominators removed any unreachable CFG paths 2912 // into RegionNodes. It doesn't do this test against Root, so 2913 // we do it here. 2914 for( uint i = 1; i < C->root()->req(); i++ ) { 2915 if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root? 
2916 _igvn.delete_input_of(C->root(), i); 2917 i--; // Rerun same iteration on compressed edges 2918 } 2919 } 2920 2921 // Given dominators, try to find inner loops with calls that must 2922 // always be executed (call dominates loop tail). These loops do 2923 // not need a separate safepoint. 2924 Node_List cisstack(a); 2925 _ltree_root->check_safepts(visited, cisstack); 2926 } 2927 2928 // Walk the DATA nodes and place into loops. Find earliest control 2929 // node. For CFG nodes, the _nodes array starts out and remains 2930 // holding the associated IdealLoopTree pointer. For DATA nodes, the 2931 // _nodes array holds the earliest legal controlling CFG node. 2932 2933 // Allocate stack with enough space to avoid frequent realloc 2934 int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats 2935 Node_Stack nstack( a, stack_size ); 2936 2937 visited.clear(); 2938 Node_List worklist(a); 2939 // Don't need C->root() on worklist since 2940 // it will be processed among C->top() inputs 2941 worklist.push(C->top()); 2942 visited.set(C->top()->_idx); // Set C->top() as visited now 2943 build_loop_early( visited, worklist, nstack ); 2944 2945 // Given early legal placement, try finding counted loops. This placement 2946 // is good enough to discover most loop invariants. 2947 if (!_verify_me && !_verify_only && !strip_mined_loops_expanded) { 2948 _ltree_root->counted_loop( this ); 2949 } 2950 2951 // Find latest loop placement. Find ideal loop placement. 2952 visited.clear(); 2953 init_dom_lca_tags(); 2954 // Need C->root() on worklist when processing outs 2955 worklist.push(C->root()); 2956 NOT_PRODUCT( C->verify_graph_edges(); ) 2957 worklist.push(C->top()); 2958 build_loop_late( visited, worklist, nstack ); 2959 2960 if (_verify_only) { 2961 C->restore_major_progress(old_progress); 2962 assert(C->unique() == unique, "verification mode made Nodes? ? ?"); 2963 assert(_igvn._worklist.size() == orig_worklist_size, "shouldn't push anything"); 2964 return; 2965 } 2966 2967 // clear out the dead code after build_loop_late 2968 while (_deadlist.size()) { 2969 _igvn.remove_globally_dead_node(_deadlist.pop()); 2970 } 2971 2972 if (stop_early) { 2973 assert(do_expensive_nodes, "why are we here?"); 2974 if (process_expensive_nodes()) { 2975 // If we made some progress when processing expensive nodes then 2976 // the IGVN may modify the graph in a way that will allow us to 2977 // make some more progress: we need to try processing expensive 2978 // nodes again. 2979 C->set_major_progress(); 2980 } 2981 _igvn.optimize(); 2982 return; 2983 } 2984 2985 // Some parser-inserted loop predicates could never be used by loop 2986 // predication or they were moved away from loop during some optimizations. 2987 // For example, peeling. Eliminate them before next loop optimizations. 2988 eliminate_useless_predicates(); 2989 2990 #ifndef PRODUCT 2991 C->verify_graph_edges(); 2992 if (_verify_me) { // Nested verify pass? 2993 // Check to see if the verify mode is broken 2994 assert(C->unique() == unique, "non-optimize mode made Nodes? ? 
?"); 2995 return; 2996 } 2997 if (VerifyLoopOptimizations) verify(); 2998 if (TraceLoopOpts && C->has_loops()) { 2999 _ltree_root->dump(); 3000 } 3001 #endif 3002 3003 if (skip_loop_opts) { 3004 // restore major progress flag 3005 C->restore_major_progress(old_progress); 3006 3007 // Cleanup any modified bits 3008 _igvn.optimize(); 3009 3010 if (C->log() != NULL) { 3011 log_loop_tree(_ltree_root, _ltree_root, C->log()); 3012 } 3013 return; 3014 } 3015 3016 if (bs->optimize_loops(this, mode, visited, nstack, worklist)) { 3017 _igvn.optimize(); 3018 if (C->log() != NULL) { 3019 log_loop_tree(_ltree_root, _ltree_root, C->log()); 3020 } 3021 return; 3022 } 3023 3024 if (ReassociateInvariants) { 3025 // Reassociate invariants and prep for split_thru_phi 3026 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 3027 IdealLoopTree* lpt = iter.current(); 3028 bool is_counted = lpt->is_counted(); 3029 if (!is_counted || !lpt->is_innermost()) continue; 3030 3031 // check for vectorized loops, any reassociation of invariants was already done 3032 if (is_counted && lpt->_head->as_CountedLoop()->is_unroll_only()) { 3033 continue; 3034 } else { 3035 AutoNodeBudget node_budget(this); 3036 lpt->reassociate_invariants(this); 3037 } 3038 // Because RCE opportunities can be masked by split_thru_phi, 3039 // look for RCE candidates and inhibit split_thru_phi 3040 // on just their loop-phi's for this pass of loop opts 3041 if (SplitIfBlocks && do_split_ifs) { 3042 AutoNodeBudget node_budget(this, AutoNodeBudget::NO_BUDGET_CHECK); 3043 if (lpt->policy_range_check(this)) { 3044 lpt->_rce_candidate = 1; // = true 3045 } 3046 } 3047 } 3048 } 3049 3050 // Check for aggressive application of split-if and other transforms 3051 // that require basic-block info (like cloning through Phi's) 3052 if( SplitIfBlocks && do_split_ifs ) { 3053 visited.clear(); 3054 split_if_with_blocks( visited, nstack); 3055 NOT_PRODUCT( if( VerifyLoopOptimizations ) verify(); ); 3056 } 3057 3058 if (!C->major_progress() && do_expensive_nodes && process_expensive_nodes()) { 3059 C->set_major_progress(); 3060 } 3061 3062 // Perform loop predication before iteration splitting 3063 if (C->has_loops() && !C->major_progress() && (C->predicate_count() > 0)) { 3064 _ltree_root->_child->loop_predication(this); 3065 } 3066 3067 if (OptimizeFill && UseLoopPredicate && C->has_loops() && !C->major_progress()) { 3068 if (do_intrinsify_fill()) { 3069 C->set_major_progress(); 3070 } 3071 } 3072 3073 // Perform iteration-splitting on inner loops. Split iterations to avoid 3074 // range checks or one-shot null checks. 3075 3076 // If split-if's didn't hack the graph too bad (no CFG changes) 3077 // then do loop opts. 3078 if (C->has_loops() && !C->major_progress()) { 3079 memset( worklist.adr(), 0, worklist.Size()*sizeof(Node*) ); 3080 _ltree_root->_child->iteration_split( this, worklist ); 3081 // No verify after peeling! GCM has hoisted code out of the loop. 3082 // After peeling, the hoisted code could sink inside the peeled area. 3083 // The peeling code does not try to recompute the best location for 3084 // all the code before the peeled area, so the verify pass will always 3085 // complain about it. 3086 } 3087 // Do verify graph edges in any case 3088 NOT_PRODUCT( C->verify_graph_edges(); ); 3089 3090 if (!do_split_ifs) { 3091 // We saw major progress in Split-If to get here. We forced a 3092 // pass with unrolling and not split-if, however more split-if's 3093 // might make progress. 
If the unrolling didn't make progress 3094 // then the major-progress flag got cleared and we won't try 3095 // another round of Split-If. In particular the ever-common 3096 // instance-of/check-cast pattern requires at least 2 rounds of 3097 // Split-If to clear out. 3098 C->set_major_progress(); 3099 } 3100 3101 // Repeat loop optimizations if new loops were seen 3102 if (created_loop_node()) { 3103 C->set_major_progress(); 3104 } 3105 3106 // Keep loop predicates and perform optimizations with them 3107 // until no more loop optimizations could be done. 3108 // After that switch predicates off and do more loop optimizations. 3109 if (!C->major_progress() && (C->predicate_count() > 0)) { 3110 C->cleanup_loop_predicates(_igvn); 3111 if (TraceLoopOpts) { 3112 tty->print_cr("PredicatesOff"); 3113 } 3114 C->set_major_progress(); 3115 } 3116 3117 // Convert scalar to superword operations at the end of all loop opts. 3118 if (UseSuperWord && C->has_loops() && !C->major_progress()) { 3119 // SuperWord transform 3120 SuperWord sw(this); 3121 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 3122 IdealLoopTree* lpt = iter.current(); 3123 if (lpt->is_counted()) { 3124 CountedLoopNode *cl = lpt->_head->as_CountedLoop(); 3125 3126 if (PostLoopMultiversioning && cl->is_rce_post_loop() && !cl->is_vectorized_loop()) { 3127 // Check that the rce'd post loop is encountered first, multiversion after all 3128 // major main loop optimization are concluded 3129 if (!C->major_progress()) { 3130 IdealLoopTree *lpt_next = lpt->_next; 3131 if (lpt_next && lpt_next->is_counted()) { 3132 CountedLoopNode *cl = lpt_next->_head->as_CountedLoop(); 3133 has_range_checks(lpt_next); 3134 if (cl->is_post_loop() && cl->range_checks_present()) { 3135 if (!cl->is_multiversioned()) { 3136 if (multi_version_post_loops(lpt, lpt_next) == false) { 3137 // Cause the rce loop to be optimized away if we fail 3138 cl->mark_is_multiversioned(); 3139 cl->set_slp_max_unroll(0); 3140 poison_rce_post_loop(lpt); 3141 } 3142 } 3143 } 3144 } 3145 sw.transform_loop(lpt, true); 3146 } 3147 } else if (cl->is_main_loop()) { 3148 sw.transform_loop(lpt, true); 3149 } 3150 } 3151 } 3152 } 3153 3154 // Cleanup any modified bits 3155 _igvn.optimize(); 3156 3157 // disable assert until issue with split_flow_path is resolved (6742111) 3158 // assert(!_has_irreducible_loops || C->parsed_irreducible_loop() || C->is_osr_compilation(), 3159 // "shouldn't introduce irreducible loops"); 3160 3161 if (C->log() != NULL) { 3162 log_loop_tree(_ltree_root, _ltree_root, C->log()); 3163 } 3164 } 3165 3166 #ifndef PRODUCT 3167 //------------------------------print_statistics------------------------------- 3168 int PhaseIdealLoop::_loop_invokes=0;// Count of PhaseIdealLoop invokes 3169 int PhaseIdealLoop::_loop_work=0; // Sum of PhaseIdealLoop x unique 3170 void PhaseIdealLoop::print_statistics() { 3171 tty->print_cr("PhaseIdealLoop=%d, sum _unique=%d", _loop_invokes, _loop_work); 3172 } 3173 3174 //------------------------------verify----------------------------------------- 3175 // Build a verify-only PhaseIdealLoop, and see that it agrees with me. 
3176 static int fail; // debug only, so its multi-thread dont care 3177 void PhaseIdealLoop::verify() const { 3178 int old_progress = C->major_progress(); 3179 ResourceMark rm; 3180 PhaseIdealLoop loop_verify( _igvn, this ); 3181 VectorSet visited(Thread::current()->resource_area()); 3182 3183 fail = 0; 3184 verify_compare( C->root(), &loop_verify, visited ); 3185 assert( fail == 0, "verify loops failed" ); 3186 // Verify loop structure is the same 3187 _ltree_root->verify_tree(loop_verify._ltree_root, NULL); 3188 // Reset major-progress. It was cleared by creating a verify version of 3189 // PhaseIdealLoop. 3190 C->restore_major_progress(old_progress); 3191 } 3192 3193 //------------------------------verify_compare--------------------------------- 3194 // Make sure me and the given PhaseIdealLoop agree on key data structures 3195 void PhaseIdealLoop::verify_compare( Node *n, const PhaseIdealLoop *loop_verify, VectorSet &visited ) const { 3196 if( !n ) return; 3197 if( visited.test_set( n->_idx ) ) return; 3198 if( !_nodes[n->_idx] ) { // Unreachable 3199 assert( !loop_verify->_nodes[n->_idx], "both should be unreachable" ); 3200 return; 3201 } 3202 3203 uint i; 3204 for( i = 0; i < n->req(); i++ ) 3205 verify_compare( n->in(i), loop_verify, visited ); 3206 3207 // Check the '_nodes' block/loop structure 3208 i = n->_idx; 3209 if( has_ctrl(n) ) { // We have control; verify has loop or ctrl 3210 if( _nodes[i] != loop_verify->_nodes[i] && 3211 get_ctrl_no_update(n) != loop_verify->get_ctrl_no_update(n) ) { 3212 tty->print("Mismatched control setting for: "); 3213 n->dump(); 3214 if( fail++ > 10 ) return; 3215 Node *c = get_ctrl_no_update(n); 3216 tty->print("We have it as: "); 3217 if( c->in(0) ) c->dump(); 3218 else tty->print_cr("N%d",c->_idx); 3219 tty->print("Verify thinks: "); 3220 if( loop_verify->has_ctrl(n) ) 3221 loop_verify->get_ctrl_no_update(n)->dump(); 3222 else 3223 loop_verify->get_loop_idx(n)->dump(); 3224 tty->cr(); 3225 } 3226 } else { // We have a loop 3227 IdealLoopTree *us = get_loop_idx(n); 3228 if( loop_verify->has_ctrl(n) ) { 3229 tty->print("Mismatched loop setting for: "); 3230 n->dump(); 3231 if( fail++ > 10 ) return; 3232 tty->print("We have it as: "); 3233 us->dump(); 3234 tty->print("Verify thinks: "); 3235 loop_verify->get_ctrl_no_update(n)->dump(); 3236 tty->cr(); 3237 } else if (!C->major_progress()) { 3238 // Loop selection can be messed up if we did a major progress 3239 // operation, like split-if. Do not verify in that case. 
3240 IdealLoopTree *them = loop_verify->get_loop_idx(n); 3241 if( us->_head != them->_head || us->_tail != them->_tail ) { 3242 tty->print("Unequals loops for: "); 3243 n->dump(); 3244 if( fail++ > 10 ) return; 3245 tty->print("We have it as: "); 3246 us->dump(); 3247 tty->print("Verify thinks: "); 3248 them->dump(); 3249 tty->cr(); 3250 } 3251 } 3252 } 3253 3254 // Check for immediate dominators being equal 3255 if( i >= _idom_size ) { 3256 if( !n->is_CFG() ) return; 3257 tty->print("CFG Node with no idom: "); 3258 n->dump(); 3259 return; 3260 } 3261 if( !n->is_CFG() ) return; 3262 if( n == C->root() ) return; // No IDOM here 3263 3264 assert(n->_idx == i, "sanity"); 3265 Node *id = idom_no_update(n); 3266 if( id != loop_verify->idom_no_update(n) ) { 3267 tty->print("Unequals idoms for: "); 3268 n->dump(); 3269 if( fail++ > 10 ) return; 3270 tty->print("We have it as: "); 3271 id->dump(); 3272 tty->print("Verify thinks: "); 3273 loop_verify->idom_no_update(n)->dump(); 3274 tty->cr(); 3275 } 3276 3277 } 3278 3279 //------------------------------verify_tree------------------------------------ 3280 // Verify that tree structures match. Because the CFG can change, siblings 3281 // within the loop tree can be reordered. We attempt to deal with that by 3282 // reordering the verify's loop tree if possible. 3283 void IdealLoopTree::verify_tree(IdealLoopTree *loop, const IdealLoopTree *parent) const { 3284 assert( _parent == parent, "Badly formed loop tree" ); 3285 3286 // Siblings not in same order? Attempt to re-order. 3287 if( _head != loop->_head ) { 3288 // Find _next pointer to update 3289 IdealLoopTree **pp = &loop->_parent->_child; 3290 while( *pp != loop ) 3291 pp = &((*pp)->_next); 3292 // Find proper sibling to be next 3293 IdealLoopTree **nn = &loop->_next; 3294 while( (*nn) && (*nn)->_head != _head ) 3295 nn = &((*nn)->_next); 3296 3297 // Check for no match. 3298 if( !(*nn) ) { 3299 // Annoyingly, irreducible loops can pick different headers 3300 // after a major_progress operation, so the rest of the loop 3301 // tree cannot be matched. 3302 if (_irreducible && Compile::current()->major_progress()) return; 3303 assert( 0, "failed to match loop tree" ); 3304 } 3305 3306 // Move (*nn) to (*pp) 3307 IdealLoopTree *hit = *nn; 3308 *nn = hit->_next; 3309 hit->_next = loop; 3310 *pp = loop; 3311 loop = hit; 3312 // Now try again to verify 3313 } 3314 3315 assert( _head == loop->_head , "mismatched loop head" ); 3316 Node *tail = _tail; // Inline a non-updating version of 3317 while( !tail->in(0) ) // the 'tail()' call. 
3318 tail = tail->in(1); 3319 assert( tail == loop->_tail, "mismatched loop tail" ); 3320 3321 // Counted loops that are guarded should be able to find their guards 3322 if( _head->is_CountedLoop() && _head->as_CountedLoop()->is_main_loop() ) { 3323 CountedLoopNode *cl = _head->as_CountedLoop(); 3324 Node *init = cl->init_trip(); 3325 Node *ctrl = cl->in(LoopNode::EntryControl); 3326 assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" ); 3327 Node *iff = ctrl->in(0); 3328 assert( iff->Opcode() == Op_If, "" ); 3329 Node *bol = iff->in(1); 3330 assert( bol->Opcode() == Op_Bool, "" ); 3331 Node *cmp = bol->in(1); 3332 assert( cmp->Opcode() == Op_CmpI, "" ); 3333 Node *add = cmp->in(1); 3334 Node *opaq; 3335 if( add->Opcode() == Op_Opaque1 ) { 3336 opaq = add; 3337 } else { 3338 assert( add->Opcode() == Op_AddI || add->Opcode() == Op_ConI , "" ); 3339 assert( add == init, "" ); 3340 opaq = cmp->in(2); 3341 } 3342 assert( opaq->Opcode() == Op_Opaque1, "" ); 3343 3344 } 3345 3346 if (_child != NULL) _child->verify_tree(loop->_child, this); 3347 if (_next != NULL) _next ->verify_tree(loop->_next, parent); 3348 // Innermost loops need to verify loop bodies, 3349 // but only if no 'major_progress' 3350 int fail = 0; 3351 if (!Compile::current()->major_progress() && _child == NULL) { 3352 for( uint i = 0; i < _body.size(); i++ ) { 3353 Node *n = _body.at(i); 3354 if (n->outcnt() == 0) continue; // Ignore dead 3355 uint j; 3356 for( j = 0; j < loop->_body.size(); j++ ) 3357 if( loop->_body.at(j) == n ) 3358 break; 3359 if( j == loop->_body.size() ) { // Not found in loop body 3360 // Last ditch effort to avoid assertion: Its possible that we 3361 // have some users (so outcnt not zero) but are still dead. 3362 // Try to find from root. 3363 if (Compile::current()->root()->find(n->_idx)) { 3364 fail++; 3365 tty->print("We have that verify does not: "); 3366 n->dump(); 3367 } 3368 } 3369 } 3370 for( uint i2 = 0; i2 < loop->_body.size(); i2++ ) { 3371 Node *n = loop->_body.at(i2); 3372 if (n->outcnt() == 0) continue; // Ignore dead 3373 uint j; 3374 for( j = 0; j < _body.size(); j++ ) 3375 if( _body.at(j) == n ) 3376 break; 3377 if( j == _body.size() ) { // Not found in loop body 3378 // Last ditch effort to avoid assertion: Its possible that we 3379 // have some users (so outcnt not zero) but are still dead. 3380 // Try to find from root. 3381 if (Compile::current()->root()->find(n->_idx)) { 3382 fail++; 3383 tty->print("Verify has that we do not: "); 3384 n->dump(); 3385 } 3386 } 3387 } 3388 assert( !fail, "loop body mismatch" ); 3389 } 3390 } 3391 3392 #endif 3393 3394 //------------------------------set_idom--------------------------------------- 3395 void PhaseIdealLoop::set_idom(Node* d, Node* n, uint dom_depth) { 3396 uint idx = d->_idx; 3397 if (idx >= _idom_size) { 3398 uint newsize = next_power_of_2(idx); 3399 _idom = REALLOC_RESOURCE_ARRAY( Node*, _idom,_idom_size,newsize); 3400 _dom_depth = REALLOC_RESOURCE_ARRAY( uint, _dom_depth,_idom_size,newsize); 3401 memset( _dom_depth + _idom_size, 0, (newsize - _idom_size) * sizeof(uint) ); 3402 _idom_size = newsize; 3403 } 3404 _idom[idx] = n; 3405 _dom_depth[idx] = dom_depth; 3406 } 3407 3408 //------------------------------recompute_dom_depth--------------------------------------- 3409 // The dominator tree is constructed with only parent pointers. 3410 // This recomputes the depth in the tree by first tagging all 3411 // nodes as "no depth yet" marker. 
The next pass then runs up 3412 // the dom tree from each node marked "no depth yet", and computes 3413 // the depth on the way back down. 3414 void PhaseIdealLoop::recompute_dom_depth() { 3415 uint no_depth_marker = C->unique(); 3416 uint i; 3417 // Initialize depth to "no depth yet" and realize all lazy updates 3418 for (i = 0; i < _idom_size; i++) { 3419 // Only indices with a _dom_depth have a Node* or NULL (otherwise uninitialized). 3420 if (_dom_depth[i] > 0 && _idom[i] != NULL) { 3421 _dom_depth[i] = no_depth_marker; 3422 3423 // heal _idom if it has a fwd mapping in _nodes 3424 if (_idom[i]->in(0) == NULL) { 3425 idom(i); 3426 } 3427 } 3428 } 3429 if (_dom_stk == NULL) { 3430 uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size. 3431 if (init_size < 10) init_size = 10; 3432 _dom_stk = new GrowableArray<uint>(init_size); 3433 } 3434 // Compute new depth for each node. 3435 for (i = 0; i < _idom_size; i++) { 3436 uint j = i; 3437 // Run up the dom tree to find a node with a depth 3438 while (_dom_depth[j] == no_depth_marker) { 3439 _dom_stk->push(j); 3440 j = _idom[j]->_idx; 3441 } 3442 // Compute the depth on the way back down this tree branch 3443 uint dd = _dom_depth[j] + 1; 3444 while (_dom_stk->length() > 0) { 3445 uint j = _dom_stk->pop(); 3446 _dom_depth[j] = dd; 3447 dd++; 3448 } 3449 } 3450 } 3451 3452 //------------------------------sort------------------------------------------- 3453 // Insert 'loop' into the existing loop tree. 'innermost' is a leaf of the 3454 // loop tree, not the root. 3455 IdealLoopTree *PhaseIdealLoop::sort( IdealLoopTree *loop, IdealLoopTree *innermost ) { 3456 if( !innermost ) return loop; // New innermost loop 3457 3458 int loop_preorder = get_preorder(loop->_head); // Cache pre-order number 3459 assert( loop_preorder, "not yet post-walked loop" ); 3460 IdealLoopTree **pp = &innermost; // Pointer to previous next-pointer 3461 IdealLoopTree *l = *pp; // Do I go before or after 'l'? 3462 3463 // Insert at start of list 3464 while( l ) { // Insertion sort based on pre-order 3465 if( l == loop ) return innermost; // Already on list! 3466 int l_preorder = get_preorder(l->_head); // Cache pre-order number 3467 assert( l_preorder, "not yet post-walked l" ); 3468 // Check header pre-order number to figure proper nesting 3469 if( loop_preorder > l_preorder ) 3470 break; // End of insertion 3471 // If headers tie (e.g., shared headers) check tail pre-order numbers. 3472 // Since I split shared headers, you'd think this could not happen. 3473 // BUT: I must first do the preorder numbering before I can discover I 3474 // have shared headers, so the split headers all get the same preorder 3475 // number as the RegionNode they split from. 3476 if( loop_preorder == l_preorder && 3477 get_preorder(loop->_tail) < get_preorder(l->_tail) ) 3478 break; // Also check for shared headers (same pre#) 3479 pp = &l->_parent; // Chain up list 3480 l = *pp; 3481 } 3482 // Link into list 3483 // Point predecessor to me 3484 *pp = loop; 3485 // Point me to successor 3486 IdealLoopTree *p = loop->_parent; 3487 loop->_parent = l; // Point me to successor 3488 if( p ) sort( p, innermost ); // Insert my parents into list as well 3489 return innermost; 3490 } 3491 3492 //------------------------------build_loop_tree-------------------------------- 3493 // I use a modified Vick/Tarjan algorithm. I need pre- and post-visit 3494 // bits.
The _nodes[] array is mapped by Node index and holds a NULL for 3495 // not-yet-pre-walked, pre-order # for pre-but-not-post-walked and holds the 3496 // tightest enclosing IdealLoopTree for post-walked. 3497 // 3498 // During my forward walk I do a short 1-layer lookahead to see if I can find 3499 // a loop backedge that doesn't have any work on the backedge. This 3500 // helps me construct nested loops with shared headers better. 3501 // 3502 // Once I've done the forward recursion, I do the post-work. For each child 3503 // I check to see if there is a backedge. Backedges define a loop! I 3504 // insert an IdealLoopTree at the target of the backedge. 3505 // 3506 // During the post-work I also check to see if I have several children 3507 // belonging to different loops. If so, then this Node is a decision point 3508 // where control flow can choose to change loop nests. It is at this 3509 // decision point where I can figure out how loops are nested. At this 3510 // time I can properly order the different loop nests from my children. 3511 // Note that there may not be any backedges at the decision point! 3512 // 3513 // Since the decision point can be far removed from the backedges, I can't 3514 // order my loops at the time I discover them. Thus at the decision point 3515 // I need to inspect loop header pre-order numbers to properly nest my 3516 // loops. This means I need to sort my children's loops by pre-order. 3517 // The sort is of size number-of-control-children, which generally limits 3518 // it to size 2 (i.e., I just choose between my 2 target loops). 3519 void PhaseIdealLoop::build_loop_tree() { 3520 // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc 3521 GrowableArray <Node *> bltstack(C->live_nodes() >> 1); 3522 Node *n = C->root(); 3523 bltstack.push(n); 3524 int pre_order = 1; 3525 int stack_size; 3526 3527 while ( ( stack_size = bltstack.length() ) != 0 ) { 3528 n = bltstack.top(); // Leave node on stack 3529 if ( !is_visited(n) ) { 3530 // ---- Pre-pass Work ---- 3531 // Pre-walked but not post-walked nodes need a pre_order number. 3532 3533 set_preorder_visited( n, pre_order ); // set as visited 3534 3535 // ---- Scan over children ---- 3536 // Scan first over control projections that lead to loop headers. 3537 // This helps us find inner-to-outer loops with shared headers better. 3538 3539 // Scan children's children for loop headers. 3540 for ( int i = n->outcnt() - 1; i >= 0; --i ) { 3541 Node* m = n->raw_out(i); // Child 3542 if( m->is_CFG() && !is_visited(m) ) { // Only for CFG children 3543 // Scan over children's children to find loop 3544 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { 3545 Node* l = m->fast_out(j); 3546 if( is_visited(l) && // Been visited? 3547 !is_postvisited(l) && // But not post-visited 3548 get_preorder(l) < pre_order ) { // And smaller pre-order 3549 // Found! Scan the DFS down this path before doing other paths 3550 bltstack.push(m); 3551 break; 3552 } 3553 } 3554 } 3555 } 3556 pre_order++; 3557 } 3558 else if ( !is_postvisited(n) ) { 3559 // Note: build_loop_tree_impl() adds out edges on rare occasions, 3560 // such as com.sun.rsasign.am::a. 3561 // For non-recursive version, first, process current children. 3562 // On next iteration, check if additional children were added.
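      // Push any not-yet-visited CFG successors so they get pre-walked first;
      // if nothing new is pushed below, 'n' has no pending children and can be
      // post-visited on this iteration.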
3563 for ( int k = n->outcnt() - 1; k >= 0; --k ) { 3564 Node* u = n->raw_out(k); 3565 if ( u->is_CFG() && !is_visited(u) ) { 3566 bltstack.push(u); 3567 } 3568 } 3569 if ( bltstack.length() == stack_size ) { 3570 // There were no additional children, post visit node now 3571 (void)bltstack.pop(); // Remove node from stack 3572 pre_order = build_loop_tree_impl( n, pre_order ); 3573 // Check for bailout 3574 if (C->failing()) { 3575 return; 3576 } 3577 // Check to grow _preorders[] array for the case when 3578 // build_loop_tree_impl() adds new nodes. 3579 check_grow_preorders(); 3580 } 3581 } 3582 else { 3583 (void)bltstack.pop(); // Remove post-visited node from stack 3584 } 3585 } 3586 } 3587 3588 //------------------------------build_loop_tree_impl--------------------------- 3589 int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) { 3590 // ---- Post-pass Work ---- 3591 // Pre-walked but not post-walked nodes need a pre_order number. 3592 3593 // Tightest enclosing loop for this Node 3594 IdealLoopTree *innermost = NULL; 3595 3596 // For all children, see if any edge is a backedge. If so, make a loop 3597 // for it. Then find the tightest enclosing loop for the self Node. 3598 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3599 Node* m = n->fast_out(i); // Child 3600 if( n == m ) continue; // Ignore control self-cycles 3601 if( !m->is_CFG() ) continue;// Ignore non-CFG edges 3602 3603 IdealLoopTree *l; // Child's loop 3604 if( !is_postvisited(m) ) { // Child visited but not post-visited? 3605 // Found a backedge 3606 assert( get_preorder(m) < pre_order, "should be backedge" ); 3607 // Check for the RootNode, which is already a LoopNode and is allowed 3608 // to have multiple "backedges". 3609 if( m == C->root()) { // Found the root? 3610 l = _ltree_root; // Root is the outermost LoopNode 3611 } else { // Else found a nested loop 3612 // Insert a LoopNode to mark this loop. 3613 l = new IdealLoopTree(this, m, n); 3614 } // End of Else found a nested loop 3615 if( !has_loop(m) ) // If 'm' does not already have a loop set 3616 set_loop(m, l); // Set loop header to loop now 3617 3618 } else { // Else not a nested loop 3619 if( !_nodes[m->_idx] ) continue; // Dead code has no loop 3620 l = get_loop(m); // Get previously determined loop 3621 // If successor is header of a loop (nest), move up-loop till it 3622 // is a member of some outer enclosing loop. Since there are no 3623 // shared headers (I've split them already) I only need to go up 3624 // at most 1 level. 3625 while( l && l->_head == m ) // Successor heads loop? 3626 l = l->_parent; // Move up 1 for me 3627 // If this loop is not properly parented, then this loop 3628 // has no exit path out, i.e. its an infinite loop. 3629 if( !l ) { 3630 // Make loop "reachable" from root so the CFG is reachable. Basically 3631 // insert a bogus loop exit that is never taken. 'm', the loop head, 3632 // points to 'n', one (of possibly many) fall-in paths. There may be 3633 // many backedges as well. 3634 3635 // Here I set the loop to be the root loop. I could have, after 3636 // inserting a bogus loop exit, restarted the recursion and found my 3637 // new loop exit. This would make the infinite loop a first-class 3638 // loop and it would then get properly optimized. What's the use of 3639 // optimizing an infinite loop? 3640 l = _ltree_root; // Oops, found infinite loop 3641 3642 if (!_verify_only) { 3643 // Insert the NeverBranch between 'm' and it's control user. 
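          // Rough sketch (illustrative only) of the shape built below for an
          // infinite loop headed by 'm' with fall-in control 'n':
          //
          //      n -->  m (loop head, plus backedges)
          //             |
          //        NeverBranch
          //         /        \
          //     CProj#0    CProj#1  (never taken)
          //        |           |
          //   old control    Halt --> hooked to Root
          //   user of 'm'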
3644 NeverBranchNode *iff = new NeverBranchNode( m ); 3645 _igvn.register_new_node_with_optimizer(iff); 3646 set_loop(iff, l); 3647 Node *if_t = new CProjNode( iff, 0 ); 3648 _igvn.register_new_node_with_optimizer(if_t); 3649 set_loop(if_t, l); 3650 3651 Node* cfg = NULL; // Find the One True Control User of m 3652 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { 3653 Node* x = m->fast_out(j); 3654 if (x->is_CFG() && x != m && x != iff) 3655 { cfg = x; break; } 3656 } 3657 assert(cfg != NULL, "must find the control user of m"); 3658 uint k = 0; // Probably cfg->in(0) 3659 while( cfg->in(k) != m ) k++; // But check incase cfg is a Region 3660 cfg->set_req( k, if_t ); // Now point to NeverBranch 3661 _igvn._worklist.push(cfg); 3662 3663 // Now create the never-taken loop exit 3664 Node *if_f = new CProjNode( iff, 1 ); 3665 _igvn.register_new_node_with_optimizer(if_f); 3666 set_loop(if_f, l); 3667 // Find frame ptr for Halt. Relies on the optimizer 3668 // V-N'ing. Easier and quicker than searching through 3669 // the program structure. 3670 Node *frame = new ParmNode( C->start(), TypeFunc::FramePtr ); 3671 _igvn.register_new_node_with_optimizer(frame); 3672 // Halt & Catch Fire 3673 Node* halt = new HaltNode(if_f, frame, "never-taken loop exit reached"); 3674 _igvn.register_new_node_with_optimizer(halt); 3675 set_loop(halt, l); 3676 C->root()->add_req(halt); 3677 } 3678 set_loop(C->root(), _ltree_root); 3679 } 3680 } 3681 // Weeny check for irreducible. This child was already visited (this 3682 // IS the post-work phase). Is this child's loop header post-visited 3683 // as well? If so, then I found another entry into the loop. 3684 if (!_verify_only) { 3685 while( is_postvisited(l->_head) ) { 3686 // found irreducible 3687 l->_irreducible = 1; // = true 3688 l = l->_parent; 3689 _has_irreducible_loops = true; 3690 // Check for bad CFG here to prevent crash, and bailout of compile 3691 if (l == NULL) { 3692 C->record_method_not_compilable("unhandled CFG detected during loop optimization"); 3693 return pre_order; 3694 } 3695 } 3696 C->set_has_irreducible_loop(_has_irreducible_loops); 3697 } 3698 3699 // This Node might be a decision point for loops. It is only if 3700 // it's children belong to several different loops. The sort call 3701 // does a trivial amount of work if there is only 1 child or all 3702 // children belong to the same loop. If however, the children 3703 // belong to different loops, the sort call will properly set the 3704 // _parent pointers to show how the loops nest. 3705 // 3706 // In any case, it returns the tightest enclosing loop. 3707 innermost = sort( l, innermost ); 3708 } 3709 3710 // Def-use info will have some dead stuff; dead stuff will have no 3711 // loop decided on. 3712 3713 // Am I a loop header? If so fix up my parent's child and next ptrs. 3714 if( innermost && innermost->_head == n ) { 3715 assert( get_loop(n) == innermost, "" ); 3716 IdealLoopTree *p = innermost->_parent; 3717 IdealLoopTree *l = innermost; 3718 while( p && l->_head == n ) { 3719 l->_next = p->_child; // Put self on parents 'next child' 3720 p->_child = l; // Make self as first child of parent 3721 l = p; // Now walk up the parent chain 3722 p = l->_parent; 3723 } 3724 } else { 3725 // Note that it is possible for a LoopNode to reach here, if the 3726 // backedge has been made unreachable (hence the LoopNode no longer 3727 // denotes a Loop, and will eventually be removed). 3728 3729 // Record tightest enclosing loop for self. Mark as post-visited. 
set_loop(n, innermost); 3731 // Also record has_call flag early on 3732 if( innermost ) { 3733 if( n->is_Call() && !n->is_CallLeaf() && !n->is_macro() ) { 3734 // Do not count uncommon calls 3735 if( !n->is_CallStaticJava() || !n->as_CallStaticJava()->_name ) { 3736 Node *iff = n->in(0)->in(0); 3737 // Vectorized loops must not have any calls. 3738 if( UseSuperWord || !iff->is_If() || 3739 (n->in(0)->Opcode() == Op_IfFalse && 3740 (1.0 - iff->as_If()->_prob) >= 0.01) || 3741 (iff->as_If()->_prob >= 0.01) ) 3742 innermost->_has_call = 1; 3743 } 3744 } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) { 3745 // Disable loop optimizations if the loop has a scalar replaceable 3746 // allocation. This disabling may cause a potential performance loss 3747 // if the allocation is not eliminated for some reason. 3748 innermost->_allow_optimizations = false; 3749 innermost->_has_call = 1; // = true 3750 } else if (n->Opcode() == Op_SafePoint) { 3751 // Record all safepoints in this loop. 3752 if (innermost->_safepts == NULL) innermost->_safepts = new Node_List(); 3753 innermost->_safepts->push(n); 3754 } 3755 } 3756 } 3757 3758 // Flag as post-visited now 3759 set_postvisited(n); 3760 return pre_order; 3761 } 3762 3763 3764 //------------------------------build_loop_early------------------------------- 3765 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 3766 // First pass computes the earliest controlling node possible. This is the 3767 // controlling input with the deepest dominating depth. 3768 void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { 3769 while (worklist.size() != 0) { 3770 // Use local variables nstack_top_n & nstack_top_i to cache values 3771 // on nstack's top. 3772 Node *nstack_top_n = worklist.pop(); 3773 uint nstack_top_i = 0; 3774 //while_nstack_nonempty: 3775 while (true) { 3776 // Get parent node and next input's index from stack's top. 3777 Node *n = nstack_top_n; 3778 uint i = nstack_top_i; 3779 uint cnt = n->req(); // Count of inputs 3780 if (i == 0) { // Pre-process the node. 3781 if( has_node(n) && // Have either loop or control already? 3782 !has_ctrl(n) ) { // Have loop picked out already? 3783 // During "merge_many_backedges" we fold up several nested loops 3784 // into a single loop. This leaves the members of the original 3785 // loop bodies pointing to dead loops; they need to move up 3786 // to the new UNION'd larger loop. I set the _head field of these 3787 // dead loops to NULL and make the _parent field point to the owning 3788 // loop. Shades of UNION-FIND algorithm. 3789 IdealLoopTree *ilt; 3790 while( !(ilt = get_loop(n))->_head ) { 3791 // Normally I would use a set_loop here. But in this one special 3792 // case, it is legal (and expected) to change what loop a Node 3793 // belongs to. 3794 _nodes.map(n->_idx, (Node*)(ilt->_parent) ); 3795 } 3796 // Remove safepoints ONLY if I've already seen I don't need one.
3797 // (the old code here would yank a 2nd safepoint after seeing a 3798 // first one, even though the 1st did not dominate in the loop body 3799 // and thus could be avoided indefinitely) 3800 if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint && 3801 is_deleteable_safept(n)) { 3802 Node *in = n->in(TypeFunc::Control); 3803 lazy_replace(n,in); // Pull safepoint now 3804 if (ilt->_safepts != NULL) { 3805 ilt->_safepts->yank(n); 3806 } 3807 // Carry on with the recursion "as if" we are walking 3808 // only the control input 3809 if( !visited.test_set( in->_idx ) ) { 3810 worklist.push(in); // Visit this guy later, using worklist 3811 } 3812 // Get next node from nstack: 3813 // - skip n's inputs processing by setting i > cnt; 3814 // - we also will not call set_early_ctrl(n) since 3815 // has_node(n) == true (see the condition above). 3816 i = cnt + 1; 3817 } 3818 } 3819 } // if (i == 0) 3820 3821 // Visit all inputs 3822 bool done = true; // Assume all n's inputs will be processed 3823 while (i < cnt) { 3824 Node *in = n->in(i); 3825 ++i; 3826 if (in == NULL) continue; 3827 if (in->pinned() && !in->is_CFG()) 3828 set_ctrl(in, in->in(0)); 3829 int is_visited = visited.test_set( in->_idx ); 3830 if (!has_node(in)) { // No controlling input yet? 3831 assert( !in->is_CFG(), "CFG Node with no controlling input?" ); 3832 assert( !is_visited, "visit only once" ); 3833 nstack.push(n, i); // Save parent node and next input's index. 3834 nstack_top_n = in; // Process current input now. 3835 nstack_top_i = 0; 3836 done = false; // Not all n's inputs processed. 3837 break; // continue while_nstack_nonempty; 3838 } else if (!is_visited) { 3839 // This guy has a location picked out for him, but has not yet 3840 // been visited. Happens to all CFG nodes, for instance. 3841 // Visit him using the worklist instead of recursion, to break 3842 // cycles. Since he has a location already we do not need to 3843 // find his location before proceeding with the current Node. 3844 worklist.push(in); // Visit this guy later, using worklist 3845 } 3846 } 3847 if (done) { 3848 // All of n's inputs have been processed, complete post-processing. 3849 3850 // Compute earliest point this Node can go. 3851 // CFG, Phi, pinned nodes already know their controlling input. 3852 if (!has_node(n)) { 3853 // Record earliest legal location 3854 set_early_ctrl( n ); 3855 } 3856 if (nstack.is_empty()) { 3857 // Finished all nodes on stack. 3858 // Process next node on the worklist. 3859 break; 3860 } 3861 // Get saved parent node and next input's index. 3862 nstack_top_n = nstack.node(); 3863 nstack_top_i = nstack.index(); 3864 nstack.pop(); 3865 } 3866 } // while (true) 3867 } 3868 } 3869 3870 //------------------------------dom_lca_internal-------------------------------- 3871 // Pair-wise LCA 3872 Node *PhaseIdealLoop::dom_lca_internal( Node *n1, Node *n2 ) const { 3873 if( !n1 ) return n2; // Handle NULL original LCA 3874 assert( n1->is_CFG(), "" ); 3875 assert( n2->is_CFG(), "" ); 3876 // find LCA of all uses 3877 uint d1 = dom_depth(n1); 3878 uint d2 = dom_depth(n2); 3879 while (n1 != n2) { 3880 if (d1 > d2) { 3881 n1 = idom(n1); 3882 d1 = dom_depth(n1); 3883 } else if (d1 < d2) { 3884 n2 = idom(n2); 3885 d2 = dom_depth(n2); 3886 } else { 3887 // Here d1 == d2. Due to edits of the dominator-tree, sections 3888 // of the tree might have the same depth. These sections have 3889 // to be searched more carefully. 3890 3891 // Scan up all the n1's with equal depth, looking for n2. 
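      // (After lazy updates of the dominator tree the depths along an idom
      //  chain are not necessarily strictly decreasing, so each chain is walked
      //  through its run of equal-depth nodes before comparing again.)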
3892 Node *t1 = idom(n1); 3893 while (dom_depth(t1) == d1) { 3894 if (t1 == n2) return n2; 3895 t1 = idom(t1); 3896 } 3897 // Scan up all the n2's with equal depth, looking for n1. 3898 Node *t2 = idom(n2); 3899 while (dom_depth(t2) == d2) { 3900 if (t2 == n1) return n1; 3901 t2 = idom(t2); 3902 } 3903 // Move up to a new dominator-depth value as well as up the dom-tree. 3904 n1 = t1; 3905 n2 = t2; 3906 d1 = dom_depth(n1); 3907 d2 = dom_depth(n2); 3908 } 3909 } 3910 return n1; 3911 } 3912 3913 //------------------------------compute_idom----------------------------------- 3914 // Locally compute IDOM using dom_lca call. Correct only if the incoming 3915 // IDOMs are correct. 3916 Node *PhaseIdealLoop::compute_idom( Node *region ) const { 3917 assert( region->is_Region(), "" ); 3918 Node *LCA = NULL; 3919 for( uint i = 1; i < region->req(); i++ ) { 3920 if( region->in(i) != C->top() ) 3921 LCA = dom_lca( LCA, region->in(i) ); 3922 } 3923 return LCA; 3924 } 3925 3926 bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) { 3927 bool had_error = false; 3928 #ifdef ASSERT 3929 if (early != C->root()) { 3930 // Make sure that there's a dominance path from LCA to early 3931 Node* d = LCA; 3932 while (d != early) { 3933 if (d == C->root()) { 3934 dump_bad_graph("Bad graph detected in compute_lca_of_uses", n, early, LCA); 3935 tty->print_cr("*** Use %d isn't dominated by def %d ***", use->_idx, n->_idx); 3936 had_error = true; 3937 break; 3938 } 3939 d = idom(d); 3940 } 3941 } 3942 #endif 3943 return had_error; 3944 } 3945 3946 3947 Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) { 3948 // Compute LCA over list of uses 3949 bool had_error = false; 3950 Node *LCA = NULL; 3951 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) { 3952 Node* c = n->fast_out(i); 3953 if (_nodes[c->_idx] == NULL) 3954 continue; // Skip the occasional dead node 3955 if( c->is_Phi() ) { // For Phis, we must land above on the path 3956 for( uint j=1; j<c->req(); j++ ) {// For all inputs 3957 if( c->in(j) == n ) { // Found matching input? 3958 Node *use = c->in(0)->in(j); 3959 if (_verify_only && use->is_top()) continue; 3960 LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); 3961 if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; 3962 } 3963 } 3964 } else { 3965 // For CFG data-users, use is in the block just prior 3966 Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0); 3967 LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); 3968 if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; 3969 } 3970 } 3971 assert(!had_error, "bad dominance"); 3972 return LCA; 3973 } 3974 3975 // Check the shape of the graph at the loop entry. In some cases, 3976 // the shape of the graph does not match the shape outlined below. 3977 // That is caused by the Opaque1 node "protecting" the shape of 3978 // the graph being removed by, for example, the IGVN performed 3979 // in PhaseIdealLoop::build_and_optimize(). 3980 // 3981 // After the Opaque1 node has been removed, optimizations (e.g., split-if, 3982 // loop unswitching, and IGVN, or a combination of them) can freely change 3983 // the graph's shape. As a result, the graph shape outlined below cannot 3984 // be guaranteed anymore. 
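// The checks below expect roughly this chain above a main or post CountedLoop
// (an illustrative sketch only, using the local names from the code):
//
//   CountedLoop (cl) --entry--> [predicates] --> IfTrue/IfFalse  (ctrl = cl->skip_predicates())
//       ctrl->in(0)  : If    (iffm, typically the zero-trip guard)
//       iffm->in(1)  : Bool  (bolzm)
//       bolzm->in(1) : Cmp   (cmpzm), with at least one Opaque1 operand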
3985 bool PhaseIdealLoop::is_canonical_loop_entry(CountedLoopNode* cl) { 3986 if (!cl->is_main_loop() && !cl->is_post_loop()) { 3987 return false; 3988 } 3989 Node* ctrl = cl->skip_predicates(); 3990 3991 if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) { 3992 return false; 3993 } 3994 Node* iffm = ctrl->in(0); 3995 if (iffm == NULL || !iffm->is_If()) { 3996 return false; 3997 } 3998 Node* bolzm = iffm->in(1); 3999 if (bolzm == NULL || !bolzm->is_Bool()) { 4000 return false; 4001 } 4002 Node* cmpzm = bolzm->in(1); 4003 if (cmpzm == NULL || !cmpzm->is_Cmp()) { 4004 return false; 4005 } 4006 // compares can get conditionally flipped 4007 bool found_opaque = false; 4008 for (uint i = 1; i < cmpzm->req(); i++) { 4009 Node* opnd = cmpzm->in(i); 4010 if (opnd && opnd->Opcode() == Op_Opaque1) { 4011 found_opaque = true; 4012 break; 4013 } 4014 } 4015 if (!found_opaque) { 4016 return false; 4017 } 4018 return true; 4019 } 4020 4021 //------------------------------get_late_ctrl---------------------------------- 4022 // Compute latest legal control. 4023 Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) { 4024 assert(early != NULL, "early control should not be NULL"); 4025 4026 Node* LCA = compute_lca_of_uses(n, early); 4027 #ifdef ASSERT 4028 if (LCA == C->root() && LCA != early) { 4029 // def doesn't dominate uses so print some useful debugging output 4030 compute_lca_of_uses(n, early, true); 4031 } 4032 #endif 4033 4034 // if this is a load, check for anti-dependent stores 4035 // We use a conservative algorithm to identify potential interfering 4036 // instructions and for rescheduling the load. The users of the memory 4037 // input of this load are examined. Any use which is not a load and is 4038 // dominated by early is considered a potentially interfering store. 4039 // This can produce false positives. 4040 if (n->is_Load() && LCA != early) { 4041 int load_alias_idx = C->get_alias_index(n->adr_type()); 4042 if (C->alias_type(load_alias_idx)->is_rewritable()) { 4043 4044 Node_List worklist; 4045 4046 Node *mem = n->in(MemNode::Memory); 4047 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { 4048 Node* s = mem->fast_out(i); 4049 worklist.push(s); 4050 } 4051 while(worklist.size() != 0 && LCA != early) { 4052 Node* s = worklist.pop(); 4053 if (s->is_Load() || s->Opcode() == Op_SafePoint || 4054 (s->is_CallStaticJava() && s->as_CallStaticJava()->uncommon_trap_request() != 0)) { 4055 continue; 4056 } else if (s->is_MergeMem()) { 4057 for (DUIterator_Fast imax, i = s->fast_outs(imax); i < imax; i++) { 4058 Node* s1 = s->fast_out(i); 4059 worklist.push(s1); 4060 } 4061 } else { 4062 Node *sctrl = has_ctrl(s) ? 
get_ctrl(s) : s->in(0); 4063 assert(sctrl != NULL || s->outcnt() == 0, "must have control"); 4064 if (sctrl != NULL && !sctrl->is_top() && C->can_alias(s->adr_type(), load_alias_idx) && is_dominator(early, sctrl)) { 4065 LCA = dom_lca_for_get_late_ctrl(LCA, sctrl, n); 4066 } 4067 } 4068 } 4069 } 4070 } 4071 4072 assert(LCA == find_non_split_ctrl(LCA), "unexpected late control"); 4073 return LCA; 4074 } 4075 4076 // true if CFG node d dominates CFG node n 4077 bool PhaseIdealLoop::is_dominator(Node *d, Node *n) { 4078 if (d == n) 4079 return true; 4080 assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes"); 4081 uint dd = dom_depth(d); 4082 while (dom_depth(n) >= dd) { 4083 if (n == d) 4084 return true; 4085 n = idom(n); 4086 } 4087 return false; 4088 } 4089 4090 //------------------------------dom_lca_for_get_late_ctrl_internal------------- 4091 // Pair-wise LCA with tags. 4092 // Tag each index with the node 'tag' currently being processed 4093 // before advancing up the dominator chain using idom(). 4094 // Later calls that find a match to 'tag' know that this path has already 4095 // been considered in the current LCA (which is input 'n1' by convention). 4096 // Since get_late_ctrl() is only called once for each node, the tag array 4097 // does not need to be cleared between calls to get_late_ctrl(). 4098 // Algorithm trades a larger constant factor for better asymptotic behavior 4099 // 4100 Node *PhaseIdealLoop::dom_lca_for_get_late_ctrl_internal( Node *n1, Node *n2, Node *tag ) { 4101 uint d1 = dom_depth(n1); 4102 uint d2 = dom_depth(n2); 4103 4104 do { 4105 if (d1 > d2) { 4106 // current lca is deeper than n2 4107 _dom_lca_tags.map(n1->_idx, tag); 4108 n1 = idom(n1); 4109 d1 = dom_depth(n1); 4110 } else if (d1 < d2) { 4111 // n2 is deeper than current lca 4112 Node *memo = _dom_lca_tags[n2->_idx]; 4113 if( memo == tag ) { 4114 return n1; // Return the current LCA 4115 } 4116 _dom_lca_tags.map(n2->_idx, tag); 4117 n2 = idom(n2); 4118 d2 = dom_depth(n2); 4119 } else { 4120 // Here d1 == d2. Due to edits of the dominator-tree, sections 4121 // of the tree might have the same depth. These sections have 4122 // to be searched more carefully. 4123 4124 // Scan up all the n1's with equal depth, looking for n2. 4125 _dom_lca_tags.map(n1->_idx, tag); 4126 Node *t1 = idom(n1); 4127 while (dom_depth(t1) == d1) { 4128 if (t1 == n2) return n2; 4129 _dom_lca_tags.map(t1->_idx, tag); 4130 t1 = idom(t1); 4131 } 4132 // Scan up all the n2's with equal depth, looking for n1. 4133 _dom_lca_tags.map(n2->_idx, tag); 4134 Node *t2 = idom(n2); 4135 while (dom_depth(t2) == d2) { 4136 if (t2 == n1) return n1; 4137 _dom_lca_tags.map(t2->_idx, tag); 4138 t2 = idom(t2); 4139 } 4140 // Move up to a new dominator-depth value as well as up the dom-tree. 4141 n1 = t1; 4142 n2 = t2; 4143 d1 = dom_depth(n1); 4144 d2 = dom_depth(n2); 4145 } 4146 } while (n1 != n2); 4147 return n1; 4148 } 4149 4150 //------------------------------init_dom_lca_tags------------------------------ 4151 // Tag could be a node's integer index, 32bits instead of 64bits in some cases 4152 // Intended use does not involve any growth for the array, so it could 4153 // be of fixed size. 
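// (The map() call below only pre-sizes the tag array to C->unique()+1 slots;
//  the debug-build loop then checks every slot is still NULL so the initial
//  tag value can never be mistaken for a real node pointer.)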
4154 void PhaseIdealLoop::init_dom_lca_tags() { 4155 uint limit = C->unique() + 1; 4156 _dom_lca_tags.map( limit, NULL ); 4157 #ifdef ASSERT 4158 for( uint i = 0; i < limit; ++i ) { 4159 assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer"); 4160 } 4161 #endif // ASSERT 4162 } 4163 4164 //------------------------------clear_dom_lca_tags------------------------------ 4165 // Tag could be a node's integer index, 32bits instead of 64bits in some cases 4166 // Intended use does not involve any growth for the array, so it could 4167 // be of fixed size. 4168 void PhaseIdealLoop::clear_dom_lca_tags() { 4169 uint limit = C->unique() + 1; 4170 _dom_lca_tags.map( limit, NULL ); 4171 _dom_lca_tags.clear(); 4172 #ifdef ASSERT 4173 for( uint i = 0; i < limit; ++i ) { 4174 assert(_dom_lca_tags[i] == NULL, "Must be distinct from each node pointer"); 4175 } 4176 #endif // ASSERT 4177 } 4178 4179 //------------------------------build_loop_late-------------------------------- 4180 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 4181 // Second pass finds latest legal placement, and ideal loop placement. 4182 void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { 4183 while (worklist.size() != 0) { 4184 Node *n = worklist.pop(); 4185 // Only visit once 4186 if (visited.test_set(n->_idx)) continue; 4187 uint cnt = n->outcnt(); 4188 uint i = 0; 4189 while (true) { 4190 assert( _nodes[n->_idx], "no dead nodes" ); 4191 // Visit all children 4192 if (i < cnt) { 4193 Node* use = n->raw_out(i); 4194 ++i; 4195 // Check for dead uses. Aggressively prune such junk. It might be 4196 // dead in the global sense, but still have local uses so I cannot 4197 // easily call 'remove_dead_node'. 4198 if( _nodes[use->_idx] != NULL || use->is_top() ) { // Not dead? 4199 // Due to cycles, we might not hit the same fixed point in the verify 4200 // pass as we do in the regular pass. Instead, visit such phis as 4201 // simple uses of the loop head. 4202 if( use->in(0) && (use->is_CFG() || use->is_Phi()) ) { 4203 if( !visited.test(use->_idx) ) 4204 worklist.push(use); 4205 } else if( !visited.test_set(use->_idx) ) { 4206 nstack.push(n, i); // Save parent and next use's index. 4207 n = use; // Process all children of current use. 4208 cnt = use->outcnt(); 4209 i = 0; 4210 } 4211 } else { 4212 // Do not visit around the backedge of loops via data edges. 4213 // push dead code onto a worklist 4214 _deadlist.push(use); 4215 } 4216 } else { 4217 // All of n's children have been processed, complete post-processing. 4218 build_loop_late_post(n); 4219 if (nstack.is_empty()) { 4220 // Finished all nodes on stack. 4221 // Process next node on the worklist. 4222 break; 4223 } 4224 // Get saved parent node and next use's index. Visit the rest of uses. 4225 n = nstack.node(); 4226 cnt = n->outcnt(); 4227 i = nstack.index(); 4228 nstack.pop(); 4229 } 4230 } 4231 } 4232 } 4233 4234 // Verify that no data node is scheduled in the outer loop of a strip 4235 // mined loop. 
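// (The check below only applies while the outer strip-mined loop is not yet
//  fully constructed. It walks the inputs of the outer loop's safepoint,
//  transitively through nodes whose control is inside that outer loop; a data
//  node placed there must be reachable this way, otherwise we assert.)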
4236 void PhaseIdealLoop::verify_strip_mined_scheduling(Node *n, Node* least) { 4237 #ifdef ASSERT 4238 if (get_loop(least)->_nest == 0) { 4239 return; 4240 } 4241 IdealLoopTree* loop = get_loop(least); 4242 Node* head = loop->_head; 4243 if (head->is_OuterStripMinedLoop() && 4244 // Verification can't be applied to fully built strip mined loops 4245 head->as_Loop()->outer_loop_end()->in(1)->find_int_con(-1) == 0) { 4246 Node* sfpt = head->as_Loop()->outer_safepoint(); 4247 ResourceMark rm; 4248 Unique_Node_List wq; 4249 wq.push(sfpt); 4250 for (uint i = 0; i < wq.size(); i++) { 4251 Node *m = wq.at(i); 4252 for (uint i = 1; i < m->req(); i++) { 4253 Node* nn = m->in(i); 4254 if (nn == n) { 4255 return; 4256 } 4257 if (nn != NULL && has_ctrl(nn) && get_loop(get_ctrl(nn)) == loop) { 4258 wq.push(nn); 4259 } 4260 } 4261 } 4262 ShouldNotReachHere(); 4263 } 4264 #endif 4265 } 4266 4267 4268 //------------------------------build_loop_late_post--------------------------- 4269 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 4270 // Second pass finds latest legal placement, and ideal loop placement. 4271 void PhaseIdealLoop::build_loop_late_post(Node *n) { 4272 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 4273 4274 if (bs->build_loop_late_post(this, n)) { 4275 return; 4276 } 4277 4278 build_loop_late_post_work(n, true); 4279 } 4280 4281 void PhaseIdealLoop::build_loop_late_post_work(Node *n, bool pinned) { 4282 4283 if (n->req() == 2 && (n->Opcode() == Op_ConvI2L || n->Opcode() == Op_CastII) && !C->major_progress() && !_verify_only) { 4284 _igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops. 4285 } 4286 4287 #ifdef ASSERT 4288 if (_verify_only && !n->is_CFG()) { 4289 // Check def-use domination. 4290 compute_lca_of_uses(n, get_ctrl(n), true /* verify */); 4291 } 4292 #endif 4293 4294 // CFG and pinned nodes already handled 4295 if( n->in(0) ) { 4296 if( n->in(0)->is_top() ) return; // Dead? 4297 4298 // We'd like +VerifyLoopOptimizations to not believe that Mod's/Loads 4299 // _must_ be pinned (they have to observe their control edge of course). 4300 // Unlike Stores (which modify an unallocable resource, the memory 4301 // state), Mods/Loads can float around. So free them up. 4302 switch( n->Opcode() ) { 4303 case Op_DivI: 4304 case Op_DivF: 4305 case Op_DivD: 4306 case Op_ModI: 4307 case Op_ModF: 4308 case Op_ModD: 4309 case Op_LoadB: // Same with Loads; they can sink 4310 case Op_LoadUB: // during loop optimizations. 4311 case Op_LoadUS: 4312 case Op_LoadD: 4313 case Op_LoadF: 4314 case Op_LoadI: 4315 case Op_LoadKlass: 4316 case Op_LoadNKlass: 4317 case Op_LoadL: 4318 case Op_LoadS: 4319 case Op_LoadP: 4320 case Op_LoadN: 4321 case Op_LoadRange: 4322 case Op_LoadD_unaligned: 4323 case Op_LoadL_unaligned: 4324 case Op_StrComp: // Does a bunch of load-like effects 4325 case Op_StrEquals: 4326 case Op_StrIndexOf: 4327 case Op_StrIndexOfChar: 4328 case Op_AryEq: 4329 case Op_HasNegatives: 4330 pinned = false; 4331 } 4332 if( pinned ) { 4333 IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); 4334 if( !chosen_loop->_child ) // Inner loop? 4335 chosen_loop->_body.push(n); // Collect inner loops 4336 return; 4337 } 4338 } else { // No slot zero 4339 if( n->is_CFG() ) { // CFG with no slot 0 is dead 4340 _nodes.map(n->_idx,0); // No block setting, it's globally dead 4341 return; 4342 } 4343 assert(!n->is_CFG() || n->outcnt() == 0, ""); 4344 } 4345 4346 // Do I have a "safe range" I can select over? 
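  // The legal range runs from 'early' (the earliest control computed by
  // build_loop_early) down the dominator tree to 'LCA' (the latest control
  // implied by the uses). The walk below climbs the idom chain from LCA back
  // toward 'early' and keeps the legal position with the shallowest loop nest.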
4347 Node *early = get_ctrl(n);// Early location already computed 4348 4349 // Compute latest point this Node can go 4350 Node *LCA = get_late_ctrl( n, early ); 4351 // LCA is NULL due to uses being dead 4352 if( LCA == NULL ) { 4353 #ifdef ASSERT 4354 for (DUIterator i1 = n->outs(); n->has_out(i1); i1++) { 4355 assert( _nodes[n->out(i1)->_idx] == NULL, "all uses must also be dead"); 4356 } 4357 #endif 4358 _nodes.map(n->_idx, 0); // This node is useless 4359 _deadlist.push(n); 4360 return; 4361 } 4362 assert(LCA != NULL && !LCA->is_top(), "no dead nodes"); 4363 4364 Node *legal = LCA; // Walk 'legal' up the IDOM chain 4365 Node *least = legal; // Best legal position so far 4366 while( early != legal ) { // While not at earliest legal 4367 #ifdef ASSERT 4368 if (legal->is_Start() && !early->is_Root()) { 4369 // Bad graph. Print idom path and fail. 4370 dump_bad_graph("Bad graph detected in build_loop_late", n, early, LCA); 4371 assert(false, "Bad graph detected in build_loop_late"); 4372 } 4373 #endif 4374 // Find least loop nesting depth 4375 legal = idom(legal); // Bump up the IDOM tree 4376 // Check for lower nesting depth 4377 if( get_loop(legal)->_nest < get_loop(least)->_nest ) 4378 least = legal; 4379 } 4380 assert(early == legal || legal != C->root(), "bad dominance of inputs"); 4381 4382 // Try not to place code on a loop entry projection 4383 // which can inhibit range check elimination. 4384 if (least != early) { 4385 Node* ctrl_out = least->unique_ctrl_out(); 4386 if (ctrl_out && ctrl_out->is_Loop() && 4387 least == ctrl_out->in(LoopNode::EntryControl)) { 4388 // Move the node above predicates as far up as possible so a 4389 // following pass of loop predication doesn't hoist a predicate 4390 // that depends on it above that node. 4391 Node* new_ctrl = least; 4392 for (;;) { 4393 if (!new_ctrl->is_Proj()) { 4394 break; 4395 } 4396 CallStaticJavaNode* call = new_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); 4397 if (call == NULL) { 4398 break; 4399 } 4400 int req = call->uncommon_trap_request(); 4401 Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); 4402 if (trap_reason != Deoptimization::Reason_loop_limit_check && 4403 trap_reason != Deoptimization::Reason_predicate && 4404 trap_reason != Deoptimization::Reason_profile_predicate) { 4405 break; 4406 } 4407 Node* c = new_ctrl->in(0)->in(0); 4408 if (is_dominator(c, early) && c != early) { 4409 break; 4410 } 4411 new_ctrl = c; 4412 } 4413 least = new_ctrl; 4414 } 4415 } 4416 4417 #ifdef ASSERT 4418 // If verifying, verify that 'verify_me' has a legal location 4419 // and choose it as our location. 4420 if( _verify_me ) { 4421 Node *v_ctrl = _verify_me->get_ctrl_no_update(n); 4422 Node *legal = LCA; 4423 while( early != legal ) { // While not at earliest legal 4424 if( legal == v_ctrl ) break; // Check for prior good location 4425 legal = idom(legal) ;// Bump up the IDOM tree 4426 } 4427 // Check for prior good location 4428 if( legal == v_ctrl ) least = legal; // Keep prior if found 4429 } 4430 #endif 4431 4432 // Assign discovered "here or above" point 4433 least = find_non_split_ctrl(least); 4434 verify_strip_mined_scheduling(n, least); 4435 set_ctrl(n, least); 4436 4437 // Collect inner loop bodies 4438 IdealLoopTree *chosen_loop = get_loop(least); 4439 if( !chosen_loop->_child ) // Inner loop? 
4440 chosen_loop->_body.push(n);// Collect inner loops 4441 } 4442 4443 #ifdef ASSERT 4444 void PhaseIdealLoop::dump_bad_graph(const char* msg, Node* n, Node* early, Node* LCA) { 4445 tty->print_cr("%s", msg); 4446 tty->print("n: "); n->dump(); 4447 tty->print("early(n): "); early->dump(); 4448 if (n->in(0) != NULL && !n->in(0)->is_top() && 4449 n->in(0) != early && !n->in(0)->is_Root()) { 4450 tty->print("n->in(0): "); n->in(0)->dump(); 4451 } 4452 for (uint i = 1; i < n->req(); i++) { 4453 Node* in1 = n->in(i); 4454 if (in1 != NULL && in1 != n && !in1->is_top()) { 4455 tty->print("n->in(%d): ", i); in1->dump(); 4456 Node* in1_early = get_ctrl(in1); 4457 tty->print("early(n->in(%d)): ", i); in1_early->dump(); 4458 if (in1->in(0) != NULL && !in1->in(0)->is_top() && 4459 in1->in(0) != in1_early && !in1->in(0)->is_Root()) { 4460 tty->print("n->in(%d)->in(0): ", i); in1->in(0)->dump(); 4461 } 4462 for (uint j = 1; j < in1->req(); j++) { 4463 Node* in2 = in1->in(j); 4464 if (in2 != NULL && in2 != n && in2 != in1 && !in2->is_top()) { 4465 tty->print("n->in(%d)->in(%d): ", i, j); in2->dump(); 4466 Node* in2_early = get_ctrl(in2); 4467 tty->print("early(n->in(%d)->in(%d)): ", i, j); in2_early->dump(); 4468 if (in2->in(0) != NULL && !in2->in(0)->is_top() && 4469 in2->in(0) != in2_early && !in2->in(0)->is_Root()) { 4470 tty->print("n->in(%d)->in(%d)->in(0): ", i, j); in2->in(0)->dump(); 4471 } 4472 } 4473 } 4474 } 4475 } 4476 tty->cr(); 4477 tty->print("LCA(n): "); LCA->dump(); 4478 for (uint i = 0; i < n->outcnt(); i++) { 4479 Node* u1 = n->raw_out(i); 4480 if (u1 == n) 4481 continue; 4482 tty->print("n->out(%d): ", i); u1->dump(); 4483 if (u1->is_CFG()) { 4484 for (uint j = 0; j < u1->outcnt(); j++) { 4485 Node* u2 = u1->raw_out(j); 4486 if (u2 != u1 && u2 != n && u2->is_CFG()) { 4487 tty->print("n->out(%d)->out(%d): ", i, j); u2->dump(); 4488 } 4489 } 4490 } else { 4491 Node* u1_later = get_ctrl(u1); 4492 tty->print("later(n->out(%d)): ", i); u1_later->dump(); 4493 if (u1->in(0) != NULL && !u1->in(0)->is_top() && 4494 u1->in(0) != u1_later && !u1->in(0)->is_Root()) { 4495 tty->print("n->out(%d)->in(0): ", i); u1->in(0)->dump(); 4496 } 4497 for (uint j = 0; j < u1->outcnt(); j++) { 4498 Node* u2 = u1->raw_out(j); 4499 if (u2 == n || u2 == u1) 4500 continue; 4501 tty->print("n->out(%d)->out(%d): ", i, j); u2->dump(); 4502 if (!u2->is_CFG()) { 4503 Node* u2_later = get_ctrl(u2); 4504 tty->print("later(n->out(%d)->out(%d)): ", i, j); u2_later->dump(); 4505 if (u2->in(0) != NULL && !u2->in(0)->is_top() && 4506 u2->in(0) != u2_later && !u2->in(0)->is_Root()) { 4507 tty->print("n->out(%d)->in(0): ", i); u2->in(0)->dump(); 4508 } 4509 } 4510 } 4511 } 4512 } 4513 tty->cr(); 4514 int ct = 0; 4515 Node *dbg_legal = LCA; 4516 while(!dbg_legal->is_Start() && ct < 100) { 4517 tty->print("idom[%d] ",ct); dbg_legal->dump(); 4518 ct++; 4519 dbg_legal = idom(dbg_legal); 4520 } 4521 tty->cr(); 4522 } 4523 #endif 4524 4525 #ifndef PRODUCT 4526 //------------------------------dump------------------------------------------- 4527 void PhaseIdealLoop::dump() const { 4528 ResourceMark rm; 4529 Arena* arena = Thread::current()->resource_area(); 4530 Node_Stack stack(arena, C->live_nodes() >> 2); 4531 Node_List rpo_list; 4532 VectorSet visited(arena); 4533 visited.set(C->top()->_idx); 4534 rpo(C->root(), stack, visited, rpo_list); 4535 // Dump root loop indexed by last element in PO order 4536 dump(_ltree_root, rpo_list.size(), rpo_list); 4537 } 4538 4539 void PhaseIdealLoop::dump(IdealLoopTree* loop, uint idx, Node_List 
&rpo_list) const { 4540 loop->dump_head(); 4541 4542 // Now scan for CFG nodes in the same loop 4543 for (uint j = idx; j > 0; j--) { 4544 Node* n = rpo_list[j-1]; 4545 if (!_nodes[n->_idx]) // Skip dead nodes 4546 continue; 4547 4548 if (get_loop(n) != loop) { // Wrong loop nest 4549 if (get_loop(n)->_head == n && // Found nested loop? 4550 get_loop(n)->_parent == loop) 4551 dump(get_loop(n), rpo_list.size(), rpo_list); // Print it nested-ly 4552 continue; 4553 } 4554 4555 // Dump controlling node 4556 tty->sp(2 * loop->_nest); 4557 tty->print("C"); 4558 if (n == C->root()) { 4559 n->dump(); 4560 } else { 4561 Node* cached_idom = idom_no_update(n); 4562 Node* computed_idom = n->in(0); 4563 if (n->is_Region()) { 4564 computed_idom = compute_idom(n); 4565 // computed_idom() will return n->in(0) when idom(n) is an IfNode (or 4566 // any MultiBranch ctrl node), so apply a similar transform to 4567 // the cached idom returned from idom_no_update. 4568 cached_idom = find_non_split_ctrl(cached_idom); 4569 } 4570 tty->print(" ID:%d", computed_idom->_idx); 4571 n->dump(); 4572 if (cached_idom != computed_idom) { 4573 tty->print_cr("*** BROKEN IDOM! Computed as: %d, cached as: %d", 4574 computed_idom->_idx, cached_idom->_idx); 4575 } 4576 } 4577 // Dump nodes it controls 4578 for (uint k = 0; k < _nodes.Size(); k++) { 4579 // (k < C->unique() && get_ctrl(find(k)) == n) 4580 if (k < C->unique() && _nodes[k] == (Node*)((intptr_t)n + 1)) { 4581 Node* m = C->root()->find(k); 4582 if (m && m->outcnt() > 0) { 4583 if (!(has_ctrl(m) && get_ctrl_no_update(m) == n)) { 4584 tty->print_cr("*** BROKEN CTRL ACCESSOR! _nodes[k] is %p, ctrl is %p", 4585 _nodes[k], has_ctrl(m) ? get_ctrl_no_update(m) : NULL); 4586 } 4587 tty->sp(2 * loop->_nest + 1); 4588 m->dump(); 4589 } 4590 } 4591 } 4592 } 4593 } 4594 #endif 4595 4596 // Collect a R-P-O for the whole CFG. 4597 // Result list is in post-order (scan backwards for RPO) 4598 void PhaseIdealLoop::rpo(Node* start, Node_Stack &stk, VectorSet &visited, Node_List &rpo_list) const { 4599 stk.push(start, 0); 4600 visited.set(start->_idx); 4601 4602 while (stk.is_nonempty()) { 4603 Node* m = stk.node(); 4604 uint idx = stk.index(); 4605 if (idx < m->outcnt()) { 4606 stk.set_index(idx + 1); 4607 Node* n = m->raw_out(idx); 4608 if (n->is_CFG() && !visited.test_set(n->_idx)) { 4609 stk.push(n, 0); 4610 } 4611 } else { 4612 rpo_list.push(m); 4613 stk.pop(); 4614 } 4615 } 4616 } 4617 4618 4619 //============================================================================= 4620 //------------------------------LoopTreeIterator------------------------------- 4621 4622 // Advance to next loop tree using a preorder, left-to-right traversal. 4623 void LoopTreeIterator::next() { 4624 assert(!done(), "must not be done."); 4625 if (_curnt->_child != NULL) { 4626 _curnt = _curnt->_child; 4627 } else if (_curnt->_next != NULL) { 4628 _curnt = _curnt->_next; 4629 } else { 4630 while (_curnt != _root && _curnt->_next == NULL) { 4631 _curnt = _curnt->_parent; 4632 } 4633 if (_curnt == _root) { 4634 _curnt = NULL; 4635 assert(done(), "must be done."); 4636 } else { 4637 assert(_curnt->_next != NULL, "must be more to do"); 4638 _curnt = _curnt->_next; 4639 } 4640 } 4641 } --- EOF ---