/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/mulnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/rootnode.hpp"
#include "opto/shenandoahSupport.hpp"
#include "opto/subnode.hpp"

//=============================================================================
//------------------------------split_thru_phi---------------------------------
// Split Node 'n' through merge point if there is enough win.
Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
  if (n->Opcode() == Op_ConvI2L && n->bottom_type() != TypeLong::LONG) {
    // ConvI2L may have type information on it which is unsafe to push up
    // so disable this for now
    return NULL;
  }

  // Splitting range check CastIIs through a loop induction Phi can
  // cause new Phis to be created that are left unrelated to the loop
  // induction Phi and prevent optimizations (vectorization)
  if (n->Opcode() == Op_CastII && n->as_CastII()->has_range_check() &&
      region->is_CountedLoop() && n->in(1) == region->as_CountedLoop()->phi()) {
    return NULL;
  }

  int wins = 0;
  assert(!n->is_CFG(), "");
  assert(region->is_Region(), "");

  const Type* type = n->bottom_type();
  const TypeOopPtr *t_oop = _igvn.type(n)->isa_oopptr();
  Node *phi;
  if (t_oop != NULL && t_oop->is_known_instance_field()) {
    int iid    = t_oop->instance_id();
    int index  = C->get_alias_index(t_oop);
    int offset = t_oop->offset();
    phi = new PhiNode(region, type, NULL, iid, index, offset);
  } else {
    phi = PhiNode::make_blank(region, n);
  }
  uint old_unique = C->unique();
  for (uint i = 1; i < region->req(); i++) {
    Node *x;
    Node* the_clone = NULL;
    if (region->in(i) == C->top()) {
      x = C->top();             // Dead path?  Use a dead data op
    } else {
      x = n->clone();           // Else clone up the data op
      the_clone = x;            // Remember for possible deletion.
      // Alter data node to use pre-phi inputs
      if (n->in(0) == region)
        x->set_req( 0, region->in(i) );
      for (uint j = 1; j < n->req(); j++) {
        Node *in = n->in(j);
        if (in->is_Phi() && in->in(0) == region)
          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
      }
    }
    // Check for a 'win' on some paths
    const Type *t = x->Value(&_igvn);

    bool singleton = t->singleton();

    // A TOP singleton indicates that there are no possible values incoming
    // along a particular edge. In most cases, this is OK, and the Phi will
    // be eliminated later in an Ideal call. However, we can't allow this to
    // happen if the singleton occurs on loop entry, as the elimination of
    // the PhiNode may cause the resulting node to migrate back to a previous
    // loop iteration.
    if (singleton && t == Type::TOP) {
      // Is_Loop() == false does not confirm the absence of a loop (e.g., an
      // irreducible loop may not be indicated by an affirmative is_Loop());
      // therefore, the only top we can split thru a phi is on a backedge of
      // a loop.
      singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
    }

    if (singleton) {
      wins++;
      x = ((PhaseGVN&)_igvn).makecon(t);
    } else {
      // We now call Identity to try to simplify the cloned node.
      // Note that some Identity methods call phase->type(this).
      // Make sure that the type array is big enough for
      // our new node, even though we may throw the node away.
      // (Note: This tweaking with igvn only works because x is a new node.)
      _igvn.set_type(x, t);
      // If x is a TypeNode, capture any more-precise type permanently into Node,
      // otherwise it will not be updated during igvn->transform since
      // igvn->type(x) is set to x->Value() already.
      x->raise_bottom_type(t);
      if (x->Opcode() != Op_ShenandoahWriteBarrier) {
        Node *y = x->Identity(&_igvn);
        if (y != x) {
          wins++;
          x = y;
        } else {
          y = _igvn.hash_find(x);
          if (y) {
            wins++;
            x = y;
          } else {
            // Else x is a new node we are keeping
            // We do not need register_new_node_with_optimizer
            // because set_type has already been called.
            _igvn._worklist.push(x);
          }
        }
      } else {
        _igvn._worklist.push(x);
      }
    }
    if (x != the_clone && the_clone != NULL)
      _igvn.remove_dead_node(the_clone);
    phi->set_req( i, x );
  }
  // Too few wins?
  if (wins <= policy) {
    _igvn.remove_dead_node(phi);
    return NULL;
  }

  // Record Phi
  register_new_node( phi, region );

  for (uint i2 = 1; i2 < phi->req(); i2++) {
    Node *x = phi->in(i2);
    // If we commoned up the cloned 'x' with another existing Node,
    // the existing Node picks up a new use. We need to make the
    // existing Node occur higher up so it dominates its uses.
    Node *old_ctrl;
    IdealLoopTree *old_loop;

    if (x->is_Con()) {
      // Constant's control is always root.
      set_ctrl(x, C->root());
      continue;
    }
    // The occasional new node
    if (x->_idx >= old_unique) {     // Found a new, unplaced node?
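      // Descriptive note: a node with _idx at or above old_unique was created
      // during this split (the clone, a makecon, or an Identity result), so it
      // has no prior ctrl/loop bookkeeping; it is placed fresh below.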
      old_ctrl = NULL;
      old_loop = NULL;              // Not in any prior loop
    } else {
      old_ctrl = get_ctrl(x);
      old_loop = get_loop(old_ctrl); // Get prior loop
    }
    // New late point must dominate new use
    Node *new_ctrl = dom_lca(old_ctrl, region->in(i2));
    if (new_ctrl == old_ctrl) // Nothing is changed
      continue;

    IdealLoopTree *new_loop = get_loop(new_ctrl);

    // Don't move x into a loop if its uses are
    // outside of loop. Otherwise x will be cloned
    // for each use outside of this loop.
    IdealLoopTree *use_loop = get_loop(region);
    if (!new_loop->is_member(use_loop) &&
        (old_loop == NULL || !new_loop->is_member(old_loop))) {
      // Take early control, later control will be recalculated
      // during next iteration of loop optimizations.
      new_ctrl = get_early_ctrl(x);
      new_loop = get_loop(new_ctrl);
    }
    // Set new location
    set_ctrl(x, new_ctrl);
    // If changing loop bodies, see if we need to collect into new body
    if (old_loop != new_loop) {
      if (old_loop && !old_loop->_child)
        old_loop->_body.yank(x);
      if (!new_loop->_child)
        new_loop->_body.push(x); // Collect body info
    }
  }

  return phi;
}

/**
 * When splitting a Shenandoah write barrier through a phi, we
 * cannot replace the write-barrier input of the ShenandoahWBMemProj
 * with the phi. We must also split the ShenandoahWBMemProj through the
 * phi and generate a new memory phi for it.
 */
void PhaseIdealLoop::split_mem_thru_phi(Node* n, Node* r, Node* phi) {
  if (n->Opcode() == Op_ShenandoahWriteBarrier) {
    if (n->has_out_with(Op_ShenandoahWBMemProj)) {
      Node* old_mem_phi = n->in(ShenandoahBarrierNode::Memory);
      assert(r->is_Region(), "need region to control phi");
      assert(phi->is_Phi(), "expect phi");
      Node* memphi = PhiNode::make(r, old_mem_phi, Type::MEMORY, C->alias_type(n->adr_type())->adr_type());
      for (uint i = 1; i < r->req(); i++) {
        Node* wb = phi->in(i);
        if (wb->Opcode() == Op_ShenandoahWriteBarrier) {
          // assert(! wb->has_out_with(Op_ShenandoahWBMemProj), "new clone does not have mem proj");
          Node* new_proj = new ShenandoahWBMemProjNode(wb);
          register_new_node(new_proj, r->in(i));
          memphi->set_req(i, new_proj);
        } else {
          if (old_mem_phi->is_Phi() && old_mem_phi->in(0) == r) {
            memphi->set_req(i, old_mem_phi->in(i));
          }
        }
      }
      register_new_node(memphi, r);
      Node* old_mem_out = n->find_out_with(Op_ShenandoahWBMemProj);
      while (old_mem_out != NULL) {
        assert(old_mem_out != NULL, "expect memory projection");
        _igvn.replace_node(old_mem_out, memphi);
        old_mem_out = n->find_out_with(Op_ShenandoahWBMemProj);
      }
    }
    assert(! n->has_out_with(Op_ShenandoahWBMemProj), "no more memory outs");
  }
}

//------------------------------dominated_by------------------------------------
// Replace the dominated test with an obvious true or false.  Place it on the
// IGVN worklist for later cleanup.  Move control-dependent data Nodes on the
// live path up to the dominating control.
void PhaseIdealLoop::dominated_by( Node *prevdom, Node *iff, bool flip, bool exclude_loop_predicate ) {
  if (VerifyLoopOptimizations && PrintOpto) { tty->print_cr("dominating test"); }

  // prevdom is the dominating projection of the dominating test.
  assert( iff->is_If(), "" );
  assert(iff->Opcode() == Op_If || iff->Opcode() == Op_CountedLoopEnd || iff->Opcode() == Op_RangeCheck, "Check this code when new subtype is added");
  int pop = prevdom->Opcode();
  assert( pop == Op_IfFalse || pop == Op_IfTrue, "" );
  if (flip) {
    if (pop == Op_IfTrue)
      pop = Op_IfFalse;
    else
      pop = Op_IfTrue;
  }
  // 'con' is set to true or false to kill the dominated test.
  Node *con = _igvn.makecon(pop == Op_IfTrue ? TypeInt::ONE : TypeInt::ZERO);
  set_ctrl(con, C->root()); // Constant gets a new use
  // Hack the dominated test
  _igvn.replace_input_of(iff, 1, con);

  // If I don't have a reachable TRUE and FALSE path following the IfNode then
  // I can assume this path reaches an infinite loop.  In this case it's not
  // important to optimize the data Nodes - either the whole compilation will
  // be tossed or this path (and all data Nodes) will go dead.
  if (iff->outcnt() != 2) return;

  // Make control-dependent data Nodes on the live path (path that will remain
  // once the dominated IF is removed) become control-dependent on the
  // dominating projection.
  Node* dp = iff->as_If()->proj_out(pop == Op_IfTrue);

  // Loop predicates may have dependent checks which should not
  // be skipped. For example, a range check predicate has two checks
  // for lower and upper bounds.
  if (dp == NULL)
    return;

  ProjNode* dp_proj  = dp->as_Proj();
  ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
  if (exclude_loop_predicate &&
      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) != NULL ||
       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check) != NULL)) {
    // If this is a range check (IfNode::is_range_check), do not
    // reorder because Compile::allow_range_check_smearing might have
    // changed the check.
    return; // Let IGVN transformation change control dependence.
  }

  IdealLoopTree *old_loop = get_loop(dp);

  for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) {
    Node* cd = dp->fast_out(i); // Control-dependent node
    if (cd->depends_only_on_test()) {
      assert(cd->in(0) == dp, "");
      _igvn.replace_input_of(cd, 0, prevdom);
      set_early_ctrl(cd);
      IdealLoopTree *new_loop = get_loop(get_ctrl(cd));
      if (old_loop != new_loop) {
        if (!old_loop->_child) old_loop->_body.yank(cd);
        if (!new_loop->_child) new_loop->_body.push(cd);
      }
      --i;
      --imax;
    }
  }
}

//------------------------------has_local_phi_input----------------------------
// Return TRUE if 'n' has Phi inputs from its local block and no other
// block-local inputs (all non-local-phi inputs come from earlier blocks)
Node *PhaseIdealLoop::has_local_phi_input( Node *n ) {
  Node *n_ctrl = get_ctrl(n);
  // See if some inputs come from a Phi in this block, or from before
  // this block.
  uint i;
  for( i = 1; i < n->req(); i++ ) {
    Node *phi = n->in(i);
    if( phi->is_Phi() && phi->in(0) == n_ctrl )
      break;
  }
  if( i >= n->req() )
    return NULL;                // No Phi inputs; nowhere to clone thru

  // Check for inputs created between 'n' and the Phi input.  These
  // must split as well; they have already been given the chance
  // (courtesy of a post-order visit) and since they did not we must
  // recover the 'cost' of splitting them by being very profitable
  // when splitting 'n'.  Since this is unlikely we simply give up.
  for( i = 1; i < n->req(); i++ ) {
    Node *m = n->in(i);
    if( get_ctrl(m) == n_ctrl && !m->is_Phi() ) {
      // We allow the special case of AddP's with no local inputs.
      // This allows us to split-up address expressions.
      if (m->is_AddP() &&
          get_ctrl(m->in(2)) != n_ctrl &&
          get_ctrl(m->in(3)) != n_ctrl) {
        // Move the AddP up to dominating point
        set_ctrl_and_loop(m, find_non_split_ctrl(idom(n_ctrl)));
        continue;
      }
      return NULL;
    }
  }

  return n_ctrl;
}

//------------------------------remix_address_expressions----------------------
// Rework addressing expressions to get the most loop-invariant stuff
// moved out.  We'd like to do all associative operators, but it's especially
// important (common) to do address expressions.
Node *PhaseIdealLoop::remix_address_expressions( Node *n ) {
  if (!has_ctrl(n)) return NULL;
  Node *n_ctrl = get_ctrl(n);
  IdealLoopTree *n_loop = get_loop(n_ctrl);

  // See if 'n' mixes loop-varying and loop-invariant inputs and
  // itself is loop-varying.

  // Only interested in binary ops (and AddP)
  if( n->req() < 3 || n->req() > 4 ) return NULL;

  Node *n1_ctrl = get_ctrl(n->in( 1));
  Node *n2_ctrl = get_ctrl(n->in( 2));
  Node *n3_ctrl = get_ctrl(n->in(n->req() == 3 ? 2 : 3));
  IdealLoopTree *n1_loop = get_loop( n1_ctrl );
  IdealLoopTree *n2_loop = get_loop( n2_ctrl );
  IdealLoopTree *n3_loop = get_loop( n3_ctrl );

  // Does one of my inputs spin in a tighter loop than self?
  if( (n_loop->is_member( n1_loop ) && n_loop != n1_loop) ||
      (n_loop->is_member( n2_loop ) && n_loop != n2_loop) ||
      (n_loop->is_member( n3_loop ) && n_loop != n3_loop) )
    return NULL;                // Leave well enough alone

  // Is at least one of my inputs loop-invariant?
  if( n1_loop == n_loop &&
      n2_loop == n_loop &&
      n3_loop == n_loop )
    return NULL;                // No loop-invariant inputs


  int n_op = n->Opcode();

  // Replace expressions like ((V+I) << 2) with (V<<2 + I<<2).
  if( n_op == Op_LShiftI ) {
    // Scale is loop invariant
    Node *scale = n->in(2);
    Node *scale_ctrl = get_ctrl(scale);
    IdealLoopTree *scale_loop = get_loop(scale_ctrl );
    if( n_loop == scale_loop || !scale_loop->is_member( n_loop ) )
      return NULL;
    const TypeInt *scale_t = scale->bottom_type()->isa_int();
    if( scale_t && scale_t->is_con() && scale_t->get_con() >= 16 )
      return NULL;              // Don't bother with byte/short masking
    // Add must vary with loop (else shift would be loop-invariant)
    Node *add = n->in(1);
    Node *add_ctrl = get_ctrl(add);
    IdealLoopTree *add_loop = get_loop(add_ctrl);
    //assert( n_loop == add_loop, "" );
    if( n_loop != add_loop ) return NULL;  // happens w/ evil ZKM loops

    // Convert I-V into I+ (0-V); same for V-I
    if( add->Opcode() == Op_SubI &&
        _igvn.type( add->in(1) ) != TypeInt::ZERO ) {
      Node *zero = _igvn.intcon(0);
      set_ctrl(zero, C->root());
      Node *neg = new SubINode( _igvn.intcon(0), add->in(2) );
      register_new_node( neg, get_ctrl(add->in(2) ) );
      add = new AddINode( add->in(1), neg );
      register_new_node( add, add_ctrl );
    }
    if( add->Opcode() != Op_AddI ) return NULL;
    // See if one add input is loop invariant
    Node *add_var = add->in(1);
    Node *add_var_ctrl = get_ctrl(add_var);
    IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
    Node *add_invar = add->in(2);
    Node *add_invar_ctrl = get_ctrl(add_invar);
    IdealLoopTree *add_invar_loop = get_loop(add_invar_ctrl );
    if( add_var_loop == n_loop ) {
    } else if( add_invar_loop == n_loop ) {
      // Swap to find the invariant part
      add_invar = add_var;
      add_invar_ctrl = add_var_ctrl;
      add_invar_loop = add_var_loop;
      add_var = add->in(2);
      Node *add_var_ctrl = get_ctrl(add_var);
      IdealLoopTree *add_var_loop = get_loop(add_var_ctrl );
    } else                      // Else neither input is loop invariant
      return NULL;
    if( n_loop == add_invar_loop || !add_invar_loop->is_member( n_loop ) )
      return NULL;              // No invariant part of the add?

    // Yes!  Reshape address expression!
    Node *inv_scale = new LShiftINode( add_invar, scale );
    Node *inv_scale_ctrl =
      dom_depth(add_invar_ctrl) > dom_depth(scale_ctrl) ?
      add_invar_ctrl : scale_ctrl;
    register_new_node( inv_scale, inv_scale_ctrl );
    Node *var_scale = new LShiftINode( add_var, scale );
    register_new_node( var_scale, n_ctrl );
    Node *var_add = new AddINode( var_scale, inv_scale );
    register_new_node( var_add, n_ctrl );
    _igvn.replace_node( n, var_add );
    return var_add;
  }

  // Replace (I+V) with (V+I)
  if( n_op == Op_AddI ||
      n_op == Op_AddL ||
      n_op == Op_AddF ||
      n_op == Op_AddD ||
      n_op == Op_MulI ||
      n_op == Op_MulL ||
      n_op == Op_MulF ||
      n_op == Op_MulD ) {
    if( n2_loop == n_loop ) {
      assert( n1_loop != n_loop, "" );
      n->swap_edges(1, 2);
    }
  }

  // Replace ((I1 +p V) +p I2) with ((I1 +p I2) +p V),
  // but not if I2 is a constant.
  if( n_op == Op_AddP ) {
    if( n2_loop == n_loop && n3_loop != n_loop ) {
      if( n->in(2)->Opcode() == Op_AddP && !n->in(3)->is_Con() ) {
        Node *n22_ctrl = get_ctrl(n->in(2)->in(2));
        Node *n23_ctrl = get_ctrl(n->in(2)->in(3));
        IdealLoopTree *n22loop = get_loop( n22_ctrl );
        IdealLoopTree *n23_loop = get_loop( n23_ctrl );
        if( n22loop != n_loop && n22loop->is_member(n_loop) &&
            n23_loop == n_loop ) {
          Node *add1 = new AddPNode( n->in(1), n->in(2)->in(2), n->in(3) );
          // Stuff new AddP in the loop preheader
          register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) );
          Node *add2 = new AddPNode( n->in(1), add1, n->in(2)->in(3) );
          register_new_node( add2, n_ctrl );
          _igvn.replace_node( n, add2 );
          return add2;
        }
      }
    }

    // Replace (I1 +p (I2 + V)) with ((I1 +p I2) +p V)
    if (n2_loop != n_loop && n3_loop == n_loop) {
      if (n->in(3)->Opcode() == Op_AddX) {
        Node *V = n->in(3)->in(1);
        Node *I = n->in(3)->in(2);
        if (is_member(n_loop,get_ctrl(V))) {
        } else {
          Node *tmp = V; V = I; I = tmp;
        }
        if (!is_member(n_loop,get_ctrl(I))) {
          Node *add1 = new AddPNode(n->in(1), n->in(2), I);
          // Stuff new AddP in the loop preheader
          register_new_node(add1, n_loop->_head->in(LoopNode::EntryControl));
          Node *add2 = new AddPNode(n->in(1), add1, V);
          register_new_node(add2, n_ctrl);
          _igvn.replace_node(n, add2);
          return add2;
        }
      }
    }
  }

  return NULL;
}

//------------------------------conditional_move-------------------------------
// Attempt to replace a Phi with a conditional move.  We have some pretty
// strict profitability requirements.  All Phis at the merge point must
// be converted, so we can remove the control flow.  We need to limit the
// number of c-moves to a small handful.  All code that was in the side-arms
// of the CFG diamond is now speculatively executed.  This code has to be
// "cheap enough".  We are pretty much limited to CFG diamonds that merge
// 1 or 2 items with a total of 1 or 2 ops executed speculatively.
Node *PhaseIdealLoop::conditional_move( Node *region ) {

  assert(region->is_Region(), "sanity check");
  if (region->req() != 3) return NULL;

  // Check for CFG diamond
  Node *lp = region->in(1);
  Node *rp = region->in(2);
  if (!lp || !rp) return NULL;
  Node *lp_c = lp->in(0);
  if (lp_c == NULL || lp_c != rp->in(0) || !lp_c->is_If()) return NULL;
  IfNode *iff = lp_c->as_If();

  // Check for ops pinned in an arm of the diamond.
  // Can't remove the control flow in this case
  if (lp->outcnt() > 1) return NULL;
  if (rp->outcnt() > 1) return NULL;

  IdealLoopTree* r_loop = get_loop(region);
  assert(r_loop == get_loop(iff), "sanity");
  // Always convert to CMOVE if all results are used only outside this loop.
  bool used_inside_loop = (r_loop == _ltree_root);

  // Check profitability
  int cost = 0;
  int phis = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node *out = region->fast_out(i);
    if (!out->is_Phi()) continue; // Ignore other control edges, etc
    phis++;
    PhiNode* phi = out->as_Phi();
    BasicType bt = phi->type()->basic_type();
    switch (bt) {
    case T_DOUBLE:
      if (C->use_cmove()) {
        continue; //TODO: maybe we want to add some cost
      }
    case T_FLOAT: {
      cost += Matcher::float_cmove_cost(); // Could be very expensive
      break;
    }
    case T_LONG: {
      cost += Matcher::long_cmove_cost(); // May encode as 2 CMOVs
    }
    case T_INT:                 // These all CMOV fine
    case T_ADDRESS: {           // (RawPtr)
      cost++;
      break;
    }
    case T_NARROWOOP: // Fall through
    case T_OBJECT: {            // Base oops are OK, but not derived oops
      const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
      // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
      // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
      // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
      // have a Phi for the base here that we convert to a CMOVE all is well
      // and good.  But if the base is dead, we'll not make a CMOVE.  Later
      // the allocator will have to produce a base by creating a CMOVE of the
      // relevant bases.  This puts the allocator in the business of
      // manufacturing expensive instructions, generally a bad plan.
      // Just Say No to Conditionally-Moved Derived Pointers.
      if (tp && tp->offset() != 0)
        return NULL;
      cost++;
      break;
    }
    default:
      return NULL;              // In particular, can't do memory or I/O
    }
    // Add in the cost of any speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (get_ctrl(inp) == proj) { // Found local op
        cost++;
        // Check for a chain of dependent ops; these will all become
        // speculative in a CMOV.
        for (uint k = 1; k < inp->req(); k++)
          if (get_ctrl(inp->in(k)) == proj)
            cost += ConditionalMoveLimit; // Too much speculative goo
      }
    }
    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
    // This will likely Split-If, a higher-payoff operation.
    for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
      Node* use = phi->fast_out(k);
      if (use->is_Cmp() || use->is_DecodeNarrowPtr() || use->is_EncodeNarrowPtr())
        cost += ConditionalMoveLimit;
      // Is there a use inside the loop?
      // Note: check only basic types since CMoveP is pinned.
      if (!used_inside_loop && is_java_primitive(bt)) {
        IdealLoopTree* u_loop = get_loop(has_ctrl(use) ? get_ctrl(use) : use);
        if (r_loop == u_loop || r_loop->is_member(u_loop)) {
          used_inside_loop = true;
        }
      }
    }
  }//for
  Node* bol = iff->in(1);
  assert(bol->Opcode() == Op_Bool, "");
  int cmp_op = bol->in(1)->Opcode();
  // It is expensive to generate flags from a float compare.
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;

  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and block frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return NULL; // Too much goo

    // The BlockLayoutByFrequency optimization moves infrequent branches
    // off the hot path.  No point in CMOV'ing in such a case (110 is used
    // instead of 100 to account for the inexactness of the float value).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch.  No point in CMOV'ing if
  // we are going to predict accurately all the time.
  if (C->use_cmove() && cmp_op == Op_CmpD) ; //keep going
  else if (iff->_prob < infrequent_prob ||
           iff->_prob > (1.0f - infrequent_prob))
    return NULL;

  // --------------
  // Now replace all Phis with CMOV's
  Node *cmov_ctrl = iff->in(0);
  uint flip = (lp->Opcode() == Op_IfTrue);
  while (1) {
    PhiNode* phi = NULL;
    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
      Node *out = region->fast_out(i);
      if (out->is_Phi()) {
        phi = out->as_Phi();
        break;
      }
    }
    if (phi == NULL) break;
    if (PrintOpto && VerifyLoopOptimizations) { tty->print_cr("CMOV"); }
    // Move speculative ops
    for (uint j = 1; j < region->req(); j++) {
      Node *proj = region->in(j);
      Node *inp = phi->in(j);
      if (get_ctrl(inp) == proj) { // Found local op
#ifndef PRODUCT
        if (PrintOpto && VerifyLoopOptimizations) {
          tty->print("  speculate: ");
          inp->dump();
        }
#endif
        set_ctrl(inp, cmov_ctrl);
      }
    }
    Node *cmov = CMoveNode::make(cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi));
    register_new_node( cmov, cmov_ctrl );
    _igvn.replace_node( phi, cmov );
#ifndef PRODUCT
    if (TraceLoopOpts) {
      tty->print("CMOV  ");
      r_loop->dump_head();
      if (Verbose) {
        bol->in(1)->dump(1);
        cmov->dump(1);
      }
    }
    if (VerifyLoopOptimizations) verify();
#endif
  }

  // The useless CFG diamond will fold up later; see the optimization in
  // RegionNode::Ideal.
  _igvn._worklist.push(region);

  return iff->in(1);
}

static void enqueue_cfg_uses(Node* m, Unique_Node_List& wq) {
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* u = m->fast_out(i);
    if (u->is_CFG()) {
      if (u->Opcode() == Op_NeverBranch) {
        u = ((NeverBranchNode*)u)->proj_out(0);
        enqueue_cfg_uses(u, wq);
      } else {
        wq.push(u);
      }
    }
  }
}

// Try moving a store out of a loop, right before the loop
Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
  // Store has to be first in the loop body
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  if (n->is_Store() && n_loop != _ltree_root && n_loop->is_loop() && n->in(0) != NULL) {
    Node* address = n->in(MemNode::Address);
    Node* value = n->in(MemNode::ValueIn);
    Node* mem = n->in(MemNode::Memory);
    IdealLoopTree* address_loop = get_loop(get_ctrl(address));
    IdealLoopTree* value_loop = get_loop(get_ctrl(value));

    // - address and value must be loop invariant
    // - memory must be a memory Phi for the loop
    // - Store must be the only store on this memory slice in the
    //   loop: if there's another store following this one then the value
    //   written at iteration i by the second store could be overwritten
    //   at iteration i+n by the first store: it's not safe to move the
    //   first store out of the loop
    // - nothing must observe the memory Phi: it guarantees no read
    //   before the store; we are also guaranteed the store post-
    //   dominates the loop head (ignoring a possible early
    //   exit).  Otherwise there would be an extra Phi involved between the
    //   loop's Phi and the store.
    // - there must be no early exit from the loop before the Store
    //   (such an exit most of the time would be an extra use of the
    //   memory Phi but sometimes is a bottom memory Phi that takes the
    //   store as input).

    if (!n_loop->is_member(address_loop) &&
        !n_loop->is_member(value_loop) &&
        mem->is_Phi() && mem->in(0) == n_loop->_head &&
        mem->outcnt() == 1 &&
        mem->in(LoopNode::LoopBackControl) == n) {

      assert(n_loop->_tail != NULL, "need a tail");
      assert(is_dominator(n_ctrl, n_loop->_tail), "store control must not be in a branch in the loop");

      // Verify that there's no early exit of the loop before the store.
      bool ctrl_ok = false;
      {
        // Follow control from the loop head until we reach n, exit the loop or
        // reach the tail
        ResourceMark rm;
        Unique_Node_List wq;
        wq.push(n_loop->_head);

        for (uint next = 0; next < wq.size(); ++next) {
          Node *m = wq.at(next);
          if (m == n->in(0)) {
            ctrl_ok = true;
            continue;
          }
          assert(!has_ctrl(m), "should be CFG");
          if (!n_loop->is_member(get_loop(m)) || m == n_loop->_tail) {
            ctrl_ok = false;
            break;
          }
          enqueue_cfg_uses(m, wq);
          if (wq.size() > 10) {
            ctrl_ok = false;
            break;
          }
        }
      }
      if (ctrl_ok) {
        // move the Store
        _igvn.replace_input_of(mem, LoopNode::LoopBackControl, mem);
        _igvn.replace_input_of(n, 0, n_loop->_head->in(LoopNode::EntryControl));
        _igvn.replace_input_of(n, MemNode::Memory, mem->in(LoopNode::EntryControl));
        // Disconnect the phi now. An empty phi can confuse other
        // optimizations in this pass of loop opts.
        _igvn.replace_node(mem, mem->in(LoopNode::EntryControl));
        n_loop->_body.yank(mem);

        set_ctrl_and_loop(n, n->in(0));

        return n;
      }
    }
  }
  return NULL;
}

// Try moving a store out of a loop, right after the loop
void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
  if (n->is_Store() && n->in(0) != NULL) {
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    // Store must be in a loop
    if (n_loop != _ltree_root && !n_loop->_irreducible) {
      Node* address = n->in(MemNode::Address);
      Node* value = n->in(MemNode::ValueIn);
      IdealLoopTree* address_loop = get_loop(get_ctrl(address));
      // address must be loop invariant
      if (!n_loop->is_member(address_loop)) {
        // Store must be last on this memory slice in the loop and
        // nothing in the loop must observe it
        Node* phi = NULL;
        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
          Node* u = n->fast_out(i);
          if (has_ctrl(u)) { // control use?
            IdealLoopTree *u_loop = get_loop(get_ctrl(u));
            if (!n_loop->is_member(u_loop)) {
              continue;
            }
            if (u->is_Phi() && u->in(0) == n_loop->_head) {
              assert(_igvn.type(u) == Type::MEMORY, "bad phi");
              // multiple phis on the same slice are possible
              if (phi != NULL) {
                return;
              }
              phi = u;
              continue;
            }
          }
          return;
        }
        if (phi != NULL) {
          // Nothing in the loop before the store (next iteration)
          // must observe the stored value
          bool mem_ok = true;
          {
            ResourceMark rm;
            Unique_Node_List wq;
            wq.push(phi);
            for (uint next = 0; next < wq.size() && mem_ok; ++next) {
              Node *m = wq.at(next);
              for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax && mem_ok; i++) {
                Node* u = m->fast_out(i);
                if (u->is_Store() || u->is_Phi()) {
                  if (u != n) {
                    wq.push(u);
                    mem_ok = (wq.size() <= 10);
                  }
                } else {
                  mem_ok = false;
                  break;
                }
              }
            }
          }
          if (mem_ok) {
            // Move the Store out of the loop, creating clones along
            // all paths out of the loop that observe the stored value
            _igvn.rehash_node_delayed(phi);
            int count = phi->replace_edge(n, n->in(MemNode::Memory));
            assert(count > 0, "inconsistent phi");
            for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
              Node* u = n->fast_out(i);
              Node* c = get_ctrl(u);

              if (u->is_Phi()) {
                c = u->in(0)->in(u->find_edge(n));
              }
              IdealLoopTree *u_loop = get_loop(c);
              assert (!n_loop->is_member(u_loop), "only the phi should have been a use in the loop");
              while(true) {
                Node* next_c = find_non_split_ctrl(idom(c));
                if (n_loop->is_member(get_loop(next_c))) {
                  break;
                }
                c = next_c;
              }

              Node* st = n->clone();
              st->set_req(0, c);
              _igvn.register_new_node_with_optimizer(st);

              set_ctrl(st, c);
              IdealLoopTree* new_loop = get_loop(c);
              assert(new_loop != n_loop, "should be moved out of loop");
              if (new_loop->_child == NULL) new_loop->_body.push(st);

              _igvn.replace_input_of(u, u->find_edge(n), st);
              --imax;
              --i;
            }


            assert(n->outcnt() == 0, "all uses should be gone");
            _igvn.replace_input_of(n, MemNode::Memory, C->top());
            // Disconnect the phi now. An empty phi can confuse other
            // optimizations in this pass of loop opts.
            if (phi->in(LoopNode::LoopBackControl) == phi) {
              _igvn.replace_node(phi, phi->in(LoopNode::EntryControl));
              n_loop->_body.yank(phi);
            }
          }
        }
      }
    }
  }
}

//------------------------------split_if_with_blocks_pre-----------------------
// Do the real work in a non-recursive function.  Data nodes want to be
// cloned in the pre-order so they can feed each other nicely.
Node *PhaseIdealLoop::split_if_with_blocks_pre( Node *n ) {
  // Cloning these guys is unlikely to win
  int n_op = n->Opcode();
  if( n_op == Op_MergeMem ) return n;
  if( n->is_Proj() ) return n;
  // Do not clone-up CmpFXXX variations, as these are always
  // followed by a CmpI
  if( n->is_Cmp() ) return n;
  // Attempt to use a conditional move instead of a phi/branch
  if( ConditionalMoveLimit > 0 && n_op == Op_Region ) {
    Node *cmov = conditional_move( n );
    if( cmov ) return cmov;
  }
  if( n->is_CFG() || n->is_LoadStore() )
    return n;
  if( n_op == Op_Opaque1 ||     // Opaque nodes cannot be mod'd
      n_op == Op_Opaque2 ) {
    if( !C->major_progress() )  // If chance of no more loop opts...
      _igvn._worklist.push(n);  // maybe we'll remove them
    return n;
  }

  if( n->is_Con() ) return n;   // No cloning for Con nodes

  Node *n_ctrl = get_ctrl(n);
  if( !n_ctrl ) return n;       // Dead node

  Node* res = try_move_store_before_loop(n, n_ctrl);
  if (res != NULL) {
    return n;
  }

  try_move_shenandoah_barrier_before_loop(n, n_ctrl);

  res = try_common_shenandoah_barriers(n, n_ctrl);
  if (res != NULL) {
    return res;
  }

  // Attempt to remix address expressions for loop invariants
  Node *m = remix_address_expressions( n );
  if( m ) return m;

  if (n->is_ConstraintCast()) {
    Node* dom_cast = n->as_ConstraintCast()->dominating_cast(this);
    if (dom_cast != NULL) {
      _igvn.replace_node(n, dom_cast);
      return dom_cast;
    }
  }

  // Determine if the Node has inputs from some local Phi.
  // Returns the block to clone thru.
  Node *n_blk = has_local_phi_input( n );
  if( !n_blk ) return n;

  // Do not clone the trip counter through on a CountedLoop
  // (messes up the canonical shape).
  if( n_blk->is_CountedLoop() && n->Opcode() == Op_AddI ) return n;

  // Check for having no control input; not pinned.  Allow
  // dominating control.
  if (n->in(0)) {
    Node *dom = idom(n_blk);
    if (dom_lca(n->in(0), dom) != n->in(0)) {
      return n;
    }
  }
  // Policy: when is it profitable.  You must get more wins than
  // policy before it is considered profitable.  Policy is usually 0,
  // so 1 win is considered profitable.  Big merges will require big
  // cloning, so get a larger policy.
  int policy = n_blk->req() >> 2;

  // If the loop is a candidate for range check elimination,
  // delay splitting through its phi until a later loop optimization
  if (n_blk->is_CountedLoop()) {
    IdealLoopTree *lp = get_loop(n_blk);
    if (lp && lp->_rce_candidate) {
      return n;
    }
  }

  // Use same limit as split_if_with_blocks_post
  if( C->live_nodes() > 35000 ) return n; // Method too big

  // Split 'n' through the merge point if it is profitable
  Node *phi = split_thru_phi( n, n_blk, policy );
  if (!phi) return n;

  // Found a Phi to split thru!
  // Replace 'n' with the new phi
  split_mem_thru_phi(n, n_blk, phi);
  _igvn.replace_node( n, phi );
  // Moved a load around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_Load() &&
      !phi->in(LoopNode::LoopBackControl)->is_Load())
    C->set_major_progress();

  // Moved a barrier around the loop, 'en-registering' something.
  if (n_blk->is_Loop() && n->is_ShenandoahBarrier() &&
      !phi->in(LoopNode::LoopBackControl)->is_ShenandoahBarrier())
    C->set_major_progress();

  return phi;
}

static bool merge_point_too_heavy(Compile* C, Node* region) {
  // Bail out if the region and its phis have too many users.
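  // (Rough sizing note: each use counted in 'weight' below may be cloned by
  // split-if, so the remaining node budget is required to be about 8x the
  // weight before the transformation is attempted.)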
  int weight = 0;
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    weight += region->fast_out(i)->outcnt();
  }
  int nodes_left = C->max_node_limit() - C->live_nodes();
  if (weight * 8 > nodes_left) {
    if (PrintOpto) {
      tty->print_cr("*** Split-if bails out: %d nodes, region weight %d", C->unique(), weight);
    }
    return true;
  } else {
    return false;
  }
}

static bool merge_point_safe(Node* region) {
  // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
  // having a PhiNode input. This sidesteps the dangerous case where the split
  // ConvI2LNode may become TOP if the input Value() does not
  // overlap the ConvI2L range, leaving a node which may not dominate its
  // uses.
  // A better fix for this problem can be found in the BugTraq entry, but
  // expediency for Mantis demands this hack.
  // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
  // split_if_with_blocks from splitting a block because we could not move around
  // the FastLockNode.
  for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    Node* n = region->fast_out(i);
    if (n->is_Phi()) {
      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* m = n->fast_out(j);
        if (m->is_FastLock())
          return false;
#ifdef _LP64
        if (m->Opcode() == Op_ConvI2L)
          return false;
        if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
          return false;
        }
#endif
      }
    }
  }
  return true;
}


//------------------------------place_near_use---------------------------------
// Place some computation next to use but not inside inner loops.
// For inner loop uses move it to the preheader area.
Node *PhaseIdealLoop::place_near_use( Node *useblock ) const {
  IdealLoopTree *u_loop = get_loop( useblock );
  return (u_loop->_irreducible || u_loop->_child)
    ? useblock
    : u_loop->_head->in(LoopNode::EntryControl);
}


bool PhaseIdealLoop::identical_backtoback_ifs(Node *n) {
  if (!n->is_If()) {
    return false;
  }
  if (!n->in(0)->is_Region()) {
    return false;
  }
  Node* region = n->in(0);
  Node* dom = idom(region);
  if (!dom->is_If() || dom->in(1) != n->in(1)) {
    return false;
  }
  IfNode* dom_if = dom->as_If();
  Node* proj_true = dom_if->proj_out(1);
  Node* proj_false = dom_if->proj_out(0);

  for (uint i = 1; i < region->req(); i++) {
    if (is_dominator(proj_true, region->in(i))) {
      continue;
    }
    if (is_dominator(proj_false, region->in(i))) {
      continue;
    }
    return false;
  }

  return true;
}

bool PhaseIdealLoop::can_split_if(Node *n_ctrl) {
  if (C->live_nodes() > 35000) {
    return false; // Method too big
  }

  // Do not do 'split-if' if irreducible loops are present.
  if (_has_irreducible_loops) {
    return false;
  }

  if (merge_point_too_heavy(C, n_ctrl)) {
    return false;
  }

  // Do not do 'split-if' if some paths are dead.  First do dead code
  // elimination and then see if it's still profitable.
  for (uint i = 1; i < n_ctrl->req(); i++) {
    if (n_ctrl->in(i) == C->top()) {
      return false;
    }
  }

  // If trying to do a 'Split-If' at the loop head, it is only
  // profitable if the cmp folds up on BOTH paths.  Otherwise we
  // risk peeling a loop forever.

  // CNC - Disabled for now.  Requires careful handling of loop
  // body selection for the cloned code.  Also, make sure we check
  // for any input path not being in the same loop as n_ctrl.  For
  // irreducible loops we cannot check for 'n_ctrl->is_Loop()'
  // because the alternative loop entry points won't be converted
  // into LoopNodes.
  IdealLoopTree *n_loop = get_loop(n_ctrl);
  for (uint j = 1; j < n_ctrl->req(); j++) {
    if (get_loop(n_ctrl->in(j)) != n_loop) {
      return false;
    }
  }

  // Check for safety of the merge point.
  if (!merge_point_safe(n_ctrl)) {
    return false;
  }

  return true;
}

//------------------------------split_if_with_blocks_post----------------------
// Do the real work in a non-recursive function.  CFG hackery wants to be
// in the post-order, so it can dirty the I-DOM info and not use the dirtied
// info.
void PhaseIdealLoop::split_if_with_blocks_post(Node *n) {

  // Cloning Cmp through Phi's involves the split-if transform.
  // FastLock is not used by an If
  if (n->is_Cmp() && !n->is_FastLock()) {
    Node *n_ctrl = get_ctrl(n);
    // Determine if the Node has inputs from some local Phi.
    // Returns the block to clone thru.
    Node *n_blk = has_local_phi_input(n);
    if (n_blk != n_ctrl) {
      return;
    }

    if (!can_split_if(n_ctrl)) {
      return;
    }

    if (n->outcnt() != 1) {
      return; // Multiple bool's from 1 compare?
    }
    Node *bol = n->unique_out();
    assert(bol->is_Bool(), "expect a bool here");
    if (bol->outcnt() != 1) {
      return; // Multiple branches from 1 compare?
    }
    Node *iff = bol->unique_out();

    // Check some safety conditions
    if (iff->is_If()) {           // Classic split-if?
      if (iff->in(0) != n_ctrl) {
        return; // Compare must be in same blk as if
      }
    } else if (iff->is_CMove()) { // Trying to split-up a CMOVE
      // Can't split CMove with different control edge.
      if (iff->in(0) != NULL && iff->in(0) != n_ctrl ) {
        return;
      }
      if (get_ctrl(iff->in(2)) == n_ctrl ||
          get_ctrl(iff->in(3)) == n_ctrl) {
        return;                 // Inputs not yet split-up
      }
      if (get_loop(n_ctrl) != get_loop(get_ctrl(iff))) {
        return;                 // Loop-invar test gates loop-varying CMOVE
      }
    } else {
      return;  // some other kind of node, such as an Allocate
    }

    // When is split-if profitable?  Every 'win' means some control flow
    // goes dead, so it's almost always a win.
    int policy = 0;
    // Split compare 'n' through the merge point if it is profitable
    Node *phi = split_thru_phi( n, n_ctrl, policy);
    if (!phi) {
      return;
    }

    // Found a Phi to split thru!
    // Replace 'n' with the new phi
    _igvn.replace_node(n, phi);

    // Now split the bool up thru the phi
    Node *bolphi = split_thru_phi(bol, n_ctrl, -1);
    guarantee(bolphi != NULL, "null boolean phi node");

    _igvn.replace_node(bol, bolphi);
    assert(iff->in(1) == bolphi, "");

    if (bolphi->Value(&_igvn)->singleton()) {
      return;
    }

    // Conditional-move?  Must split up now
    if (!iff->is_If()) {
      Node *cmovphi = split_thru_phi(iff, n_ctrl, -1);
      _igvn.replace_node(iff, cmovphi);
      return;
    }

    // Now split the IF
    do_split_if(iff);
    return;
  }

  // Two identical ifs back to back can be merged
  if (identical_backtoback_ifs(n) && can_split_if(n->in(0))) {
    Node *n_ctrl = n->in(0);
    PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
    IfNode* dom_if = idom(n_ctrl)->as_If();
    Node* proj_true = dom_if->proj_out(1);
    Node* proj_false = dom_if->proj_out(0);
    Node* con_true = _igvn.makecon(TypeInt::ONE);
    Node* con_false = _igvn.makecon(TypeInt::ZERO);

    for (uint i = 1; i < n_ctrl->req(); i++) {
      if (is_dominator(proj_true, n_ctrl->in(i))) {
        bolphi->init_req(i, con_true);
      } else {
        assert(is_dominator(proj_false, n_ctrl->in(i)), "bad if");
        bolphi->init_req(i, con_false);
      }
    }
    register_new_node(bolphi, n_ctrl);
    _igvn.replace_input_of(n, 1, bolphi);

    // Now split the IF
    do_split_if(n);
    return;
  }

  // Check for an IF ready to split; one that has its
  // condition codes input coming from a Phi at the block start.
  int n_op = n->Opcode();

  // Check for an IF being dominated by another IF with the same test
  if (n_op == Op_If ||
      n_op == Op_RangeCheck) {
    Node *bol = n->in(1);
    uint max = bol->outcnt();
    // Check for same test used more than once?
    if (max > 1 && bol->is_Bool()) {
      // Search up IDOMs to see if this IF is dominated.
      Node *cutoff = get_ctrl(bol);

      // Now search up IDOMs till cutoff, looking for a dominating test
      Node *prevdom = n;
      Node *dom = idom(prevdom);
      while (dom != cutoff) {
        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
          // Replace the dominated test with an obvious true or false.
          // Place it on the IGVN worklist for later cleanup.
          C->set_major_progress();
          dominated_by(prevdom, n, false, true);
#ifndef PRODUCT
          if( VerifyLoopOptimizations ) verify();
#endif
          return;
        }
        prevdom = dom;
        dom = idom(prevdom);
      }
    }
  }

  // See if a shared loop-varying computation has no loop-varying uses.
  // Happens if something is only used for JVM state in uncommon trap exits,
  // like various versions of induction variable+offset.  Clone the
  // computation per usage to allow it to sink out of the loop.
  if (has_ctrl(n) && !n->in(0)) { // n not dead and has no control edge (can float about)
    Node *n_ctrl = get_ctrl(n);
    IdealLoopTree *n_loop = get_loop(n_ctrl);
    if( n_loop != _ltree_root ) {
      DUIterator_Fast imax, i = n->fast_outs(imax);
      for (; i < imax; i++) {
        Node* u = n->fast_out(i);
        if( !has_ctrl(u) ) break; // Found control user
        IdealLoopTree *u_loop = get_loop(get_ctrl(u));
        if( u_loop == n_loop ) break; // Found loop-varying use
        if( n_loop->is_member( u_loop ) ) break; // Found use in inner loop
        if( u->Opcode() == Op_Opaque1 ) break; // Found loop limit, bugfix for 4677003
      }
      bool did_break = (i < imax);  // Did we break out of the previous loop?
      if (!did_break && n->outcnt() > 1) { // All uses in outer loops!
        Node *late_load_ctrl = NULL;
        if (n->is_Load()) {
          // If n is a load, get and save the result from get_late_ctrl(),
          // to be later used in calculating the control for n's clones.
          clear_dom_lca_tags();
          late_load_ctrl = get_late_ctrl(n, n_ctrl);
        }
        // If n is a load, and the late control is the same as the current
        // control, then the cloning of n is a pointless exercise, because
        // GVN will ensure that we end up where we started.
        if (!n->is_Load() || late_load_ctrl != n_ctrl) {
          for (DUIterator_Last jmin, j = n->last_outs(jmin); j >= jmin; ) {
            Node *u = n->last_out(j); // Clone private computation per use
            _igvn.rehash_node_delayed(u);
            Node *x = n->clone(); // Clone computation
            Node *x_ctrl = NULL;
            if( u->is_Phi() ) {
              // Replace all uses of normal nodes.  Replace Phi uses
              // individually, so the separate Nodes can sink down
              // different paths.
              uint k = 1;
              while( u->in(k) != n ) k++;
              u->set_req( k, x );
              // x goes next to Phi input path
              x_ctrl = u->in(0)->in(k);
              --j;
            } else {              // Normal use
              // Replace all uses
              for( uint k = 0; k < u->req(); k++ ) {
                if( u->in(k) == n ) {
                  u->set_req( k, x );
                  --j;
                }
              }
              x_ctrl = get_ctrl(u);
            }

            // Find control for 'x' next to use but not inside inner loops.
            // For inner loop uses get the preheader area.
            x_ctrl = place_near_use(x_ctrl);

            if (n->is_Load()) {
              // For loads, add a control edge to a CFG node outside of the loop
              // to force them to not combine and return back inside the loop
              // during GVN optimization (4641526).
              //
              // Because we are setting the actual control input, factor in
              // the result from get_late_ctrl() so we respect any
              // anti-dependences. (6233005).
              x_ctrl = dom_lca(late_load_ctrl, x_ctrl);

              // Don't allow the control input to be a CFG splitting node.
              // Such nodes should only have ProjNodes as outs, e.g. IfNode
              // should only have IfTrueNode and IfFalseNode (4985384).
              x_ctrl = find_non_split_ctrl(x_ctrl);
              assert(dom_depth(n_ctrl) <= dom_depth(x_ctrl), "n is later than its clone");

              x->set_req(0, x_ctrl);
            }
            register_new_node(x, x_ctrl);

            // Some institutional knowledge is needed here: 'x' is
            // yanked because if the optimizer runs GVN on it all the
            // cloned x's will common up and undo this optimization and
            // be forced back in the loop.  This is annoying because it
            // makes +VerifyOpto report false-positives on progress.  I
            // tried setting control edges on the x's to force them to
            // not combine, but the matching gets worried when it tries
            // to fold a StoreP and an AddP together (as part of an
            // address expression) and the AddP and StoreP have
            // different controls.
            if (!x->is_Load() && !x->is_DecodeNarrowPtr() && !x->is_ShenandoahBarrier()) _igvn._worklist.yank(x);
          }
          _igvn.remove_dead_node(n);
        }
      }
    }
  }

  try_move_store_after_loop(n);

  // Check for Opaque2's whose loop has disappeared - whose input is in the
  // same loop nest as their output.  Remove 'em, they are no longer useful.
  if( n_op == Op_Opaque2 &&
      n->in(1) != NULL &&
      get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) {
    _igvn.replace_node( n, n->in(1) );
  }
}

//------------------------------split_if_with_blocks---------------------------
// Check for aggressive application of 'split-if' optimization,
// using basic block level info.
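// The walk below is an explicit depth-first traversal from the root:
// pre-order work (split_if_with_blocks_pre) clones data nodes up through
// local Phis, and post-order work (split_if_with_blocks_post) performs the
// CFG-level split-if, if-merging and dominated-test elimination.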
void PhaseIdealLoop::split_if_with_blocks( VectorSet &visited, Node_Stack &nstack ) {
  Node *n = C->root();
  visited.set(n->_idx); // first, mark node as visited
  // Do pre-visit work for root
  n = split_if_with_blocks_pre( n );
  uint cnt = n->outcnt();
  uint i   = 0;
  while (true) {
    // Visit all children
    if (i < cnt) {
      Node* use = n->raw_out(i);
      ++i;
      if (use->outcnt() != 0 && !visited.test_set(use->_idx)) {
        // Now do pre-visit work for this use
        use = split_if_with_blocks_pre( use );
        nstack.push(n, i); // Save parent and next use's index.
        n   = use;         // Process all children of current use.
        cnt = use->outcnt();
        i   = 0;
      }
    }
    else {
      // All of n's children have been processed, complete post-processing.
      if (cnt != 0 && !n->is_Con()) {
        assert(has_node(n), "no dead nodes");
        split_if_with_blocks_post( n );
      }
      if (nstack.is_empty()) {
        // Finished all nodes on stack.
        break;
      }
      // Get saved parent node and next use's index.  Visit the rest of uses.
      n   = nstack.node();
      cnt = n->outcnt();
      i   = nstack.index();
      nstack.pop();
    }
  }
}


//=============================================================================
//
//                   C L O N E   A   L O O P   B O D Y
//

//------------------------------clone_iff--------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
// through the Phi recursively, and return a Bool.
BoolNode *PhaseIdealLoop::clone_iff( PhiNode *phi, IdealLoopTree *loop ) {

  // Convert this Phi into a Phi merging Bools
  uint i;
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.replace_input_of(phi, i, clone_iff( b->as_Phi(), loop ));
    } else {
      assert( b->is_Bool(), "" );
    }
  }

  Node *sample_bool = phi->in(1);
  Node *sample_cmp  = sample_bool->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
  for( i = 1; i < phi->req(); i++ ) {
    Node *n1 = phi->in(i)->in(1)->in(1);
    Node *n2 = phi->in(i)->in(1)->in(2);
    phi1->set_req( i, n1 );
    phi2->set_req( i, n2 );
    phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
  }
  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2); // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  // Make a new Bool
  Node *b = sample_bool->clone();
  b->set_req(1,cmp);
  _igvn.register_new_node_with_optimizer(b);
  set_ctrl(b, phi->in(0));

  assert( b->is_Bool(), "" );
  return (BoolNode*)b;
}

//------------------------------clone_bool-------------------------------------
// Passed in a Phi merging (recursively) some nearly equivalent Bool/Cmps.
// "Nearly" because all Nodes have been cloned from the original in the loop,
// but the fall-in edges to the Cmp are different.  Clone bool/Cmp pairs
// through the Phi recursively, and return a Cmp.
CmpNode *PhaseIdealLoop::clone_bool( PhiNode *phi, IdealLoopTree *loop ) {
  uint i;
  // Convert this Phi into a Phi merging Bools
  for( i = 1; i < phi->req(); i++ ) {
    Node *b = phi->in(i);
    if( b->is_Phi() ) {
      _igvn.replace_input_of(phi, i, clone_bool( b->as_Phi(), loop ));
    } else {
      assert( b->is_Cmp() || b->is_top(), "inputs are all Cmp or TOP" );
    }
  }

  Node *sample_cmp = phi->in(1);

  // Make Phis to merge the Cmp's inputs.
  PhiNode *phi1 = new PhiNode( phi->in(0), Type::TOP );
  PhiNode *phi2 = new PhiNode( phi->in(0), Type::TOP );
  for( uint j = 1; j < phi->req(); j++ ) {
    Node *cmp_top = phi->in(j); // Inputs are all Cmp or TOP
    Node *n1, *n2;
    if( cmp_top->is_Cmp() ) {
      n1 = cmp_top->in(1);
      n2 = cmp_top->in(2);
    } else {
      n1 = n2 = cmp_top;
    }
    phi1->set_req( j, n1 );
    phi2->set_req( j, n2 );
    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
  }

  // See if these Phis have been made before.
  // Register with optimizer
  Node *hit1 = _igvn.hash_find_insert(phi1);
  if( hit1 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi1); // Remove new phi
    assert( hit1->is_Phi(), "" );
    phi1 = (PhiNode*)hit1;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi1);
  }
  Node *hit2 = _igvn.hash_find_insert(phi2);
  if( hit2 ) {                  // Hit, toss just made Phi
    _igvn.remove_dead_node(phi2); // Remove new phi
    assert( hit2->is_Phi(), "" );
    phi2 = (PhiNode*)hit2;      // Use existing phi
  } else {                      // Miss
    _igvn.register_new_node_with_optimizer(phi2);
  }
  // Register Phis with loop/block info
  set_ctrl(phi1, phi->in(0));
  set_ctrl(phi2, phi->in(0));
  // Make a new Cmp
  Node *cmp = sample_cmp->clone();
  cmp->set_req( 1, phi1 );
  cmp->set_req( 2, phi2 );
  _igvn.register_new_node_with_optimizer(cmp);
  set_ctrl(cmp, phi->in(0));

  assert( cmp->is_Cmp(), "" );
  return (CmpNode*)cmp;
}

//------------------------------sink_use---------------------------------------
// If 'use' was in the loop-exit block, it now needs to be sunk
// below the post-loop merge point.
void PhaseIdealLoop::sink_use( Node *use, Node *post_loop ) {
  if (!use->is_CFG() && get_ctrl(use) == post_loop->in(2)) {
    set_ctrl(use, post_loop);
    for (DUIterator j = use->outs(); use->has_out(j); j++)
      sink_use(use->out(j), post_loop);
  }
}

//------------------------------clone_loop-------------------------------------
//
//                   C L O N E   A   L O O P   B O D Y
//
// This is the basic building block of the loop optimizations.  It clones an
// entire loop body.  It makes an old_new loop body mapping; with this mapping
// you can find the new-loop equivalent to an old-loop node.  All new-loop
// nodes are exactly equal to their old-loop counterparts, all edges are the
// same.  All exits from the old-loop now have a RegionNode that merges the
// equivalent new-loop path.  This is true even for the normal "loop-exit"
// condition.  All uses of loop-invariant old-loop values now come from (one
// or more) Phis that merge their new-loop equivalents.
//
// This operation leaves the graph in an illegal state: there are two valid
// control edges coming from the loop pre-header to both loop bodies.  I'll
// definitely have to hack the graph after running this transform.
//
// From this building block I will further edit edges to perform loop peeling
// or loop unrolling or iteration splitting (Range-Check-Elimination), etc.
//
// Parameter side_by_side_idom:
//   When side_by_side_idom is NULL, the dominator tree is constructed for
//   the clone loop to dominate the original.  Used in construction of
//   pre-main-post loop sequence.
//   When nonnull, the clone and original are side-by-side, both are
//   dominated by the side_by_side_idom node.  Used in construction of
//   unswitched loops.
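//
// A usage sketch (hypothetical caller): after calling
//   clone_loop(loop, old_new, dd, NULL);
// the clone of any node 'n' in loop->_body can be looked up as
// old_new[n->_idx].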
1646 void PhaseIdealLoop::clone_loop( IdealLoopTree *loop, Node_List &old_new, int dd, 1647 Node* side_by_side_idom) { 1648 1649 if (C->do_vector_loop() && PrintOpto) { 1650 const char* mname = C->method()->name()->as_quoted_ascii(); 1651 if (mname != NULL) { 1652 tty->print("PhaseIdealLoop::clone_loop: for vectorize method %s\n", mname); 1653 } 1654 } 1655 1656 CloneMap& cm = C->clone_map(); 1657 Dict* dict = cm.dict(); 1658 if (C->do_vector_loop()) { 1659 cm.set_clone_idx(cm.max_gen()+1); 1660 #ifndef PRODUCT 1661 if (PrintOpto) { 1662 tty->print_cr("PhaseIdealLoop::clone_loop: _clone_idx %d", cm.clone_idx()); 1663 loop->dump_head(); 1664 } 1665 #endif 1666 } 1667 1668 // Step 1: Clone the loop body. Make the old->new mapping. 1669 uint i; 1670 for( i = 0; i < loop->_body.size(); i++ ) { 1671 Node *old = loop->_body.at(i); 1672 Node *nnn = old->clone(); 1673 old_new.map( old->_idx, nnn ); 1674 if (C->do_vector_loop()) { 1675 cm.verify_insert_and_clone(old, nnn, cm.clone_idx()); 1676 } 1677 _igvn.register_new_node_with_optimizer(nnn); 1678 } 1679 1680 1681 // Step 2: Fix the edges in the new body. If the old input is outside the 1682 // loop use it. If the old input is INside the loop, use the corresponding 1683 // new node instead. 1684 for( i = 0; i < loop->_body.size(); i++ ) { 1685 Node *old = loop->_body.at(i); 1686 Node *nnn = old_new[old->_idx]; 1687 // Fix CFG/Loop controlling the new node 1688 if (has_ctrl(old)) { 1689 set_ctrl(nnn, old_new[get_ctrl(old)->_idx]); 1690 } else { 1691 set_loop(nnn, loop->_parent); 1692 if (old->outcnt() > 0) { 1693 set_idom( nnn, old_new[idom(old)->_idx], dd ); 1694 } 1695 } 1696 // Correct edges to the new node 1697 for( uint j = 0; j < nnn->req(); j++ ) { 1698 Node *n = nnn->in(j); 1699 if( n ) { 1700 IdealLoopTree *old_in_loop = get_loop( has_ctrl(n) ? get_ctrl(n) : n ); 1701 if( loop->is_member( old_in_loop ) ) 1702 nnn->set_req(j, old_new[n->_idx]); 1703 } 1704 } 1705 _igvn.hash_find_insert(nnn); 1706 } 1707 Node *newhead = old_new[loop->_head->_idx]; 1708 set_idom(newhead, newhead->in(LoopNode::EntryControl), dd); 1709 1710 1711 // Step 3: Now fix control uses. Loop varying control uses have already 1712 // been fixed up (as part of all input edges in Step 2). Loop invariant 1713 // control uses must be either an IfFalse or an IfTrue. Make a merge 1714 // point to merge the old and new IfFalse/IfTrue nodes; make the use 1715 // refer to this. 1716 ResourceArea *area = Thread::current()->resource_area(); 1717 Node_List worklist(area); 1718 uint new_counter = C->unique(); 1719 for( i = 0; i < loop->_body.size(); i++ ) { 1720 Node* old = loop->_body.at(i); 1721 if( !old->is_CFG() ) continue; 1722 Node* nnn = old_new[old->_idx]; 1723 1724 // Copy uses to a worklist, so I can munge the def-use info 1725 // with impunity. 1726 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) 1727 worklist.push(old->fast_out(j)); 1728 1729 while( worklist.size() ) { // Visit all uses 1730 Node *use = worklist.pop(); 1731 if (!has_node(use)) continue; // Ignore dead nodes 1732 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use ); 1733 if( !loop->is_member( use_loop ) && use->is_CFG() ) { 1734 // Both OLD and USE are CFG nodes here. 
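// Such a use is expected to be a projection (e.g. IfTrue/IfFalse) hanging off
// the exiting control; this is asserted below, and the projection is then
// cloned so the new loop body gets its own exit, with a Region merging the two.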
1735 assert( use->is_Proj(), "" ); 1736 1737 // Clone the loop exit control projection 1738 Node *newuse = use->clone(); 1739 if (C->do_vector_loop()) { 1740 cm.verify_insert_and_clone(use, newuse, cm.clone_idx()); 1741 } 1742 newuse->set_req(0,nnn); 1743 _igvn.register_new_node_with_optimizer(newuse); 1744 set_loop(newuse, use_loop); 1745 set_idom(newuse, nnn, dom_depth(nnn) + 1 ); 1746 1747 // We need a Region to merge the exit from the peeled body and the 1748 // exit from the old loop body. 1749 RegionNode *r = new RegionNode(3); 1750 // Map the old use to the new merge point 1751 old_new.map( use->_idx, r ); 1752 uint dd_r = MIN2(dom_depth(newuse),dom_depth(use)); 1753 assert( dd_r >= dom_depth(dom_lca(newuse,use)), "" ); 1754 1755 // The original user of 'use' uses 'r' instead. 1756 for (DUIterator_Last lmin, l = use->last_outs(lmin); l >= lmin;) { 1757 Node* useuse = use->last_out(l); 1758 _igvn.rehash_node_delayed(useuse); 1759 uint uses_found = 0; 1760 if( useuse->in(0) == use ) { 1761 useuse->set_req(0, r); 1762 uses_found++; 1763 if( useuse->is_CFG() ) { 1764 assert( dom_depth(useuse) > dd_r, "" ); 1765 set_idom(useuse, r, dom_depth(useuse)); 1766 } 1767 } 1768 for( uint k = 1; k < useuse->req(); k++ ) { 1769 if( useuse->in(k) == use ) { 1770 useuse->set_req(k, r); 1771 uses_found++; 1772 } 1773 } 1774 l -= uses_found; // we deleted 1 or more copies of this edge 1775 } 1776 1777 // Now finish up 'r' 1778 r->set_req( 1, newuse ); 1779 r->set_req( 2, use ); 1780 _igvn.register_new_node_with_optimizer(r); 1781 set_loop(r, use_loop); 1782 set_idom(r, !side_by_side_idom ? newuse->in(0) : side_by_side_idom, dd_r); 1783 } // End of if a loop-exit test 1784 } 1785 } 1786 1787 // Step 4: If loop-invariant use is not control, it must be dominated by a 1788 // loop exit IfFalse/IfTrue. Find "proper" loop exit. Make a Region 1789 // there if needed. Make a Phi there merging old and new used values. 1790 Node_List *split_if_set = NULL; 1791 Node_List *split_bool_set = NULL; 1792 Node_List *split_cex_set = NULL; 1793 for( i = 0; i < loop->_body.size(); i++ ) { 1794 Node* old = loop->_body.at(i); 1795 Node* nnn = old_new[old->_idx]; 1796 // Copy uses to a worklist, so I can munge the def-use info 1797 // with impunity. 1798 for (DUIterator_Fast jmax, j = old->fast_outs(jmax); j < jmax; j++) 1799 worklist.push(old->fast_out(j)); 1800 1801 while( worklist.size() ) { 1802 Node *use = worklist.pop(); 1803 if (!has_node(use)) continue; // Ignore dead nodes 1804 if (use->in(0) == C->top()) continue; 1805 IdealLoopTree *use_loop = get_loop( has_ctrl(use) ? get_ctrl(use) : use ); 1806 // Check for data-use outside of loop - at least one of OLD or USE 1807 // must not be a CFG node. 1808 if( !loop->is_member( use_loop ) && (!old->is_CFG() || !use->is_CFG())) { 1809 1810 // If the Data use is an IF, that means we have an IF outside of the 1811 // loop that is switching on a condition that is set inside of the 1812 // loop. Happens if people set a loop-exit flag; then test the flag 1813 // in the loop to break the loop, then test is again outside of the 1814 // loop to determine which way the loop exited. 1815 // Loop predicate If node connects to Bool node through Opaque1 node. 1816 if (use->is_If() || use->is_CMove() || C->is_predicate_opaq(use)) { 1817 // Since this code is highly unlikely, we lazily build the worklist 1818 // of such Nodes to go split. 
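// A source-level shape that produces this situation (illustrative only):
//
//   bool found = false;
//   while (...) { if (cond) { found = true; break; } ... }
//   if (found) { ... }   // If outside the loop testing a flag set inside it
//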
1819 if( !split_if_set ) 1820 split_if_set = new Node_List(area); 1821 split_if_set->push(use); 1822 } 1823 if( use->is_Bool() ) { 1824 if( !split_bool_set ) 1825 split_bool_set = new Node_List(area); 1826 split_bool_set->push(use); 1827 } 1828 if( use->Opcode() == Op_CreateEx ) { 1829 if( !split_cex_set ) 1830 split_cex_set = new Node_List(area); 1831 split_cex_set->push(use); 1832 } 1833 1834 1835 // Get "block" use is in 1836 uint idx = 0; 1837 while( use->in(idx) != old ) idx++; 1838 Node *prev = use->is_CFG() ? use : get_ctrl(use); 1839 assert( !loop->is_member( get_loop( prev ) ), "" ); 1840 Node *cfg = prev->_idx >= new_counter 1841 ? prev->in(2) 1842 : idom(prev); 1843 if( use->is_Phi() ) // Phi use is in prior block 1844 cfg = prev->in(idx); // NOT in block of Phi itself 1845 if (cfg->is_top()) { // Use is dead? 1846 _igvn.replace_input_of(use, idx, C->top()); 1847 continue; 1848 } 1849 1850 while( !loop->is_member( get_loop( cfg ) ) ) { 1851 prev = cfg; 1852 cfg = cfg->_idx >= new_counter ? cfg->in(2) : idom(cfg); 1853 } 1854 // If the use occurs after merging several exits from the loop, then 1855 // old value must have dominated all those exits. Since the same old 1856 // value was used on all those exits we did not need a Phi at this 1857 // merge point. NOW we do need a Phi here. Each loop exit value 1858 // is now merged with the peeled body exit; each exit gets its own 1859 // private Phi and those Phis need to be merged here. 1860 Node *phi; 1861 if( prev->is_Region() ) { 1862 if( idx == 0 && use->Opcode() != Op_ShenandoahWBMemProj) { // Updating control edge? 1863 phi = prev; // Just use existing control 1864 } else { // Else need a new Phi 1865 phi = PhiNode::make( prev, old ); 1866 // Now recursively fix up the new uses of old! 1867 uint first = use->Opcode() != Op_ShenandoahWBMemProj ? 1 : 0; 1868 for( uint i = first; i < prev->req(); i++ ) { 1869 worklist.push(phi); // Onto worklist once for each 'old' input 1870 } 1871 } 1872 } else { 1873 // Get new RegionNode merging old and new loop exits 1874 prev = old_new[prev->_idx]; 1875 assert( prev, "just made this in step 7" ); 1876 if( idx == 0 && use->Opcode() != Op_ShenandoahWBMemProj) { // Updating control edge? 1877 phi = prev; // Just use existing control 1878 } else { // Else need a new Phi 1879 // Make a new Phi merging data values properly 1880 phi = PhiNode::make( prev, old ); 1881 phi->set_req( 1, nnn ); 1882 } 1883 } 1884 // If inserting a new Phi, check for prior hits 1885 if( idx != 0 ) { 1886 Node *hit = _igvn.hash_find_insert(phi); 1887 if( hit == NULL ) { 1888 _igvn.register_new_node_with_optimizer(phi); // Register new phi 1889 } else { // or 1890 // Remove the new phi from the graph and use the hit 1891 _igvn.remove_dead_node(phi); 1892 phi = hit; // Use existing phi 1893 } 1894 set_ctrl(phi, prev); 1895 } 1896 // Make 'use' use the Phi instead of the old loop body exit value 1897 _igvn.replace_input_of(use, idx, phi); 1898 if( use->_idx >= new_counter ) { // If updating new phis 1899 // Not needed for correctness, but prevents a weak assert 1900 // in AddPNode from tripping (when we end up with different 1901 // base & derived Phis that will become the same after 1902 // IGVN does CSE). 1903 Node *hit = _igvn.hash_find_insert(use); 1904 if( hit ) // Go ahead and re-hash for hits. 1905 _igvn.replace_node( use, hit ); 1906 } 1907 1908 // If 'use' was in the loop-exit block, it now needs to be sunk 1909 // below the post-loop merge point. 
1910 sink_use( use, prev ); 1911 } 1912 } 1913 } 1914 1915 // Check for IFs that need splitting/cloning. Happens if an IF outside of 1916 // the loop uses a condition set in the loop. The original IF probably 1917 // takes control from one or more OLD Regions (which in turn get from NEW 1918 // Regions). In any case, there will be a set of Phis for each merge point 1919 // from the IF up to where the original BOOL def exists the loop. 1920 if( split_if_set ) { 1921 while( split_if_set->size() ) { 1922 Node *iff = split_if_set->pop(); 1923 if( iff->in(1)->is_Phi() ) { 1924 BoolNode *b = clone_iff( iff->in(1)->as_Phi(), loop ); 1925 _igvn.replace_input_of(iff, 1, b); 1926 } 1927 } 1928 } 1929 if( split_bool_set ) { 1930 while( split_bool_set->size() ) { 1931 Node *b = split_bool_set->pop(); 1932 Node *phi = b->in(1); 1933 assert( phi->is_Phi(), "" ); 1934 CmpNode *cmp = clone_bool( (PhiNode*)phi, loop ); 1935 _igvn.replace_input_of(b, 1, cmp); 1936 } 1937 } 1938 if( split_cex_set ) { 1939 while( split_cex_set->size() ) { 1940 Node *b = split_cex_set->pop(); 1941 assert( b->in(0)->is_Region(), "" ); 1942 assert( b->in(1)->is_Phi(), "" ); 1943 assert( b->in(0)->in(0) == b->in(1)->in(0), "" ); 1944 split_up( b, b->in(0), NULL ); 1945 } 1946 } 1947 1948 } 1949 1950 1951 //---------------------- stride_of_possible_iv ------------------------------------- 1952 // Looks for an iff/bool/comp with one operand of the compare 1953 // being a cycle involving an add and a phi, 1954 // with an optional truncation (left-shift followed by a right-shift) 1955 // of the add. Returns zero if not an iv. 1956 int PhaseIdealLoop::stride_of_possible_iv(Node* iff) { 1957 Node* trunc1 = NULL; 1958 Node* trunc2 = NULL; 1959 const TypeInt* ttype = NULL; 1960 if (!iff->is_If() || iff->in(1) == NULL || !iff->in(1)->is_Bool()) { 1961 return 0; 1962 } 1963 BoolNode* bl = iff->in(1)->as_Bool(); 1964 Node* cmp = bl->in(1); 1965 if (!cmp || cmp->Opcode() != Op_CmpI && cmp->Opcode() != Op_CmpU) { 1966 return 0; 1967 } 1968 // Must have an invariant operand 1969 if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) { 1970 return 0; 1971 } 1972 Node* add2 = NULL; 1973 Node* cmp1 = cmp->in(1); 1974 if (cmp1->is_Phi()) { 1975 // (If (Bool (CmpX phi:(Phi ...(Optional-trunc(AddI phi add2))) ))) 1976 Node* phi = cmp1; 1977 for (uint i = 1; i < phi->req(); i++) { 1978 Node* in = phi->in(i); 1979 Node* add = CountedLoopNode::match_incr_with_optional_truncation(in, 1980 &trunc1, &trunc2, &ttype); 1981 if (add && add->in(1) == phi) { 1982 add2 = add->in(2); 1983 break; 1984 } 1985 } 1986 } else { 1987 // (If (Bool (CmpX addtrunc:(Optional-trunc((AddI (Phi ...addtrunc...) add2)) ))) 1988 Node* addtrunc = cmp1; 1989 Node* add = CountedLoopNode::match_incr_with_optional_truncation(addtrunc, 1990 &trunc1, &trunc2, &ttype); 1991 if (add && add->in(1)->is_Phi()) { 1992 Node* phi = add->in(1); 1993 for (uint i = 1; i < phi->req(); i++) { 1994 if (phi->in(i) == addtrunc) { 1995 add2 = add->in(2); 1996 break; 1997 } 1998 } 1999 } 2000 } 2001 if (add2 != NULL) { 2002 const TypeInt* add2t = _igvn.type(add2)->is_int(); 2003 if (add2t->is_con()) { 2004 return add2t->get_con(); 2005 } 2006 } 2007 return 0; 2008 } 2009 2010 2011 //---------------------- stay_in_loop ------------------------------------- 2012 // Return the (unique) control output node that's in the loop (if it exists.) 
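// E.g. for a loop-exit If this yields the projection on the stay-in-loop path
// (used as lp_continue and last_peel below); it returns NULL when there is no
// unique in-loop control output.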
2013 Node* PhaseIdealLoop::stay_in_loop( Node* n, IdealLoopTree *loop) { 2014 Node* unique = NULL; 2015 if (!n) return NULL; 2016 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2017 Node* use = n->fast_out(i); 2018 if (!has_ctrl(use) && loop->is_member(get_loop(use))) { 2019 if (unique != NULL) { 2020 return NULL; 2021 } 2022 unique = use; 2023 } 2024 } 2025 return unique; 2026 } 2027 2028 //------------------------------ register_node ------------------------------------- 2029 // Utility to register node "n" with PhaseIdealLoop 2030 void PhaseIdealLoop::register_node(Node* n, IdealLoopTree *loop, Node* pred, int ddepth) { 2031 _igvn.register_new_node_with_optimizer(n); 2032 loop->_body.push(n); 2033 if (n->is_CFG()) { 2034 set_loop(n, loop); 2035 set_idom(n, pred, ddepth); 2036 } else { 2037 set_ctrl(n, pred); 2038 } 2039 } 2040 2041 //------------------------------ proj_clone ------------------------------------- 2042 // Utility to create an if-projection 2043 ProjNode* PhaseIdealLoop::proj_clone(ProjNode* p, IfNode* iff) { 2044 ProjNode* c = p->clone()->as_Proj(); 2045 c->set_req(0, iff); 2046 return c; 2047 } 2048 2049 //------------------------------ short_circuit_if ------------------------------------- 2050 // Force the iff control output to be the live_proj 2051 Node* PhaseIdealLoop::short_circuit_if(IfNode* iff, ProjNode* live_proj) { 2052 guarantee(live_proj != NULL, "null projection"); 2053 int proj_con = live_proj->_con; 2054 assert(proj_con == 0 || proj_con == 1, "false or true projection"); 2055 Node *con = _igvn.intcon(proj_con); 2056 set_ctrl(con, C->root()); 2057 if (iff) { 2058 iff->set_req(1, con); 2059 } 2060 return con; 2061 } 2062 2063 //------------------------------ insert_if_before_proj ------------------------------------- 2064 // Insert a new if before an if projection (* - new node) 2065 // 2066 // before 2067 // if(test) 2068 // / \ 2069 // v v 2070 // other-proj proj (arg) 2071 // 2072 // after 2073 // if(test) 2074 // / \ 2075 // / v 2076 // | * proj-clone 2077 // v | 2078 // other-proj v 2079 // * new_if(relop(cmp[IU](left,right))) 2080 // / \ 2081 // v v 2082 // * new-proj proj 2083 // (returned) 2084 // 2085 ProjNode* PhaseIdealLoop::insert_if_before_proj(Node* left, bool Signed, BoolTest::mask relop, Node* right, ProjNode* proj) { 2086 IfNode* iff = proj->in(0)->as_If(); 2087 IdealLoopTree *loop = get_loop(proj); 2088 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 2089 int ddepth = dom_depth(proj); 2090 2091 _igvn.rehash_node_delayed(iff); 2092 _igvn.rehash_node_delayed(proj); 2093 2094 proj->set_req(0, NULL); // temporary disconnect 2095 ProjNode* proj2 = proj_clone(proj, iff); 2096 register_node(proj2, loop, iff, ddepth); 2097 2098 Node* cmp = Signed ? (Node*) new CmpINode(left, right) : (Node*) new CmpUNode(left, right); 2099 register_node(cmp, loop, proj2, ddepth); 2100 2101 BoolNode* bol = new BoolNode(cmp, relop); 2102 register_node(bol, loop, proj2, ddepth); 2103 2104 int opcode = iff->Opcode(); 2105 assert(opcode == Op_If || opcode == Op_RangeCheck, "unexpected opcode"); 2106 IfNode* new_if = (opcode == Op_If) ? 
new IfNode(proj2, bol, iff->_prob, iff->_fcnt): 2107 new RangeCheckNode(proj2, bol, iff->_prob, iff->_fcnt); 2108 register_node(new_if, loop, proj2, ddepth); 2109 2110 proj->set_req(0, new_if); // reattach 2111 set_idom(proj, new_if, ddepth); 2112 2113 ProjNode* new_exit = proj_clone(other_proj, new_if)->as_Proj(); 2114 guarantee(new_exit != NULL, "null exit node"); 2115 register_node(new_exit, get_loop(other_proj), new_if, ddepth); 2116 2117 return new_exit; 2118 } 2119 2120 //------------------------------ insert_region_before_proj ------------------------------------- 2121 // Insert a region before an if projection (* - new node) 2122 // 2123 // before 2124 // if(test) 2125 // / | 2126 // v | 2127 // proj v 2128 // other-proj 2129 // 2130 // after 2131 // if(test) 2132 // / | 2133 // v | 2134 // * proj-clone v 2135 // | other-proj 2136 // v 2137 // * new-region 2138 // | 2139 // v 2140 // * dum_if 2141 // / \ 2142 // v \ 2143 // * dum-proj v 2144 // proj 2145 // 2146 RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) { 2147 IfNode* iff = proj->in(0)->as_If(); 2148 IdealLoopTree *loop = get_loop(proj); 2149 ProjNode *other_proj = iff->proj_out(!proj->is_IfTrue())->as_Proj(); 2150 int ddepth = dom_depth(proj); 2151 2152 _igvn.rehash_node_delayed(iff); 2153 _igvn.rehash_node_delayed(proj); 2154 2155 proj->set_req(0, NULL); // temporary disconnect 2156 ProjNode* proj2 = proj_clone(proj, iff); 2157 register_node(proj2, loop, iff, ddepth); 2158 2159 RegionNode* reg = new RegionNode(2); 2160 reg->set_req(1, proj2); 2161 register_node(reg, loop, iff, ddepth); 2162 2163 IfNode* dum_if = new IfNode(reg, short_circuit_if(NULL, proj), iff->_prob, iff->_fcnt); 2164 register_node(dum_if, loop, reg, ddepth); 2165 2166 proj->set_req(0, dum_if); // reattach 2167 set_idom(proj, dum_if, ddepth); 2168 2169 ProjNode* dum_proj = proj_clone(other_proj, dum_if); 2170 register_node(dum_proj, loop, dum_if, ddepth); 2171 2172 return reg; 2173 } 2174 2175 //------------------------------ insert_cmpi_loop_exit ------------------------------------- 2176 // Clone a signed compare loop exit from an unsigned compare and 2177 // insert it before the unsigned cmp on the stay-in-loop path. 2178 // All new nodes inserted in the dominator tree between the original 2179 // if and it's projections. The original if test is replaced with 2180 // a constant to force the stay-in-loop path. 2181 // 2182 // This is done to make sure that the original if and it's projections 2183 // still dominate the same set of control nodes, that the ctrl() relation 2184 // from data nodes to them is preserved, and that their loop nesting is 2185 // preserved. 
2186 // 2187 // before 2188 // if(i <u limit) unsigned compare loop exit 2189 // / | 2190 // v v 2191 // exit-proj stay-in-loop-proj 2192 // 2193 // after 2194 // if(stay-in-loop-const) original if 2195 // / | 2196 // / v 2197 // / if(i < limit) new signed test 2198 // / / | 2199 // / / v 2200 // / / if(i <u limit) new cloned unsigned test 2201 // / / / | 2202 // v v v | 2203 // region | 2204 // | | 2205 // dum-if | 2206 // / | | 2207 // ether | | 2208 // v v 2209 // exit-proj stay-in-loop-proj 2210 // 2211 IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) { 2212 const bool Signed = true; 2213 const bool Unsigned = false; 2214 2215 BoolNode* bol = if_cmpu->in(1)->as_Bool(); 2216 if (bol->_test._test != BoolTest::lt) return NULL; 2217 CmpNode* cmpu = bol->in(1)->as_Cmp(); 2218 if (cmpu->Opcode() != Op_CmpU) return NULL; 2219 int stride = stride_of_possible_iv(if_cmpu); 2220 if (stride == 0) return NULL; 2221 2222 Node* lp_proj = stay_in_loop(if_cmpu, loop); 2223 guarantee(lp_proj != NULL, "null loop node"); 2224 2225 ProjNode* lp_continue = lp_proj->as_Proj(); 2226 ProjNode* lp_exit = if_cmpu->proj_out(!lp_continue->is_IfTrue())->as_Proj(); 2227 2228 Node* limit = NULL; 2229 if (stride > 0) { 2230 limit = cmpu->in(2); 2231 } else { 2232 limit = _igvn.makecon(TypeInt::ZERO); 2233 set_ctrl(limit, C->root()); 2234 } 2235 // Create a new region on the exit path 2236 RegionNode* reg = insert_region_before_proj(lp_exit); 2237 guarantee(reg != NULL, "null region node"); 2238 2239 // Clone the if-cmpu-true-false using a signed compare 2240 BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge; 2241 ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue); 2242 reg->add_req(cmpi_exit); 2243 2244 // Clone the if-cmpu-true-false 2245 BoolTest::mask rel_u = bol->_test._test; 2246 ProjNode* cmpu_exit = insert_if_before_proj(cmpu->in(1), Unsigned, rel_u, cmpu->in(2), lp_continue); 2247 reg->add_req(cmpu_exit); 2248 2249 // Force original if to stay in loop. 2250 short_circuit_if(if_cmpu, lp_continue); 2251 2252 return cmpi_exit->in(0)->as_If(); 2253 } 2254 2255 //------------------------------ remove_cmpi_loop_exit ------------------------------------- 2256 // Remove a previously inserted signed compare loop exit. 2257 void PhaseIdealLoop::remove_cmpi_loop_exit(IfNode* if_cmp, IdealLoopTree *loop) { 2258 Node* lp_proj = stay_in_loop(if_cmp, loop); 2259 assert(if_cmp->in(1)->in(1)->Opcode() == Op_CmpI && 2260 stay_in_loop(lp_proj, loop)->is_If() && 2261 stay_in_loop(lp_proj, loop)->in(1)->in(1)->Opcode() == Op_CmpU, "inserted cmpi before cmpu"); 2262 Node *con = _igvn.makecon(lp_proj->is_IfTrue() ? TypeInt::ONE : TypeInt::ZERO); 2263 set_ctrl(con, C->root()); 2264 if_cmp->set_req(1, con); 2265 } 2266 2267 //------------------------------ scheduled_nodelist ------------------------------------- 2268 // Create a post order schedule of nodes that are in the 2269 // "member" set. The list is returned in "sched". 2270 // The first node in "sched" is the loop head, followed by 2271 // nodes which have no inputs in the "member" set, and then 2272 // followed by the nodes that have an immediate input dependence 2273 // on a node in "sched". 
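// In partial_peel() below, "member" is the peel set and the resulting "sched"
// becomes peel_list, which is then walked to sink or clone nodes out of the
// peel region.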
2274 void PhaseIdealLoop::scheduled_nodelist( IdealLoopTree *loop, VectorSet& member, Node_List &sched ) { 2275 2276 assert(member.test(loop->_head->_idx), "loop head must be in member set"); 2277 Arena *a = Thread::current()->resource_area(); 2278 VectorSet visited(a); 2279 Node_Stack nstack(a, loop->_body.size()); 2280 2281 Node* n = loop->_head; // top of stack is cached in "n" 2282 uint idx = 0; 2283 visited.set(n->_idx); 2284 2285 // Initially push all with no inputs from within member set 2286 for(uint i = 0; i < loop->_body.size(); i++ ) { 2287 Node *elt = loop->_body.at(i); 2288 if (member.test(elt->_idx)) { 2289 bool found = false; 2290 for (uint j = 0; j < elt->req(); j++) { 2291 Node* def = elt->in(j); 2292 if (def && member.test(def->_idx) && def != elt) { 2293 found = true; 2294 break; 2295 } 2296 } 2297 if (!found && elt != loop->_head) { 2298 nstack.push(n, idx); 2299 n = elt; 2300 assert(!visited.test(n->_idx), "not seen yet"); 2301 visited.set(n->_idx); 2302 } 2303 } 2304 } 2305 2306 // traverse out's that are in the member set 2307 while (true) { 2308 if (idx < n->outcnt()) { 2309 Node* use = n->raw_out(idx); 2310 idx++; 2311 if (!visited.test_set(use->_idx)) { 2312 if (member.test(use->_idx)) { 2313 nstack.push(n, idx); 2314 n = use; 2315 idx = 0; 2316 } 2317 } 2318 } else { 2319 // All outputs processed 2320 sched.push(n); 2321 if (nstack.is_empty()) break; 2322 n = nstack.node(); 2323 idx = nstack.index(); 2324 nstack.pop(); 2325 } 2326 } 2327 } 2328 2329 2330 //------------------------------ has_use_in_set ------------------------------------- 2331 // Has a use in the vector set 2332 bool PhaseIdealLoop::has_use_in_set( Node* n, VectorSet& vset ) { 2333 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2334 Node* use = n->fast_out(j); 2335 if (vset.test(use->_idx)) { 2336 return true; 2337 } 2338 } 2339 return false; 2340 } 2341 2342 2343 //------------------------------ has_use_internal_to_set ------------------------------------- 2344 // Has use internal to the vector set (ie. not in a phi at the loop head) 2345 bool PhaseIdealLoop::has_use_internal_to_set( Node* n, VectorSet& vset, IdealLoopTree *loop ) { 2346 Node* head = loop->_head; 2347 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2348 Node* use = n->fast_out(j); 2349 if (vset.test(use->_idx) && !(use->is_Phi() && use->in(0) == head)) { 2350 return true; 2351 } 2352 } 2353 return false; 2354 } 2355 2356 2357 //------------------------------ clone_for_use_outside_loop ------------------------------------- 2358 // clone "n" for uses that are outside of loop 2359 int PhaseIdealLoop::clone_for_use_outside_loop( IdealLoopTree *loop, Node* n, Node_List& worklist ) { 2360 int cloned = 0; 2361 assert(worklist.size() == 0, "should be empty"); 2362 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2363 Node* use = n->fast_out(j); 2364 if( !loop->is_member(get_loop(has_ctrl(use) ? get_ctrl(use) : use)) ) { 2365 worklist.push(use); 2366 } 2367 } 2368 while( worklist.size() ) { 2369 Node *use = worklist.pop(); 2370 if (!has_node(use) || use->in(0) == C->top()) continue; 2371 uint j; 2372 for (j = 0; j < use->req(); j++) { 2373 if (use->in(j) == n) break; 2374 } 2375 assert(j < use->req(), "must be there"); 2376 2377 // clone "n" and insert it between the inputs of "n" and the use outside the loop 2378 Node* n_clone = n->clone(); 2379 _igvn.replace_input_of(use, j, n_clone); 2380 cloned++; 2381 Node* use_c; 2382 if (!use->is_Phi()) { 2383 use_c = has_ctrl(use) ? 
get_ctrl(use) : use->in(0); 2384 } else { 2385 // Use in a phi is considered a use in the associated predecessor block 2386 use_c = use->in(0)->in(j); 2387 } 2388 set_ctrl(n_clone, use_c); 2389 assert(!loop->is_member(get_loop(use_c)), "should be outside loop"); 2390 get_loop(use_c)->_body.push(n_clone); 2391 _igvn.register_new_node_with_optimizer(n_clone); 2392 #if !defined(PRODUCT) 2393 if (TracePartialPeeling) { 2394 tty->print_cr("loop exit cloning old: %d new: %d newbb: %d", n->_idx, n_clone->_idx, get_ctrl(n_clone)->_idx); 2395 } 2396 #endif 2397 } 2398 return cloned; 2399 } 2400 2401 2402 //------------------------------ clone_for_special_use_inside_loop ------------------------------------- 2403 // clone "n" for special uses that are in the not_peeled region. 2404 // If these def-uses occur in separate blocks, the code generator 2405 // marks the method as not compilable. For example, if a "BoolNode" 2406 // is in a different basic block than the "IfNode" that uses it, then 2407 // the compilation is aborted in the code generator. 2408 void PhaseIdealLoop::clone_for_special_use_inside_loop( IdealLoopTree *loop, Node* n, 2409 VectorSet& not_peel, Node_List& sink_list, Node_List& worklist ) { 2410 if (n->is_Phi() || n->is_Load()) { 2411 return; 2412 } 2413 assert(worklist.size() == 0, "should be empty"); 2414 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2415 Node* use = n->fast_out(j); 2416 if (not_peel.test(use->_idx) && 2417 (use->is_If() || use->is_CMove() || use->is_Bool()) && 2418 use->in(1) == n) { 2419 worklist.push(use); 2420 } 2421 } 2422 if (worklist.size() > 0) { 2423 // clone "n" and insert it between inputs of "n" and the use 2424 Node* n_clone = n->clone(); 2425 loop->_body.push(n_clone); 2426 _igvn.register_new_node_with_optimizer(n_clone); 2427 set_ctrl(n_clone, get_ctrl(n)); 2428 sink_list.push(n_clone); 2429 not_peel <<= n_clone->_idx; // add n_clone to not_peel set. 
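// The clone keeps n's control for now but is recorded in sink_list and the
// not_peel set, so after partial peeling it lands in the not-peeled region
// together with its If/CMove/Bool users (see the same-block requirement in
// this function's header comment).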
2430 #if !defined(PRODUCT) 2431 if (TracePartialPeeling) { 2432 tty->print_cr("special not_peeled cloning old: %d new: %d", n->_idx, n_clone->_idx); 2433 } 2434 #endif 2435 while( worklist.size() ) { 2436 Node *use = worklist.pop(); 2437 _igvn.rehash_node_delayed(use); 2438 for (uint j = 1; j < use->req(); j++) { 2439 if (use->in(j) == n) { 2440 use->set_req(j, n_clone); 2441 } 2442 } 2443 } 2444 } 2445 } 2446 2447 2448 //------------------------------ insert_phi_for_loop ------------------------------------- 2449 // Insert phi(lp_entry_val, back_edge_val) at use->in(idx) for loop lp if phi does not already exist 2450 void PhaseIdealLoop::insert_phi_for_loop( Node* use, uint idx, Node* lp_entry_val, Node* back_edge_val, LoopNode* lp ) { 2451 Node *phi = PhiNode::make(lp, back_edge_val); 2452 phi->set_req(LoopNode::EntryControl, lp_entry_val); 2453 // Use existing phi if it already exists 2454 Node *hit = _igvn.hash_find_insert(phi); 2455 if( hit == NULL ) { 2456 _igvn.register_new_node_with_optimizer(phi); 2457 set_ctrl(phi, lp); 2458 } else { 2459 // Remove the new phi from the graph and use the hit 2460 _igvn.remove_dead_node(phi); 2461 phi = hit; 2462 } 2463 _igvn.replace_input_of(use, idx, phi); 2464 } 2465 2466 #ifdef ASSERT 2467 //------------------------------ is_valid_loop_partition ------------------------------------- 2468 // Validate the loop partition sets: peel and not_peel 2469 bool PhaseIdealLoop::is_valid_loop_partition( IdealLoopTree *loop, VectorSet& peel, Node_List& peel_list, 2470 VectorSet& not_peel ) { 2471 uint i; 2472 // Check that peel_list entries are in the peel set 2473 for (i = 0; i < peel_list.size(); i++) { 2474 if (!peel.test(peel_list.at(i)->_idx)) { 2475 return false; 2476 } 2477 } 2478 // Check at loop members are in one of peel set or not_peel set 2479 for (i = 0; i < loop->_body.size(); i++ ) { 2480 Node *def = loop->_body.at(i); 2481 uint di = def->_idx; 2482 // Check that peel set elements are in peel_list 2483 if (peel.test(di)) { 2484 if (not_peel.test(di)) { 2485 return false; 2486 } 2487 // Must be in peel_list also 2488 bool found = false; 2489 for (uint j = 0; j < peel_list.size(); j++) { 2490 if (peel_list.at(j)->_idx == di) { 2491 found = true; 2492 break; 2493 } 2494 } 2495 if (!found) { 2496 return false; 2497 } 2498 } else if (not_peel.test(di)) { 2499 if (peel.test(di)) { 2500 return false; 2501 } 2502 } else { 2503 return false; 2504 } 2505 } 2506 return true; 2507 } 2508 2509 //------------------------------ is_valid_clone_loop_exit_use ------------------------------------- 2510 // Ensure a use outside of loop is of the right form 2511 bool PhaseIdealLoop::is_valid_clone_loop_exit_use( IdealLoopTree *loop, Node* use, uint exit_idx) { 2512 Node *use_c = has_ctrl(use) ? 
get_ctrl(use) : use; 2513 return (use->is_Phi() && 2514 use_c->is_Region() && use_c->req() == 3 && 2515 (use_c->in(exit_idx)->Opcode() == Op_IfTrue || 2516 use_c->in(exit_idx)->Opcode() == Op_IfFalse || 2517 use_c->in(exit_idx)->Opcode() == Op_JumpProj) && 2518 loop->is_member( get_loop( use_c->in(exit_idx)->in(0) ) ) ); 2519 } 2520 2521 //------------------------------ is_valid_clone_loop_form ------------------------------------- 2522 // Ensure that all uses outside of loop are of the right form 2523 bool PhaseIdealLoop::is_valid_clone_loop_form( IdealLoopTree *loop, Node_List& peel_list, 2524 uint orig_exit_idx, uint clone_exit_idx) { 2525 uint len = peel_list.size(); 2526 for (uint i = 0; i < len; i++) { 2527 Node *def = peel_list.at(i); 2528 2529 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) { 2530 Node *use = def->fast_out(j); 2531 Node *use_c = has_ctrl(use) ? get_ctrl(use) : use; 2532 if (!loop->is_member(get_loop(use_c))) { 2533 // use is not in the loop, check for correct structure 2534 if (use->in(0) == def) { 2535 // Okay 2536 } else if (!is_valid_clone_loop_exit_use(loop, use, orig_exit_idx)) { 2537 return false; 2538 } 2539 } 2540 } 2541 } 2542 return true; 2543 } 2544 #endif 2545 2546 //------------------------------ partial_peel ------------------------------------- 2547 // Partially peel (aka loop rotation) the top portion of a loop (called 2548 // the peel section below) by cloning it and placing one copy just before 2549 // the new loop head and the other copy at the bottom of the new loop. 2550 // 2551 // before after where it came from 2552 // 2553 // stmt1 stmt1 2554 // loop: stmt2 clone 2555 // stmt2 if condA goto exitA clone 2556 // if condA goto exitA new_loop: new 2557 // stmt3 stmt3 clone 2558 // if !condB goto loop if condB goto exitB clone 2559 // exitB: stmt2 orig 2560 // stmt4 if !condA goto new_loop orig 2561 // exitA: goto exitA 2562 // exitB: 2563 // stmt4 2564 // exitA: 2565 // 2566 // Step 1: find the cut point: an exit test on probable 2567 // induction variable. 2568 // Step 2: schedule (with cloning) operations in the peel 2569 // section that can be executed after the cut into 2570 // the section that is not peeled. This may need 2571 // to clone operations into exit blocks. For 2572 // instance, a reference to A[i] in the not-peel 2573 // section and a reference to B[i] in an exit block 2574 // may cause a left-shift of i by 2 to be placed 2575 // in the peel block. This step will clone the left 2576 // shift into the exit block and sink the left shift 2577 // from the peel to the not-peel section. 2578 // Step 3: clone the loop, retarget the control, and insert 2579 // phis for values that are live across the new loop 2580 // head. This is very dependent on the graph structure 2581 // from clone_loop. It creates region nodes for 2582 // exit control and associated phi nodes for values 2583 // flow out of the loop through that exit. The region 2584 // node is dominated by the clone's control projection. 2585 // So the clone's peel section is placed before the 2586 // new loop head, and the clone's not-peel section is 2587 // forms the top part of the new loop. The original 2588 // peel section forms the tail of the new loop. 2589 // Step 4: update the dominator tree and recompute the 2590 // dominator depth. 
2591 // 2592 // orig 2593 // 2594 // stmt1 2595 // | 2596 // v 2597 // loop predicate 2598 // | 2599 // v 2600 // loop<----+ 2601 // | | 2602 // stmt2 | 2603 // | | 2604 // v | 2605 // ifA | 2606 // / | | 2607 // v v | 2608 // false true ^ <-- last_peel 2609 // / | | 2610 // / ===|==cut | 2611 // / stmt3 | <-- first_not_peel 2612 // / | | 2613 // | v | 2614 // v ifB | 2615 // exitA: / \ | 2616 // / \ | 2617 // v v | 2618 // false true | 2619 // / \ | 2620 // / ----+ 2621 // | 2622 // v 2623 // exitB: 2624 // stmt4 2625 // 2626 // 2627 // after clone loop 2628 // 2629 // stmt1 2630 // | 2631 // v 2632 // loop predicate 2633 // / \ 2634 // clone / \ orig 2635 // / \ 2636 // / \ 2637 // v v 2638 // +---->loop loop<----+ 2639 // | | | | 2640 // | stmt2 stmt2 | 2641 // | | | | 2642 // | v v | 2643 // | ifA ifA | 2644 // | | \ / | | 2645 // | v v v v | 2646 // ^ true false false true ^ <-- last_peel 2647 // | | ^ \ / | | 2648 // | cut==|== \ \ / ===|==cut | 2649 // | stmt3 \ \ / stmt3 | <-- first_not_peel 2650 // | | dom | | | | 2651 // | v \ 1v v2 v | 2652 // | ifB regionA ifB | 2653 // | / \ | / \ | 2654 // | / \ v / \ | 2655 // | v v exitA: v v | 2656 // | true false false true | 2657 // | / ^ \ / \ | 2658 // +---- \ \ / ----+ 2659 // dom \ / 2660 // \ 1v v2 2661 // regionB 2662 // | 2663 // v 2664 // exitB: 2665 // stmt4 2666 // 2667 // 2668 // after partial peel 2669 // 2670 // stmt1 2671 // | 2672 // v 2673 // loop predicate 2674 // / 2675 // clone / orig 2676 // / TOP 2677 // / \ 2678 // v v 2679 // TOP->loop loop----+ 2680 // | | | 2681 // stmt2 stmt2 | 2682 // | | | 2683 // v v | 2684 // ifA ifA | 2685 // | \ / | | 2686 // v v v v | 2687 // true false false true | <-- last_peel 2688 // | ^ \ / +------|---+ 2689 // +->newloop \ \ / === ==cut | | 2690 // | stmt3 \ \ / TOP | | 2691 // | | dom | | stmt3 | | <-- first_not_peel 2692 // | v \ 1v v2 v | | 2693 // | ifB regionA ifB ^ v 2694 // | / \ | / \ | | 2695 // | / \ v / \ | | 2696 // | v v exitA: v v | | 2697 // | true false false true | | 2698 // | / ^ \ / \ | | 2699 // | | \ \ / v | | 2700 // | | dom \ / TOP | | 2701 // | | \ 1v v2 | | 2702 // ^ v regionB | | 2703 // | | | | | 2704 // | | v ^ v 2705 // | | exitB: | | 2706 // | | stmt4 | | 2707 // | +------------>-----------------+ | 2708 // | | 2709 // +-----------------<---------------------+ 2710 // 2711 // 2712 // final graph 2713 // 2714 // stmt1 2715 // | 2716 // v 2717 // loop predicate 2718 // | 2719 // v 2720 // stmt2 clone 2721 // | 2722 // v 2723 // ........> ifA clone 2724 // : / | 2725 // dom / | 2726 // : v v 2727 // : false true 2728 // : | | 2729 // : | v 2730 // : | newloop<-----+ 2731 // : | | | 2732 // : | stmt3 clone | 2733 // : | | | 2734 // : | v | 2735 // : | ifB | 2736 // : | / \ | 2737 // : | v v | 2738 // : | false true | 2739 // : | | | | 2740 // : | v stmt2 | 2741 // : | exitB: | | 2742 // : | stmt4 v | 2743 // : | ifA orig | 2744 // : | / \ | 2745 // : | / \ | 2746 // : | v v | 2747 // : | false true | 2748 // : | / \ | 2749 // : v v -----+ 2750 // RegionA 2751 // | 2752 // v 2753 // exitA 2754 // 2755 bool PhaseIdealLoop::partial_peel( IdealLoopTree *loop, Node_List &old_new ) { 2756 2757 assert(!loop->_head->is_CountedLoop(), "Non-counted loop only"); 2758 if (!loop->_head->is_Loop()) { 2759 return false; } 2760 2761 LoopNode *head = loop->_head->as_Loop(); 2762 2763 if (head->is_partial_peel_loop() || head->partial_peel_has_failed()) { 2764 return false; 2765 } 2766 2767 // Check for complex exit control 2768 for(uint ii = 0; ii < loop->_body.size(); ii++ 
) { 2769 Node *n = loop->_body.at(ii); 2770 int opc = n->Opcode(); 2771 if (n->is_Call() || 2772 opc == Op_Catch || 2773 opc == Op_CatchProj || 2774 opc == Op_Jump || 2775 opc == Op_JumpProj) { 2776 #if !defined(PRODUCT) 2777 if (TracePartialPeeling) { 2778 tty->print_cr("\nExit control too complex: lp: %d", head->_idx); 2779 } 2780 #endif 2781 return false; 2782 } 2783 } 2784 2785 int dd = dom_depth(head); 2786 2787 // Step 1: find cut point 2788 2789 // Walk up dominators to loop head looking for first loop exit 2790 // which is executed on every path thru loop. 2791 IfNode *peel_if = NULL; 2792 IfNode *peel_if_cmpu = NULL; 2793 2794 Node *iff = loop->tail(); 2795 while( iff != head ) { 2796 if( iff->is_If() ) { 2797 Node *ctrl = get_ctrl(iff->in(1)); 2798 if (ctrl->is_top()) return false; // Dead test on live IF. 2799 // If loop-varying exit-test, check for induction variable 2800 if( loop->is_member(get_loop(ctrl)) && 2801 loop->is_loop_exit(iff) && 2802 is_possible_iv_test(iff)) { 2803 Node* cmp = iff->in(1)->in(1); 2804 if (cmp->Opcode() == Op_CmpI) { 2805 peel_if = iff->as_If(); 2806 } else { 2807 assert(cmp->Opcode() == Op_CmpU, "must be CmpI or CmpU"); 2808 peel_if_cmpu = iff->as_If(); 2809 } 2810 } 2811 } 2812 iff = idom(iff); 2813 } 2814 // Prefer signed compare over unsigned compare. 2815 IfNode* new_peel_if = NULL; 2816 if (peel_if == NULL) { 2817 if (!PartialPeelAtUnsignedTests || peel_if_cmpu == NULL) { 2818 return false; // No peel point found 2819 } 2820 new_peel_if = insert_cmpi_loop_exit(peel_if_cmpu, loop); 2821 if (new_peel_if == NULL) { 2822 return false; // No peel point found 2823 } 2824 peel_if = new_peel_if; 2825 } 2826 Node* last_peel = stay_in_loop(peel_if, loop); 2827 Node* first_not_peeled = stay_in_loop(last_peel, loop); 2828 if (first_not_peeled == NULL || first_not_peeled == head) { 2829 return false; 2830 } 2831 2832 #if !defined(PRODUCT) 2833 if (TraceLoopOpts) { 2834 tty->print("PartialPeel "); 2835 loop->dump_head(); 2836 } 2837 2838 if (TracePartialPeeling) { 2839 tty->print_cr("before partial peel one iteration"); 2840 Node_List wl; 2841 Node* t = head->in(2); 2842 while (true) { 2843 wl.push(t); 2844 if (t == head) break; 2845 t = idom(t); 2846 } 2847 while (wl.size() > 0) { 2848 Node* tt = wl.pop(); 2849 tt->dump(); 2850 if (tt == last_peel) tty->print_cr("-- cut --"); 2851 } 2852 } 2853 #endif 2854 ResourceArea *area = Thread::current()->resource_area(); 2855 VectorSet peel(area); 2856 VectorSet not_peel(area); 2857 Node_List peel_list(area); 2858 Node_List worklist(area); 2859 Node_List sink_list(area); 2860 2861 // Set of cfg nodes to peel are those that are executable from 2862 // the head through last_peel. 2863 assert(worklist.size() == 0, "should be empty"); 2864 worklist.push(head); 2865 peel.set(head->_idx); 2866 while (worklist.size() > 0) { 2867 Node *n = worklist.pop(); 2868 if (n != last_peel) { 2869 for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { 2870 Node* use = n->fast_out(j); 2871 if (use->is_CFG() && 2872 loop->is_member(get_loop(use)) && 2873 !peel.test_set(use->_idx)) { 2874 worklist.push(use); 2875 } 2876 } 2877 } 2878 } 2879 2880 // Set of non-cfg nodes to peel are those that are control 2881 // dependent on the cfg nodes. 2882 uint i; 2883 for(i = 0; i < loop->_body.size(); i++ ) { 2884 Node *n = loop->_body.at(i); 2885 Node *n_c = has_ctrl(n) ? 
get_ctrl(n) : n; 2886 if (peel.test(n_c->_idx)) { 2887 peel.set(n->_idx); 2888 } else { 2889 not_peel.set(n->_idx); 2890 } 2891 } 2892 2893 // Step 2: move operations from the peeled section down into the 2894 // not-peeled section 2895 2896 // Get a post order schedule of nodes in the peel region 2897 // Result in right-most operand. 2898 scheduled_nodelist(loop, peel, peel_list ); 2899 2900 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition"); 2901 2902 // For future check for too many new phis 2903 uint old_phi_cnt = 0; 2904 for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) { 2905 Node* use = head->fast_out(j); 2906 if (use->is_Phi()) old_phi_cnt++; 2907 } 2908 2909 #if !defined(PRODUCT) 2910 if (TracePartialPeeling) { 2911 tty->print_cr("\npeeled list"); 2912 } 2913 #endif 2914 2915 // Evacuate nodes in peel region into the not_peeled region if possible 2916 uint new_phi_cnt = 0; 2917 uint cloned_for_outside_use = 0; 2918 for (i = 0; i < peel_list.size();) { 2919 Node* n = peel_list.at(i); 2920 #if !defined(PRODUCT) 2921 if (TracePartialPeeling) n->dump(); 2922 #endif 2923 bool incr = true; 2924 if ( !n->is_CFG() ) { 2925 2926 if ( has_use_in_set(n, not_peel) ) { 2927 2928 // If not used internal to the peeled region, 2929 // move "n" from peeled to not_peeled region. 2930 2931 if ( !has_use_internal_to_set(n, peel, loop) ) { 2932 2933 // if not pinned and not a load (which maybe anti-dependent on a store) 2934 // and not a CMove (Matcher expects only bool->cmove). 2935 if ( n->in(0) == NULL && !n->is_Load() && !n->is_CMove() ) { 2936 cloned_for_outside_use += clone_for_use_outside_loop( loop, n, worklist ); 2937 sink_list.push(n); 2938 peel >>= n->_idx; // delete n from peel set. 2939 not_peel <<= n->_idx; // add n to not_peel set. 2940 peel_list.remove(i); 2941 incr = false; 2942 #if !defined(PRODUCT) 2943 if (TracePartialPeeling) { 2944 tty->print_cr("sink to not_peeled region: %d newbb: %d", 2945 n->_idx, get_ctrl(n)->_idx); 2946 } 2947 #endif 2948 } 2949 } else { 2950 // Otherwise check for special def-use cases that span 2951 // the peel/not_peel boundary such as bool->if 2952 clone_for_special_use_inside_loop( loop, n, not_peel, sink_list, worklist ); 2953 new_phi_cnt++; 2954 } 2955 } 2956 } 2957 if (incr) i++; 2958 } 2959 2960 if (new_phi_cnt > old_phi_cnt + PartialPeelNewPhiDelta) { 2961 #if !defined(PRODUCT) 2962 if (TracePartialPeeling) { 2963 tty->print_cr("\nToo many new phis: %d old %d new cmpi: %c", 2964 new_phi_cnt, old_phi_cnt, new_peel_if != NULL?'T':'F'); 2965 } 2966 #endif 2967 if (new_peel_if != NULL) { 2968 remove_cmpi_loop_exit(new_peel_if, loop); 2969 } 2970 // Inhibit more partial peeling on this loop 2971 assert(!head->is_partial_peel_loop(), "not partial peeled"); 2972 head->mark_partial_peel_failed(); 2973 if (cloned_for_outside_use > 0) { 2974 // Terminate this round of loop opts because 2975 // the graph outside this loop was changed. 2976 C->set_major_progress(); 2977 return true; 2978 } 2979 return false; 2980 } 2981 2982 // Step 3: clone loop, retarget control, and insert new phis 2983 2984 // Create new loop head for new phis and to hang 2985 // the nodes being moved (sinked) from the peel region. 
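// new_head is created with last_peel as both its entry and its backedge; after
// clone_loop() the original new_head is cut loose (both control inputs set to
// top) and its clone, new_head_clone, becomes the surviving header with its
// backedge rewired to last_peel.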
2986 LoopNode* new_head = new LoopNode(last_peel, last_peel); 2987 new_head->set_unswitch_count(head->unswitch_count()); // Preserve 2988 _igvn.register_new_node_with_optimizer(new_head); 2989 assert(first_not_peeled->in(0) == last_peel, "last_peel <- first_not_peeled"); 2990 _igvn.replace_input_of(first_not_peeled, 0, new_head); 2991 set_loop(new_head, loop); 2992 loop->_body.push(new_head); 2993 not_peel.set(new_head->_idx); 2994 set_idom(new_head, last_peel, dom_depth(first_not_peeled)); 2995 set_idom(first_not_peeled, new_head, dom_depth(first_not_peeled)); 2996 2997 while (sink_list.size() > 0) { 2998 Node* n = sink_list.pop(); 2999 set_ctrl(n, new_head); 3000 } 3001 3002 assert(is_valid_loop_partition(loop, peel, peel_list, not_peel), "bad partition"); 3003 3004 clone_loop( loop, old_new, dd ); 3005 3006 const uint clone_exit_idx = 1; 3007 const uint orig_exit_idx = 2; 3008 assert(is_valid_clone_loop_form( loop, peel_list, orig_exit_idx, clone_exit_idx ), "bad clone loop"); 3009 3010 Node* head_clone = old_new[head->_idx]; 3011 LoopNode* new_head_clone = old_new[new_head->_idx]->as_Loop(); 3012 Node* orig_tail_clone = head_clone->in(2); 3013 3014 // Add phi if "def" node is in peel set and "use" is not 3015 3016 for(i = 0; i < peel_list.size(); i++ ) { 3017 Node *def = peel_list.at(i); 3018 if (!def->is_CFG()) { 3019 for (DUIterator_Fast jmax, j = def->fast_outs(jmax); j < jmax; j++) { 3020 Node *use = def->fast_out(j); 3021 if (has_node(use) && use->in(0) != C->top() && 3022 (!peel.test(use->_idx) || 3023 (use->is_Phi() && use->in(0) == head)) ) { 3024 worklist.push(use); 3025 } 3026 } 3027 while( worklist.size() ) { 3028 Node *use = worklist.pop(); 3029 for (uint j = 1; j < use->req(); j++) { 3030 Node* n = use->in(j); 3031 if (n == def) { 3032 3033 // "def" is in peel set, "use" is not in peel set 3034 // or "use" is in the entry boundary (a phi) of the peel set 3035 3036 Node* use_c = has_ctrl(use) ? get_ctrl(use) : use; 3037 3038 if ( loop->is_member(get_loop( use_c )) ) { 3039 // use is in loop 3040 if (old_new[use->_idx] != NULL) { // null for dead code 3041 Node* use_clone = old_new[use->_idx]; 3042 _igvn.replace_input_of(use, j, C->top()); 3043 insert_phi_for_loop( use_clone, j, old_new[def->_idx], def, new_head_clone ); 3044 } 3045 } else { 3046 assert(is_valid_clone_loop_exit_use(loop, use, orig_exit_idx), "clone loop format"); 3047 // use is not in the loop, check if the live range includes the cut 3048 Node* lp_if = use_c->in(orig_exit_idx)->in(0); 3049 if (not_peel.test(lp_if->_idx)) { 3050 assert(j == orig_exit_idx, "use from original loop"); 3051 insert_phi_for_loop( use, clone_exit_idx, old_new[def->_idx], def, new_head_clone ); 3052 } 3053 } 3054 } 3055 } 3056 } 3057 } 3058 } 3059 3060 // Step 3b: retarget control 3061 3062 // Redirect control to the new loop head if a cloned node in 3063 // the not_peeled region has control that points into the peeled region. 3064 // This necessary because the cloned peeled region will be outside 3065 // the loop. 
3066 // from to 3067 // cloned-peeled <---+ 3068 // new_head_clone: | <--+ 3069 // cloned-not_peeled in(0) in(0) 3070 // orig-peeled 3071 3072 for(i = 0; i < loop->_body.size(); i++ ) { 3073 Node *n = loop->_body.at(i); 3074 if (!n->is_CFG() && n->in(0) != NULL && 3075 not_peel.test(n->_idx) && peel.test(n->in(0)->_idx)) { 3076 Node* n_clone = old_new[n->_idx]; 3077 _igvn.replace_input_of(n_clone, 0, new_head_clone); 3078 } 3079 } 3080 3081 // Backedge of the surviving new_head (the clone) is original last_peel 3082 _igvn.replace_input_of(new_head_clone, LoopNode::LoopBackControl, last_peel); 3083 3084 // Cut first node in original not_peel set 3085 _igvn.rehash_node_delayed(new_head); // Multiple edge updates: 3086 new_head->set_req(LoopNode::EntryControl, C->top()); // use rehash_node_delayed / set_req instead of 3087 new_head->set_req(LoopNode::LoopBackControl, C->top()); // multiple replace_input_of calls 3088 3089 // Copy head_clone back-branch info to original head 3090 // and remove original head's loop entry and 3091 // clone head's back-branch 3092 _igvn.rehash_node_delayed(head); // Multiple edge updates 3093 head->set_req(LoopNode::EntryControl, head_clone->in(LoopNode::LoopBackControl)); 3094 head->set_req(LoopNode::LoopBackControl, C->top()); 3095 _igvn.replace_input_of(head_clone, LoopNode::LoopBackControl, C->top()); 3096 3097 // Similarly modify the phis 3098 for (DUIterator_Fast kmax, k = head->fast_outs(kmax); k < kmax; k++) { 3099 Node* use = head->fast_out(k); 3100 if (use->is_Phi() && use->outcnt() > 0) { 3101 Node* use_clone = old_new[use->_idx]; 3102 _igvn.rehash_node_delayed(use); // Multiple edge updates 3103 use->set_req(LoopNode::EntryControl, use_clone->in(LoopNode::LoopBackControl)); 3104 use->set_req(LoopNode::LoopBackControl, C->top()); 3105 _igvn.replace_input_of(use_clone, LoopNode::LoopBackControl, C->top()); 3106 } 3107 } 3108 3109 // Step 4: update dominator tree and dominator depth 3110 3111 set_idom(head, orig_tail_clone, dd); 3112 recompute_dom_depth(); 3113 3114 // Inhibit more partial peeling on this loop 3115 new_head_clone->set_partial_peel_loop(); 3116 C->set_major_progress(); 3117 loop->record_for_igvn(); 3118 3119 #if !defined(PRODUCT) 3120 if (TracePartialPeeling) { 3121 tty->print_cr("\nafter partial peel one iteration"); 3122 Node_List wl(area); 3123 Node* t = last_peel; 3124 while (true) { 3125 wl.push(t); 3126 if (t == head_clone) break; 3127 t = idom(t); 3128 } 3129 while (wl.size() > 0) { 3130 Node* tt = wl.pop(); 3131 if (tt == head) tty->print_cr("orig head"); 3132 else if (tt == new_head_clone) tty->print_cr("new head"); 3133 else if (tt == head_clone) tty->print_cr("clone head"); 3134 tt->dump(); 3135 } 3136 } 3137 #endif 3138 return true; 3139 } 3140 3141 //------------------------------reorg_offsets---------------------------------- 3142 // Reorganize offset computations to lower register pressure. Mostly 3143 // prevent loop-fallout uses of the pre-incremented trip counter (which are 3144 // then alive with the post-incremented trip counter forcing an extra 3145 // register move) 3146 void PhaseIdealLoop::reorg_offsets(IdealLoopTree *loop) { 3147 // Perform it only for canonical counted loops. 3148 // Loop's shape could be messed up by iteration_split_impl. 
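// The rewrite below replaces each loop-fallout use of the pre-increment trip
// counter phi with (incr + (-stride)), which has the same value but is computed
// from the post-incremented counter, so only the latter stays live out of the
// loop; the Opaque2 wrapper around incr presumably keeps IGVN from folding the
// expression back onto phi.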
3149 if (!loop->_head->is_CountedLoop()) 3150 return; 3151 if (!loop->_head->as_Loop()->is_valid_counted_loop()) 3152 return; 3153 3154 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 3155 CountedLoopEndNode *cle = cl->loopexit(); 3156 Node *exit = cle->proj_out(false); 3157 Node *phi = cl->phi(); 3158 3159 // Check for the special case of folks using the pre-incremented 3160 // trip-counter on the fall-out path (forces the pre-incremented 3161 // and post-incremented trip counter to be live at the same time). 3162 // Fix this by adjusting to use the post-increment trip counter. 3163 3164 bool progress = true; 3165 while (progress) { 3166 progress = false; 3167 for (DUIterator_Fast imax, i = phi->fast_outs(imax); i < imax; i++) { 3168 Node* use = phi->fast_out(i); // User of trip-counter 3169 if (!has_ctrl(use)) continue; 3170 Node *u_ctrl = get_ctrl(use); 3171 if (use->is_Phi()) { 3172 u_ctrl = NULL; 3173 for (uint j = 1; j < use->req(); j++) 3174 if (use->in(j) == phi) 3175 u_ctrl = dom_lca(u_ctrl, use->in(0)->in(j)); 3176 } 3177 IdealLoopTree *u_loop = get_loop(u_ctrl); 3178 // Look for loop-invariant use 3179 if (u_loop == loop) continue; 3180 if (loop->is_member(u_loop)) continue; 3181 // Check that use is live out the bottom. Assuming the trip-counter 3182 // update is right at the bottom, uses of the loop middle are ok. 3183 if (dom_lca(exit, u_ctrl) != exit) continue; 3184 // Hit! Refactor use to use the post-incremented tripcounter. 3185 // Compute a post-increment tripcounter. 3186 Node *opaq = new Opaque2Node( C, cle->incr() ); 3187 register_new_node(opaq, exit); 3188 Node *neg_stride = _igvn.intcon(-cle->stride_con()); 3189 set_ctrl(neg_stride, C->root()); 3190 Node *post = new AddINode( opaq, neg_stride); 3191 register_new_node(post, exit); 3192 _igvn.rehash_node_delayed(use); 3193 for (uint j = 1; j < use->req(); j++) { 3194 if (use->in(j) == phi) 3195 use->set_req(j, post); 3196 } 3197 // Since DU info changed, rerun loop 3198 progress = true; 3199 break; 3200 } 3201 } 3202 3203 }