/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/divnode.hpp"
#include "opto/loopnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

//------------------------------is_loop_exit-----------------------------------
// Given an IfNode, return the loop-exiting projection or NULL if both
// arms remain in the loop.
Node *IdealLoopTree::is_loop_exit(Node *iff) const {
  if( iff->outcnt() != 2 ) return NULL;  // Ignore partially dead tests
  PhaseIdealLoop *phase = _phase;
  // Test is an IfNode, has 2 projections.  If BOTH are in the loop
  // we need loop unswitching instead of peeling.
  if( !is_member(phase->get_loop( iff->raw_out(0) )) )
    return iff->raw_out(0);
  if( !is_member(phase->get_loop( iff->raw_out(1) )) )
    return iff->raw_out(1);
  return NULL;
}


//=============================================================================


//------------------------------record_for_igvn----------------------------
// Put loop body on igvn work list
void IdealLoopTree::record_for_igvn() {
  for( uint i = 0; i < _body.size(); i++ ) {
    Node *n = _body.at(i);
    _phase->_igvn._worklist.push(n);
  }
}

//------------------------------compute_exact_trip_count-----------------------
// Compute loop exact trip count if possible. Do not recalculate trip count for
// split loops (pre-main-post) which have their limits and inits behind Opaque node.
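// Illustrative note (added for clarity, not part of the original header
// comment): for a canonical counted loop such as a source loop
// "for (int i = 0; i < 10; i += 3)", the code below computes
// stride_m = 3 - 1 = 2 and an exact trip count of (10 - 0 + 2) / 3 == 4,
// matching the iterations i = 0, 3, 6, 9.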
void IdealLoopTree::compute_exact_trip_count( PhaseIdealLoop *phase ) {
  if (!_head->as_Loop()->is_valid_counted_loop()) {
    return;
  }
  CountedLoopNode* cl = _head->as_CountedLoop();
  // Trip count may become nonexact for iteration split loops since
  // RCE modifies limits. Note, _trip_count value is not reset since
  // it is used to limit unrolling of main loop.
  cl->set_nonexact_trip_count();

  // Loop's test should be part of loop.
  if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
    return; // Infinite loop

#ifdef ASSERT
  BoolTest::mask bt = cl->loopexit()->test_trip();
  assert(bt == BoolTest::lt || bt == BoolTest::gt ||
         bt == BoolTest::ne, "canonical test is expected");
#endif

  Node* init_n = cl->init_trip();
  Node* limit_n = cl->limit();
  if (init_n != NULL && init_n->is_Con() &&
      limit_n != NULL && limit_n->is_Con()) {
    // Use longs to avoid integer overflow.
    int stride_con = cl->stride_con();
    jlong init_con = cl->init_trip()->get_int();
    jlong limit_con = cl->limit()->get_int();
    int stride_m = stride_con - (stride_con > 0 ? 1 : -1);
    jlong trip_count = (limit_con - init_con + stride_m)/stride_con;
    if (trip_count > 0 && (julong)trip_count < (julong)max_juint) {
      // Set exact trip count.
      cl->set_exact_trip_count((uint)trip_count);
    }
  }
}

//------------------------------compute_profile_trip_cnt----------------------------
// Compute loop trip count from profile data as
//    (backedge_count + loop_exit_count) / loop_exit_count
void IdealLoopTree::compute_profile_trip_cnt( PhaseIdealLoop *phase ) {
  if (!_head->is_CountedLoop()) {
    return;
  }
  CountedLoopNode* head = _head->as_CountedLoop();
  if (head->profile_trip_cnt() != COUNT_UNKNOWN) {
    return; // Already computed
  }
  float trip_cnt = (float)max_jint; // default is big

  Node* back = head->in(LoopNode::LoopBackControl);
  while (back != head) {
    if ((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
        back->in(0) &&
        back->in(0)->is_If() &&
        back->in(0)->as_If()->_fcnt != COUNT_UNKNOWN &&
        back->in(0)->as_If()->_prob != PROB_UNKNOWN) {
      break;
    }
    back = phase->idom(back);
  }
  if (back != head) {
    assert((back->Opcode() == Op_IfTrue || back->Opcode() == Op_IfFalse) &&
           back->in(0), "if-projection exists");
    IfNode* back_if = back->in(0)->as_If();
    float loop_back_cnt = back_if->_fcnt * back_if->_prob;

    // Now compute a loop exit count
    float loop_exit_cnt = 0.0f;
    for( uint i = 0; i < _body.size(); i++ ) {
      Node *n = _body[i];
      if( n->is_If() ) {
        IfNode *iff = n->as_If();
        if( iff->_fcnt != COUNT_UNKNOWN && iff->_prob != PROB_UNKNOWN ) {
          Node *exit = is_loop_exit(iff);
          if( exit ) {
            float exit_prob = iff->_prob;
            if (exit->Opcode() == Op_IfFalse) exit_prob = 1.0 - exit_prob;
            if (exit_prob > PROB_MIN) {
              float exit_cnt = iff->_fcnt * exit_prob;
              loop_exit_cnt += exit_cnt;
            }
          }
        }
      }
    }
    if (loop_exit_cnt > 0.0f) {
      trip_cnt = (loop_back_cnt + loop_exit_cnt) / loop_exit_cnt;
    } else {
      // No exit count, so use the backedge count as the trip count.
      trip_cnt = loop_back_cnt;
    }
  }
#ifndef PRODUCT
  if (TraceProfileTripCount) {
    tty->print_cr("compute_profile_trip_cnt lp: %d cnt: %f\n", head->_idx, trip_cnt);
  }
#endif
  head->set_profile_trip_cnt(trip_cnt);
}

//---------------------is_invariant_addition-----------------------------
// Return nonzero index of invariant operand for an Add or Sub
// of (nonconstant) invariant and variant values. Helper for reassociate_invariants.
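// Illustrative example (added for clarity, not in the original comment): with a
// loop-invariant value inv and a loop-varying value i, is_invariant_addition()
// returns 1 for AddI(inv, i) and 2 for AddI(i, inv); it returns 0 when both or
// neither input is invariant, or when the node is not an integer add/subtract.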
int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) {
  int op = n->Opcode();
  if (op == Op_AddI || op == Op_SubI) {
    bool in1_invar = this->is_invariant(n->in(1));
    bool in2_invar = this->is_invariant(n->in(2));
    if (in1_invar && !in2_invar) return 1;
    if (!in1_invar && in2_invar) return 2;
  }
  return 0;
}

//---------------------reassociate_add_sub-----------------------------
// Reassociate invariant add and subtract expressions:
//
// inv1 + (x + inv2)  =>  ( inv1 + inv2) + x
// (x + inv2) + inv1  =>  ( inv1 + inv2) + x
// inv1 + (x - inv2)  =>  ( inv1 - inv2) + x
// inv1 - (inv2 - x)  =>  ( inv1 - inv2) + x
// (x + inv2) - inv1  =>  (-inv1 + inv2) + x
// (x - inv2) + inv1  =>  ( inv1 - inv2) + x
// (x - inv2) - inv1  =>  (-inv1 - inv2) + x
// inv1 + (inv2 - x)  =>  ( inv1 + inv2) - x
// inv1 - (x - inv2)  =>  ( inv1 + inv2) - x
// (inv2 - x) + inv1  =>  ( inv1 + inv2) - x
// (inv2 - x) - inv1  =>  (-inv1 + inv2) - x
// inv1 - (x + inv2)  =>  ( inv1 - inv2) - x
//
Node* IdealLoopTree::reassociate_add_sub(Node* n1, PhaseIdealLoop *phase) {
  if (!n1->is_Add() && !n1->is_Sub() || n1->outcnt() == 0) return NULL;
  if (is_invariant(n1)) return NULL;
  int inv1_idx = is_invariant_addition(n1, phase);
  if (!inv1_idx) return NULL;
  // Don't mess with add of constant (igvn moves them to expression tree root.)
  if (n1->is_Add() && n1->in(2)->is_Con()) return NULL;
  Node* inv1 = n1->in(inv1_idx);
  Node* n2 = n1->in(3 - inv1_idx);
  int inv2_idx = is_invariant_addition(n2, phase);
  if (!inv2_idx) return NULL;
  Node* x    = n2->in(3 - inv2_idx);
  Node* inv2 = n2->in(inv2_idx);

  bool neg_x    = n2->is_Sub() && inv2_idx == 1;
  bool neg_inv2 = n2->is_Sub() && inv2_idx == 2;
  bool neg_inv1 = n1->is_Sub() && inv1_idx == 2;
  if (n1->is_Sub() && inv1_idx == 1) {
    neg_x    = !neg_x;
    neg_inv2 = !neg_inv2;
  }
  Node* inv1_c = phase->get_ctrl(inv1);
  Node* inv2_c = phase->get_ctrl(inv2);
  Node* n_inv1;
  if (neg_inv1) {
    Node *zero = phase->_igvn.intcon(0);
    phase->set_ctrl(zero, phase->C->root());
    n_inv1 = new (phase->C) SubINode(zero, inv1);
    phase->register_new_node(n_inv1, inv1_c);
  } else {
    n_inv1 = inv1;
  }
  Node* inv;
  if (neg_inv2) {
    inv = new (phase->C) SubINode(n_inv1, inv2);
  } else {
    inv = new (phase->C) AddINode(n_inv1, inv2);
  }
  phase->register_new_node(inv, phase->get_early_ctrl(inv));

  Node* addx;
  if (neg_x) {
    addx = new (phase->C) SubINode(inv, x);
  } else {
    addx = new (phase->C) AddINode(x, inv);
  }
  phase->register_new_node(addx, phase->get_ctrl(x));
  phase->_igvn.replace_node(n1, addx);
  assert(phase->get_loop(phase->get_ctrl(n1)) == this, "");
  _body.yank(n1);
  return addx;
}

//---------------------reassociate_invariants-----------------------------
// Reassociate invariant expressions:
void IdealLoopTree::reassociate_invariants(PhaseIdealLoop *phase) {
  for (int i = _body.size() - 1; i >= 0; i--) {
    Node *n = _body.at(i);
    for (int j = 0; j < 5; j++) {
      Node* nn = reassociate_add_sub(n, phase);
      if (nn == NULL) break;
      n = nn; // again
    };
  }
}

//------------------------------policy_peeling---------------------------------
// Return TRUE or FALSE if the loop should be peeled or not.  Peel if we can
// make some loop-invariant test (usually a null-check) happen before the loop.
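// Illustrative example (added, not in the original comment): a loop whose body
// starts with a null check on a reference defined before the loop is a typical
// candidate; peeling one iteration lets that check run once, ahead of the loop,
// and the now-dominated copies of the test inside the loop body can then be
// removed (see peeled_dom_test_elim below).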
bool IdealLoopTree::policy_peeling( PhaseIdealLoop *phase ) const {
  Node *test = ((IdealLoopTree*)this)->tail();
  int  body_size = ((IdealLoopTree*)this)->_body.size();
  // Peeling does loop cloning which can result in O(N^2) node construction
  if( body_size > 255 /* Prevent overflow for large body_size */
      || (body_size * body_size + phase->C->live_nodes()) > phase->C->max_node_limit() ) {
    return false;           // too large to safely clone
  }
  while( test != _head ) {      // Scan till run off top of loop
    if( test->is_If() ) {       // Test?
      Node *ctrl = phase->get_ctrl(test->in(1));
      if (ctrl->is_top())
        return false;           // Found dead test on live IF?  No peeling!
      // Standard IF only has one input value to check for loop invariance
      assert( test->Opcode() == Op_If || test->Opcode() == Op_CountedLoopEnd, "Check this code when new subtype is added");
      // Condition is not a member of this loop?
      if( !is_member(phase->get_loop(ctrl)) &&
          is_loop_exit(test) )
        return true;            // Found reason to peel!
    }
    // Walk up dominators to loop _head looking for test which is
    // executed on every path thru loop.
    test = phase->idom(test);
  }
  return false;
}

//------------------------------peeled_dom_test_elim---------------------------
// If we got the effect of peeling, either by actually peeling or by making
// a pre-loop which must execute at least once, we can remove all
// loop-invariant dominated tests in the main body.
void PhaseIdealLoop::peeled_dom_test_elim( IdealLoopTree *loop, Node_List &old_new ) {
  bool progress = true;
  while( progress ) {
    progress = false;           // Reset for next iteration
    Node *prev = loop->_head->in(LoopNode::LoopBackControl);//loop->tail();
    Node *test = prev->in(0);
    while( test != loop->_head ) { // Scan till run off top of loop

      int p_op = prev->Opcode();
      if( (p_op == Op_IfFalse || p_op == Op_IfTrue) &&
          test->is_If() &&      // Test?
          !test->in(1)->is_Con() && // And not already obvious?
          // Condition is not a member of this loop?
          !loop->is_member(get_loop(get_ctrl(test->in(1))))){
        // Walk loop body looking for instances of this test
        for( uint i = 0; i < loop->_body.size(); i++ ) {
          Node *n = loop->_body.at(i);
          if( n->is_If() && n->in(1) == test->in(1) /*&& n != loop->tail()->in(0)*/ ) {
            // IfNode was dominated by version in peeled loop body
            progress = true;
            dominated_by( old_new[prev->_idx], n );
          }
        }
      }
      prev = test;
      test = idom(test);
    } // End of scan tests in loop

  } // End of while( progress )
}

//------------------------------do_peeling-------------------------------------
// Peel the first iteration of the given loop.
// Step 1: Clone the loop body.  The clone becomes the peeled iteration.
//         The pre-loop illegally has 2 control users (old & new loops).
// Step 2: Make the old-loop fall-in edges point to the peeled iteration.
//         Do this by making the old-loop fall-in edges act as if they came
//         around the loopback from the prior iteration (follow the old-loop
//         backedges) and then map to the new peeled iteration.  This leaves
//         the pre-loop with only 1 user (the new peeled iteration), but the
//         peeled-loop backedge has 2 users.
// Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
//         extra backedge user.
//
//                   orig
//
//                   stmt1
//                     |
//                     v
//               loop predicate
//                     |
//                     v
//                    loop<----+
//                      |      |
//                    stmt2    |
//                      |      |
//                      v      |
//                      if     ^
//                     / \     |
//                    /   \    |
//                   v     v   |
//                 false  true |
//                 /        \  |
//                /          ----+
//               |
//               v
//              exit
//
//
//            after clone loop
//
//                   stmt1
//                     |
//                     v
//               loop predicate
//                 /       \
//        clone  /          \  orig
//              /            \
//             /              \
//            v                v
//   +---->loop clone          loop<----+
//   |      |                    |      |
//   |    stmt2 clone          stmt2    |
//   |      |                    |      |
//   |      v                    v      |
//   ^      if clone            if      ^
//   |     / \                 / \      |
//   |    /   \               /   \     |
//   |   v     v             v     v    |
//   | true  false         false  true  |
//   |  /      \            /      \    |
//   +----      \          /      ----+
//               \        /
//               1v      v2
//                  region
//                     |
//                     v
//                    exit
//
//
//         after peel and predicate move
//
//                   stmt1
//                    /
//                   /
//          clone   /   orig
//                 /
//                /      +----------+
//               /       |          |
//              /   loop predicate  |
//             /         |          |
//            v          v          |
//   TOP-->loop clone   loop<----+  |
//          |             |      |  |
//        stmt2 clone   stmt2    |  |
//          |             |      |  ^
//          v             v      |  |
//        if clone        if     ^  |
//        / \            / \     |  |
//       /   \          /   \    |  |
//      v     v        v     v   |  |
//    true  false    false  true |  |
//      |      \      /      \   |  |
//      |       \    /        ----+ ^
//      |        \  /               |
//      |        1v v2              |
//      v        region             |
//      |          |                |
//      |          v                |
//      |         exit              |
//      |                           |
//      +--------------->-----------+
//
//
//              final graph
//
//                  stmt1
//                    |
//                    v
//                  stmt2 clone
//                    |
//                    v
//                  if clone
//                  /     |
//                 /      |
//                v       v
//              false    true
//               |        |
//               |        v
//               |  loop predicate
//               |        |
//               |        v
//               |       loop<----+
//               |         |      |
//               |       stmt2    |
//               |         |      |
//               |         v      |
//               v         if     ^
//               |        / \     |
//               |       /   \    |
//               |      v     v   |
//               |    false  true |
//               |      |      \  |
//               v      v       --+
//               region
//                  |
//                  v
//                 exit
//
void PhaseIdealLoop::do_peeling( IdealLoopTree *loop, Node_List &old_new ) {

  C->set_major_progress();
  // Peeling a 'main' loop in a pre/main/post situation obfuscates the
  // 'pre' loop from the main and the 'pre' can no longer have its
  // iterations adjusted.  Therefore, we need to declare this loop as
  // no longer a 'main' loop; it will need new pre and post loops before
  // we can do further RCE.
#ifndef PRODUCT
  if (TraceLoopOpts) {
    tty->print("Peel         ");
    loop->dump_head();
  }
#endif
  Node* head = loop->_head;
  bool counted_loop = head->is_CountedLoop();
  if (counted_loop) {
    CountedLoopNode *cl = head->as_CountedLoop();
    assert(cl->trip_count() > 0, "peeling a fully unrolled loop");
    cl->set_trip_count(cl->trip_count() - 1);
    if (cl->is_main_loop()) {
      cl->set_normal_loop();
#ifndef PRODUCT
      if (PrintOpto && VerifyLoopOptimizations) {
        tty->print("Peeling a 'main' loop; resetting to 'normal' ");
        loop->dump_head();
      }
#endif
    }
  }
  Node* entry = head->in(LoopNode::EntryControl);

  // Step 1: Clone the loop body.  The clone becomes the peeled iteration.
  //         The pre-loop illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dom_depth(head) );

  // Step 2: Make the old-loop fall-in edges point to the peeled iteration.
  //         Do this by making the old-loop fall-in edges act as if they came
  //         around the loopback from the prior iteration (follow the old-loop
  //         backedges) and then map to the new peeled iteration.
  //         This leaves the pre-loop with only 1 user (the new peeled
  //         iteration), but the peeled-loop backedge has 2 users.
  Node* new_entry = old_new[head->in(LoopNode::LoopBackControl)->_idx];
  _igvn.hash_delete(head);
  head->set_req(LoopNode::EntryControl, new_entry);
  for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
    Node* old = head->fast_out(j);
    if (old->in(0) == loop->_head && old->req() == 3 && old->is_Phi()) {
      Node* new_exit_value = old_new[old->in(LoopNode::LoopBackControl)->_idx];
      if (!new_exit_value )     // Backedge value is ALSO loop invariant?
        // Then loop body backedge value remains the same.
        new_exit_value = old->in(LoopNode::LoopBackControl);
      _igvn.hash_delete(old);
      old->set_req(LoopNode::EntryControl, new_exit_value);
    }
  }


  // Step 3: Cut the backedge on the clone (so it's not a loop) and remove the
  //         extra backedge user.
  Node* new_head = old_new[head->_idx];
  _igvn.hash_delete(new_head);
  new_head->set_req(LoopNode::LoopBackControl, C->top());
  for (DUIterator_Fast j2max, j2 = new_head->fast_outs(j2max); j2 < j2max; j2++) {
    Node* use = new_head->fast_out(j2);
    if (use->in(0) == new_head && use->req() == 3 && use->is_Phi()) {
      _igvn.hash_delete(use);
      use->set_req(LoopNode::LoopBackControl, C->top());
    }
  }


  // Step 4: Correct dom-depth info.  Set to loop-head depth.
  int dd = dom_depth(head);
  set_idom(head, head->in(1), dd);
  for (uint j3 = 0; j3 < loop->_body.size(); j3++) {
    Node *old = loop->_body.at(j3);
    Node *nnn = old_new[old->_idx];
    if (!has_ctrl(nnn))
      set_idom(nnn, idom(nnn), dd-1);
  }

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);

  loop->record_for_igvn();
}

#define EMPTY_LOOP_SIZE 7 // number of nodes in an empty loop

//------------------------------policy_maximally_unroll------------------------
// Calculate exact loop trip count and return true if loop can be maximally
// unrolled.
bool IdealLoopTree::policy_maximally_unroll( PhaseIdealLoop *phase ) const {
  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop(), "");
  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  if (!cl->has_exact_trip_count()) {
    // Trip count is not exact.
    return false;
  }

  uint trip_count = cl->trip_count();
  // Note, max_juint is used to indicate unknown trip count.
  assert(trip_count > 1, "one iteration loop should be optimized out already");
  assert(trip_count < max_juint, "exact trip_count should be less than max_uint.");

  // Real policy: if we maximally unroll, does it get too big?
  // Allow the unrolled mess to get larger than standard loop
  // size.  After all, it will no longer be a loop.
  uint body_size    = _body.size();
  uint unroll_limit = (uint)LoopUnrollLimit * 4;
  assert( (intx)unroll_limit == LoopUnrollLimit * 4, "LoopUnrollLimit must fit in 32bits");
  if (trip_count > unroll_limit || body_size > unroll_limit) {
    return false;
  }

  // Fully unroll a loop with few iterations, regardless of the conditions
  // below, since the following loop optimizations will split such a loop
  // anyway (pre-main-post).
  if (trip_count <= 3)
    return true;

  // Take into account that after unrolling the conjoined heads and tails will fold,
  // otherwise policy_unroll() may allow more unrolling than max unrolling.
  uint new_body_size = EMPTY_LOOP_SIZE + (body_size - EMPTY_LOOP_SIZE) * trip_count;
  uint tst_body_size = (new_body_size - EMPTY_LOOP_SIZE) / trip_count + EMPTY_LOOP_SIZE;
  if (body_size != tst_body_size) // Check for int overflow
    return false;
  if (new_body_size > unroll_limit ||
      // Unrolling can result in a large amount of node construction
      new_body_size >= phase->C->max_node_limit() - phase->C->live_nodes()) {
    return false;
  }

  // Do not unroll a loop with String intrinsics code.
  // String intrinsics are large and have loops.
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  return true; // Do maximally unroll
}


//------------------------------policy_unroll----------------------------------
// Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
// the loop is a CountedLoop and the body is small enough.
bool IdealLoopTree::policy_unroll( PhaseIdealLoop *phase ) const {

  CountedLoopNode *cl = _head->as_CountedLoop();
  assert(cl->is_normal_loop() || cl->is_main_loop(), "");

  if (!cl->is_valid_counted_loop())
    return false; // Malformed counted loop

  // Protect against over-unrolling.
  // After split at least one iteration will be executed in pre-loop.
  if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;

  int future_unroll_ct = cl->unrolled_count() * 2;
  if (future_unroll_ct > LoopMaxUnroll) return false;

  // Check for initial stride being a small enough constant
  if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;

  // Don't unroll if the next round of unrolling would push us
  // over the expected trip count of the loop.  One is subtracted
  // from the expected trip count because the pre-loop normally
  // executes 1 iteration.
  if (UnrollLimitForProfileCheck > 0 &&
      cl->profile_trip_cnt() != COUNT_UNKNOWN &&
      future_unroll_ct > UnrollLimitForProfileCheck &&
      (float)future_unroll_ct > cl->profile_trip_cnt() - 1.0) {
    return false;
  }

  // When unroll count is greater than LoopUnrollMin, don't unroll if:
  //   the residual iterations are more than 10% of the trip count
  //   and rounds of "unroll,optimize" are not making significant progress
  //   Progress defined as current size less than 20% larger than previous size.
  if (UseSuperWord && cl->node_count_before_unroll() > 0 &&
      future_unroll_ct > LoopUnrollMin &&
      (future_unroll_ct - 1) * 10.0 > cl->profile_trip_cnt() &&
      1.2 * cl->node_count_before_unroll() < (double)_body.size()) {
    return false;
  }

  Node *init_n = cl->init_trip();
  Node *limit_n = cl->limit();
  int stride_con = cl->stride_con();
  // Non-constant bounds.
  // Protect against over-unrolling when init and/or limit are not constant
  // (so that trip_count's init value is maxint) but iv range is known.
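  // Illustrative example (added, not in the original comment): with stride_con
  // == 4 the stride after this unroll would be 8; for an iv type of [0, 10]
  // the check below passes (0 + 8 <= 10), while for an iv type of [0, 6] it
  // rejects the unroll (0 + 8 > 6) as over-unrolling.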
  if (init_n == NULL || !init_n->is_Con() ||
      limit_n == NULL || !limit_n->is_Con()) {
    Node* phi = cl->phi();
    if (phi != NULL) {
      assert(phi->is_Phi() && phi->in(0) == _head, "Counted loop should have iv phi.");
      const TypeInt* iv_type = phase->_igvn.type(phi)->is_int();
      int next_stride = stride_con * 2; // stride after this unroll
      if (next_stride > 0) {
        if (iv_type->_lo + next_stride <= iv_type->_lo || // overflow
            iv_type->_lo + next_stride >  iv_type->_hi) {
          return false;  // over-unrolling
        }
      } else if (next_stride < 0) {
        if (iv_type->_hi + next_stride >= iv_type->_hi || // overflow
            iv_type->_hi + next_stride <  iv_type->_lo) {
          return false;  // over-unrolling
        }
      }
    }
  }

  // After unrolling, the limit will be adjusted: new_limit = limit - stride.
  // Bail out if the adjustment overflows.
  const TypeInt* limit_type = phase->_igvn.type(limit_n)->is_int();
  if (stride_con > 0 && ((limit_type->_hi - stride_con) >= limit_type->_hi) ||
      stride_con < 0 && ((limit_type->_lo - stride_con) <= limit_type->_lo))
    return false;  // overflow

  // Adjust body_size to determine if we unroll or not
  uint body_size = _body.size();
  // Key test to unroll loop in CRC32 java code
  int xors_in_loop = 0;
  // Also count ModL, DivL and MulL which expand mightily
  for (uint k = 0; k < _body.size(); k++) {
    Node* n = _body.at(k);
    switch (n->Opcode()) {
      case Op_XorI: xors_in_loop++; break; // CRC32 java code
      case Op_ModL: body_size += 30; break;
      case Op_DivL: body_size += 30; break;
      case Op_MulL: body_size += 10; break;
      case Op_StrComp:
      case Op_StrEquals:
      case Op_StrIndexOf:
      case Op_EncodeISOArray:
      case Op_AryEq: {
        // Do not unroll a loop with String intrinsics code.
        // String intrinsics are large and have loops.
        return false;
      }
#if INCLUDE_RTM_OPT
      case Op_FastLock:
      case Op_FastUnlock: {
        // Don't unroll RTM locking code because it is large.
        if (UseRTMLocking) {
          return false;
        }
      }
#endif
    } // switch
  }

  // Check for being too big
  if (body_size > (uint)LoopUnrollLimit) {
    if (xors_in_loop >= 4 && body_size < (uint)LoopUnrollLimit*4) return true;
    // Normal case: loop too big
    return false;
  }

  // Unroll once!  (Each trip will soon do double iterations)
  return true;
}

//------------------------------policy_align-----------------------------------
// Return TRUE or FALSE if the loop should be cache-line aligned.  Gather the
// expression that does the alignment.  Note that only one array base can be
// aligned in a loop (unless the VM guarantees mutual alignment).  Note that
// if we vectorize short memory ops into longer memory ops, we may want to
// increase alignment.
bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
  return false;
}

//------------------------------policy_range_check-----------------------------
// Return TRUE or FALSE if the loop should be range-check-eliminated.
// Actually we do iteration-splitting, a more powerful form of RCE.
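// Illustrative example (added, not in the original comment): a qualifying test
// looks like "if (i + off < limit)" inside the loop, where i is the trip
// counter and both off and limit are loop invariant, and the If exits the
// loop.  The scan below skips BoolTest::ne comparisons and gives up on
// compares whose inputs are both loop varying.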
bool IdealLoopTree::policy_range_check( PhaseIdealLoop *phase ) const {
  if (!RangeCheckElimination) return false;

  CountedLoopNode *cl = _head->as_CountedLoop();
  // If we unrolled with no intention of doing RCE and we later
  // changed our minds, we got no pre-loop.  Either we need to
  // make a new pre-loop, or we gotta disallow RCE.
  if (cl->is_main_no_pre_loop()) return false; // Disallowed for now.
  Node *trip_counter = cl->phi();

  // Check loop body for tests of trip-counter plus loop-invariant vs
  // loop-invariant.
  for (uint i = 0; i < _body.size(); i++) {
    Node *iff = _body[i];
    if (iff->Opcode() == Op_If) { // Test?

      // Comparing trip+off vs limit
      Node *bol = iff->in(1);
      if (bol->req() != 2) continue; // dead constant test
      if (!bol->is_Bool()) {
        assert(UseLoopPredicate && bol->Opcode() == Op_Conv2B, "predicate check only");
        continue;
      }
      if (bol->as_Bool()->_test._test == BoolTest::ne)
        continue; // not RC

      Node *cmp = bol->in(1);
      Node *rc_exp = cmp->in(1);
      Node *limit = cmp->in(2);

      Node *limit_c = phase->get_ctrl(limit);
      if( limit_c == phase->C->top() )
        return false;           // Found dead test on live IF?  No RCE!
      if( is_member(phase->get_loop(limit_c) ) ) {
        // Compare might have operands swapped; commute them
        rc_exp = cmp->in(2);
        limit  = cmp->in(1);
        limit_c = phase->get_ctrl(limit);
        if( is_member(phase->get_loop(limit_c) ) )
          continue;             // Both inputs are loop varying; cannot RCE
      }

      if (!phase->is_scaled_iv_plus_offset(rc_exp, trip_counter, NULL, NULL)) {
        continue;
      }
      // Yeah!  Found a test like 'trip+off vs limit'
      // Test is an IfNode, has 2 projections.  If BOTH are in the loop
      // we need loop unswitching instead of iteration splitting.
      if( is_loop_exit(iff) )
        return true;            // Found reason to split iterations
    } // End of is IF
  }

  return false;
}

//------------------------------policy_peel_only-------------------------------
// Return TRUE or FALSE if the loop should NEVER be RCE'd or aligned.  Useful
// for unrolling loops with NO array accesses.
bool IdealLoopTree::policy_peel_only( PhaseIdealLoop *phase ) const {

  for( uint i = 0; i < _body.size(); i++ )
    if( _body[i]->is_Mem() )
      return false;

  // No memory accesses at all!
  return true;
}

//------------------------------clone_up_backedge_goo--------------------------
// If Node n lives in the back_ctrl block and cannot float, we clone a private
// version of n in the preheader_ctrl block and return that; otherwise return n.
Node *PhaseIdealLoop::clone_up_backedge_goo( Node *back_ctrl, Node *preheader_ctrl, Node *n, VectorSet &visited, Node_Stack &clones ) {
  if( get_ctrl(n) != back_ctrl ) return n;

  // Only visit once
  if (visited.test_set(n->_idx)) {
    Node *x = clones.find(n->_idx);
    if (x != NULL)
      return x;
    return n;
  }

  Node *x = NULL;               // If required, a clone of 'n'
  // Check for 'n' being pinned in the backedge.
  if( n->in(0) && n->in(0) == back_ctrl ) {
    assert(clones.find(n->_idx) == NULL, "dead loop");
    x = n->clone();             // Clone a copy of 'n' to preheader
    clones.push(x, n->_idx);
    x->set_req( 0, preheader_ctrl ); // Fix x's control input to preheader
  }

  // Recursively fix up any other input edges into x.
  // If there are no changes we can just return 'n', otherwise
  // we need to clone a private copy and change it.
  for( uint i = 1; i < n->req(); i++ ) {
    Node *g = clone_up_backedge_goo( back_ctrl, preheader_ctrl, n->in(i), visited, clones );
    if( g != n->in(i) ) {
      if( !x ) {
        assert(clones.find(n->_idx) == NULL, "dead loop");
        x = n->clone();
        clones.push(x, n->_idx);
      }
      x->set_req(i, g);
    }
  }
  if( x ) {                     // x can legally float to pre-header location
    register_new_node( x, preheader_ctrl );
    return x;
  } else {                      // raise n to cover LCA of uses
    set_ctrl( n, find_non_split_ctrl(back_ctrl->in(0)) );
  }
  return n;
}

bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
  Node* castii = new (C) CastIINode(incr, TypeInt::INT, true);
  castii->set_req(0, ctrl);
  register_new_node(castii, ctrl);
  for (DUIterator_Fast imax, i = incr->fast_outs(imax); i < imax; i++) {
    Node* n = incr->fast_out(i);
    if (n->is_Phi() && n->in(0) == loop) {
      int nrep = n->replace_edge(incr, castii);
      return true;
    }
  }
  return false;
}

//------------------------------insert_pre_post_loops--------------------------
// Insert pre and post loops.  If peel_only is set, the pre-loop can not have
// more iterations added.  It acts as a 'peel' only, no lower-bound RCE, no
// alignment.  Useful to unroll loops that do no array accesses.
void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_new, bool peel_only ) {

#ifndef PRODUCT
  if (TraceLoopOpts) {
    if (peel_only)
      tty->print("PeelMainPost ");
    else
      tty->print("PreMainPost  ");
    loop->dump_head();
  }
#endif
  C->set_major_progress();

  // Find common pieces of the loop being guarded with pre & post loops
  CountedLoopNode *main_head = loop->_head->as_CountedLoop();
  assert( main_head->is_normal_loop(), "" );
  CountedLoopEndNode *main_end = main_head->loopexit();
  guarantee(main_end != NULL, "no loop exit node");
  assert( main_end->outcnt() == 2, "1 true, 1 false path only" );
  uint dd_main_head = dom_depth(main_head);
  uint max = main_head->outcnt();

  Node *pre_header= main_head->in(LoopNode::EntryControl);
  Node *init      = main_head->init_trip();
  Node *incr      = main_end ->incr();
  Node *limit     = main_end ->limit();
  Node *stride    = main_end ->stride();
  Node *cmp       = main_end ->cmp_node();
  BoolTest::mask b_test = main_end->test_trip();

  // Need only 1 user of 'bol' because I will be hacking the loop bounds.
  Node *bol = main_end->in(CountedLoopEndNode::TestValue);
  if( bol->outcnt() != 1 ) {
    bol = bol->clone();
    register_new_node(bol,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, bol);
  }
  // Need only 1 user of 'cmp' because I will be hacking the loop bounds.
  if( cmp->outcnt() != 1 ) {
    cmp = cmp->clone();
    register_new_node(cmp,main_end->in(CountedLoopEndNode::TestControl));
    _igvn.hash_delete(bol);
    bol->set_req(1, cmp);
  }

  //------------------------------
  // Step A: Create Post-Loop.
  Node* main_exit = main_end->proj_out(false);
  assert( main_exit->Opcode() == Op_IfFalse, "" );
  int dd_main_exit = dom_depth(main_exit);

  // Step A1: Clone the loop body.  The clone becomes the post-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_exit );
  assert( old_new[main_end ->_idx]->Opcode() == Op_CountedLoopEnd, "" );
  CountedLoopNode *post_head = old_new[main_head->_idx]->as_CountedLoop();
  post_head->set_post_loop(main_head);

  // Reduce the post-loop trip count.
  CountedLoopEndNode* post_end = old_new[main_end ->_idx]->as_CountedLoopEnd();
  post_end->_prob = PROB_FAIR;

  // Build the main-loop normal exit.
  IfFalseNode *new_main_exit = new (C) IfFalseNode(main_end);
  _igvn.register_new_node_with_optimizer( new_main_exit );
  set_idom(new_main_exit, main_end, dd_main_exit );
  set_loop(new_main_exit, loop->_parent);

  // Step A2: Build a zero-trip guard for the post-loop.  After leaving the
  // main-loop, the post-loop may not execute at all.  We 'opaque' the incr
  // (the main-loop trip-counter exit value) because we will be changing
  // the exit value (via unrolling) so we cannot constant-fold away the zero
  // trip guard until all unrolling is done.
  Node *zer_opaq = new (C) Opaque1Node(C, incr);
  Node *zer_cmp  = new (C) CmpINode( zer_opaq, limit );
  Node *zer_bol  = new (C) BoolNode( zer_cmp, b_test );
  register_new_node( zer_opaq, new_main_exit );
  register_new_node( zer_cmp , new_main_exit );
  register_new_node( zer_bol , new_main_exit );

  // Build the IfNode
  IfNode *zer_iff = new (C) IfNode( new_main_exit, zer_bol, PROB_FAIR, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( zer_iff );
  set_idom(zer_iff, new_main_exit, dd_main_exit);
  set_loop(zer_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip post-loop
  _igvn.replace_input_of(main_exit, 0, zer_iff);
  set_idom(main_exit, zer_iff, dd_main_exit);
  set_idom(main_exit->unique_out(), zer_iff, dd_main_exit);
  // Make the true-path, must enter the post loop
  Node *zer_taken = new (C) IfTrueNode( zer_iff );
  _igvn.register_new_node_with_optimizer( zer_taken );
  set_idom(zer_taken, zer_iff, dd_main_exit);
  set_loop(zer_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( post_head );
  post_head->set_req(LoopNode::EntryControl, zer_taken);
  set_idom(post_head, zer_taken, dd_main_exit);

  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);
  Node_Stack clones(a, main_head->back_control()->outcnt());
  // Step A3: Make the fall-in values to the post-loop come from the
  // fall-out values of the main-loop.
  for (DUIterator_Fast imax, i = main_head->fast_outs(imax); i < imax; i++) {
    Node* main_phi = main_head->fast_out(i);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *post_phi = old_new[main_phi->_idx];
      Node *fallmain = clone_up_backedge_goo(main_head->back_control(),
                                             post_head->init_control(),
                                             main_phi->in(LoopNode::LoopBackControl),
                                             visited, clones);
      _igvn.hash_delete(post_phi);
      post_phi->set_req( LoopNode::EntryControl, fallmain );
    }
  }

  // Update local caches for next stanza
  main_exit = new_main_exit;


  //------------------------------
  // Step B: Create Pre-Loop.

  // Step B1: Clone the loop body.  The clone becomes the pre-loop.  The main
  // loop pre-header illegally has 2 control users (old & new loops).
  clone_loop( loop, old_new, dd_main_head );
  CountedLoopNode*    pre_head = old_new[main_head->_idx]->as_CountedLoop();
  CountedLoopEndNode* pre_end  = old_new[main_end ->_idx]->as_CountedLoopEnd();
  pre_head->set_pre_loop(main_head);
  Node *pre_incr = old_new[incr->_idx];

  // Reduce the pre-loop trip count.
  pre_end->_prob = PROB_FAIR;

  // Find the pre-loop normal exit.
  Node* pre_exit = pre_end->proj_out(false);
  assert( pre_exit->Opcode() == Op_IfFalse, "" );
  IfFalseNode *new_pre_exit = new (C) IfFalseNode(pre_end);
  _igvn.register_new_node_with_optimizer( new_pre_exit );
  set_idom(new_pre_exit, pre_end, dd_main_head);
  set_loop(new_pre_exit, loop->_parent);

  // Step B2: Build a zero-trip guard for the main-loop.  After leaving the
  // pre-loop, the main-loop may not execute at all.  Later in life this
  // zero-trip guard will become the minimum-trip guard when we unroll
  // the main-loop.
  Node *min_opaq = new (C) Opaque1Node(C, limit);
  Node *min_cmp  = new (C) CmpINode( pre_incr, min_opaq );
  Node *min_bol  = new (C) BoolNode( min_cmp, b_test );
  register_new_node( min_opaq, new_pre_exit );
  register_new_node( min_cmp , new_pre_exit );
  register_new_node( min_bol , new_pre_exit );

  // Build the IfNode (assume the main-loop is executed always).
  IfNode *min_iff = new (C) IfNode( new_pre_exit, min_bol, PROB_ALWAYS, COUNT_UNKNOWN );
  _igvn.register_new_node_with_optimizer( min_iff );
  set_idom(min_iff, new_pre_exit, dd_main_head);
  set_loop(min_iff, loop->_parent);

  // Plug in the false-path, taken if we need to skip main-loop
  _igvn.hash_delete( pre_exit );
  pre_exit->set_req(0, min_iff);
  set_idom(pre_exit, min_iff, dd_main_head);
  set_idom(pre_exit->unique_out(), min_iff, dd_main_head);
  // Make the true-path, must enter the main loop
  Node *min_taken = new (C) IfTrueNode( min_iff );
  _igvn.register_new_node_with_optimizer( min_taken );
  set_idom(min_taken, min_iff, dd_main_head);
  set_loop(min_taken, loop->_parent);
  // Plug in the true path
  _igvn.hash_delete( main_head );
  main_head->set_req(LoopNode::EntryControl, min_taken);
  set_idom(main_head, min_taken, dd_main_head);

  visited.Clear();
  clones.clear();
  // Step B3: Make the fall-in values to the main-loop come from the
  // fall-out values of the pre-loop.
  for (DUIterator_Fast i2max, i2 = main_head->fast_outs(i2max); i2 < i2max; i2++) {
    Node* main_phi = main_head->fast_out(i2);
    if( main_phi->is_Phi() && main_phi->in(0) == main_head && main_phi->outcnt() > 0 ) {
      Node *pre_phi = old_new[main_phi->_idx];
      Node *fallpre = clone_up_backedge_goo(pre_head->back_control(),
                                            main_head->init_control(),
                                            pre_phi->in(LoopNode::LoopBackControl),
                                            visited, clones);
      _igvn.hash_delete(main_phi);
      main_phi->set_req( LoopNode::EntryControl, fallpre );
    }
  }

  // Nodes inside the loop may be control dependent on a predicate
  // that was moved before the preloop.  If the back branch of the main
  // or post loops becomes dead, those nodes won't be dependent on the
  // test that guards that loop nest anymore, which could lead to an
  // incorrect array access because it executes independently of the
  // test that was guarding the loop nest.
  // We add a special CastII on the if branch that enters the loop, between
  // the input induction variable value and the induction variable Phi to
  // preserve correct dependencies.

  // CastII for the post loop:
  bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
  assert(inserted, "no castII inserted");

  // CastII for the main loop:
  inserted = cast_incr_before_loop(pre_incr, min_taken, main_head);
  assert(inserted, "no castII inserted");

  // Step B4: Shorten the pre-loop to run only 1 iteration (for now).
  // RCE and alignment may change this later.
  Node *cmp_end = pre_end->cmp_node();
  assert( cmp_end->in(2) == limit, "" );
  Node *pre_limit = new (C) AddINode( init, stride );

  // Save the original loop limit in this Opaque1 node for
  // use by range check elimination.
  Node *pre_opaq  = new (C) Opaque1Node(C, pre_limit, limit);

  register_new_node( pre_limit, pre_head->in(0) );
  register_new_node( pre_opaq , pre_head->in(0) );

  // Since no other users of pre-loop compare, I can hack limit directly
  assert( cmp_end->outcnt() == 1, "no other users" );
  _igvn.hash_delete(cmp_end);
  cmp_end->set_req(2, peel_only ? pre_limit : pre_opaq);

  // Special case for not-equal loop bounds:
  // Change pre loop test, main loop test, and the
  // main loop guard test to use lt or gt depending on stride
  // direction:
  // positive stride use <
  // negative stride use >
  //
  // not-equal test is kept for post loop to handle case
  // when init > limit when stride > 0 (and reverse).

  if (pre_end->in(CountedLoopEndNode::TestValue)->as_Bool()->_test._test == BoolTest::ne) {

    BoolTest::mask new_test = (main_end->stride_con() > 0) ? BoolTest::lt : BoolTest::gt;
    // Modify pre loop end condition
    Node* pre_bol = pre_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol0 = new (C) BoolNode(pre_bol->in(1), new_test);
    register_new_node( new_bol0, pre_head->in(0) );
    _igvn.hash_delete(pre_end);
    pre_end->set_req(CountedLoopEndNode::TestValue, new_bol0);
    // Modify main loop guard condition
    assert(min_iff->in(CountedLoopEndNode::TestValue) == min_bol, "guard okay");
    BoolNode* new_bol1 = new (C) BoolNode(min_bol->in(1), new_test);
    register_new_node( new_bol1, new_pre_exit );
    _igvn.hash_delete(min_iff);
    min_iff->set_req(CountedLoopEndNode::TestValue, new_bol1);
    // Modify main loop end condition
    BoolNode* main_bol = main_end->in(CountedLoopEndNode::TestValue)->as_Bool();
    BoolNode* new_bol2 = new (C) BoolNode(main_bol->in(1), new_test);
    register_new_node( new_bol2, main_end->in(CountedLoopEndNode::TestControl) );
    _igvn.hash_delete(main_end);
    main_end->set_req(CountedLoopEndNode::TestValue, new_bol2);
  }

  // Flag main loop
  main_head->set_main_loop();
  if( peel_only ) main_head->set_main_no_pre_loop();

  // Subtract a trip count for the pre-loop.
  main_head->set_trip_count(main_head->trip_count() - 1);

  // It's difficult to be precise about the trip-counts
  // for the pre/post loops.  They are usually very short,
  // so guess that 4 trips is a reasonable value.
  post_head->set_profile_trip_cnt(4.0);
  pre_head->set_profile_trip_cnt(4.0);

  // Now force out all loop-invariant dominating tests.  The optimizer
  // finds some, but we _know_ they are all useless.
  peeled_dom_test_elim(loop,old_new);
  loop->record_for_igvn();
}

//------------------------------is_invariant-----------------------------
// Return true if n is invariant
bool IdealLoopTree::is_invariant(Node* n) const {
  Node *n_c = _phase->has_ctrl(n) ? _phase->get_ctrl(n) : n;
  if (n_c->is_top()) return false;
  return !is_member(_phase->get_loop(n_c));
}


//------------------------------do_unroll--------------------------------------
// Unroll the loop body one step - make each trip do 2 iterations.
void PhaseIdealLoop::do_unroll( IdealLoopTree *loop, Node_List &old_new, bool adjust_min_trip ) {
  assert(LoopUnrollLimit, "");
  CountedLoopNode *loop_head = loop->_head->as_CountedLoop();
  CountedLoopEndNode *loop_end = loop_head->loopexit();
  assert(loop_end, "");
#ifndef PRODUCT
  if (PrintOpto && VerifyLoopOptimizations) {
    tty->print("Unrolling ");
    loop->dump_head();
  } else if (TraceLoopOpts) {
    if (loop_head->trip_count() < (uint)LoopUnrollLimit) {
      tty->print("Unroll %d(%2d) ", loop_head->unrolled_count()*2, loop_head->trip_count());
    } else {
      tty->print("Unroll %d     ", loop_head->unrolled_count()*2);
    }
    loop->dump_head();
  }
#endif

  // Remember loop node count before unrolling to detect
  // if rounds of unroll,optimize are making progress
  loop_head->set_node_count_before_unroll(loop->_body.size());

  Node *ctrl   = loop_head->in(LoopNode::EntryControl);
  Node *limit  = loop_head->limit();
  Node *init   = loop_head->init_trip();
  Node *stride = loop_head->stride();

  Node *opaq = NULL;
  if (adjust_min_trip) {       // If not maximally unrolling, need adjustment
    // Search for zero-trip guard.
    assert( loop_head->is_main_loop(), "" );
    assert( ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "" );
    Node *iff = ctrl->in(0);
    assert( iff->Opcode() == Op_If, "" );
    Node *bol = iff->in(1);
    assert( bol->Opcode() == Op_Bool, "" );
    Node *cmp = bol->in(1);
    assert( cmp->Opcode() == Op_CmpI, "" );
    opaq = cmp->in(2);
    // Occasionally it's possible for a zero-trip guard Opaque1 node to be
    // optimized away and then another round of loop opts attempted.
    // We can not optimize this particular loop in that case.
    if (opaq->Opcode() != Op_Opaque1)
      return; // Cannot find zero-trip guard!  Bail out!
    // Zero-trip test uses an 'opaque' node which is not shared.
    assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
  }

  C->set_major_progress();

  Node* new_limit = NULL;
  if (UnrollLimitCheck) {
    int stride_con = stride->get_int();
    int stride_p = (stride_con > 0) ? stride_con : -stride_con;
    uint old_trip_count = loop_head->trip_count();
    // Verify that unroll policy result is still valid.
    assert(old_trip_count > 1 &&
           (!adjust_min_trip || stride_p <= (1<<3)*loop_head->unrolled_count()), "sanity");

    // Adjust loop limit to keep valid iterations number after unroll.
    // Use (limit - stride) instead of (((limit - init)/stride) & (-2))*stride
    // which may overflow.
    if (!adjust_min_trip) {
      assert(old_trip_count > 1 && (old_trip_count & 1) == 0,
             "odd trip count for maximally unroll");
      // Don't need to adjust limit for maximally unroll since trip count is even.
    } else if (loop_head->has_exact_trip_count() && init->is_Con()) {
      // Loop's limit is constant.  Loop's init could be constant when the
      // pre-loop becomes the peeled iteration.
      jlong init_con = init->get_int();
      // We can keep old loop limit if iterations count stays the same:
      //   old_trip_count == new_trip_count * 2
      // Note: since old_trip_count >= 2 then new_trip_count >= 1
      // so we also don't need to adjust zero trip test.
      jlong limit_con  = limit->get_int();
      // (stride_con*2) does not overflow since stride_con <= 8.
      int new_stride_con = stride_con * 2;
      int stride_m    = new_stride_con - (stride_con > 0 ? 1 : -1);
      jlong trip_count = (limit_con - init_con + stride_m)/new_stride_con;
      // New trip count should satisfy the following conditions.
      assert(trip_count > 0 && (julong)trip_count < (julong)max_juint/2, "sanity");
      uint new_trip_count = (uint)trip_count;
      adjust_min_trip = (old_trip_count != new_trip_count*2);
    }

    if (adjust_min_trip) {
      // Step 2: Adjust the trip limit if it is called for.
      // The adjustment amount is -stride. Need to make sure if the
      // adjustment underflows or overflows, then the main loop is skipped.
      Node* cmp = loop_end->cmp_node();
      assert(cmp->in(2) == limit, "sanity");
      assert(opaq != NULL && opaq->in(1) == limit, "sanity");

      // Verify that policy_unroll result is still valid.
      const TypeInt* limit_type = _igvn.type(limit)->is_int();
      assert(stride_con > 0 && ((limit_type->_hi - stride_con) < limit_type->_hi) ||
             stride_con < 0 && ((limit_type->_lo - stride_con) > limit_type->_lo), "sanity");

      if (limit->is_Con()) {
        // The check in policy_unroll and the assert above guarantee
        // no underflow if limit is constant.
        new_limit = _igvn.intcon(limit->get_int() - stride_con);
        set_ctrl(new_limit, C->root());
      } else {
        // Limit is not constant.
        if (loop_head->unrolled_count() == 1) { // only for first unroll
          // Separate limit by Opaque node in case it is an incremented
          // variable from previous loop to avoid using pre-incremented
          // value which could increase register pressure.
          // Otherwise reorg_offsets() optimization will create a separate
          // Opaque node for each use of trip-counter and as result
          // zero trip guard limit will be different from loop limit.
          assert(has_ctrl(opaq), "should have it");
          Node* opaq_ctrl = get_ctrl(opaq);
          limit = new (C) Opaque2Node( C, limit );
          register_new_node( limit, opaq_ctrl );
        }
        if (stride_con > 0 && (java_subtract(limit_type->_lo, stride_con) < limit_type->_lo) ||
            stride_con < 0 && (java_subtract(limit_type->_hi, stride_con) > limit_type->_hi)) {
          // No underflow.
          new_limit = new (C) SubINode(limit, stride);
        } else {
          // (limit - stride) may underflow.
          // Clamp the adjustment value with MININT or MAXINT:
          //
          //   new_limit = limit-stride
          //   if (stride > 0)
          //     new_limit = (limit < new_limit) ? MININT : new_limit;
          //   else
          //     new_limit = (limit > new_limit) ? MAXINT : new_limit;
          //
          BoolTest::mask bt = loop_end->test_trip();
          assert(bt == BoolTest::lt || bt == BoolTest::gt, "canonical test is expected");
          Node* adj_max = _igvn.intcon((stride_con > 0) ? min_jint : max_jint);
          set_ctrl(adj_max, C->root());
          Node* old_limit = NULL;
          Node* adj_limit = NULL;
          Node* bol = limit->is_CMove() ? limit->in(CMoveNode::Condition) : NULL;
          if (loop_head->unrolled_count() > 1 &&
              limit->is_CMove() && limit->Opcode() == Op_CMoveI &&
              limit->in(CMoveNode::IfTrue) == adj_max &&
              bol->as_Bool()->_test._test == bt &&
              bol->in(1)->Opcode() == Op_CmpI &&
              bol->in(1)->in(2) == limit->in(CMoveNode::IfFalse)) {
            // Loop was unrolled before.
            // Optimize the limit to avoid nested CMove:
            // use original limit as old limit.
            old_limit = bol->in(1)->in(1);
            // Adjust previous adjusted limit.
            adj_limit = limit->in(CMoveNode::IfFalse);
            adj_limit = new (C) SubINode(adj_limit, stride);
          } else {
            old_limit = limit;
            adj_limit = new (C) SubINode(limit, stride);
          }
          assert(old_limit != NULL && adj_limit != NULL, "");
          register_new_node( adj_limit, ctrl ); // adjust amount
          Node* adj_cmp = new (C) CmpINode(old_limit, adj_limit);
          register_new_node( adj_cmp, ctrl );
          Node* adj_bool = new (C) BoolNode(adj_cmp, bt);
          register_new_node( adj_bool, ctrl );
          new_limit = new (C) CMoveINode(adj_bool, adj_limit, adj_max, TypeInt::INT);
        }
        register_new_node(new_limit, ctrl);
      }
      assert(new_limit != NULL, "");
      // Replace in loop test.
      assert(loop_end->in(1)->in(1) == cmp, "sanity");
      if (cmp->outcnt() == 1 && loop_end->in(1)->outcnt() == 1) {
        // Don't need to create new test since only one user.
        _igvn.hash_delete(cmp);
        cmp->set_req(2, new_limit);
      } else {
        // Create new test since it is shared.
        Node* ctrl2 = loop_end->in(0);
        Node* cmp2  = cmp->clone();
        cmp2->set_req(2, new_limit);
        register_new_node(cmp2, ctrl2);
        Node* bol2 = loop_end->in(1)->clone();
        bol2->set_req(1, cmp2);
        register_new_node(bol2, ctrl2);
        _igvn.hash_delete(loop_end);
        loop_end->set_req(1, bol2);
      }
      // Step 3: Find the min-trip test guaranteed before a 'main' loop.
      // Make it a 1-trip test (means at least 2 trips).

      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert(opaq->outcnt() == 1, "");
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(old_trip_count / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

  } else { // LoopLimitCheck

    // Adjust max trip count. The trip count is intentionally rounded
    // down here (e.g. 15-> 7-> 3-> 1) because if we unwittingly over-unroll,
    // the main, unrolled, part of the loop will never execute as it is protected
    // by the min-trip test.  See bug 4834191 for a case where we over-unrolled
    // and later determined that part of the unrolled loop was dead.
    loop_head->set_trip_count(loop_head->trip_count() / 2);

    // Double the count of original iterations in the unrolled loop body.
    loop_head->double_unrolled_count();

    // -----------
    // Step 2: Cut back the trip counter for an unroll amount of 2.
    // Loop will normally trip (limit - init)/stride_con.  Since it's a
    // CountedLoop this is exact (stride divides limit-init exactly).
    // We are going to double the loop body, so we want to knock off any
    // odd iteration: (trip_cnt & ~1).  Then back compute a new limit.
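    // Illustrative example (added, not in the original comment): with init == 0,
    // limit == 7 and stride == 1: span == 7, trip == 7, rond == (7 & -2) == 6,
    // spn2 == 6 and new_limit == 0 + 6 == 6, knocking off the odd iteration.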
    Node *span = new (C) SubINode( limit, init );
    register_new_node( span, ctrl );
    Node *trip = new (C) DivINode( 0, span, stride );
    register_new_node( trip, ctrl );
    Node *mtwo = _igvn.intcon(-2);
    set_ctrl(mtwo, C->root());
    Node *rond = new (C) AndINode( trip, mtwo );
    register_new_node( rond, ctrl );
    Node *spn2 = new (C) MulINode( rond, stride );
    register_new_node( spn2, ctrl );
    new_limit = new (C) AddINode( spn2, init );
    register_new_node( new_limit, ctrl );

    // Hammer in the new limit
    Node *ctrl2 = loop_end->in(0);
    Node *cmp2 = new (C) CmpINode( loop_head->incr(), new_limit );
    register_new_node( cmp2, ctrl2 );
    Node *bol2 = new (C) BoolNode( cmp2, loop_end->test_trip() );
    register_new_node( bol2, ctrl2 );
    _igvn.hash_delete(loop_end);
    loop_end->set_req(CountedLoopEndNode::TestValue, bol2);

    // Step 3: Find the min-trip test guaranteed before a 'main' loop.
    // Make it a 1-trip test (means at least 2 trips).
    if( adjust_min_trip ) {
      assert( new_limit != NULL, "" );
      // Guard test uses an 'opaque' node which is not shared.  Hence I
      // can edit its inputs directly.  Hammer in the new limit for the
      // minimum-trip guard.
      assert( opaq->outcnt() == 1, "" );
      _igvn.hash_delete(opaq);
      opaq->set_req(1, new_limit);
    }
  } // LoopLimitCheck

  // ---------
  // Step 4: Clone the loop body.  Move it inside the loop.  This loop body
  // represents the odd iterations; since the loop trips an even number of
  // times its backedge is never taken.  Kill the backedge.
  uint dd = dom_depth(loop_head);
  clone_loop( loop, old_new, dd );

  // Make backedges of the clone equal to backedges of the original.
  // Make the fall-in from the original come from the fall-out of the clone.
1463 for (DUIterator_Fast jmax, j = loop_head->fast_outs(jmax); j < jmax; j++) { 1464 Node* phi = loop_head->fast_out(j); 1465 if( phi->is_Phi() && phi->in(0) == loop_head && phi->outcnt() > 0 ) { 1466 Node *newphi = old_new[phi->_idx]; 1467 _igvn.hash_delete( phi ); 1468 _igvn.hash_delete( newphi ); 1469 1470 phi ->set_req(LoopNode:: EntryControl, newphi->in(LoopNode::LoopBackControl)); 1471 newphi->set_req(LoopNode::LoopBackControl, phi ->in(LoopNode::LoopBackControl)); 1472 phi ->set_req(LoopNode::LoopBackControl, C->top()); 1473 } 1474 } 1475 Node *clone_head = old_new[loop_head->_idx]; 1476 _igvn.hash_delete( clone_head ); 1477 loop_head ->set_req(LoopNode:: EntryControl, clone_head->in(LoopNode::LoopBackControl)); 1478 clone_head->set_req(LoopNode::LoopBackControl, loop_head ->in(LoopNode::LoopBackControl)); 1479 loop_head ->set_req(LoopNode::LoopBackControl, C->top()); 1480 loop->_head = clone_head; // New loop header 1481 1482 set_idom(loop_head, loop_head ->in(LoopNode::EntryControl), dd); 1483 set_idom(clone_head, clone_head->in(LoopNode::EntryControl), dd); 1484 1485 // Kill the clone's backedge 1486 Node *newcle = old_new[loop_end->_idx]; 1487 _igvn.hash_delete( newcle ); 1488 Node *one = _igvn.intcon(1); 1489 set_ctrl(one, C->root()); 1490 newcle->set_req(1, one); 1491 // Force clone into same loop body 1492 uint max = loop->_body.size(); 1493 for( uint k = 0; k < max; k++ ) { 1494 Node *old = loop->_body.at(k); 1495 Node *nnn = old_new[old->_idx]; 1496 loop->_body.push(nnn); 1497 if (!has_ctrl(old)) 1498 set_loop(nnn, loop); 1499 } 1500 1501 loop->record_for_igvn(); 1502 } 1503 1504 //------------------------------do_maximally_unroll---------------------------- 1505 1506 void PhaseIdealLoop::do_maximally_unroll( IdealLoopTree *loop, Node_List &old_new ) { 1507 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1508 assert(cl->has_exact_trip_count(), "trip count is not exact"); 1509 assert(cl->trip_count() > 0, ""); 1510 #ifndef PRODUCT 1511 if (TraceLoopOpts) { 1512 tty->print("MaxUnroll %d ", cl->trip_count()); 1513 loop->dump_head(); 1514 } 1515 #endif 1516 1517 // If loop is tripping an odd number of times, peel odd iteration 1518 if ((cl->trip_count() & 1) == 1) { 1519 do_peeling(loop, old_new); 1520 } 1521 1522 // Now its tripping an even number of times remaining. Double loop body. 1523 // Do not adjust pre-guards; they are not needed and do not exist. 1524 if (cl->trip_count() > 0) { 1525 assert((cl->trip_count() & 1) == 0, "missed peeling"); 1526 do_unroll(loop, old_new, false); 1527 } 1528 } 1529 1530 //------------------------------dominates_backedge--------------------------------- 1531 // Returns true if ctrl is executed on every complete iteration 1532 bool IdealLoopTree::dominates_backedge(Node* ctrl) { 1533 assert(ctrl->is_CFG(), "must be control"); 1534 Node* backedge = _head->as_Loop()->in(LoopNode::LoopBackControl); 1535 return _phase->dom_lca_internal(ctrl, backedge) == ctrl; 1536 } 1537 1538 //------------------------------adjust_limit----------------------------------- 1539 // Helper function for add_constraint(). 
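// Worked example (illustrative only): with stride_con > 0, scale = 2, offset = 4,
// rc_limit = 100 and an incoming loop_limit of 60, X = (100 - 4) / 2 = 48 and the
// returned limit is MIN(60, 48) = 48, so no iteration kept in the main loop can
// violate the range check that produced rc_limit.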
1540 Node* PhaseIdealLoop::adjust_limit(int stride_con, Node * scale, Node *offset, Node *rc_limit, Node *loop_limit, Node *pre_ctrl, bool round_up) { 1541 // Compute "I :: (limit-offset)/scale" 1542 Node *con = new (C) SubINode(rc_limit, offset); 1543 register_new_node(con, pre_ctrl); 1544 Node *X = new (C) DivINode(0, con, scale); 1545 register_new_node(X, pre_ctrl); 1546 1547 // When the absolute value of scale is greater than one, the integer 1548 // division may round limit down so add one to the limit. 1549 if (round_up) { 1550 X = new (C) AddINode(X, _igvn.intcon(1)); 1551 register_new_node(X, pre_ctrl); 1552 } 1553 1554 // Adjust loop limit 1555 loop_limit = (stride_con > 0) 1556 ? (Node*)(new (C) MinINode(loop_limit, X)) 1557 : (Node*)(new (C) MaxINode(loop_limit, X)); 1558 register_new_node(loop_limit, pre_ctrl); 1559 return loop_limit; 1560 } 1561 1562 //------------------------------add_constraint--------------------------------- 1563 // Constrain the main loop iterations so the conditions: 1564 // low_limit <= scale_con * I + offset < upper_limit 1565 // always holds true. That is, either increase the number of iterations in 1566 // the pre-loop or the post-loop until the condition holds true in the main 1567 // loop. Stride, scale, offset and limit are all loop invariant. Further, 1568 // stride and scale are constants (offset and limit often are). 1569 void PhaseIdealLoop::add_constraint( int stride_con, int scale_con, Node *offset, Node *low_limit, Node *upper_limit, Node *pre_ctrl, Node **pre_limit, Node **main_limit ) { 1570 // For positive stride, the pre-loop limit always uses a MAX function 1571 // and the main loop a MIN function. For negative stride these are 1572 // reversed. 1573 1574 // Also for positive stride*scale the affine function is increasing, so the 1575 // pre-loop must check for underflow and the post-loop for overflow. 1576 // Negative stride*scale reverses this; pre-loop checks for overflow and 1577 // post-loop for underflow. 1578 1579 Node *scale = _igvn.intcon(scale_con); 1580 set_ctrl(scale, C->root()); 1581 1582 if ((stride_con^scale_con) >= 0) { // Use XOR to avoid overflow 1583 // The overflow limit: scale*I+offset < upper_limit 1584 // For main-loop compute 1585 // ( if (scale > 0) /* and stride > 0 */ 1586 // I < (upper_limit-offset)/scale 1587 // else /* scale < 0 and stride < 0 */ 1588 // I > (upper_limit-offset)/scale 1589 // ) 1590 // 1591 // (upper_limit-offset) may overflow or underflow. 1592 // But it is fine since main loop will either have 1593 // less iterations or will be skipped in such case. 1594 *main_limit = adjust_limit(stride_con, scale, offset, upper_limit, *main_limit, pre_ctrl, false); 1595 1596 // The underflow limit: low_limit <= scale*I+offset. 1597 // For pre-loop compute 1598 // NOT(scale*I+offset >= low_limit) 1599 // scale*I+offset < low_limit 1600 // ( if (scale > 0) /* and stride > 0 */ 1601 // I < (low_limit-offset)/scale 1602 // else /* scale < 0 and stride < 0 */ 1603 // I > (low_limit-offset)/scale 1604 // ) 1605 1606 if (low_limit->get_int() == -max_jint) { 1607 if (!RangeLimitCheck) return; 1608 // We need this guard when scale*pre_limit+offset >= limit 1609 // due to underflow. So we need execute pre-loop until 1610 // scale*I+offset >= min_int. But (min_int-offset) will 1611 // underflow when offset > 0 and X will be > original_limit 1612 // when stride > 0. To avoid it we replace positive offset with 0. 
1613 // 1614 // Also (min_int+1 == -max_int) is used instead of min_int here 1615 // to avoid problem with scale == -1 (min_int/(-1) == min_int). 1616 Node* shift = _igvn.intcon(31); 1617 set_ctrl(shift, C->root()); 1618 Node* sign = new (C) RShiftINode(offset, shift); 1619 register_new_node(sign, pre_ctrl); 1620 offset = new (C) AndINode(offset, sign); 1621 register_new_node(offset, pre_ctrl); 1622 } else { 1623 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1624 // The only problem we have here when offset == min_int 1625 // since (0-min_int) == min_int. It may be fine for stride > 0 1626 // but for stride < 0 X will be < original_limit. To avoid it 1627 // max(pre_limit, original_limit) is used in do_range_check(). 1628 } 1629 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); 1630 *pre_limit = adjust_limit((-stride_con), scale, offset, low_limit, *pre_limit, pre_ctrl, 1631 scale_con > 1 && stride_con > 0); 1632 1633 } else { // stride_con*scale_con < 0 1634 // For negative stride*scale pre-loop checks for overflow and 1635 // post-loop for underflow. 1636 // 1637 // The overflow limit: scale*I+offset < upper_limit 1638 // For pre-loop compute 1639 // NOT(scale*I+offset < upper_limit) 1640 // scale*I+offset >= upper_limit 1641 // scale*I+offset+1 > upper_limit 1642 // ( if (scale < 0) /* and stride > 0 */ 1643 // I < (upper_limit-(offset+1))/scale 1644 // else /* scale > 0 and stride < 0 */ 1645 // I > (upper_limit-(offset+1))/scale 1646 // ) 1647 // 1648 // (upper_limit-offset-1) may underflow or overflow. 1649 // To avoid it min(pre_limit, original_limit) is used 1650 // in do_range_check() for stride > 0 and max() for < 0. 1651 Node *one = _igvn.intcon(1); 1652 set_ctrl(one, C->root()); 1653 1654 Node *plus_one = new (C) AddINode(offset, one); 1655 register_new_node( plus_one, pre_ctrl ); 1656 // Pass (-stride) to indicate pre_loop_cond = NOT(main_loop_cond); 1657 *pre_limit = adjust_limit((-stride_con), scale, plus_one, upper_limit, *pre_limit, pre_ctrl, 1658 scale_con < -1 && stride_con > 0); 1659 1660 if (low_limit->get_int() == -max_jint) { 1661 if (!RangeLimitCheck) return; 1662 // We need this guard when scale*main_limit+offset >= limit 1663 // due to underflow. So we need execute main-loop while 1664 // scale*I+offset+1 > min_int. But (min_int-offset-1) will 1665 // underflow when (offset+1) > 0 and X will be < main_limit 1666 // when scale < 0 (and stride > 0). To avoid it we replace 1667 // positive (offset+1) with 0. 1668 // 1669 // Also (min_int+1 == -max_int) is used instead of min_int here 1670 // to avoid problem with scale == -1 (min_int/(-1) == min_int). 1671 Node* shift = _igvn.intcon(31); 1672 set_ctrl(shift, C->root()); 1673 Node* sign = new (C) RShiftINode(plus_one, shift); 1674 register_new_node(sign, pre_ctrl); 1675 plus_one = new (C) AndINode(plus_one, sign); 1676 register_new_node(plus_one, pre_ctrl); 1677 } else { 1678 assert(low_limit->get_int() == 0, "wrong low limit for range check"); 1679 // The only problem we have here when offset == max_int 1680 // since (max_int+1) == min_int and (0-min_int) == min_int. 1681 // But it is fine since main loop will either have 1682 // less iterations or will be skipped in such case. 1683 } 1684 // The underflow limit: low_limit <= scale*I+offset. 
1685 // For main-loop compute 1686 // scale*I+offset+1 > low_limit 1687 // ( if (scale < 0) /* and stride > 0 */ 1688 // I < (low_limit-(offset+1))/scale 1689 // else /* scale > 0 and stride < 0 */ 1690 // I > (low_limit-(offset+1))/scale 1691 // ) 1692 1693 *main_limit = adjust_limit(stride_con, scale, plus_one, low_limit, *main_limit, pre_ctrl, 1694 false); 1695 } 1696 } 1697 1698 1699 //------------------------------is_scaled_iv--------------------------------- 1700 // Return true if exp is a constant times an induction var 1701 bool PhaseIdealLoop::is_scaled_iv(Node* exp, Node* iv, int* p_scale) { 1702 if (exp == iv) { 1703 if (p_scale != NULL) { 1704 *p_scale = 1; 1705 } 1706 return true; 1707 } 1708 int opc = exp->Opcode(); 1709 if (opc == Op_MulI) { 1710 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1711 if (p_scale != NULL) { 1712 *p_scale = exp->in(2)->get_int(); 1713 } 1714 return true; 1715 } 1716 if (exp->in(2) == iv && exp->in(1)->is_Con()) { 1717 if (p_scale != NULL) { 1718 *p_scale = exp->in(1)->get_int(); 1719 } 1720 return true; 1721 } 1722 } else if (opc == Op_LShiftI) { 1723 if (exp->in(1) == iv && exp->in(2)->is_Con()) { 1724 if (p_scale != NULL) { 1725 *p_scale = 1 << exp->in(2)->get_int(); 1726 } 1727 return true; 1728 } 1729 } 1730 return false; 1731 } 1732 1733 //-----------------------------is_scaled_iv_plus_offset------------------------------ 1734 // Return true if exp is a simple induction variable expression: k1*iv + (invar + k2) 1735 bool PhaseIdealLoop::is_scaled_iv_plus_offset(Node* exp, Node* iv, int* p_scale, Node** p_offset, int depth) { 1736 if (is_scaled_iv(exp, iv, p_scale)) { 1737 if (p_offset != NULL) { 1738 Node *zero = _igvn.intcon(0); 1739 set_ctrl(zero, C->root()); 1740 *p_offset = zero; 1741 } 1742 return true; 1743 } 1744 int opc = exp->Opcode(); 1745 if (opc == Op_AddI) { 1746 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1747 if (p_offset != NULL) { 1748 *p_offset = exp->in(2); 1749 } 1750 return true; 1751 } 1752 if (is_scaled_iv(exp->in(2), iv, p_scale)) { 1753 if (p_offset != NULL) { 1754 *p_offset = exp->in(1); 1755 } 1756 return true; 1757 } 1758 if (exp->in(2)->is_Con()) { 1759 Node* offset2 = NULL; 1760 if (depth < 2 && 1761 is_scaled_iv_plus_offset(exp->in(1), iv, p_scale, 1762 p_offset != NULL ? &offset2 : NULL, depth+1)) { 1763 if (p_offset != NULL) { 1764 Node *ctrl_off2 = get_ctrl(offset2); 1765 Node* offset = new (C) AddINode(offset2, exp->in(2)); 1766 register_new_node(offset, ctrl_off2); 1767 *p_offset = offset; 1768 } 1769 return true; 1770 } 1771 } 1772 } else if (opc == Op_SubI) { 1773 if (is_scaled_iv(exp->in(1), iv, p_scale)) { 1774 if (p_offset != NULL) { 1775 Node *zero = _igvn.intcon(0); 1776 set_ctrl(zero, C->root()); 1777 Node *ctrl_off = get_ctrl(exp->in(2)); 1778 Node* offset = new (C) SubINode(zero, exp->in(2)); 1779 register_new_node(offset, ctrl_off); 1780 *p_offset = offset; 1781 } 1782 return true; 1783 } 1784 if (is_scaled_iv(exp->in(2), iv, p_scale)) { 1785 if (p_offset != NULL) { 1786 *p_scale *= -1; 1787 *p_offset = exp->in(1); 1788 } 1789 return true; 1790 } 1791 } 1792 return false; 1793 } 1794 1795 //------------------------------do_range_check--------------------------------- 1796 // Eliminate range-checks and other trip-counter vs loop-invariant tests. 
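// For a loop like (illustrative only)
//   for (int i = init; i < limit; i += stride) { ... a[scale*i + offset] ... }
// each array access carries a check 0 <= scale*i + offset < a.length. The code
// below trims the main-loop limit (and grows the pre-loop limit) so that every
// main-loop iteration provably passes such checks, then kills the in-loop tests;
// the pre- and post-loops keep their checks and handle the boundary iterations.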
1797 void PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) { 1798 #ifndef PRODUCT 1799 if (PrintOpto && VerifyLoopOptimizations) { 1800 tty->print("Range Check Elimination "); 1801 loop->dump_head(); 1802 } else if (TraceLoopOpts) { 1803 tty->print("RangeCheck "); 1804 loop->dump_head(); 1805 } 1806 #endif 1807 assert(RangeCheckElimination, ""); 1808 CountedLoopNode *cl = loop->_head->as_CountedLoop(); 1809 assert(cl->is_main_loop(), ""); 1810 1811 // protect against stride not being a constant 1812 if (!cl->stride_is_con()) 1813 return; 1814 1815 // Find the trip counter; we are iteration splitting based on it 1816 Node *trip_counter = cl->phi(); 1817 // Find the main loop limit; we will trim its iterations 1818 // so they never trip the end tests 1819 Node *main_limit = cl->limit(); 1820 1821 // Need to find the main-loop zero-trip guard 1822 Node *ctrl = cl->in(LoopNode::EntryControl); 1823 assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, ""); 1824 Node *iffm = ctrl->in(0); 1825 assert(iffm->Opcode() == Op_If, ""); 1826 Node *bolzm = iffm->in(1); 1827 assert(bolzm->Opcode() == Op_Bool, ""); 1828 Node *cmpzm = bolzm->in(1); 1829 assert(cmpzm->is_Cmp(), ""); 1830 Node *opqzm = cmpzm->in(2); 1831 // Cannot optimize a loop if the zero-trip Opaque1 node is optimized 1832 // away and then another round of loop opts attempted. 1833 if (opqzm->Opcode() != Op_Opaque1) 1834 return; 1835 assert(opqzm->in(1) == main_limit, "do not understand situation"); 1836 1837 // Find the pre-loop limit; we will expand its iterations so 1838 // they never trip the low tests. 1839 Node *p_f = iffm->in(0); 1840 // pre loop may have been optimized out 1841 if (p_f->Opcode() != Op_IfFalse) { 1842 return; 1843 } 1844 CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd(); 1845 assert(pre_end->loopnode()->is_pre_loop(), ""); 1846 Node *pre_opaq1 = pre_end->limit(); 1847 // Occasionally it's possible for a pre-loop Opaque1 node to be 1848 // optimized away and then another round of loop opts attempted. 1849 // We cannot optimize this particular loop in that case. 1850 if (pre_opaq1->Opcode() != Op_Opaque1) 1851 return; 1852 Opaque1Node *pre_opaq = (Opaque1Node*)pre_opaq1; 1853 Node *pre_limit = pre_opaq->in(1); 1854 1855 // Where do we put new limit calculations 1856 Node *pre_ctrl = pre_end->loopnode()->in(LoopNode::EntryControl); 1857 1858 // Ensure the original loop limit is available from the 1859 // pre-loop Opaque1 node. 1860 Node *orig_limit = pre_opaq->original_loop_limit(); 1861 if (orig_limit == NULL || _igvn.type(orig_limit) == Type::TOP) 1862 return; 1863 1864 // Must know if it's a count-up or count-down loop 1865 1866 int stride_con = cl->stride_con(); 1867 Node *zero = _igvn.intcon(0); 1868 Node *one = _igvn.intcon(1); 1869 // Use symmetrical int range [-max_jint,max_jint] 1870 Node *mini = _igvn.intcon(-max_jint); 1871 set_ctrl(zero, C->root()); 1872 set_ctrl(one, C->root()); 1873 set_ctrl(mini, C->root()); 1874 1875 // Range checks that do not dominate the loop backedge (i.e. 1876 // conditionally executed) can lengthen the pre-loop limit beyond 1877 // the original loop limit. To prevent this, the pre limit is 1878 // (for stride > 0) MINed with the original loop limit (MAXed 1879 // for stride < 0) when some range_check (rc) is conditionally 1880 // executed. 1881 bool conditional_rc = false; 1882 1883 // Check loop body for tests of trip-counter plus loop-invariant vs 1884 // loop-invariant.
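// A candidate test (illustrative shape) looks like
//   if (scale_con*i + offset <u limit)    // Bool/CmpU feeding a loop-exiting If
// with 'offset' and 'limit' loop invariant and 'i' the trip counter; the scan
// below also accepts signed lt/le/ge/gt compares and commuted operand orders.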
1885 for( uint i = 0; i < loop->_body.size(); i++ ) { 1886 Node *iff = loop->_body[i]; 1887 if( iff->Opcode() == Op_If ) { // Test? 1888 1889 // Test is an IfNode, has 2 projections. If BOTH are in the loop 1890 // we need loop unswitching instead of iteration splitting. 1891 Node *exit = loop->is_loop_exit(iff); 1892 if( !exit ) continue; 1893 int flip = (exit->Opcode() == Op_IfTrue) ? 1 : 0; 1894 1895 // Get boolean condition to test 1896 Node *i1 = iff->in(1); 1897 if( !i1->is_Bool() ) continue; 1898 BoolNode *bol = i1->as_Bool(); 1899 BoolTest b_test = bol->_test; 1900 // Flip sense of test if exit condition is flipped 1901 if( flip ) 1902 b_test = b_test.negate(); 1903 1904 // Get compare 1905 Node *cmp = bol->in(1); 1906 1907 // Look for trip_counter + offset vs limit 1908 Node *rc_exp = cmp->in(1); 1909 Node *limit = cmp->in(2); 1910 jint scale_con= 1; // Assume trip counter not scaled 1911 1912 Node *limit_c = get_ctrl(limit); 1913 if( loop->is_member(get_loop(limit_c) ) ) { 1914 // Compare might have operands swapped; commute them 1915 b_test = b_test.commute(); 1916 rc_exp = cmp->in(2); 1917 limit = cmp->in(1); 1918 limit_c = get_ctrl(limit); 1919 if( loop->is_member(get_loop(limit_c) ) ) 1920 continue; // Both inputs are loop varying; cannot RCE 1921 } 1922 // Here we know 'limit' is loop invariant 1923 1924 // 'limit' maybe pinned below the zero trip test (probably from a 1925 // previous round of rce), in which case, it can't be used in the 1926 // zero trip test expression which must occur before the zero test's if. 1927 if( limit_c == ctrl ) { 1928 continue; // Don't rce this check but continue looking for other candidates. 1929 } 1930 1931 // Check for scaled induction variable plus an offset 1932 Node *offset = NULL; 1933 1934 if (!is_scaled_iv_plus_offset(rc_exp, trip_counter, &scale_con, &offset)) { 1935 continue; 1936 } 1937 1938 Node *offset_c = get_ctrl(offset); 1939 if( loop->is_member( get_loop(offset_c) ) ) 1940 continue; // Offset is not really loop invariant 1941 // Here we know 'offset' is loop invariant. 1942 1943 // As above for the 'limit', the 'offset' maybe pinned below the 1944 // zero trip test. 1945 if( offset_c == ctrl ) { 1946 continue; // Don't rce this check but continue looking for other candidates. 1947 } 1948 #ifdef ASSERT 1949 if (TraceRangeLimitCheck) { 1950 tty->print_cr("RC bool node%s", flip ? " flipped:" : ":"); 1951 bol->dump(2); 1952 } 1953 #endif 1954 // At this point we have the expression as: 1955 // scale_con * trip_counter + offset :: limit 1956 // where scale_con, offset and limit are loop invariant. Trip_counter 1957 // monotonically increases by stride_con, a constant. Both (or either) 1958 // stride_con and scale_con can be negative which will flip about the 1959 // sense of the test. 1960 1961 // Adjust pre and main loop limits to guard the correct iteration set 1962 if( cmp->Opcode() == Op_CmpU ) {// Unsigned compare is really 2 tests 1963 if( b_test._test == BoolTest::lt ) { // Range checks always use lt 1964 // The underflow and overflow limits: 0 <= scale*I+offset < limit 1965 add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit ); 1966 if (!conditional_rc) { 1967 // (0-offset)/scale could be outside of loop iterations range. 
1968 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 1969 } 1970 } else { 1971 #ifndef PRODUCT 1972 if( PrintOpto ) 1973 tty->print_cr("missed RCE opportunity"); 1974 #endif 1975 continue; // In release mode, ignore it 1976 } 1977 } else { // Otherwise work on normal compares 1978 switch( b_test._test ) { 1979 case BoolTest::gt: 1980 // Fall into GE case 1981 case BoolTest::ge: 1982 // Convert (I*scale+offset) >= Limit to (I*(-scale)+(-offset)) <= -Limit 1983 scale_con = -scale_con; 1984 offset = new (C) SubINode( zero, offset ); 1985 register_new_node( offset, pre_ctrl ); 1986 limit = new (C) SubINode( zero, limit ); 1987 register_new_node( limit, pre_ctrl ); 1988 // Fall into LE case 1989 case BoolTest::le: 1990 if (b_test._test != BoolTest::gt) { 1991 // Convert X <= Y to X < Y+1 1992 limit = new (C) AddINode( limit, one ); 1993 register_new_node( limit, pre_ctrl ); 1994 } 1995 // Fall into LT case 1996 case BoolTest::lt: 1997 // The underflow and overflow limits: MIN_INT <= scale*I+offset < limit 1998 // Note: (MIN_INT+1 == -MAX_INT) is used instead of MIN_INT here 1999 // to avoid problem with scale == -1: MIN_INT/(-1) == MIN_INT. 2000 add_constraint( stride_con, scale_con, offset, mini, limit, pre_ctrl, &pre_limit, &main_limit ); 2001 if (!conditional_rc) { 2002 // ((MIN_INT+1)-offset)/scale could be outside of loop iterations range. 2003 // Note: negative offset is replaced with 0 but (MIN_INT+1)/scale could 2004 // still be outside of loop range. 2005 conditional_rc = !loop->dominates_backedge(iff) || RangeLimitCheck; 2006 } 2007 break; 2008 default: 2009 #ifndef PRODUCT 2010 if( PrintOpto ) 2011 tty->print_cr("missed RCE opportunity"); 2012 #endif 2013 continue; // Unhandled case 2014 } 2015 } 2016 2017 // Kill the eliminated test 2018 C->set_major_progress(); 2019 Node *kill_con = _igvn.intcon( 1-flip ); 2020 set_ctrl(kill_con, C->root()); 2021 _igvn.replace_input_of(iff, 1, kill_con); 2022 // Find surviving projection 2023 assert(iff->is_If(), ""); 2024 ProjNode* dp = ((IfNode*)iff)->proj_out(1-flip); 2025 // Find loads off the surviving projection; remove their control edge 2026 for (DUIterator_Fast imax, i = dp->fast_outs(imax); i < imax; i++) { 2027 Node* cd = dp->fast_out(i); // Control-dependent node 2028 if (cd->is_Load() && cd->depends_only_on_test()) { // Loads can now float around in the loop 2029 // Allow the load to float around in the loop, or before it 2030 // but NOT before the pre-loop. 2031 _igvn.replace_input_of(cd, 0, ctrl); // ctrl, not NULL 2032 --i; 2033 --imax; 2034 } 2035 } 2036 2037 } // End of is IF 2038 2039 } 2040 2041 // Update loop limits 2042 if (conditional_rc) { 2043 pre_limit = (stride_con > 0) ? (Node*)new (C) MinINode(pre_limit, orig_limit) 2044 : (Node*)new (C) MaxINode(pre_limit, orig_limit); 2045 register_new_node(pre_limit, pre_ctrl); 2046 } 2047 _igvn.hash_delete(pre_opaq); 2048 pre_opaq->set_req(1, pre_limit); 2049 2050 // Note:: we are making the main loop limit no longer precise; 2051 // need to round up based on stride. 2052 cl->set_nonexact_trip_count(); 2053 if (!LoopLimitCheck && stride_con != 1 && stride_con != -1) { // Cutout for common case 2054 // "Standard" round-up logic: ([main_limit-init+(y-1)]/y)*y+init 2055 // Hopefully, compiler will optimize for powers of 2. 
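// Worked example (illustrative): init = 0, main_limit = 10, stride_con = 4 gives
// span = 10, rndup = 3, add = 13, div = 3, mul = 12, newlim = 12, i.e. the limit
// is rounded up to the next stride multiple above init.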
2056 Node *ctrl = get_ctrl(main_limit); 2057 Node *stride = cl->stride(); 2058 Node *init = cl->init_trip(); 2059 Node *span = new (C) SubINode(main_limit,init); 2060 register_new_node(span,ctrl); 2061 Node *rndup = _igvn.intcon(stride_con + ((stride_con>0)?-1:1)); 2062 Node *add = new (C) AddINode(span,rndup); 2063 register_new_node(add,ctrl); 2064 Node *div = new (C) DivINode(0,add,stride); 2065 register_new_node(div,ctrl); 2066 Node *mul = new (C) MulINode(div,stride); 2067 register_new_node(mul,ctrl); 2068 Node *newlim = new (C) AddINode(mul,init); 2069 register_new_node(newlim,ctrl); 2070 main_limit = newlim; 2071 } 2072 2073 Node *main_cle = cl->loopexit(); 2074 Node *main_bol = main_cle->in(1); 2075 // Hacking loop bounds; need private copies of exit test 2076 if( main_bol->outcnt() > 1 ) {// BoolNode shared? 2077 _igvn.hash_delete(main_cle); 2078 main_bol = main_bol->clone();// Clone a private BoolNode 2079 register_new_node( main_bol, main_cle->in(0) ); 2080 main_cle->set_req(1,main_bol); 2081 } 2082 Node *main_cmp = main_bol->in(1); 2083 if( main_cmp->outcnt() > 1 ) { // CmpNode shared? 2084 _igvn.hash_delete(main_bol); 2085 main_cmp = main_cmp->clone();// Clone a private CmpNode 2086 register_new_node( main_cmp, main_cle->in(0) ); 2087 main_bol->set_req(1,main_cmp); 2088 } 2089 // Hack the now-private loop bounds 2090 _igvn.replace_input_of(main_cmp, 2, main_limit); 2091 // The OpaqueNode is unshared by design 2092 assert( opqzm->outcnt() == 1, "cannot hack shared node" ); 2093 _igvn.replace_input_of(opqzm, 1, main_limit); 2094 } 2095 2096 //------------------------------DCE_loop_body---------------------------------- 2097 // Remove simplistic dead code from loop body 2098 void IdealLoopTree::DCE_loop_body() { 2099 for( uint i = 0; i < _body.size(); i++ ) 2100 if( _body.at(i)->outcnt() == 0 ) 2101 _body.map( i--, _body.pop() ); 2102 } 2103 2104 2105 //------------------------------adjust_loop_exit_prob-------------------------- 2106 // Look for loop-exit tests with the 50/50 (or worse) guesses from the parsing stage. 2107 // Replace with a 1-in-10 exit guess. 2108 void IdealLoopTree::adjust_loop_exit_prob( PhaseIdealLoop *phase ) { 2109 Node *test = tail(); 2110 while( test != _head ) { 2111 uint top = test->Opcode(); 2112 if( top == Op_IfTrue || top == Op_IfFalse ) { 2113 int test_con = ((ProjNode*)test)->_con; 2114 assert(top == (uint)(test_con? 
Op_IfTrue: Op_IfFalse), "sanity"); 2115 IfNode *iff = test->in(0)->as_If(); 2116 if( iff->outcnt() == 2 ) { // Ignore dead tests 2117 Node *bol = iff->in(1); 2118 if( bol && bol->req() > 1 && bol->in(1) && 2119 ((bol->in(1)->Opcode() == Op_StorePConditional ) || 2120 (bol->in(1)->Opcode() == Op_StoreIConditional ) || 2121 (bol->in(1)->Opcode() == Op_StoreLConditional ) || 2122 (bol->in(1)->Opcode() == Op_CompareAndSwapI ) || 2123 (bol->in(1)->Opcode() == Op_CompareAndSwapL ) || 2124 (bol->in(1)->Opcode() == Op_CompareAndSwapP ) || 2125 (bol->in(1)->Opcode() == Op_CompareAndSwapN ))) 2126 return; // Allocation loops RARELY take backedge 2127 // Find the OTHER exit path from the IF 2128 Node* ex = iff->proj_out(1-test_con); 2129 float p = iff->_prob; 2130 if( !phase->is_member( this, ex ) && iff->_fcnt == COUNT_UNKNOWN ) { 2131 if( top == Op_IfTrue ) { 2132 if( p < (PROB_FAIR + PROB_UNLIKELY_MAG(3))) { 2133 iff->_prob = PROB_STATIC_FREQUENT; 2134 } 2135 } else { 2136 if( p > (PROB_FAIR - PROB_UNLIKELY_MAG(3))) { 2137 iff->_prob = PROB_STATIC_INFREQUENT; 2138 } 2139 } 2140 } 2141 } 2142 } 2143 test = phase->idom(test); 2144 } 2145 } 2146 2147 2148 //------------------------------policy_do_remove_empty_loop-------------------- 2149 // Micro-benchmark spamming. Policy is to always remove empty loops. 2150 // The 'DO' part is to replace the trip counter with the value it will 2151 // have on the last iteration. This will break the loop. 2152 bool IdealLoopTree::policy_do_remove_empty_loop( PhaseIdealLoop *phase ) { 2153 // Minimum size must be empty loop 2154 if (_body.size() > EMPTY_LOOP_SIZE) 2155 return false; 2156 2157 if (!_head->is_CountedLoop()) 2158 return false; // Dead loop 2159 CountedLoopNode *cl = _head->as_CountedLoop(); 2160 if (!cl->is_valid_counted_loop()) 2161 return false; // Malformed loop 2162 if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) 2163 return false; // Infinite loop 2164 2165 #ifdef ASSERT 2166 // Ensure only one phi which is the iv. 2167 Node* iv = NULL; 2168 for (DUIterator_Fast imax, i = cl->fast_outs(imax); i < imax; i++) { 2169 Node* n = cl->fast_out(i); 2170 if (n->Opcode() == Op_Phi) { 2171 assert(iv == NULL, "Too many phis" ); 2172 iv = n; 2173 } 2174 } 2175 assert(iv == cl->phi(), "Wrong phi" ); 2176 #endif 2177 2178 // main and post loops have explicitly created zero trip guard 2179 bool needs_guard = !cl->is_main_loop() && !cl->is_post_loop(); 2180 if (needs_guard) { 2181 // Skip guard if values not overlap. 2182 const TypeInt* init_t = phase->_igvn.type(cl->init_trip())->is_int(); 2183 const TypeInt* limit_t = phase->_igvn.type(cl->limit())->is_int(); 2184 int stride_con = cl->stride_con(); 2185 if (stride_con > 0) { 2186 needs_guard = (init_t->_hi >= limit_t->_lo); 2187 } else { 2188 needs_guard = (init_t->_lo <= limit_t->_hi); 2189 } 2190 } 2191 if (needs_guard) { 2192 // Check for an obvious zero trip guard. 
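// Expected guard shape (illustrative): the entry control, after skipping loop
// predicates, is the IfTrue projection of an If whose Bool uses the same test
// as the loop exit and whose Cmp compares init_trip against limit, e.g. a prior
//   if (init < limit) { loop }
// for a count-up loop; in that case no additional zero-trip guard is needed.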
2193 Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->in(LoopNode::EntryControl)); 2194 if (inctrl->Opcode() == Op_IfTrue) { 2195 // The test should look like just the backedge of a CountedLoop 2196 Node* iff = inctrl->in(0); 2197 if (iff->is_If()) { 2198 Node* bol = iff->in(1); 2199 if (bol->is_Bool() && bol->as_Bool()->_test._test == cl->loopexit()->test_trip()) { 2200 Node* cmp = bol->in(1); 2201 if (cmp->is_Cmp() && cmp->in(1) == cl->init_trip() && cmp->in(2) == cl->limit()) { 2202 needs_guard = false; 2203 } 2204 } 2205 } 2206 } 2207 } 2208 2209 #ifndef PRODUCT 2210 if (PrintOpto) { 2211 tty->print("Removing empty loop with%s zero trip guard", needs_guard ? "out" : ""); 2212 this->dump_head(); 2213 } else if (TraceLoopOpts) { 2214 tty->print("Empty with%s zero trip guard ", needs_guard ? "out" : ""); 2215 this->dump_head(); 2216 } 2217 #endif 2218 2219 if (needs_guard) { 2220 // Peel the loop to ensure there's a zero trip guard 2221 Node_List old_new; 2222 phase->do_peeling(this, old_new); 2223 } 2224 2225 // Replace the phi at loop head with the final value of the last 2226 // iteration. Then the CountedLoopEnd will collapse (backedge never 2227 // taken) and all loop-invariant uses of the exit values will be correct. 2228 Node *phi = cl->phi(); 2229 Node *exact_limit = phase->exact_limit(this); 2230 if (exact_limit != cl->limit()) { 2231 // We also need to replace the original limit to collapse loop exit. 2232 Node* cmp = cl->loopexit()->cmp_node(); 2233 assert(cl->limit() == cmp->in(2), "sanity"); 2234 phase->_igvn._worklist.push(cmp->in(2)); // put limit on worklist 2235 phase->_igvn.replace_input_of(cmp, 2, exact_limit); // put cmp on worklist 2236 } 2237 // Note: the final value after increment should not overflow since 2238 // counted loop has limit check predicate. 2239 Node *final = new (phase->C) SubINode( exact_limit, cl->stride() ); 2240 phase->register_new_node(final,cl->in(LoopNode::EntryControl)); 2241 phase->_igvn.replace_node(phi,final); 2242 phase->C->set_major_progress(); 2243 return true; 2244 } 2245 2246 //------------------------------policy_do_one_iteration_loop------------------- 2247 // Convert one iteration loop into normal code. 2248 bool IdealLoopTree::policy_do_one_iteration_loop( PhaseIdealLoop *phase ) { 2249 if (!_head->as_Loop()->is_valid_counted_loop()) 2250 return false; // Only for counted loop 2251 2252 CountedLoopNode *cl = _head->as_CountedLoop(); 2253 if (!cl->has_exact_trip_count() || cl->trip_count() != 1) { 2254 return false; 2255 } 2256 2257 #ifndef PRODUCT 2258 if(TraceLoopOpts) { 2259 tty->print("OneIteration "); 2260 this->dump_head(); 2261 } 2262 #endif 2263 2264 Node *init_n = cl->init_trip(); 2265 #ifdef ASSERT 2266 // Loop boundaries should be constant since trip count is exact. 2267 assert(init_n->get_int() + cl->stride_con() >= cl->limit()->get_int(), "should be one iteration"); 2268 #endif 2269 // Replace the phi at loop head with the value of the init_trip. 2270 // Then the CountedLoopEnd will collapse (backedge will not be taken) 2271 // and all loop-invariant uses of the exit values will be correct. 
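// E.g. (illustrative) a loop known to trip exactly once, such as
//   for (int i = init; i < init + stride; i += stride) { body }
// executes its body with i == init, so every use of the phi can simply see
// init_trip once the loop collapses.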
2272 phase->_igvn.replace_node(cl->phi(), cl->init_trip()); 2273 phase->C->set_major_progress(); 2274 return true; 2275 } 2276 2277 //============================================================================= 2278 //------------------------------iteration_split_impl--------------------------- 2279 bool IdealLoopTree::iteration_split_impl( PhaseIdealLoop *phase, Node_List &old_new ) { 2280 // Compute exact loop trip count if possible. 2281 compute_exact_trip_count(phase); 2282 2283 // Convert one iteration loop into normal code. 2284 if (policy_do_one_iteration_loop(phase)) 2285 return true; 2286 2287 // Check and remove empty loops (spam micro-benchmarks) 2288 if (policy_do_remove_empty_loop(phase)) 2289 return true; // Here we removed an empty loop 2290 2291 bool should_peel = policy_peeling(phase); // Should we peel? 2292 2293 bool should_unswitch = policy_unswitching(phase); 2294 2295 // Non-counted loops may be peeled; exactly 1 iteration is peeled. 2296 // This removes loop-invariant tests (usually null checks). 2297 if (!_head->is_CountedLoop()) { // Non-counted loop 2298 if (PartialPeelLoop && phase->partial_peel(this, old_new)) { 2299 // Partial peel succeeded so terminate this round of loop opts 2300 return false; 2301 } 2302 if (should_peel) { // Should we peel? 2303 #ifndef PRODUCT 2304 if (PrintOpto) tty->print_cr("should_peel"); 2305 #endif 2306 phase->do_peeling(this,old_new); 2307 } else if (should_unswitch) { 2308 phase->do_unswitching(this, old_new); 2309 } 2310 return true; 2311 } 2312 CountedLoopNode *cl = _head->as_CountedLoop(); 2313 2314 if (!cl->is_valid_counted_loop()) return true; // Ignore various kinds of broken loops 2315 2316 // Do nothing special to pre- and post- loops 2317 if (cl->is_pre_loop() || cl->is_post_loop()) return true; 2318 2319 // Compute loop trip count from profile data 2320 compute_profile_trip_cnt(phase); 2321 2322 // Before attempting fancy unrolling, RCE or alignment, see if we want 2323 // to completely unroll this loop or do loop unswitching. 2324 if (cl->is_normal_loop()) { 2325 if (should_unswitch) { 2326 phase->do_unswitching(this, old_new); 2327 return true; 2328 } 2329 bool should_maximally_unroll = policy_maximally_unroll(phase); 2330 if (should_maximally_unroll) { 2331 // Here we did some unrolling and peeling. Eventually we will 2332 // completely unroll this loop and it will no longer be a loop. 2333 phase->do_maximally_unroll(this,old_new); 2334 return true; 2335 } 2336 } 2337 2338 // Skip next optimizations if running low on nodes. Note that 2339 // policy_unswitching and policy_maximally_unroll have this check. 2340 int nodes_left = phase->C->max_node_limit() - phase->C->live_nodes(); 2341 if ((int)(2 * _body.size()) > nodes_left) { 2342 return true; 2343 } 2344 2345 // Counted loops may be peeled, may need some iterations run up 2346 // front for RCE, and may want to align loop refs to a cache 2347 // line. Thus we clone a full loop up front whose trip count is 2348 // at least 1 (if peeling), but may be several more. 2349 2350 // The main loop will start cache-line aligned with at least 1 2351 // iteration of the unrolled body (zero-trip test required) and 2352 // will have some range checks removed. 2353 2354 // A post-loop will finish any odd iterations (leftover after 2355 // unrolling), plus any needed for RCE purposes. 
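// Rough shape of the pre/main/post model (illustrative pseudo-code, not what is
// emitted literally):
//   for (i = init; i < pre_limit; i += stride)    { body, all checks }          // pre
//   if (zero-trip guard holds)
//     for (; i < main_limit; i += unroll*stride)  { unrolled body, RC removed }  // main
//   for (; i < limit; i += stride)                { body, all checks }          // post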
2356 2357 bool should_unroll = policy_unroll(phase); 2358 2359 bool should_rce = policy_range_check(phase); 2360 2361 bool should_align = policy_align(phase); 2362 2363 // If not RCE'ing (iteration splitting) or Aligning, then we do not 2364 // need a pre-loop. We may still need to peel an initial iteration but 2365 // we will not be needing an unknown number of pre-iterations. 2366 // 2367 // Basically, if may_rce_align reports FALSE first time through, 2368 // we will not be able to later do RCE or Aligning on this loop. 2369 bool may_rce_align = !policy_peel_only(phase) || should_rce || should_align; 2370 2371 // If we have any of these conditions (RCE, alignment, unrolling) met, then 2372 // we switch to the pre-/main-/post-loop model. This model also covers 2373 // peeling. 2374 if (should_rce || should_align || should_unroll) { 2375 if (cl->is_normal_loop()) // Convert to 'pre/main/post' loops 2376 phase->insert_pre_post_loops(this,old_new, !may_rce_align); 2377 2378 // Adjust the pre- and main-loop limits to let the pre and post loops run 2379 // with full checks, but the main-loop with no checks. Remove said 2380 // checks from the main body. 2381 if (should_rce) 2382 phase->do_range_check(this,old_new); 2383 2384 // Double loop body for unrolling. Adjust the minimum-trip test (will do 2385 // twice as many iterations as before) and the main body limit (only do 2386 // an even number of trips). If we are peeling, we might enable some RCE 2387 // and we'd rather unroll the post-RCE'd loop SO... do not unroll if 2388 // peeling. 2389 if (should_unroll && !should_peel) 2390 phase->do_unroll(this,old_new, true); 2391 2392 // Adjust the pre-loop limits to align the main body 2393 // iterations. 2394 if (should_align) 2395 Unimplemented(); 2396 2397 } else { // Else we have an unchanged counted loop 2398 if (should_peel) // Might want to peel but do nothing else 2399 phase->do_peeling(this,old_new); 2400 } 2401 return true; 2402 } 2403 2404 2405 //============================================================================= 2406 //------------------------------iteration_split-------------------------------- 2407 bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new ) { 2408 // Recursively iteration split nested loops 2409 if (_child && !_child->iteration_split(phase, old_new)) 2410 return false; 2411 2412 // Clean out prior deadwood 2413 DCE_loop_body(); 2414 2415 2416 // Look for loop-exit tests with my 50/50 guesses from the Parsing stage. 2417 // Replace with a 1-in-10 exit guess. 2418 if (_parent /*not the root loop*/ && 2419 !_irreducible && 2420 // Also ignore the occasional dead backedge 2421 !tail()->is_top()) { 2422 adjust_loop_exit_prob(phase); 2423 } 2424 2425 // Gate unrolling, RCE and peeling efforts. 2426 if (!_child && // If not an inner loop, do not split 2427 !_irreducible && 2428 _allow_optimizations && 2429 !tail()->is_top()) { // Also ignore the occasional dead backedge 2430 if (!_has_call) { 2431 if (!iteration_split_impl(phase, old_new)) { 2432 return false; 2433 } 2434 } else if (policy_unswitching(phase)) { 2435 phase->do_unswitching(this, old_new); 2436 } 2437 } 2438 2439 // Minor offset re-organization to remove loop-fallout uses of 2440 // trip counter when there was no major reshaping. 
2441 phase->reorg_offsets(this); 2442 2443 if (_next && !_next->iteration_split(phase, old_new)) 2444 return false; 2445 return true; 2446 } 2447 2448 2449 //============================================================================= 2450 // Process all the loops in the loop tree and replace any fill 2451 // patterns with an intrinsic version. 2452 bool PhaseIdealLoop::do_intrinsify_fill() { 2453 bool changed = false; 2454 for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) { 2455 IdealLoopTree* lpt = iter.current(); 2456 changed |= intrinsify_fill(lpt); 2457 } 2458 return changed; 2459 } 2460 2461 2462 // Examine an inner loop looking for a a single store of an invariant 2463 // value in a unit stride loop, 2464 bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& store_value, 2465 Node*& shift, Node*& con) { 2466 const char* msg = NULL; 2467 Node* msg_node = NULL; 2468 2469 store_value = NULL; 2470 con = NULL; 2471 shift = NULL; 2472 2473 // Process the loop looking for stores. If there are multiple 2474 // stores or extra control flow give at this point. 2475 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2476 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2477 Node* n = lpt->_body.at(i); 2478 if (n->outcnt() == 0) continue; // Ignore dead 2479 if (n->is_Store()) { 2480 if (store != NULL) { 2481 msg = "multiple stores"; 2482 break; 2483 } 2484 int opc = n->Opcode(); 2485 if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) { 2486 msg = "oop fills not handled"; 2487 break; 2488 } 2489 Node* value = n->in(MemNode::ValueIn); 2490 if (!lpt->is_invariant(value)) { 2491 msg = "variant store value"; 2492 } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) { 2493 msg = "not array address"; 2494 } 2495 store = n; 2496 store_value = value; 2497 } else if (n->is_If() && n != head->loopexit()) { 2498 msg = "extra control flow"; 2499 msg_node = n; 2500 } 2501 } 2502 2503 if (store == NULL) { 2504 // No store in loop 2505 return false; 2506 } 2507 2508 if (msg == NULL && head->stride_con() != 1) { 2509 // could handle negative strides too 2510 if (head->stride_con() < 0) { 2511 msg = "negative stride"; 2512 } else { 2513 msg = "non-unit stride"; 2514 } 2515 } 2516 2517 if (msg == NULL && !store->in(MemNode::Address)->is_AddP()) { 2518 msg = "can't handle store address"; 2519 msg_node = store->in(MemNode::Address); 2520 } 2521 2522 if (msg == NULL && 2523 (!store->in(MemNode::Memory)->is_Phi() || 2524 store->in(MemNode::Memory)->in(LoopNode::LoopBackControl) != store)) { 2525 msg = "store memory isn't proper phi"; 2526 msg_node = store->in(MemNode::Memory); 2527 } 2528 2529 // Make sure there is an appropriate fill routine 2530 BasicType t = store->as_Mem()->memory_type(); 2531 const char* fill_name; 2532 if (msg == NULL && 2533 StubRoutines::select_fill_function(t, false, fill_name) == NULL) { 2534 msg = "unsupported store"; 2535 msg_node = store; 2536 } 2537 2538 if (msg != NULL) { 2539 #ifndef PRODUCT 2540 if (TraceOptimizeFill) { 2541 tty->print_cr("not fill intrinsic candidate: %s", msg); 2542 if (msg_node != NULL) msg_node->dump(); 2543 } 2544 #endif 2545 return false; 2546 } 2547 2548 // Make sure the address expression can be handled. It should be 2549 // head->phi * elsize + con. head->phi might have a ConvI2L(CastII()). 
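// E.g. (illustrative) a store to an int array typically unpacks into two offset
// elements: a constant array-header offset and LShiftX(ConvI2L(head->phi()), 2),
// i.e. the loop phi scaled by the element size; anything else disqualifies the loop.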
2550 Node* elements[4]; 2551 Node* cast = NULL; 2552 Node* conv = NULL; 2553 bool found_index = false; 2554 int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements)); 2555 for (int e = 0; e < count; e++) { 2556 Node* n = elements[e]; 2557 if (n->is_Con() && con == NULL) { 2558 con = n; 2559 } else if (n->Opcode() == Op_LShiftX && shift == NULL) { 2560 Node* value = n->in(1); 2561 #ifdef _LP64 2562 if (value->Opcode() == Op_ConvI2L) { 2563 conv = value; 2564 value = value->in(1); 2565 } 2566 if (value->Opcode() == Op_CastII && 2567 value->as_CastII()->has_range_check()) { 2568 // Skip range check dependent CastII nodes 2569 cast = value; 2570 value = value->in(1); 2571 } 2572 #endif 2573 if (value != head->phi()) { 2574 msg = "unhandled shift in address"; 2575 } else { 2576 if (type2aelembytes(store->as_Mem()->memory_type(), true) != (1 << n->in(2)->get_int())) { 2577 msg = "scale doesn't match"; 2578 } else { 2579 found_index = true; 2580 shift = n; 2581 } 2582 } 2583 } else if (n->Opcode() == Op_ConvI2L && conv == NULL) { 2584 conv = n; 2585 n = n->in(1); 2586 if (n->Opcode() == Op_CastII && 2587 n->as_CastII()->has_range_check()) { 2588 // Skip range check dependent CastII nodes 2589 cast = n; 2590 n = n->in(1); 2591 } 2592 if (n == head->phi()) { 2593 found_index = true; 2594 } else { 2595 msg = "unhandled input to ConvI2L"; 2596 } 2597 } else if (n == head->phi()) { 2598 // no shift, check below for allowed cases 2599 found_index = true; 2600 } else { 2601 msg = "unhandled node in address"; 2602 msg_node = n; 2603 } 2604 } 2605 2606 if (count == -1) { 2607 msg = "malformed address expression"; 2608 msg_node = store; 2609 } 2610 2611 if (!found_index) { 2612 msg = "missing use of index"; 2613 } 2614 2615 // byte sized items won't have a shift 2616 if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) { 2617 msg = "can't find shift"; 2618 msg_node = store; 2619 } 2620 2621 if (msg != NULL) { 2622 #ifndef PRODUCT 2623 if (TraceOptimizeFill) { 2624 tty->print_cr("not fill intrinsic: %s", msg); 2625 if (msg_node != NULL) msg_node->dump(); 2626 } 2627 #endif 2628 return false; 2629 } 2630 2631 // No make sure all the other nodes in the loop can be handled 2632 VectorSet ok(Thread::current()->resource_area()); 2633 2634 // store related values are ok 2635 ok.set(store->_idx); 2636 ok.set(store->in(MemNode::Memory)->_idx); 2637 2638 CountedLoopEndNode* loop_exit = head->loopexit(); 2639 guarantee(loop_exit != NULL, "no loop exit node"); 2640 2641 // Loop structure is ok 2642 ok.set(head->_idx); 2643 ok.set(loop_exit->_idx); 2644 ok.set(head->phi()->_idx); 2645 ok.set(head->incr()->_idx); 2646 ok.set(loop_exit->cmp_node()->_idx); 2647 ok.set(loop_exit->in(1)->_idx); 2648 2649 // Address elements are ok 2650 if (con) ok.set(con->_idx); 2651 if (shift) ok.set(shift->_idx); 2652 if (cast) ok.set(cast->_idx); 2653 if (conv) ok.set(conv->_idx); 2654 2655 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2656 Node* n = lpt->_body.at(i); 2657 if (n->outcnt() == 0) continue; // Ignore dead 2658 if (ok.test(n->_idx)) continue; 2659 // Backedge projection is ok 2660 if (n->is_IfTrue() && n->in(0) == loop_exit) continue; 2661 if (!n->is_AddP()) { 2662 msg = "unhandled node"; 2663 msg_node = n; 2664 break; 2665 } 2666 } 2667 2668 // Make sure no unexpected values are used outside the loop 2669 for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) { 2670 Node* n = lpt->_body.at(i); 2671 // These values can be replaced with other nodes 
if they are used 2672 // outside the loop. 2673 if (n == store || n == loop_exit || n == head->incr() || n == store->in(MemNode::Memory)) continue; 2674 for (SimpleDUIterator iter(n); iter.has_next(); iter.next()) { 2675 Node* use = iter.get(); 2676 if (!lpt->_body.contains(use)) { 2677 msg = "node is used outside loop"; 2678 // lpt->_body.dump(); 2679 msg_node = n; 2680 break; 2681 } 2682 } 2683 } 2684 2685 #ifdef ASSERT 2686 if (TraceOptimizeFill) { 2687 if (msg != NULL) { 2688 tty->print_cr("no fill intrinsic: %s", msg); 2689 if (msg_node != NULL) msg_node->dump(); 2690 } else { 2691 tty->print_cr("fill intrinsic for:"); 2692 } 2693 store->dump(); 2694 if (Verbose) { 2695 lpt->_body.dump(); 2696 } 2697 } 2698 #endif 2699 2700 return msg == NULL; 2701 } 2702 2703 2704 2705 bool PhaseIdealLoop::intrinsify_fill(IdealLoopTree* lpt) { 2706 // Only for counted inner loops 2707 if (!lpt->is_counted() || !lpt->is_inner()) { 2708 return false; 2709 } 2710 2711 // Must have constant stride 2712 CountedLoopNode* head = lpt->_head->as_CountedLoop(); 2713 if (!head->is_valid_counted_loop() || !head->is_normal_loop()) { 2714 return false; 2715 } 2716 2717 // Check that the body only contains a store of a loop invariant 2718 // value that is indexed by the loop phi. 2719 Node* store = NULL; 2720 Node* store_value = NULL; 2721 Node* shift = NULL; 2722 Node* offset = NULL; 2723 if (!match_fill_loop(lpt, store, store_value, shift, offset)) { 2724 return false; 2725 } 2726 2727 Node* exit = head->loopexit()->proj_out(0); 2728 if (exit == NULL) { 2729 return false; 2730 } 2731 2732 #ifndef PRODUCT 2733 if (TraceLoopOpts) { 2734 tty->print("ArrayFill "); 2735 lpt->dump_head(); 2736 } 2737 #endif 2738 2739 // Now replace the whole loop body by a call to a fill routine that 2740 // covers the same region as the loop. 2741 Node* base = store->in(MemNode::Address)->as_AddP()->in(AddPNode::Base); 2742 2743 // Build an expression for the beginning of the copy region 2744 Node* index = head->init_trip(); 2745 #ifdef _LP64 2746 index = new (C) ConvI2LNode(index); 2747 _igvn.register_new_node_with_optimizer(index); 2748 #endif 2749 if (shift != NULL) { 2750 // byte arrays don't require a shift but others do. 
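// E.g. (illustrative) for an int array with init_trip i0 and constant header
// offset 'offset', the nodes below compute from = base + ((long)i0 << 2) + offset,
// the address of the first element written by the loop.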
2751 index = new (C) LShiftXNode(index, shift->in(2)); 2752 _igvn.register_new_node_with_optimizer(index); 2753 } 2754 index = new (C) AddPNode(base, base, index); 2755 _igvn.register_new_node_with_optimizer(index); 2756 Node* from = new (C) AddPNode(base, index, offset); 2757 _igvn.register_new_node_with_optimizer(from); 2758 // Compute the number of elements to copy 2759 Node* len = new (C) SubINode(head->limit(), head->init_trip()); 2760 _igvn.register_new_node_with_optimizer(len); 2761 2762 BasicType t = store->as_Mem()->memory_type(); 2763 bool aligned = false; 2764 if (offset != NULL && head->init_trip()->is_Con()) { 2765 int element_size = type2aelembytes(t); 2766 aligned = (offset->find_intptr_t_type()->get_con() + head->init_trip()->get_int() * element_size) % HeapWordSize == 0; 2767 } 2768 2769 // Build a call to the fill routine 2770 const char* fill_name; 2771 address fill = StubRoutines::select_fill_function(t, aligned, fill_name); 2772 assert(fill != NULL, "what?"); 2773 2774 // Convert float/double to int/long for fill routines 2775 if (t == T_FLOAT) { 2776 store_value = new (C) MoveF2INode(store_value); 2777 _igvn.register_new_node_with_optimizer(store_value); 2778 } else if (t == T_DOUBLE) { 2779 store_value = new (C) MoveD2LNode(store_value); 2780 _igvn.register_new_node_with_optimizer(store_value); 2781 } 2782 2783 if (CCallingConventionRequiresIntsAsLongs && 2784 // See StubRoutines::select_fill_function for types. FLOAT has been converted to INT. 2785 (t == T_FLOAT || t == T_INT || is_subword_type(t))) { 2786 store_value = new (C) ConvI2LNode(store_value); 2787 _igvn.register_new_node_with_optimizer(store_value); 2788 } 2789 2790 Node* mem_phi = store->in(MemNode::Memory); 2791 Node* result_ctrl; 2792 Node* result_mem; 2793 const TypeFunc* call_type = OptoRuntime::array_fill_Type(); 2794 CallLeafNode *call = new (C) CallLeafNoFPNode(call_type, fill, 2795 fill_name, TypeAryPtr::get_array_body_type(t)); 2796 uint cnt = 0; 2797 call->init_req(TypeFunc::Parms + cnt++, from); 2798 call->init_req(TypeFunc::Parms + cnt++, store_value); 2799 if (CCallingConventionRequiresIntsAsLongs) { 2800 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2801 } 2802 #ifdef _LP64 2803 len = new (C) ConvI2LNode(len); 2804 _igvn.register_new_node_with_optimizer(len); 2805 #endif 2806 call->init_req(TypeFunc::Parms + cnt++, len); 2807 #ifdef _LP64 2808 call->init_req(TypeFunc::Parms + cnt++, C->top()); 2809 #endif 2810 call->init_req(TypeFunc::Control, head->init_control()); 2811 call->init_req(TypeFunc::I_O, C->top()); // Does no I/O. 2812 call->init_req(TypeFunc::Memory, mem_phi->in(LoopNode::EntryControl)); 2813 call->init_req(TypeFunc::ReturnAdr, C->start()->proj_out(TypeFunc::ReturnAdr)); 2814 call->init_req(TypeFunc::FramePtr, C->start()->proj_out(TypeFunc::FramePtr)); 2815 _igvn.register_new_node_with_optimizer(call); 2816 result_ctrl = new (C) ProjNode(call,TypeFunc::Control); 2817 _igvn.register_new_node_with_optimizer(result_ctrl); 2818 result_mem = new (C) ProjNode(call,TypeFunc::Memory); 2819 _igvn.register_new_node_with_optimizer(result_mem); 2820 2821 /* Disable following optimization until proper fix (add missing checks). 2822 2823 // If this fill is tightly coupled to an allocation and overwrites 2824 // the whole body, allow it to take over the zeroing. 
2825 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, this); 2826 if (alloc != NULL && alloc->is_AllocateArray()) { 2827 Node* length = alloc->as_AllocateArray()->Ideal_length(); 2828 if (head->limit() == length && 2829 head->init_trip() == _igvn.intcon(0)) { 2830 if (TraceOptimizeFill) { 2831 tty->print_cr("Eliminated zeroing in allocation"); 2832 } 2833 alloc->maybe_set_complete(&_igvn); 2834 } else { 2835 #ifdef ASSERT 2836 if (TraceOptimizeFill) { 2837 tty->print_cr("filling array but bounds don't match"); 2838 alloc->dump(); 2839 head->init_trip()->dump(); 2840 head->limit()->dump(); 2841 length->dump(); 2842 } 2843 #endif 2844 } 2845 } 2846 */ 2847 2848 // Redirect the old control and memory edges that are outside the loop. 2849 // Sometimes the memory phi of the head is used as the outgoing 2850 // state of the loop. It's safe in this case to replace it with the 2851 // result_mem. 2852 _igvn.replace_node(store->in(MemNode::Memory), result_mem); 2853 lazy_replace(exit, result_ctrl); 2854 _igvn.replace_node(store, result_mem); 2855 // Any uses of the increment outside of the loop become the loop limit. 2856 _igvn.replace_node(head->incr(), head->limit()); 2857 2858 // Disconnect the head from the loop. 2859 for (uint i = 0; i < lpt->_body.size(); i++) { 2860 Node* n = lpt->_body.at(i); 2861 _igvn.replace_node(n, C->top()); 2862 } 2863 2864 return true; 2865 }