/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
#include "utilities/copy.hpp"

// Optimization - Graph Style


//-----------------------------------------------------------------------------
void Block_Array::grow( uint i ) {
  assert(i >= Max(), "must be an overflow");
  debug_only(_limit = i+1);
  if( i < _size ) return;
  if( !_size ) {
    _size = 1;
    _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
    _blocks[0] = NULL;
  }
  uint old = _size;
  while( i >= _size ) _size <<= 1;      // Double to fit
  _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*), _size*sizeof(Block*));
  Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}

//=============================================================================
void Block_List::remove(uint i) {
  assert(i < _cnt, "index out of bounds");
  Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
  pop(); // shrink list by one block
}

void Block_List::insert(uint i, Block *b) {
  push(b); // grow list by one block
  Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
  _blocks[i] = b;
}

#ifndef PRODUCT
void Block_List::print() {
  for (uint i=0; i < size(); i++) {
    tty->print("B%d ", _blocks[i]->_pre_order);
  }
  tty->print("size = %d\n", size());
}
#endif

//=============================================================================

uint Block::code_alignment() {
  // Check for Root block
  if (_pre_order == 0) return CodeEntryAlignment;
  // Check for Start block
  if (_pre_order == 1) return InteriorEntryAlignment;
  // Check for loop alignment
  if (has_loop_alignment()) return loop_alignment();

  return relocInfo::addr_unit(); // no particular alignment
}
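// A short summary of the loop-alignment policy implemented below (the
// concrete values come from the OptoLoopAlignment flag and
// relocInfo::addr_unit()):
//
//   inner counted pre- or post-loop         -> OptoLoopAlignment/4, at least unit_sz
//   backedge taken less than 1% of the time -> unit_sz (no alignment)
//   any other inner loop                    -> OptoLoopAlignment
//   everything else                         -> unit_sz (no alignment)
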
uint Block::compute_loop_alignment() {
  Node *h = head();
  int unit_sz = relocInfo::addr_unit();
  if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
    // Pre- and post-loops have low trip count so do not bother with
    // NOPs for align loop head.  The constants are hidden from tuning
    // but only because my "divide by 4" heuristic surely gets nearly
    // all possible gain (a "do not align at all" heuristic has a
    // chance of getting a really tiny gain).
    if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
                                h->as_CountedLoop()->is_post_loop())) {
      return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
    }
    // Loops with low backedge frequency should not be aligned.
    Node *n = h->in(LoopNode::LoopBackControl)->in(0);
    if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
      return unit_sz; // Loop does not loop, more often than not!
    }
    return OptoLoopAlignment; // Otherwise align loop head
  }

  return unit_sz; // no particular alignment
}

//-----------------------------------------------------------------------------
// Compute the size of first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// less than 'inst_cnt' instructions.  Stop, and return 0 if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                    PhaseRegAlloc* ra) {
  uint last_inst = _nodes.size();
  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
    uint inst_size = _nodes[j]->size(ra);
    if( inst_size > 0 ) {
      inst_cnt--;
      uint sz = sum_size + inst_size;
      if( sz <= (uint)OptoLoopAlignment ) {
        // Compute size of instructions which fit into fetch buffer only
        // since all inst_cnt instructions will not fit even if we align them.
        sum_size = sz;
      } else {
        return 0;
      }
    }
  }
  return inst_cnt;
}

//-----------------------------------------------------------------------------
uint Block::find_node( const Node *n ) const {
  for( uint i = 0; i < _nodes.size(); i++ ) {
    if( _nodes[i] == n )
      return i;
  }
  ShouldNotReachHere();
  return 0;
}

// Find and remove n from block list
void Block::find_remove( const Node *n ) {
  _nodes.remove(find_node(n));
}

//------------------------------is_Empty---------------------------------------
// Return empty status of a block.  Empty blocks contain only the head, other
// ideal nodes, and an optional trailing goto.
int Block::is_Empty() const {

  // Root or start block is not considered empty
  if (head()->is_Root() || head()->is_Start()) {
    return not_empty;
  }

  int success_result = completely_empty;
  int end_idx = _nodes.size()-1;

  // Check for ending goto
  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
    success_result = empty_with_goto;
    end_idx--;
  }

  // Unreachable blocks are considered empty
  if (num_preds() <= 1) {
    return success_result;
  }

  // Ideal nodes are allowable in empty blocks: skip them.  Only MachNodes
  // turn directly into code, because only MachNodes have non-trivial
  // emit() functions.
  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
    end_idx--;
  }

  // No room for any interesting instructions?
  if (end_idx == 0) {
    return success_result;
  }

  return not_empty;
}
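// A minimal sketch of how callers use the classification above (this is
// essentially what move_to_end() does later in this file):
//
//   int e = b->is_Empty();
//   if (e != Block::not_empty) {
//     if (e == Block::empty_with_goto)
//       b->_nodes.pop();       // drop the trailing goto
//     b->set_connector();      // block now emits no code
//   }
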
//------------------------------has_uncommon_code------------------------------
// Return true if the block's code implies that it is likely to be
// executed infrequently.  Check to see if the block ends in a Halt or
// a low probability call.
bool Block::has_uncommon_code() const {
  Node* en = end();

  if (en->is_MachGoto())
    en = en->in(0);
  if (en->is_Catch())
    en = en->in(0);
  if (en->is_MachProj() && en->in(0)->is_MachCall()) {
    MachCallNode* call = en->in(0)->as_MachCall();
    if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
      // This is true for slow-path stubs like new_{instance,array},
      // slow_arraycopy, complete_monitor_locking, uncommon_trap.
      // The magic number corresponds to the probability of an uncommon_trap,
      // even though it is a count not a probability.
      return true;
    }
  }

  int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
  return op == Op_Halt;
}
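// For reference, the numbers behind the is_uncommon() test below, assuming
// PROB_UNLIKELY_MAG(4) expands to 1e-4, with epsilon = 0.05:
//
//   guard_factor = 1e-4 / (1 - 0.05) ~= 1.053e-4
//
// so a predecessor counts as uncommon when its frequency is below about
// 1e-5 / 1.053e-4 ~= 0.095, which matches the "9500" in the comments.
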
//------------------------------is_uncommon------------------------------------
// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
bool Block::is_uncommon( Block_Array &bbs ) const {
  // Initial blocks must never be moved, so are never uncommon.
  if (head()->is_Root() || head()->is_Start()) return false;

  // Check for way-low freq
  if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;

  // Look for code shape indicating uncommon_trap or slow path
  if (has_uncommon_code()) return true;

  const float epsilon = 0.05f;
  const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
  uint uncommon_preds = 0;
  uint freq_preds = 0;
  uint uncommon_for_freq_preds = 0;

  for( uint i=1; i<num_preds(); i++ ) {
    Block* guard = bbs[pred(i)->_idx];
    // Check to see if this block follows its guard 1 time out of 10000
    // or less.
    //
    // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
    // we intend to be "uncommon", such as slow-path TLE allocation,
    // predicted call failure, and uncommon trap triggers.
    //
    // Use an epsilon value of 5% to allow for variability in frequency
    // predictions and floating point calculations.  The net effect is
    // that guard_factor is set to 9500.
    //
    // Ignore low-frequency blocks.
    // The next check is (guard->_freq < 1.e-5 * 9500.).
    if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
      uncommon_preds++;
    } else {
      freq_preds++;
      if( _freq < guard->_freq * guard_factor ) {
        uncommon_for_freq_preds++;
      }
    }
  }
  if( num_preds() > 1 &&
      // The block is uncommon if all preds are uncommon or
      (uncommon_preds == (num_preds()-1) ||
      // it is uncommon for all frequent preds.
       uncommon_for_freq_preds == freq_preds) ) {
    return true;
  }
  return false;
}

//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void Block::dump_bidx(const Block* orig, outputStream* st) const {
  if (_pre_order) st->print("B%d",_pre_order);
  else st->print("N%d", head()->_idx);

  if (Verbose && orig != this) {
    // Dump the original block's idx
    st->print(" (");
    orig->dump_bidx(orig, st);
    st->print(")");
  }
}

void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
  if (is_connector()) {
    for (uint i=1; i<num_preds(); i++) {
      Block *p = ((*bbs)[pred(i)->_idx]);
      p->dump_pred(bbs, orig, st);
    }
  } else {
    dump_bidx(orig, st);
    st->print(" ");
  }
}

void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
  // Print the basic block
  dump_bidx(this, st);
  st->print(": #\t");

  // Print the incoming CFG edges and the outgoing CFG edges
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }
  st->print("<- ");
  if( head()->is_block_start() ) {
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (bbs) {
        Block *p = (*bbs)[s->_idx];
        p->dump_pred(bbs, p, st);
      } else {
        while (!s->is_block_start())
          s = s->in(0);
        st->print("N%d ", s->_idx );
      }
    }
  } else
    st->print("BLOCK HEAD IS JUNK ");

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();
  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
    while (bx->is_connector()) {
      bx = (*bbs)[bx->pred(1)->_idx];
    }
    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
  } else if (has_loop_alignment()) {
    st->print(" top-of-loop");
  }
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->print_cr("");
}

void Block::dump() const { dump(NULL); }

void Block::dump( const Block_Array *bbs ) const {
  dump_head(bbs);
  uint cnt = _nodes.size();
  for( uint i=0; i<cnt; i++ )
    _nodes[i]->dump();
  tty->print("\n");
}
#endif
//=============================================================================
//------------------------------PhaseCFG---------------------------------------
PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
  Phase(CFG),
  _bbs(a),
  _root(r),
  _node_latency(NULL)
#ifndef PRODUCT
  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
  , _raw_oops(a)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
  // then Match it into a machine-specific Node.  Then clone the machine
  // Node on demand.
  Node *x = new (C, 1) GotoNode(NULL);
  x->init_req(0, x);
  _goto = m.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _num_blocks = build_cfg();
  _broot = _bbs[_root->_idx];
}

//------------------------------build_cfg--------------------------------------
// Build a proper looking CFG.  Make every block begin with either a StartNode
// or a RegionNode.  Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block.  Do this with a recursive
// backwards walk over the control edges.
uint PhaseCFG::build_cfg() {
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(a, C->unique() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node?  One of Return,
    // If or Goto?  (This check should be done for visited nodes also).
    if (x == NULL) {                    // Does not end right...
      Node *g = _goto->clone();         // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned middle'in stuff
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new (C, 2) RegionNode( 2 );
        r->init_req(1, p);          // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
      _bbs.map(p->_idx,bb);
      _bbs.map(x->_idx,bb);
      if( x != p ) {                // Only for root is x == p
        bb->_nodes.push((Node*)x);
      }
      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge -
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges.  The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack
        if ( cnt > 2 ) {            // Merging many things?
          assert( prevproj== bb->pred(i),"");
          if( prevproj->is_block_proj() != prevproj ) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);  // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on stack at the beginning.
      if (idx == 0) break;          // end of the build
      // Find predecessor basic block
      Block *pb = _bbs[x->_idx];
      // Insert into nodes array, if not already there
      if( !_bbs.lookup(proj->_idx) ) {
        assert( x != proj, "" );
        // Map basic block of projection
        _bbs.map(proj->_idx,pb);
        pb->_nodes.push(proj);
      }
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}

//------------------------------insert_goto_at---------------------------------
// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < _num_blocks, "illegal block number");
  Block* in  = _blocks[block_no];
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block.  Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
  // create region for basic block
  RegionNode* region = new (C, 2) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
  _bbs.map(region->_idx, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->_nodes.push(gto);
  _bbs.map(gto->_idx, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  _blocks.insert(block_no + 1, block);
  _num_blocks++;
}
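// A typical call, as made from fixup_flow() later in this file: give block
// i's j2'th successor its own goto block so the fall-through case lands
// where it should:
//
//   insert_goto_at(i, j2);
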
//------------------------------no_flip_branch---------------------------------
// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch( Block *b ) {
  int branch_idx = b->_nodes.size() - b->_num_succs-1;
  if( branch_idx < 1 ) return false;
  Node *bra = b->_nodes[branch_idx];
  if( bra->is_Catch() )
    return true;
  if( bra->is_Mach() ) {
    if( bra->is_MachNullCheck() )
      return true;
    int iop = bra->as_Mach()->ideal_Opcode();
    if( iop == Op_FastLock || iop == Op_FastUnlock )
      return true;
  }
  return false;
}

//------------------------------convert_NeverBranch_to_Goto--------------------
// Check for NeverBranch at block end.  This needs to become a GOTO to the
// true target.  NeverBranch nodes are treated as a conditional branch that
// always goes the same direction for most of the optimizer and are used to
// give a fake exit path to infinite loops.  At this late stage they need to
// turn into Goto's so that when you enter the infinite loop you indeed hang.
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  // Find true target
  int end_idx = b->end_idx();
  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
  Block *succ = b->_succs[idx];
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, b->head());
  Node *bp = b->_nodes[end_idx];
  b->_nodes.map(end_idx,gto); // Slam over NeverBranch
  _bbs.map(gto->_idx, b);
  C->regalloc()->set_bad(gto->_idx);
  b->_nodes.pop();              // Yank projections
  b->_nodes.pop();              // Yank projections
  b->_succs.map(0,succ);        // Map only successor
  b->_num_succs = 1;
  // remap successor's predecessors if necessary
  uint j;
  for( j = 1; j < succ->num_preds(); j++)
    if( succ->pred(j)->in(0) == bp )
      succ->head()->set_req(j, gto);
  // Kill alternate exit path
  Block *dead = b->_succs[1-idx];
  for( j = 1; j < dead->num_preds(); j++)
    if( dead->pred(j)->in(0) == bp )
      break;
  // Scan through block, yanking dead path from
  // all regions and phis.
  dead->head()->del_req(j);
  for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
    dead->_nodes[k]->del_req(j);
}

//------------------------------move_to_next-----------------------------------
// Helper function to move block bx to the slot following b_index.  Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
  assert(_blocks[bx_index] == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = _bbs[bx->pred(k)->_idx];
    if (pred == _blocks[bx_index-1]) {
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}
//------------------------------move_to_end------------------------------------
// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
  int e = b->is_Empty();
  if (e != Block::not_empty) {
    if (e == Block::empty_with_goto) {
      // Remove the goto, but leave the block.
      b->_nodes.pop();
    }
    // Mark this block as a connector block, which will cause it to be
    // ignored in certain functions such as non_connector_successor().
    b->set_connector();
  }
  // Move the empty block to the end, and don't recheck.
  _blocks.remove(i);
  _blocks.push(b);
}

//---------------------------set_loop_alignment--------------------------------
// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
  uint last = _num_blocks;
  assert( _blocks[0] == _broot, "" );

  for (uint i = 1; i < last; i++ ) {
    Block *b = _blocks[i];
    if (b->head()->is_Loop()) {
      b->set_loop_alignment(b);
    }
  }
}

//-----------------------------remove_empty------------------------------------
// Turn empty basic blocks into "connector" blocks, and move uncommon blocks
// to the end.
void PhaseCFG::remove_empty() {
  // Move uncommon blocks to the end
  uint last = _num_blocks;
  assert( _blocks[0] == _broot, "" );

  for (uint i = 1; i < last; i++) {
    Block *b = _blocks[i];
    if (b->is_connector()) break;

    // Check for NeverBranch at block end.  This needs to become a GOTO to the
    // true target.  NeverBranch nodes are treated as a conditional branch
    // that always goes the same direction for most of the optimizer and are
    // used to give a fake exit path to infinite loops.  At this late stage
    // they need to turn into Goto's so that when you enter the infinite loop
    // you indeed hang.
    if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
      convert_NeverBranch_to_Goto(b);

    // Look for uncommon blocks and move to end.
    if (!C->do_freq_based_layout()) {
      if( b->is_uncommon(_bbs) ) {
        move_to_end(b, i);
        last--;                   // No longer check for being uncommon!
        if( no_flip_branch(b) ) { // Fall-thru case must follow?
          b = _blocks[i];         // Find the fall-thru block
          move_to_end(b, i);
          last--;
        }
        i--;                      // backup block counter post-increment
      }
    }
  }

  // Move empty blocks to the end
  last = _num_blocks;
  for (uint i = 1; i < last; i++) {
    Block *b = _blocks[i];
    if (b->is_Empty() != Block::not_empty) {
      move_to_end(b, i);
      last--;
      i--;
    }
  } // End of for all blocks
}
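// The two passes above leave all code-bearing blocks first and all
// connectors trailing, e.g.:
//
//   before: B1  B2(empty)  B3  B4(uncommon)  B5
//   after:  B1  B3  B5  B4  B2(connector)
//
// (the exact tail order depends on which pass moved each block).
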
//-----------------------------fixup_flow--------------------------------------
// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
  // Fixup final control flow for the blocks.  Remove jump-to-next
  // block.  If neither arm of an IF follows the conditional branch, we
  // have to add a second jump after the conditional.  We place the
  // TRUE branch target in succs[0] for both GOTOs and IFs.
  for (uint i=0; i < _num_blocks; i++) {
    Block *b = _blocks[i];
    b->_pre_order = i;          // turn pre-order into block-index

    // Connector blocks need no further processing.
    if (b->is_connector()) {
      assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
             "All connector blocks should sink to the end");
      continue;
    }
    assert(b->is_Empty() != Block::completely_empty,
           "Empty blocks should be connectors");

    Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
    Block *bs0 = b->non_connector_successor(0);

    // Check for multi-way branches where I cannot negate the test to
    // exchange the true and false targets.
    if( no_flip_branch( b ) ) {
      // Find fall through case - the branch must fall into its target
      int branch_idx = b->_nodes.size() - b->_num_succs;
      for (uint j2 = 0; j2 < b->_num_succs; j2++) {
        const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj();
        if (p->_con == 0) {
          // successor j2 is fall through case
          if (b->non_connector_successor(j2) != bnext) {
            // but it is not the next block => insert a goto
            insert_goto_at(i, j2);
          }
          // Put taken branch in slot 0
          if( j2 == 0 && b->_num_succs == 2) {
            // Flip targets in succs map
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );
          }
          break;
        }
      }
      // Remove all CatchProjs
      for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();

    } else if (b->_num_succs == 1) {
      // Block ends in a Goto?
      if (bnext == bs0) {
        // We fall into next block; remove the Goto
        b->_nodes.pop();
      }

    } else if( b->_num_succs == 2 ) { // Block ends in an If?
      // Get opcode of 1st projection (matches _succs[0])
      // Note: Since this basic block has 2 exits, the last 2 nodes must
      //       be projections (in any order), the 3rd last node must be
      //       the IfNode (we have excluded other 2-way exits such as
      //       CatchNodes already).
      MachNode *iff   = b->_nodes[b->_nodes.size()-3]->as_Mach();
      ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
      ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();

      // Assert that proj0 and succs[0] match up.  Similarly for proj1 and succs[1].
      assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
      assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");

      Block *bs1 = b->non_connector_successor(1);

      // Check for neither successor block following the current
      // block ending in a conditional.  If so, move one of the
      // successors after the current one, provided that the
      // successor was previously unscheduled, but moveable
      // (i.e., all paths to it involve a branch).
      if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) {
        // Choose the more common successor based on the probability
        // of the conditional branch.
        Block *bx = bs0;
        Block *by = bs1;

        // _prob is the probability of taking the true path.  Make
        // p the probability of taking successor #1.
        float p = iff->as_MachIf()->_prob;
        if( proj0->Opcode() == Op_IfTrue ) {
          p = 1.0 - p;
        }

        // Prefer successor #1 if p > 0.5
        if (p > PROB_FAIR) {
          bx = bs1;
          by = bs0;
        }

        // Attempt the more common successor first
        if (move_to_next(bx, i)) {
          bnext = bx;
        } else if (move_to_next(by, i)) {
          bnext = by;
        }
      }
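      // For instance, if _prob is 0.8 and proj0 is the IfTrue projection,
      // then p = 1.0 - 0.8 = 0.2: successor #1 is the cold path, so bs0 is
      // the block tried first by the move_to_next() calls above.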
      // Check for conditional branching the wrong way.  Negate
      // conditional, if needed, so it falls into the following block
      // and branches to the not-following block.

      // Check for the next block being in succs[0].  We are going to branch
      // to succs[0], so we want the fall-thru case as the next block in
      // succs[1].
      if (bnext == bs0) {
        // Fall-thru case in succs[0], so flip targets in succs map
        Block *tbs0 = b->_succs[0];
        Block *tbs1 = b->_succs[1];
        b->_succs.map( 0, tbs1 );
        b->_succs.map( 1, tbs0 );
        // Flip projection for each target
        { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; }

      } else if( bnext != bs1 ) {
        // Need a double-branch
        // The existing conditional branch need not change.
        // Add an unconditional branch to the false target.
        // Alas, it must appear in its own block and adding a
        // block this late in the game is complicated.  Sigh.
        insert_goto_at(i, 1);
      }

      // Make sure we TRUE branch to the target
      if( proj0->Opcode() == Op_IfFalse ) {
        iff->negate();
      }

      b->_nodes.pop();          // Remove IfFalse & IfTrue projections
      b->_nodes.pop();

    } else {
      // Multi-exit block, e.g. a switch statement
      // But we don't need to do anything here
    }
  } // End of for all blocks
}


//------------------------------dump-------------------------------------------
#ifndef PRODUCT
void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
  const Node *x = end->is_block_proj();
  assert( x, "not a CFG" );

  // Do not visit this block again
  if( visited.test_set(x->_idx) ) return;

  // Skip through this block
  const Node *p = x;
  do {
    p = p->in(0);               // Move control forward
    assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
  } while( !p->is_block_start() );

  // Recursively visit
  for( uint i=1; i<p->req(); i++ )
    _dump_cfg(p->in(i),visited);

  // Dump the block
  _bbs[p->_idx]->dump(&_bbs);
}

void PhaseCFG::dump( ) const {
  tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
  if( _blocks.size() ) {        // Did we do basic-block layout?
    for( uint i=0; i<_num_blocks; i++ )
      _blocks[i]->dump(&_bbs);
  } else {                      // Else do it with a DFS
    VectorSet visited(_bbs._arena);
    _dump_cfg(_root,visited);
  }
}

void PhaseCFG::dump_headers() {
  for( uint i = 0; i < _num_blocks; i++ ) {
    if( _blocks[i] == NULL ) continue;
    _blocks[i]->dump_head(&_bbs);
  }
}

void PhaseCFG::verify( ) const {
#ifdef ASSERT
  // Verify sane CFG
  for( uint i = 0; i < _num_blocks; i++ ) {
    Block *b = _blocks[i];
    uint cnt = b->_nodes.size();
    uint j;
    for( j = 0; j < cnt; j++ ) {
      Node *n = b->_nodes[j];
      assert( _bbs[n->_idx] == b, "" );
      if( j >= 1 && n->is_Mach() &&
          n->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        assert( j == 1 || b->_nodes[j-1]->is_Phi(),
                "CreateEx must be first instruction in block" );
      }
      for( uint k = 0; k < n->req(); k++ ) {
        Node *def = n->in(k);
        if( def && def != n ) {
          assert( _bbs[def->_idx] || def->is_Con(),
                  "must have block; constants for debug info ok" );
          // Verify that all instructions in the block are in correct order.
          // Uses must follow their definition if they are at the same block.
          // Mostly done to check that MachSpillCopy nodes are placed correctly
          // when CreateEx node is moved in build_ifg_physical().
          if( _bbs[def->_idx] == b &&
              !(b->head()->is_Loop() && n->is_Phi()) &&
              // See (+++) comment in reg_split.cpp
              !(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
            bool is_loop = false;
            if (n->is_Phi()) {
              for( uint l = 1; l < def->req(); l++ ) {
                if (n == def->in(l)) {
                  is_loop = true;
                  break; // Some kind of loop
                }
              }
            }
            assert( is_loop || b->find_node(def) < j, "uses must follow definitions" );
          }
          if( def->is_SafePointScalarObject() ) {
            assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node");
            assert(_bbs[def->_idx] == _bbs[def->in(0)->_idx], "SafePointScalarObject Node should be at the same block as its control edge");
          }
        }
      }
    }

    j = b->end_idx();
    Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
    assert( bp, "last instruction must be a block proj" );
    assert( bp == b->_nodes[j], "wrong number of successors for this block" );
    if( bp->is_Catch() ) {
      while( b->_nodes[--j]->is_MachProj() ) ;
      assert( b->_nodes[j]->is_MachCall(), "CatchProj must follow call" );
    }
    else if( bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If ) {
      assert( b->_num_succs == 2, "Conditional branch must have two targets");
    }
  }
#endif
}
#endif

//=============================================================================
//------------------------------UnionFind--------------------------------------
UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
  Copy::zero_to_bytes( _indices, sizeof(uint)*max );
}

void UnionFind::extend( uint from_idx, uint to_idx ) {
  _nesting.check();
  if( from_idx >= _max ) {
    uint size = 16;
    while( size <= from_idx ) size <<=1;
    _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
    _max = size;
  }
  while( _cnt <= from_idx ) _indices[_cnt++] = 0;
  _indices[from_idx] = to_idx;
}

void UnionFind::reset( uint max ) {
  assert( max <= max_uint, "Must fit within uint" );
  // Force the Union-Find mapping to be at least this large
  extend(max,0);
  // Initialize to be the ID mapping.
  for( uint i=0; i<max; i++ ) map(i,i);
}
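// A minimal usage sketch (a ResourceMark must be active, since the table
// lives in the resource area):
//
//   UnionFind uf(4);
//   uf.reset(4);      // identity mapping: 0 1 2 3
//   uf.Union(1, 3);   // 3 now maps to 1; the smaller index is the root
//   uf.Find(3);       // returns 1, compressing the path as it goes
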
//------------------------------Find_compress----------------------------------
// Straight out of Tarjan's union-find algorithm
uint UnionFind::Find_compress( uint idx ) {
  uint cur  = idx;
  uint next = lookup(cur);
  while( next != cur ) {        // Scan chain of equivalences
    assert( next < cur, "always union smaller" );
    cur = next;                 // until find a fixed-point
    next = lookup(cur);
  }
  // Core of union-find algorithm: update chain of
  // equivalences to be equal to the root.
  while( idx != next ) {
    uint tmp = lookup(idx);
    map(idx, next);
    idx = tmp;
  }
  return idx;
}

//------------------------------Find_const-------------------------------------
// Like Find above, but no path compress, so bad asymptotic behavior
uint UnionFind::Find_const( uint idx ) const {
  if( idx == 0 ) return idx;    // Ignore the zero idx
  // Off the end?  This can happen during debugging dumps
  // when data structures have not finished being updated.
  if( idx >= _max ) return idx;
  uint next = lookup(idx);
  while( next != idx ) {        // Scan chain of equivalences
    idx = next;                 // until find a fixed-point
    next = lookup(idx);
  }
  return next;
}

//------------------------------Union------------------------------------------
// union 2 sets together.
void UnionFind::Union( uint idx1, uint idx2 ) {
  uint src = Find(idx1);
  uint dst = Find(idx2);
  assert( src, "" );
  assert( dst, "" );
  assert( src < _max, "oob" );
  assert( dst < _max, "oob" );
  assert( src < dst, "always union smaller" );
  map(dst,src);
}

#ifndef PRODUCT
static void edge_dump(GrowableArray<CFGEdge *> *edges) {
  tty->print_cr("---- Edges ----");
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);
    if (e != NULL) {
      edges->at(i)->dump();
    }
  }
}

static void trace_dump(Trace *traces[], int count) {
  tty->print_cr("---- Traces ----");
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      tr->dump();
    }
  }
}

void Trace::dump( ) const {
  tty->print_cr("Trace (freq %f)", first_block()->_freq);
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    tty->print("  B%d", b->_pre_order);
    if (b->head()->is_Loop()) {
      tty->print(" (L%d)", b->compute_loop_alignment());
    }
    if (b->has_loop_alignment()) {
      tty->print(" (T%d)", b->code_alignment());
    }
  }
  tty->cr();
}

void CFGEdge::dump( ) const {
  tty->print(" B%d  -->  B%d  Freq: %f  out:%3d%%  in:%3d%%  State: ",
             from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
  switch(state()) {
  case connected:
    tty->print("connected");
    break;
  case open:
    tty->print("open");
    break;
  case interior:
    tty->print("interior");
    break;
  }
  if (infrequent()) {
    tty->print("  infrequent");
  }
  tty->cr();
}
#endif

//=============================================================================

//------------------------------edge_order-------------------------------------
// Comparison function for edges
static int edge_order(CFGEdge **e0, CFGEdge **e1) {
  float freq0 = (*e0)->freq();
  float freq1 = (*e1)->freq();
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
  int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;

  return dist1 - dist0;
}
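// edge_order() above sorts descending by frequency; for equal frequencies
// the edge spanning the larger RPO distance sorts first (e.g. B2->B10
// ahead of B4->B5).
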
//------------------------------trace_frequency_order--------------------------
// Comparison function for traces
static int trace_frequency_order(const void *p0, const void *p1) {
  Trace *tr0 = *(Trace **) p0;
  Trace *tr1 = *(Trace **) p1;
  Block *b0 = tr0->first_block();
  Block *b1 = tr1->first_block();

  // The trace of connector blocks goes at the end;
  // we only expect one such trace
  if (b0->is_connector() != b1->is_connector()) {
    return b1->is_connector() ? -1 : 1;
  }

  // Pull more frequently executed blocks to the beginning
  float freq0 = b0->_freq;
  float freq1 = b1->_freq;
  if (freq0 != freq1) {
    return freq0 > freq1 ? -1 : 1;
  }

  int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;

  return diff;
}

//------------------------------find_edges-------------------------------------
// Find edges of interest, i.e., those which can fall through.  Presumes that
// edges which don't fall through are of low frequency and can be generally
// ignored.  Initialize the list of traces.
void PhaseBlockLayout::find_edges()
{
  // Walk the blocks, creating edges and Traces
  uint i;
  Trace *tr = NULL;
  for (i = 0; i < _cfg._num_blocks; i++) {
    Block *b = _cfg._blocks[i];
    tr = new Trace(b, next, prev);
    traces[tr->id()] = tr;

    // All connector blocks should be at the end of the list
    if (b->is_connector()) break;

    // If this block and the next one have a one-to-one successor/predecessor
    // relationship, simply append the next block
    int nfallthru = b->num_fall_throughs();
    while (nfallthru == 1 &&
           b->succ_fall_through(0)) {
      Block *n = b->_succs[0];

      // Skip over single-entry connector blocks, we don't want to
      // add them to the trace.
      while (n->is_connector() && n->num_preds() == 1) {
        n = n->_succs[0];
      }

      // We see a merge point, so stop search for the next block
      if (n->num_preds() != 1) break;

      i++;
      assert(n == _cfg._blocks[i], "expecting next block");
      tr->append(n);
      uf->map(n->_pre_order, tr->id());
      traces[n->_pre_order] = NULL;
      nfallthru = b->num_fall_throughs();
      b = n;
    }

    if (nfallthru > 0) {
      // Create a CFGEdge for each outgoing
      // edge that could be a fall-through.
      for (uint j = 0; j < b->_num_succs; j++ ) {
        if (b->succ_fall_through(j)) {
          Block *target = b->non_connector_successor(j);
          float freq = b->_freq * b->succ_prob(j);
          int from_pct = (int) ((100 * freq) / b->_freq);
          int to_pct = (int) ((100 * freq) / target->_freq);
          edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
        }
      }
    }
  }

  // Group connector blocks into one trace
  for (i++; i < _cfg._num_blocks; i++) {
    Block *b = _cfg._blocks[i];
    assert(b->is_connector(), "connector blocks at the end");
    tr->append(b);
    uf->map(b->_pre_order, tr->id());
    traces[b->_pre_order] = NULL;
  }
}

//------------------------------union_traces----------------------------------
// Union two traces together in uf, and null out the trace in the list
void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
{
  uint old_id = old_trace->id();
  uint updated_id = updated_trace->id();

  uint lo_id = updated_id;
  uint hi_id = old_id;

  // If from is greater than to, swap values to meet
  // UnionFind guarantee.
  if (updated_id > old_id) {
    lo_id = old_id;
    hi_id = updated_id;

    // Fix up the trace ids
    traces[lo_id] = traces[updated_id];
    updated_trace->set_id(lo_id);
  }

  // Union the lower with the higher and remove the pointer
  // to the higher.
  uf->Union(lo_id, hi_id);
  traces[hi_id] = NULL;
}
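// For example, merging the trace with id 7 into the trace with id 3 calls
// uf->Union(3, 7) and clears traces[7]; if the ids arrive in the other
// order, they are swapped first so the surviving trace keeps the smaller id.
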
//------------------------------grow_traces-------------------------------------
// Append traces together via the most frequently executed edges
void PhaseBlockLayout::grow_traces()
{
  // Order the edges, and drive the growth of Traces via the most
  // frequently executed edges.
  edges->sort(edge_order);
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;

    Block *src_block = e->from();
    Block *targ_block = e->to();

    // Don't grow traces along backedges?
    if (!BlockLayoutRotateLoops) {
      if (targ_block->_rpo <= src_block->_rpo) {
        targ_block->set_loop_alignment(targ_block);
        continue;
      }
    }

    Trace *src_trace = trace(src_block);
    Trace *targ_trace = trace(targ_block);

    // If the edge in question can join two traces at their ends,
    // append one trace to the other.
    if (src_trace->last_block() == src_block) {
      if (src_trace == targ_trace) {
        e->set_state(CFGEdge::interior);
        if (targ_trace->backedge(e)) {
          // Reset i to catch any newly eligible edge
          // (Or we could remember the first "open" edge, and reset there)
          i = 0;
        }
      } else if (targ_trace->first_block() == targ_block) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}
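// merge_traces() below runs twice, driven by the constructor later in this
// file: first with fall_thru_only == true to fold diamonds into a trace,
// then with fall_thru_only == false to catenate whatever traces remain.
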
//------------------------------merge_traces-----------------------------------
// Embed one trace into another, if the fork or join points are sufficiently
// balanced.
void PhaseBlockLayout::merge_traces(bool fall_thru_only)
{
  // Walk the edge list one more time, looking at unprocessed edges.
  // Fold in diamonds
  for (int i = 0; i < edges->length(); i++) {
    CFGEdge *e = edges->at(i);

    if (e->state() != CFGEdge::open) continue;
    if (fall_thru_only) {
      if (e->infrequent()) continue;
    }

    Block *src_block = e->from();
    Trace *src_trace = trace(src_block);
    bool src_at_tail = src_trace->last_block() == src_block;

    Block *targ_block = e->to();
    Trace *targ_trace = trace(targ_block);
    bool targ_at_start = targ_trace->first_block() == targ_block;

    if (src_trace == targ_trace) {
      // This may be a loop, but we can't do much about it.
      e->set_state(CFGEdge::interior);
      continue;
    }

    if (fall_thru_only) {
      // If the edge links the middle of two traces, we can't do anything.
      // Mark the edge and continue.
      if (!src_at_tail && !targ_at_start) {
        continue;
      }

      // Don't grow traces along backedges?
      if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
        continue;
      }

      // If both ends of the edge are available, why didn't we handle it earlier?
      assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");

      if (targ_at_start) {
        // Insert the "targ" trace in the "src" trace if the insertion point
        // is a two way branch.
        // Better profitability check possible, but may not be worth it.
        // Someday, see if this "fork" has an associated "join";
        // then make a policy on merging this trace at the fork or join.
        // For example, other things being equal, it may be better to place this
        // trace at the join point if the "src" trace ends in a two-way, but
        // the insertion point is one-way.
        assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
        e->set_state(CFGEdge::connected);
        src_trace->insert_after(src_block, targ_trace);
        union_traces(src_trace, targ_trace);
      } else if (src_at_tail) {
        if (src_trace != trace(_cfg._broot)) {
          e->set_state(CFGEdge::connected);
          targ_trace->insert_before(targ_block, src_trace);
          union_traces(targ_trace, src_trace);
        }
      }
    } else if (e->state() == CFGEdge::open) {
      // Append traces, even without a fall-thru connection.
      // But leave root entry at the beginning of the block list.
      if (targ_trace != trace(_cfg._broot)) {
        e->set_state(CFGEdge::connected);
        src_trace->append(targ_trace);
        union_traces(src_trace, targ_trace);
      }
    }
  }
}

//----------------------------reorder_traces-----------------------------------
// Order the sequence of the traces in some desirable way, and fixup the
// jumps at the end of each block.
void PhaseBlockLayout::reorder_traces(int count)
{
  ResourceArea *area = Thread::current()->resource_area();
  Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
  Block_List worklist;
  int new_count = 0;

  // Compact the traces.
  for (int i = 0; i < count; i++) {
    Trace *tr = traces[i];
    if (tr != NULL) {
      new_traces[new_count++] = tr;
    }
  }

  // The entry block should be first on the new trace list.
  Trace *tr = trace(_cfg._broot);
  assert(tr == new_traces[0], "entry trace misplaced");

  // Sort the new trace list by frequency
  qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);

  // Patch up the successor blocks
  _cfg._blocks.reset();
  _cfg._num_blocks = 0;
  for (int i = 0; i < new_count; i++) {
    Trace *tr = new_traces[i];
    if (tr != NULL) {
      tr->fixup_blocks(_cfg);
    }
  }
}

//------------------------------PhaseBlockLayout-------------------------------
// Order basic blocks based on frequency
PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
  Phase(BlockLayout),
  _cfg(cfg)
{
  ResourceMark rm;
  ResourceArea *area = Thread::current()->resource_area();

  // List of traces
  int size = _cfg._num_blocks + 1;
  traces = NEW_ARENA_ARRAY(area, Trace *, size);
  memset(traces, 0, size*sizeof(Trace*));
  next = NEW_ARENA_ARRAY(area, Block *, size);
  memset(next, 0, size*sizeof(Block *));
  prev = NEW_ARENA_ARRAY(area, Block *, size);
  memset(prev , 0, size*sizeof(Block *));

  // List of edges
  edges = new GrowableArray<CFGEdge*>;

  // Mapping block index --> block_trace
  uf = new UnionFind(size);
  uf->reset(size);

  // Find edges and create traces.
  find_edges();

  // Grow traces at their ends via most frequent edges.
  grow_traces();

  // Merge one trace into another, but only at fall-through points.
  // This may make diamonds and other related shapes in a trace.
  merge_traces(true);

  // Run merge again, allowing two traces to be catenated, even if
  // one does not fall through into the other.  This places loosely
  // related traces near each other.
  merge_traces(false);

  // Re-order all the remaining traces by frequency
  reorder_traces(size);

  assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink");
}


//------------------------------backedge---------------------------------------
// Edge e completes a loop in a trace. If the target block is head of the
// loop, rotate the loop block so that the loop ends in a conditional branch.
bool Trace::backedge(CFGEdge *e) {
  bool loop_rotated = false;
  Block *src_block  = e->from();
  Block *targ_block = e->to();

  assert(last_block() == src_block, "loop discovery at back branch");
  if (first_block() == targ_block) {
    if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
      // Find the last block in the trace that has a conditional
      // branch.
      Block *b;
      for (b = last_block(); b != NULL; b = prev(b)) {
        if (b->num_fall_throughs() == 2) {
          break;
        }
      }

      if (b != last_block() && b != NULL) {
        loop_rotated = true;

        // Rotate the loop by doing two-part linked-list surgery.
        append(first_block());
        break_loop_after(b);
      }
    }

    // Backbranch to the top of a trace
    // Scroll forward through the trace from the targ_block.  If we find
    // a loop head before another loop top, use the loop head alignment.
    for (Block *b = targ_block; b != NULL; b = next(b)) {
      if (b->has_loop_alignment()) {
        break;
      }
      if (b->head()->is_Loop()) {
        targ_block = b;
        break;
      }
    }

    first_block()->set_loop_alignment(targ_block);

  } else {
    // Backbranch into the middle of a trace
    targ_block->set_loop_alignment(targ_block);
  }

  return loop_rotated;
}

//------------------------------fixup_blocks-----------------------------------
// push blocks onto the CFG list
// ensure that blocks have the correct two-way branch sense
void Trace::fixup_blocks(PhaseCFG &cfg) {
  Block *last = last_block();
  for (Block *b = first_block(); b != NULL; b = next(b)) {
    cfg._blocks.push(b);
    cfg._num_blocks++;
    if (!b->is_connector()) {
      int nfallthru = b->num_fall_throughs();
      if (b != last) {
        if (nfallthru == 2) {
          // Ensure that the sense of the branch is correct
          Block *bnext = next(b);
          Block *bs0 = b->non_connector_successor(0);

          MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
          ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
          ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();

          if (bnext == bs0) {
            // Fall-thru case in succs[0], should be in succs[1]

            // Flip targets in _succs map
            Block *tbs0 = b->_succs[0];
            Block *tbs1 = b->_succs[1];
            b->_succs.map( 0, tbs1 );
            b->_succs.map( 1, tbs0 );

            // Flip projections to match targets
            b->_nodes.map(b->_nodes.size()-2, proj1);
            b->_nodes.map(b->_nodes.size()-1, proj0);
          }
        }
      }
    }
  }
}