1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "libadt/vectset.hpp" 27 #include "memory/allocation.inline.hpp" 28 #include "opto/block.hpp" 29 #include "opto/c2compiler.hpp" 30 #include "opto/callnode.hpp" 31 #include "opto/cfgnode.hpp" 32 #include "opto/machnode.hpp" 33 #include "opto/opcodes.hpp" 34 #include "opto/phaseX.hpp" 35 #include "opto/rootnode.hpp" 36 #include "opto/runtime.hpp" 37 #include "runtime/deoptimization.hpp" 38 #if defined AD_MD_HPP 39 # include AD_MD_HPP 40 #elif defined TARGET_ARCH_MODEL_x86_32 41 # include "adfiles/ad_x86_32.hpp" 42 #elif defined TARGET_ARCH_MODEL_x86_64 43 # include "adfiles/ad_x86_64.hpp" 44 #elif defined TARGET_ARCH_MODEL_sparc 45 # include "adfiles/ad_sparc.hpp" 46 #elif defined TARGET_ARCH_MODEL_zero 47 # include "adfiles/ad_zero.hpp" 48 #elif defined TARGET_ARCH_MODEL_ppc_64 49 # include "adfiles/ad_ppc_64.hpp" 50 #endif 51 52 53 // Portions of code courtesy of Clifford Click 54 55 // Optimization - Graph Style 56 57 // To avoid float value underflow 58 #define MIN_BLOCK_FREQUENCY 1.e-35f 59 60 //----------------------------schedule_node_into_block------------------------- 61 // Insert node n into block b. Look for projections of n and make sure they 62 // are in b also. 63 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) { 64 // Set basic block of n, Add n to b, 65 map_node_to_block(n, b); 66 b->add_inst(n); 67 68 // After Matching, nearly any old Node may have projections trailing it. 69 // These are usually machine-dependent flags. In any case, they might 70 // float to another block below this one. Move them up. 71 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 72 Node* use = n->fast_out(i); 73 if (use->is_Proj()) { 74 Block* buse = get_block_for_node(use); 75 if (buse != b) { // In wrong block? 76 if (buse != NULL) { 77 buse->find_remove(use); // Remove from wrong block 78 } 79 map_node_to_block(use, b); 80 b->add_inst(use); 81 } 82 } 83 } 84 } 85 86 //----------------------------replace_block_proj_ctrl------------------------- 87 // Nodes that have is_block_proj() nodes as their control need to use 88 // the appropriate Region for their actual block as their control since 89 // the projection will be in a predecessor block. 90 void PhaseCFG::replace_block_proj_ctrl( Node *n ) { 91 const Node *in0 = n->in(0); 92 assert(in0 != NULL, "Only control-dependent"); 93 const Node *p = in0->is_block_proj(); 94 if (p != NULL && p != n) { // Control from a block projection? 95 assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here"); 96 // Find trailing Region 97 Block *pb = get_block_for_node(in0); // Block-projection already has basic block 98 uint j = 0; 99 if (pb->_num_succs != 1) { // More then 1 successor? 100 // Search for successor 101 uint max = pb->number_of_nodes(); 102 assert( max > 1, "" ); 103 uint start = max - pb->_num_succs; 104 // Find which output path belongs to projection 105 for (j = start; j < max; j++) { 106 if( pb->get_node(j) == in0 ) 107 break; 108 } 109 assert( j < max, "must find" ); 110 // Change control to match head of successor basic block 111 j -= start; 112 } 113 n->set_req(0, pb->_succs[j]->head()); 114 } 115 } 116 117 118 //------------------------------schedule_pinned_nodes-------------------------- 119 // Set the basic block for Nodes pinned into blocks 120 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { 121 // Allocate node stack of size C->unique()+8 to avoid frequent realloc 122 GrowableArray <Node *> spstack(C->unique() + 8); 123 spstack.push(_root); 124 while (spstack.is_nonempty()) { 125 Node* node = spstack.pop(); 126 if (!visited.test_set(node->_idx)) { // Test node and flag it as visited 127 if (node->pinned() && !has_block(node)) { // Pinned? Nail it down! 128 assert(node->in(0), "pinned Node must have Control"); 129 // Before setting block replace block_proj control edge 130 replace_block_proj_ctrl(node); 131 Node* input = node->in(0); 132 while (!input->is_block_start()) { 133 input = input->in(0); 134 } 135 Block* block = get_block_for_node(input); // Basic block of controlling input 136 schedule_node_into_block(node, block); 137 } 138 139 // process all inputs that are non NULL 140 for (int i = node->req() - 1; i >= 0; --i) { 141 if (node->in(i) != NULL) { 142 spstack.push(node->in(i)); 143 } 144 } 145 } 146 } 147 } 148 149 #ifdef ASSERT 150 // Assert that new input b2 is dominated by all previous inputs. 151 // Check this by by seeing that it is dominated by b1, the deepest 152 // input observed until b2. 153 static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) { 154 if (b1 == NULL) return; 155 assert(b1->_dom_depth < b2->_dom_depth, "sanity"); 156 Block* tmp = b2; 157 while (tmp != b1 && tmp != NULL) { 158 tmp = tmp->_idom; 159 } 160 if (tmp != b1) { 161 // Detected an unschedulable graph. Print some nice stuff and die. 162 tty->print_cr("!!! Unschedulable graph !!!"); 163 for (uint j=0; j<n->len(); j++) { // For all inputs 164 Node* inn = n->in(j); // Get input 165 if (inn == NULL) continue; // Ignore NULL, missing inputs 166 Block* inb = cfg->get_block_for_node(inn); 167 tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order, 168 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth); 169 inn->dump(); 170 } 171 tty->print("Failing node: "); 172 n->dump(); 173 assert(false, "unscheduable graph"); 174 } 175 } 176 #endif 177 178 static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) { 179 // Find the last input dominated by all other inputs. 180 Block* deepb = NULL; // Deepest block so far 181 int deepb_dom_depth = 0; 182 for (uint k = 0; k < n->len(); k++) { // For all inputs 183 Node* inn = n->in(k); // Get input 184 if (inn == NULL) continue; // Ignore NULL, missing inputs 185 Block* inb = cfg->get_block_for_node(inn); 186 assert(inb != NULL, "must already have scheduled this input"); 187 if (deepb_dom_depth < (int) inb->_dom_depth) { 188 // The new inb must be dominated by the previous deepb. 189 // The various inputs must be linearly ordered in the dom 190 // tree, or else there will not be a unique deepest block. 191 DEBUG_ONLY(assert_dom(deepb, inb, n, cfg)); 192 deepb = inb; // Save deepest block 193 deepb_dom_depth = deepb->_dom_depth; 194 } 195 } 196 assert(deepb != NULL, "must be at least one input to n"); 197 return deepb; 198 } 199 200 201 //------------------------------schedule_early--------------------------------- 202 // Find the earliest Block any instruction can be placed in. Some instructions 203 // are pinned into Blocks. Unpinned instructions can appear in last block in 204 // which all their inputs occur. 205 bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) { 206 // Allocate stack with enough space to avoid frequent realloc 207 Node_Stack nstack(roots.Size() + 8); 208 // _root will be processed among C->top() inputs 209 roots.push(C->top()); 210 visited.set(C->top()->_idx); 211 212 while (roots.size() != 0) { 213 // Use local variables nstack_top_n & nstack_top_i to cache values 214 // on stack's top. 215 Node* parent_node = roots.pop(); 216 uint input_index = 0; 217 218 while (true) { 219 if (input_index == 0) { 220 // Fixup some control. Constants without control get attached 221 // to root and nodes that use is_block_proj() nodes should be attached 222 // to the region that starts their block. 223 const Node* control_input = parent_node->in(0); 224 if (control_input != NULL) { 225 replace_block_proj_ctrl(parent_node); 226 } else { 227 // Is a constant with NO inputs? 228 if (parent_node->req() == 1) { 229 parent_node->set_req(0, _root); 230 } 231 } 232 } 233 234 // First, visit all inputs and force them to get a block. If an 235 // input is already in a block we quit following inputs (to avoid 236 // cycles). Instead we put that Node on a worklist to be handled 237 // later (since IT'S inputs may not have a block yet). 238 239 // Assume all n's inputs will be processed 240 bool done = true; 241 242 while (input_index < parent_node->len()) { 243 Node* in = parent_node->in(input_index++); 244 if (in == NULL) { 245 continue; 246 } 247 248 int is_visited = visited.test_set(in->_idx); 249 if (!has_block(in)) { 250 if (is_visited) { 251 return false; 252 } 253 // Save parent node and next input's index. 254 nstack.push(parent_node, input_index); 255 // Process current input now. 256 parent_node = in; 257 input_index = 0; 258 // Not all n's inputs processed. 259 done = false; 260 break; 261 } else if (!is_visited) { 262 // Visit this guy later, using worklist 263 roots.push(in); 264 } 265 } 266 267 if (done) { 268 // All of n's inputs have been processed, complete post-processing. 269 270 // Some instructions are pinned into a block. These include Region, 271 // Phi, Start, Return, and other control-dependent instructions and 272 // any projections which depend on them. 273 if (!parent_node->pinned()) { 274 // Set earliest legal block. 275 Block* earliest_block = find_deepest_input(parent_node, this); 276 map_node_to_block(parent_node, earliest_block); 277 } else { 278 assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge"); 279 } 280 281 if (nstack.is_empty()) { 282 // Finished all nodes on stack. 283 // Process next node on the worklist 'roots'. 284 break; 285 } 286 // Get saved parent node and next input's index. 287 parent_node = nstack.node(); 288 input_index = nstack.index(); 289 nstack.pop(); 290 } 291 } 292 } 293 return true; 294 } 295 296 //------------------------------dom_lca---------------------------------------- 297 // Find least common ancestor in dominator tree 298 // LCA is a current notion of LCA, to be raised above 'this'. 299 // As a convenient boundary condition, return 'this' if LCA is NULL. 300 // Find the LCA of those two nodes. 301 Block* Block::dom_lca(Block* LCA) { 302 if (LCA == NULL || LCA == this) return this; 303 304 Block* anc = this; 305 while (anc->_dom_depth > LCA->_dom_depth) 306 anc = anc->_idom; // Walk up till anc is as high as LCA 307 308 while (LCA->_dom_depth > anc->_dom_depth) 309 LCA = LCA->_idom; // Walk up till LCA is as high as anc 310 311 while (LCA != anc) { // Walk both up till they are the same 312 LCA = LCA->_idom; 313 anc = anc->_idom; 314 } 315 316 return LCA; 317 } 318 319 //--------------------------raise_LCA_above_use-------------------------------- 320 // We are placing a definition, and have been given a def->use edge. 321 // The definition must dominate the use, so move the LCA upward in the 322 // dominator tree to dominate the use. If the use is a phi, adjust 323 // the LCA only with the phi input paths which actually use this def. 324 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) { 325 Block* buse = cfg->get_block_for_node(use); 326 if (buse == NULL) return LCA; // Unused killing Projs have no use block 327 if (!use->is_Phi()) return buse->dom_lca(LCA); 328 uint pmax = use->req(); // Number of Phi inputs 329 // Why does not this loop just break after finding the matching input to 330 // the Phi? Well...it's like this. I do not have true def-use/use-def 331 // chains. Means I cannot distinguish, from the def-use direction, which 332 // of many use-defs lead from the same use to the same def. That is, this 333 // Phi might have several uses of the same def. Each use appears in a 334 // different predecessor block. But when I enter here, I cannot distinguish 335 // which use-def edge I should find the predecessor block for. So I find 336 // them all. Means I do a little extra work if a Phi uses the same value 337 // more than once. 338 for (uint j=1; j<pmax; j++) { // For all inputs 339 if (use->in(j) == def) { // Found matching input? 340 Block* pred = cfg->get_block_for_node(buse->pred(j)); 341 LCA = pred->dom_lca(LCA); 342 } 343 } 344 return LCA; 345 } 346 347 //----------------------------raise_LCA_above_marks---------------------------- 348 // Return a new LCA that dominates LCA and any of its marked predecessors. 349 // Search all my parents up to 'early' (exclusive), looking for predecessors 350 // which are marked with the given index. Return the LCA (in the dom tree) 351 // of all marked blocks. If there are none marked, return the original 352 // LCA. 353 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) { 354 Block_List worklist; 355 worklist.push(LCA); 356 while (worklist.size() > 0) { 357 Block* mid = worklist.pop(); 358 if (mid == early) continue; // stop searching here 359 360 // Test and set the visited bit. 361 if (mid->raise_LCA_visited() == mark) continue; // already visited 362 363 // Don't process the current LCA, otherwise the search may terminate early 364 if (mid != LCA && mid->raise_LCA_mark() == mark) { 365 // Raise the LCA. 366 LCA = mid->dom_lca(LCA); 367 if (LCA == early) break; // stop searching everywhere 368 assert(early->dominates(LCA), "early is high enough"); 369 // Resume searching at that point, skipping intermediate levels. 370 worklist.push(LCA); 371 if (LCA == mid) 372 continue; // Don't mark as visited to avoid early termination. 373 } else { 374 // Keep searching through this block's predecessors. 375 for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) { 376 Block* mid_parent = cfg->get_block_for_node(mid->pred(j)); 377 worklist.push(mid_parent); 378 } 379 } 380 mid->set_raise_LCA_visited(mark); 381 } 382 return LCA; 383 } 384 385 //--------------------------memory_early_block-------------------------------- 386 // This is a variation of find_deepest_input, the heart of schedule_early. 387 // Find the "early" block for a load, if we considered only memory and 388 // address inputs, that is, if other data inputs were ignored. 389 // 390 // Because a subset of edges are considered, the resulting block will 391 // be earlier (at a shallower dom_depth) than the true schedule_early 392 // point of the node. We compute this earlier block as a more permissive 393 // site for anti-dependency insertion, but only if subsume_loads is enabled. 394 static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) { 395 Node* base; 396 Node* index; 397 Node* store = load->in(MemNode::Memory); 398 load->as_Mach()->memory_inputs(base, index); 399 400 assert(base != NodeSentinel && index != NodeSentinel, 401 "unexpected base/index inputs"); 402 403 Node* mem_inputs[4]; 404 int mem_inputs_length = 0; 405 if (base != NULL) mem_inputs[mem_inputs_length++] = base; 406 if (index != NULL) mem_inputs[mem_inputs_length++] = index; 407 if (store != NULL) mem_inputs[mem_inputs_length++] = store; 408 409 // In the comparision below, add one to account for the control input, 410 // which may be null, but always takes up a spot in the in array. 411 if (mem_inputs_length + 1 < (int) load->req()) { 412 // This "load" has more inputs than just the memory, base and index inputs. 413 // For purposes of checking anti-dependences, we need to start 414 // from the early block of only the address portion of the instruction, 415 // and ignore other blocks that may have factored into the wider 416 // schedule_early calculation. 417 if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0); 418 419 Block* deepb = NULL; // Deepest block so far 420 int deepb_dom_depth = 0; 421 for (int i = 0; i < mem_inputs_length; i++) { 422 Block* inb = cfg->get_block_for_node(mem_inputs[i]); 423 if (deepb_dom_depth < (int) inb->_dom_depth) { 424 // The new inb must be dominated by the previous deepb. 425 // The various inputs must be linearly ordered in the dom 426 // tree, or else there will not be a unique deepest block. 427 DEBUG_ONLY(assert_dom(deepb, inb, load, cfg)); 428 deepb = inb; // Save deepest block 429 deepb_dom_depth = deepb->_dom_depth; 430 } 431 } 432 early = deepb; 433 } 434 435 return early; 436 } 437 438 //--------------------------insert_anti_dependences--------------------------- 439 // A load may need to witness memory that nearby stores can overwrite. 440 // For each nearby store, either insert an "anti-dependence" edge 441 // from the load to the store, or else move LCA upward to force the 442 // load to (eventually) be scheduled in a block above the store. 443 // 444 // Do not add edges to stores on distinct control-flow paths; 445 // only add edges to stores which might interfere. 446 // 447 // Return the (updated) LCA. There will not be any possibly interfering 448 // store between the load's "early block" and the updated LCA. 449 // Any stores in the updated LCA will have new precedence edges 450 // back to the load. The caller is expected to schedule the load 451 // in the LCA, in which case the precedence edges will make LCM 452 // preserve anti-dependences. The caller may also hoist the load 453 // above the LCA, if it is not the early block. 454 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { 455 assert(load->needs_anti_dependence_check(), "must be a load of some sort"); 456 assert(LCA != NULL, ""); 457 DEBUG_ONLY(Block* LCA_orig = LCA); 458 459 // Compute the alias index. Loads and stores with different alias indices 460 // do not need anti-dependence edges. 461 uint load_alias_idx = C->get_alias_index(load->adr_type()); 462 #ifdef ASSERT 463 if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 && 464 (PrintOpto || VerifyAliases || 465 PrintMiscellaneous && (WizardMode || Verbose))) { 466 // Load nodes should not consume all of memory. 467 // Reporting a bottom type indicates a bug in adlc. 468 // If some particular type of node validly consumes all of memory, 469 // sharpen the preceding "if" to exclude it, so we can catch bugs here. 470 tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory."); 471 load->dump(2); 472 if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, ""); 473 } 474 #endif 475 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp), 476 "String compare is only known 'load' that does not conflict with any stores"); 477 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals), 478 "String equals is a 'load' that does not conflict with any stores"); 479 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf), 480 "String indexOf is a 'load' that does not conflict with any stores"); 481 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq), 482 "Arrays equals is a 'load' that do not conflict with any stores"); 483 484 if (!C->alias_type(load_alias_idx)->is_rewritable()) { 485 // It is impossible to spoil this load by putting stores before it, 486 // because we know that the stores will never update the value 487 // which 'load' must witness. 488 return LCA; 489 } 490 491 node_idx_t load_index = load->_idx; 492 493 // Note the earliest legal placement of 'load', as determined by 494 // by the unique point in the dom tree where all memory effects 495 // and other inputs are first available. (Computed by schedule_early.) 496 // For normal loads, 'early' is the shallowest place (dom graph wise) 497 // to look for anti-deps between this load and any store. 498 Block* early = get_block_for_node(load); 499 500 // If we are subsuming loads, compute an "early" block that only considers 501 // memory or address inputs. This block may be different than the 502 // schedule_early block in that it could be at an even shallower depth in the 503 // dominator tree, and allow for a broader discovery of anti-dependences. 504 if (C->subsume_loads()) { 505 early = memory_early_block(load, early, this); 506 } 507 508 ResourceArea *area = Thread::current()->resource_area(); 509 Node_List worklist_mem(area); // prior memory state to store 510 Node_List worklist_store(area); // possible-def to explore 511 Node_List worklist_visited(area); // visited mergemem nodes 512 Node_List non_early_stores(area); // all relevant stores outside of early 513 bool must_raise_LCA = false; 514 515 #ifdef TRACK_PHI_INPUTS 516 // %%% This extra checking fails because MergeMem nodes are not GVNed. 517 // Provide "phi_inputs" to check if every input to a PhiNode is from the 518 // original memory state. This indicates a PhiNode for which should not 519 // prevent the load from sinking. For such a block, set_raise_LCA_mark 520 // may be overly conservative. 521 // Mechanism: count inputs seen for each Phi encountered in worklist_store. 522 DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0)); 523 #endif 524 525 // 'load' uses some memory state; look for users of the same state. 526 // Recurse through MergeMem nodes to the stores that use them. 527 528 // Each of these stores is a possible definition of memory 529 // that 'load' needs to use. We need to force 'load' 530 // to occur before each such store. When the store is in 531 // the same block as 'load', we insert an anti-dependence 532 // edge load->store. 533 534 // The relevant stores "nearby" the load consist of a tree rooted 535 // at initial_mem, with internal nodes of type MergeMem. 536 // Therefore, the branches visited by the worklist are of this form: 537 // initial_mem -> (MergeMem ->)* store 538 // The anti-dependence constraints apply only to the fringe of this tree. 539 540 Node* initial_mem = load->in(MemNode::Memory); 541 worklist_store.push(initial_mem); 542 worklist_visited.push(initial_mem); 543 worklist_mem.push(NULL); 544 while (worklist_store.size() > 0) { 545 // Examine a nearby store to see if it might interfere with our load. 546 Node* mem = worklist_mem.pop(); 547 Node* store = worklist_store.pop(); 548 uint op = store->Opcode(); 549 550 // MergeMems do not directly have anti-deps. 551 // Treat them as internal nodes in a forward tree of memory states, 552 // the leaves of which are each a 'possible-def'. 553 if (store == initial_mem // root (exclusive) of tree we are searching 554 || op == Op_MergeMem // internal node of tree we are searching 555 ) { 556 mem = store; // It's not a possibly interfering store. 557 if (store == initial_mem) 558 initial_mem = NULL; // only process initial memory once 559 560 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { 561 store = mem->fast_out(i); 562 if (store->is_MergeMem()) { 563 // Be sure we don't get into combinatorial problems. 564 // (Allow phis to be repeated; they can merge two relevant states.) 565 uint j = worklist_visited.size(); 566 for (; j > 0; j--) { 567 if (worklist_visited.at(j-1) == store) break; 568 } 569 if (j > 0) continue; // already on work list; do not repeat 570 worklist_visited.push(store); 571 } 572 worklist_mem.push(mem); 573 worklist_store.push(store); 574 } 575 continue; 576 } 577 578 if (op == Op_MachProj || op == Op_Catch) continue; 579 if (store->needs_anti_dependence_check()) continue; // not really a store 580 581 // Compute the alias index. Loads and stores with different alias 582 // indices do not need anti-dependence edges. Wide MemBar's are 583 // anti-dependent on everything (except immutable memories). 584 const TypePtr* adr_type = store->adr_type(); 585 if (!C->can_alias(adr_type, load_alias_idx)) continue; 586 587 // Most slow-path runtime calls do NOT modify Java memory, but 588 // they can block and so write Raw memory. 589 if (store->is_Mach()) { 590 MachNode* mstore = store->as_Mach(); 591 if (load_alias_idx != Compile::AliasIdxRaw) { 592 // Check for call into the runtime using the Java calling 593 // convention (and from there into a wrapper); it has no 594 // _method. Can't do this optimization for Native calls because 595 // they CAN write to Java memory. 596 if (mstore->ideal_Opcode() == Op_CallStaticJava) { 597 assert(mstore->is_MachSafePoint(), ""); 598 MachSafePointNode* ms = (MachSafePointNode*) mstore; 599 assert(ms->is_MachCallJava(), ""); 600 MachCallJavaNode* mcj = (MachCallJavaNode*) ms; 601 if (mcj->_method == NULL) { 602 // These runtime calls do not write to Java visible memory 603 // (other than Raw) and so do not require anti-dependence edges. 604 continue; 605 } 606 } 607 // Same for SafePoints: they read/write Raw but only read otherwise. 608 // This is basically a workaround for SafePoints only defining control 609 // instead of control + memory. 610 if (mstore->ideal_Opcode() == Op_SafePoint) 611 continue; 612 } else { 613 // Some raw memory, such as the load of "top" at an allocation, 614 // can be control dependent on the previous safepoint. See 615 // comments in GraphKit::allocate_heap() about control input. 616 // Inserting an anti-dep between such a safepoint and a use 617 // creates a cycle, and will cause a subsequent failure in 618 // local scheduling. (BugId 4919904) 619 // (%%% How can a control input be a safepoint and not a projection??) 620 if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore) 621 continue; 622 } 623 } 624 625 // Identify a block that the current load must be above, 626 // or else observe that 'store' is all the way up in the 627 // earliest legal block for 'load'. In the latter case, 628 // immediately insert an anti-dependence edge. 629 Block* store_block = get_block_for_node(store); 630 assert(store_block != NULL, "unused killing projections skipped above"); 631 632 if (store->is_Phi()) { 633 // 'load' uses memory which is one (or more) of the Phi's inputs. 634 // It must be scheduled not before the Phi, but rather before 635 // each of the relevant Phi inputs. 636 // 637 // Instead of finding the LCA of all inputs to a Phi that match 'mem', 638 // we mark each corresponding predecessor block and do a combined 639 // hoisting operation later (raise_LCA_above_marks). 640 // 641 // Do not assert(store_block != early, "Phi merging memory after access") 642 // PhiNode may be at start of block 'early' with backedge to 'early' 643 DEBUG_ONLY(bool found_match = false); 644 for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) { 645 if (store->in(j) == mem) { // Found matching input? 646 DEBUG_ONLY(found_match = true); 647 Block* pred_block = get_block_for_node(store_block->pred(j)); 648 if (pred_block != early) { 649 // If any predecessor of the Phi matches the load's "early block", 650 // we do not need a precedence edge between the Phi and 'load' 651 // since the load will be forced into a block preceding the Phi. 652 pred_block->set_raise_LCA_mark(load_index); 653 assert(!LCA_orig->dominates(pred_block) || 654 early->dominates(pred_block), "early is high enough"); 655 must_raise_LCA = true; 656 } else { 657 // anti-dependent upon PHI pinned below 'early', no edge needed 658 LCA = early; // but can not schedule below 'early' 659 } 660 } 661 } 662 assert(found_match, "no worklist bug"); 663 #ifdef TRACK_PHI_INPUTS 664 #ifdef ASSERT 665 // This assert asks about correct handling of PhiNodes, which may not 666 // have all input edges directly from 'mem'. See BugId 4621264 667 int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1; 668 // Increment by exactly one even if there are multiple copies of 'mem' 669 // coming into the phi, because we will run this block several times 670 // if there are several copies of 'mem'. (That's how DU iterators work.) 671 phi_inputs.at_put(store->_idx, num_mem_inputs); 672 assert(PhiNode::Input + num_mem_inputs < store->req(), 673 "Expect at least one phi input will not be from original memory state"); 674 #endif //ASSERT 675 #endif //TRACK_PHI_INPUTS 676 } else if (store_block != early) { 677 // 'store' is between the current LCA and earliest possible block. 678 // Label its block, and decide later on how to raise the LCA 679 // to include the effect on LCA of this store. 680 // If this store's block gets chosen as the raised LCA, we 681 // will find him on the non_early_stores list and stick him 682 // with a precedence edge. 683 // (But, don't bother if LCA is already raised all the way.) 684 if (LCA != early) { 685 store_block->set_raise_LCA_mark(load_index); 686 must_raise_LCA = true; 687 non_early_stores.push(store); 688 } 689 } else { 690 // Found a possibly-interfering store in the load's 'early' block. 691 // This means 'load' cannot sink at all in the dominator tree. 692 // Add an anti-dep edge, and squeeze 'load' into the highest block. 693 assert(store != load->in(0), "dependence cycle found"); 694 if (verify) { 695 assert(store->find_edge(load) != -1, "missing precedence edge"); 696 } else { 697 store->add_prec(load); 698 } 699 LCA = early; 700 // This turns off the process of gathering non_early_stores. 701 } 702 } 703 // (Worklist is now empty; all nearby stores have been visited.) 704 705 // Finished if 'load' must be scheduled in its 'early' block. 706 // If we found any stores there, they have already been given 707 // precedence edges. 708 if (LCA == early) return LCA; 709 710 // We get here only if there are no possibly-interfering stores 711 // in the load's 'early' block. Move LCA up above all predecessors 712 // which contain stores we have noted. 713 // 714 // The raised LCA block can be a home to such interfering stores, 715 // but its predecessors must not contain any such stores. 716 // 717 // The raised LCA will be a lower bound for placing the load, 718 // preventing the load from sinking past any block containing 719 // a store that may invalidate the memory state required by 'load'. 720 if (must_raise_LCA) 721 LCA = raise_LCA_above_marks(LCA, load->_idx, early, this); 722 if (LCA == early) return LCA; 723 724 // Insert anti-dependence edges from 'load' to each store 725 // in the non-early LCA block. 726 // Mine the non_early_stores list for such stores. 727 if (LCA->raise_LCA_mark() == load_index) { 728 while (non_early_stores.size() > 0) { 729 Node* store = non_early_stores.pop(); 730 Block* store_block = get_block_for_node(store); 731 if (store_block == LCA) { 732 // add anti_dependence from store to load in its own block 733 assert(store != load->in(0), "dependence cycle found"); 734 if (verify) { 735 assert(store->find_edge(load) != -1, "missing precedence edge"); 736 } else { 737 store->add_prec(load); 738 } 739 } else { 740 assert(store_block->raise_LCA_mark() == load_index, "block was marked"); 741 // Any other stores we found must be either inside the new LCA 742 // or else outside the original LCA. In the latter case, they 743 // did not interfere with any use of 'load'. 744 assert(LCA->dominates(store_block) 745 || !LCA_orig->dominates(store_block), "no stray stores"); 746 } 747 } 748 } 749 750 // Return the highest block containing stores; any stores 751 // within that block have been given anti-dependence edges. 752 return LCA; 753 } 754 755 // This class is used to iterate backwards over the nodes in the graph. 756 757 class Node_Backward_Iterator { 758 759 private: 760 Node_Backward_Iterator(); 761 762 public: 763 // Constructor for the iterator 764 Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg); 765 766 // Postincrement operator to iterate over the nodes 767 Node *next(); 768 769 private: 770 VectorSet &_visited; 771 Node_List &_stack; 772 PhaseCFG &_cfg; 773 }; 774 775 // Constructor for the Node_Backward_Iterator 776 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg) 777 : _visited(visited), _stack(stack), _cfg(cfg) { 778 // The stack should contain exactly the root 779 stack.clear(); 780 stack.push(root); 781 782 // Clear the visited bits 783 visited.Clear(); 784 } 785 786 // Iterator for the Node_Backward_Iterator 787 Node *Node_Backward_Iterator::next() { 788 789 // If the _stack is empty, then just return NULL: finished. 790 if ( !_stack.size() ) 791 return NULL; 792 793 // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been 794 // made stateless, so I do not need to record the index 'i' on my _stack. 795 // Instead I visit all users each time, scanning for unvisited users. 796 // I visit unvisited not-anti-dependence users first, then anti-dependent 797 // children next. 798 Node *self = _stack.pop(); 799 800 // I cycle here when I am entering a deeper level of recursion. 801 // The key variable 'self' was set prior to jumping here. 802 while( 1 ) { 803 804 _visited.set(self->_idx); 805 806 // Now schedule all uses as late as possible. 807 const Node* src = self->is_Proj() ? self->in(0) : self; 808 uint src_rpo = _cfg.get_block_for_node(src)->_rpo; 809 810 // Schedule all nodes in a post-order visit 811 Node *unvisited = NULL; // Unvisited anti-dependent Node, if any 812 813 // Scan for unvisited nodes 814 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { 815 // For all uses, schedule late 816 Node* n = self->fast_out(i); // Use 817 818 // Skip already visited children 819 if ( _visited.test(n->_idx) ) 820 continue; 821 822 // do not traverse backward control edges 823 Node *use = n->is_Proj() ? n->in(0) : n; 824 uint use_rpo = _cfg.get_block_for_node(use)->_rpo; 825 826 if ( use_rpo < src_rpo ) 827 continue; 828 829 // Phi nodes always precede uses in a basic block 830 if ( use_rpo == src_rpo && use->is_Phi() ) 831 continue; 832 833 unvisited = n; // Found unvisited 834 835 // Check for possible-anti-dependent 836 if( !n->needs_anti_dependence_check() ) 837 break; // Not visited, not anti-dep; schedule it NOW 838 } 839 840 // Did I find an unvisited not-anti-dependent Node? 841 if ( !unvisited ) 842 break; // All done with children; post-visit 'self' 843 844 // Visit the unvisited Node. Contains the obvious push to 845 // indicate I'm entering a deeper level of recursion. I push the 846 // old state onto the _stack and set a new state and loop (recurse). 847 _stack.push(self); 848 self = unvisited; 849 } // End recursion loop 850 851 return self; 852 } 853 854 //------------------------------ComputeLatenciesBackwards---------------------- 855 // Compute the latency of all the instructions. 856 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) { 857 #ifndef PRODUCT 858 if (trace_opto_pipelining()) 859 tty->print("\n#---- ComputeLatenciesBackwards ----\n"); 860 #endif 861 862 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this); 863 Node *n; 864 865 // Walk over all the nodes from last to first 866 while (n = iter.next()) { 867 // Set the latency for the definitions of this instruction 868 partial_latency_of_defs(n); 869 } 870 } // end ComputeLatenciesBackwards 871 872 //------------------------------partial_latency_of_defs------------------------ 873 // Compute the latency impact of this node on all defs. This computes 874 // a number that increases as we approach the beginning of the routine. 875 void PhaseCFG::partial_latency_of_defs(Node *n) { 876 // Set the latency for this instruction 877 #ifndef PRODUCT 878 if (trace_opto_pipelining()) { 879 tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); 880 dump(); 881 } 882 #endif 883 884 if (n->is_Proj()) { 885 n = n->in(0); 886 } 887 888 if (n->is_Root()) { 889 return; 890 } 891 892 uint nlen = n->len(); 893 uint use_latency = get_latency_for_node(n); 894 uint use_pre_order = get_block_for_node(n)->_pre_order; 895 896 for (uint j = 0; j < nlen; j++) { 897 Node *def = n->in(j); 898 899 if (!def || def == n) { 900 continue; 901 } 902 903 // Walk backwards thru projections 904 if (def->is_Proj()) { 905 def = def->in(0); 906 } 907 908 #ifndef PRODUCT 909 if (trace_opto_pipelining()) { 910 tty->print("# in(%2d): ", j); 911 def->dump(); 912 } 913 #endif 914 915 // If the defining block is not known, assume it is ok 916 Block *def_block = get_block_for_node(def); 917 uint def_pre_order = def_block ? def_block->_pre_order : 0; 918 919 if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) { 920 continue; 921 } 922 923 uint delta_latency = n->latency(j); 924 uint current_latency = delta_latency + use_latency; 925 926 if (get_latency_for_node(def) < current_latency) { 927 set_latency_for_node(def, current_latency); 928 } 929 930 #ifndef PRODUCT 931 if (trace_opto_pipelining()) { 932 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def)); 933 } 934 #endif 935 } 936 } 937 938 //------------------------------latency_from_use------------------------------- 939 // Compute the latency of a specific use 940 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) { 941 // If self-reference, return no latency 942 if (use == n || use->is_Root()) { 943 return 0; 944 } 945 946 uint def_pre_order = get_block_for_node(def)->_pre_order; 947 uint latency = 0; 948 949 // If the use is not a projection, then it is simple... 950 if (!use->is_Proj()) { 951 #ifndef PRODUCT 952 if (trace_opto_pipelining()) { 953 tty->print("# out(): "); 954 use->dump(); 955 } 956 #endif 957 958 uint use_pre_order = get_block_for_node(use)->_pre_order; 959 960 if (use_pre_order < def_pre_order) 961 return 0; 962 963 if (use_pre_order == def_pre_order && use->is_Phi()) 964 return 0; 965 966 uint nlen = use->len(); 967 uint nl = get_latency_for_node(use); 968 969 for ( uint j=0; j<nlen; j++ ) { 970 if (use->in(j) == n) { 971 // Change this if we want local latencies 972 uint ul = use->latency(j); 973 uint l = ul + nl; 974 if (latency < l) latency = l; 975 #ifndef PRODUCT 976 if (trace_opto_pipelining()) { 977 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d", 978 nl, j, ul, l, latency); 979 } 980 #endif 981 } 982 } 983 } else { 984 // This is a projection, just grab the latency of the use(s) 985 for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) { 986 uint l = latency_from_use(use, def, use->fast_out(j)); 987 if (latency < l) latency = l; 988 } 989 } 990 991 return latency; 992 } 993 994 //------------------------------latency_from_uses------------------------------ 995 // Compute the latency of this instruction relative to all of it's uses. 996 // This computes a number that increases as we approach the beginning of the 997 // routine. 998 void PhaseCFG::latency_from_uses(Node *n) { 999 // Set the latency for this instruction 1000 #ifndef PRODUCT 1001 if (trace_opto_pipelining()) { 1002 tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); 1003 dump(); 1004 } 1005 #endif 1006 uint latency=0; 1007 const Node *def = n->is_Proj() ? n->in(0): n; 1008 1009 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 1010 uint l = latency_from_use(n, def, n->fast_out(i)); 1011 1012 if (latency < l) latency = l; 1013 } 1014 1015 set_latency_for_node(n, latency); 1016 } 1017 1018 //------------------------------hoist_to_cheaper_block------------------------- 1019 // Pick a block for node self, between early and LCA, that is a cheaper 1020 // alternative to LCA. 1021 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { 1022 const double delta = 1+PROB_UNLIKELY_MAG(4); 1023 Block* least = LCA; 1024 double least_freq = least->_freq; 1025 uint target = get_latency_for_node(self); 1026 uint start_latency = get_latency_for_node(LCA->head()); 1027 uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx())); 1028 bool in_latency = (target <= start_latency); 1029 const Block* root_block = get_block_for_node(_root); 1030 1031 // Turn off latency scheduling if scheduling is just plain off 1032 if (!C->do_scheduling()) 1033 in_latency = true; 1034 1035 // Do not hoist (to cover latency) instructions which target a 1036 // single register. Hoisting stretches the live range of the 1037 // single register and may force spilling. 1038 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; 1039 if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty()) 1040 in_latency = true; 1041 1042 #ifndef PRODUCT 1043 if (trace_opto_pipelining()) { 1044 tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self)); 1045 self->dump(); 1046 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", 1047 LCA->_pre_order, 1048 LCA->head()->_idx, 1049 start_latency, 1050 LCA->get_node(LCA->end_idx())->_idx, 1051 end_latency, 1052 least_freq); 1053 } 1054 #endif 1055 1056 int cand_cnt = 0; // number of candidates tried 1057 1058 // Walk up the dominator tree from LCA (Lowest common ancestor) to 1059 // the earliest legal location. Capture the least execution frequency. 1060 while (LCA != early) { 1061 LCA = LCA->_idom; // Follow up the dominator tree 1062 1063 if (LCA == NULL) { 1064 // Bailout without retry 1065 C->record_method_not_compilable("late schedule failed: LCA == NULL"); 1066 return least; 1067 } 1068 1069 // Don't hoist machine instructions to the root basic block 1070 if (mach && LCA == root_block) 1071 break; 1072 1073 uint start_lat = get_latency_for_node(LCA->head()); 1074 uint end_idx = LCA->end_idx(); 1075 uint end_lat = get_latency_for_node(LCA->get_node(end_idx)); 1076 double LCA_freq = LCA->_freq; 1077 #ifndef PRODUCT 1078 if (trace_opto_pipelining()) { 1079 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", 1080 LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq); 1081 } 1082 #endif 1083 cand_cnt++; 1084 if (LCA_freq < least_freq || // Better Frequency 1085 (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode 1086 (!StressGCM && // Otherwise, choose with latency 1087 !in_latency && // No block containing latency 1088 LCA_freq < least_freq * delta && // No worse frequency 1089 target >= end_lat && // within latency range 1090 !self->is_iteratively_computed() ) // But don't hoist IV increments 1091 // because they may end up above other uses of their phi forcing 1092 // their result register to be different from their input. 1093 ) { 1094 least = LCA; // Found cheaper block 1095 least_freq = LCA_freq; 1096 start_latency = start_lat; 1097 end_latency = end_lat; 1098 if (target <= start_lat) 1099 in_latency = true; 1100 } 1101 } 1102 1103 #ifndef PRODUCT 1104 if (trace_opto_pipelining()) { 1105 tty->print_cr("# Choose block B%d with start latency=%d and freq=%g", 1106 least->_pre_order, start_latency, least_freq); 1107 } 1108 #endif 1109 1110 // See if the latency needs to be updated 1111 if (target < end_latency) { 1112 #ifndef PRODUCT 1113 if (trace_opto_pipelining()) { 1114 tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency); 1115 } 1116 #endif 1117 set_latency_for_node(self, end_latency); 1118 partial_latency_of_defs(self); 1119 } 1120 1121 return least; 1122 } 1123 1124 1125 //------------------------------schedule_late----------------------------------- 1126 // Now schedule all codes as LATE as possible. This is the LCA in the 1127 // dominator tree of all USES of a value. Pick the block with the least 1128 // loop nesting depth that is lowest in the dominator tree. 1129 extern const char must_clone[]; 1130 void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) { 1131 #ifndef PRODUCT 1132 if (trace_opto_pipelining()) 1133 tty->print("\n#---- schedule_late ----\n"); 1134 #endif 1135 1136 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this); 1137 Node *self; 1138 1139 // Walk over all the nodes from last to first 1140 while (self = iter.next()) { 1141 Block* early = get_block_for_node(self); // Earliest legal placement 1142 1143 if (self->is_top()) { 1144 // Top node goes in bb #2 with other constants. 1145 // It must be special-cased, because it has no out edges. 1146 early->add_inst(self); 1147 continue; 1148 } 1149 1150 // No uses, just terminate 1151 if (self->outcnt() == 0) { 1152 assert(self->is_MachProj(), "sanity"); 1153 continue; // Must be a dead machine projection 1154 } 1155 1156 // If node is pinned in the block, then no scheduling can be done. 1157 if( self->pinned() ) // Pinned in block? 1158 continue; 1159 1160 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; 1161 if (mach) { 1162 switch (mach->ideal_Opcode()) { 1163 case Op_CreateEx: 1164 // Don't move exception creation 1165 early->add_inst(self); 1166 continue; 1167 break; 1168 case Op_CheckCastPP: 1169 // Don't move CheckCastPP nodes away from their input, if the input 1170 // is a rawptr (5071820). 1171 Node *def = self->in(1); 1172 if (def != NULL && def->bottom_type()->base() == Type::RawPtr) { 1173 early->add_inst(self); 1174 #ifdef ASSERT 1175 _raw_oops.push(def); 1176 #endif 1177 continue; 1178 } 1179 break; 1180 } 1181 } 1182 1183 // Gather LCA of all uses 1184 Block *LCA = NULL; 1185 { 1186 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { 1187 // For all uses, find LCA 1188 Node* use = self->fast_out(i); 1189 LCA = raise_LCA_above_use(LCA, use, self, this); 1190 } 1191 } // (Hide defs of imax, i from rest of block.) 1192 1193 // Place temps in the block of their use. This isn't a 1194 // requirement for correctness but it reduces useless 1195 // interference between temps and other nodes. 1196 if (mach != NULL && mach->is_MachTemp()) { 1197 map_node_to_block(self, LCA); 1198 LCA->add_inst(self); 1199 continue; 1200 } 1201 1202 // Check if 'self' could be anti-dependent on memory 1203 if (self->needs_anti_dependence_check()) { 1204 // Hoist LCA above possible-defs and insert anti-dependences to 1205 // defs in new LCA block. 1206 LCA = insert_anti_dependences(LCA, self); 1207 } 1208 1209 if (early->_dom_depth > LCA->_dom_depth) { 1210 // Somehow the LCA has moved above the earliest legal point. 1211 // (One way this can happen is via memory_early_block.) 1212 if (C->subsume_loads() == true && !C->failing()) { 1213 // Retry with subsume_loads == false 1214 // If this is the first failure, the sentinel string will "stick" 1215 // to the Compile object, and the C2Compiler will see it and retry. 1216 C->record_failure(C2Compiler::retry_no_subsuming_loads()); 1217 } else { 1218 // Bailout without retry when (early->_dom_depth > LCA->_dom_depth) 1219 C->record_method_not_compilable("late schedule failed: incorrect graph"); 1220 } 1221 return; 1222 } 1223 1224 // If there is no opportunity to hoist, then we're done. 1225 // In stress mode, try to hoist even the single operations. 1226 bool try_to_hoist = StressGCM || (LCA != early); 1227 1228 // Must clone guys stay next to use; no hoisting allowed. 1229 // Also cannot hoist guys that alter memory or are otherwise not 1230 // allocatable (hoisting can make a value live longer, leading to 1231 // anti and output dependency problems which are normally resolved 1232 // by the register allocator giving everyone a different register). 1233 if (mach != NULL && must_clone[mach->ideal_Opcode()]) 1234 try_to_hoist = false; 1235 1236 Block* late = NULL; 1237 if (try_to_hoist) { 1238 // Now find the block with the least execution frequency. 1239 // Start at the latest schedule and work up to the earliest schedule 1240 // in the dominator tree. Thus the Node will dominate all its uses. 1241 late = hoist_to_cheaper_block(LCA, early, self); 1242 } else { 1243 // Just use the LCA of the uses. 1244 late = LCA; 1245 } 1246 1247 // Put the node into target block 1248 schedule_node_into_block(self, late); 1249 1250 #ifdef ASSERT 1251 if (self->needs_anti_dependence_check()) { 1252 // since precedence edges are only inserted when we're sure they 1253 // are needed make sure that after placement in a block we don't 1254 // need any new precedence edges. 1255 verify_anti_dependences(late, self); 1256 } 1257 #endif 1258 } // Loop until all nodes have been visited 1259 1260 } // end ScheduleLate 1261 1262 //------------------------------GlobalCodeMotion------------------------------- 1263 void PhaseCFG::global_code_motion() { 1264 ResourceMark rm; 1265 1266 #ifndef PRODUCT 1267 if (trace_opto_pipelining()) { 1268 tty->print("\n---- Start GlobalCodeMotion ----\n"); 1269 } 1270 #endif 1271 1272 // Initialize the node to block mapping for things on the proj_list 1273 for (uint i = 0; i < _matcher.number_of_projections(); i++) { 1274 unmap_node_from_block(_matcher.get_projection(i)); 1275 } 1276 1277 // Set the basic block for Nodes pinned into blocks 1278 Arena* arena = Thread::current()->resource_area(); 1279 VectorSet visited(arena); 1280 schedule_pinned_nodes(visited); 1281 1282 // Find the earliest Block any instruction can be placed in. Some 1283 // instructions are pinned into Blocks. Unpinned instructions can 1284 // appear in last block in which all their inputs occur. 1285 visited.Clear(); 1286 Node_List stack(arena); 1287 // Pre-grow the list 1288 stack.map((C->unique() >> 1) + 16, NULL); 1289 if (!schedule_early(visited, stack)) { 1290 // Bailout without retry 1291 C->record_method_not_compilable("early schedule failed"); 1292 return; 1293 } 1294 1295 // Build Def-Use edges. 1296 // Compute the latency information (via backwards walk) for all the 1297 // instructions in the graph 1298 _node_latency = new GrowableArray<uint>(); // resource_area allocation 1299 1300 if (C->do_scheduling()) { 1301 compute_latencies_backwards(visited, stack); 1302 } 1303 1304 // Now schedule all codes as LATE as possible. This is the LCA in the 1305 // dominator tree of all USES of a value. Pick the block with the least 1306 // loop nesting depth that is lowest in the dominator tree. 1307 // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() ) 1308 schedule_late(visited, stack); 1309 if (C->failing()) { 1310 // schedule_late fails only when graph is incorrect. 1311 assert(!VerifyGraphEdges, "verification should have failed"); 1312 return; 1313 } 1314 1315 #ifndef PRODUCT 1316 if (trace_opto_pipelining()) { 1317 tty->print("\n---- Detect implicit null checks ----\n"); 1318 } 1319 #endif 1320 1321 // Detect implicit-null-check opportunities. Basically, find NULL checks 1322 // with suitable memory ops nearby. Use the memory op to do the NULL check. 1323 // I can generate a memory op if there is not one nearby. 1324 if (C->is_method_compilation()) { 1325 // By reversing the loop direction we get a very minor gain on mpegaudio. 1326 // Feel free to revert to a forward loop for clarity. 1327 // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) { 1328 for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) { 1329 Node* proj = _matcher._null_check_tests[i]; 1330 Node* val = _matcher._null_check_tests[i + 1]; 1331 Block* block = get_block_for_node(proj); 1332 implicit_null_check(block, proj, val, C->allowed_deopt_reasons()); 1333 // The implicit_null_check will only perform the transformation 1334 // if the null branch is truly uncommon, *and* it leads to an 1335 // uncommon trap. Combined with the too_many_traps guards 1336 // above, this prevents SEGV storms reported in 6366351, 1337 // by recompiling offending methods without this optimization. 1338 } 1339 } 1340 1341 #ifndef PRODUCT 1342 if (trace_opto_pipelining()) { 1343 tty->print("\n---- Start Local Scheduling ----\n"); 1344 } 1345 #endif 1346 1347 // Schedule locally. Right now a simple topological sort. 1348 // Later, do a real latency aware scheduler. 1349 GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1); 1350 visited.Clear(); 1351 for (uint i = 0; i < number_of_blocks(); i++) { 1352 Block* block = get_block(i); 1353 if (!schedule_local(block, ready_cnt, visited)) { 1354 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { 1355 C->record_method_not_compilable("local schedule failed"); 1356 } 1357 return; 1358 } 1359 } 1360 1361 // If we inserted any instructions between a Call and his CatchNode, 1362 // clone the instructions on all paths below the Catch. 1363 for (uint i = 0; i < number_of_blocks(); i++) { 1364 Block* block = get_block(i); 1365 call_catch_cleanup(block); 1366 } 1367 1368 #ifndef PRODUCT 1369 if (trace_opto_pipelining()) { 1370 tty->print("\n---- After GlobalCodeMotion ----\n"); 1371 for (uint i = 0; i < number_of_blocks(); i++) { 1372 Block* block = get_block(i); 1373 block->dump(); 1374 } 1375 } 1376 #endif 1377 // Dead. 1378 _node_latency = (GrowableArray<uint> *)0xdeadbeef; 1379 } 1380 1381 bool PhaseCFG::do_global_code_motion() { 1382 1383 build_dominator_tree(); 1384 if (C->failing()) { 1385 return false; 1386 } 1387 1388 NOT_PRODUCT( C->verify_graph_edges(); ) 1389 1390 estimate_block_frequency(); 1391 1392 global_code_motion(); 1393 1394 if (C->failing()) { 1395 return false; 1396 } 1397 1398 return true; 1399 } 1400 1401 //------------------------------Estimate_Block_Frequency----------------------- 1402 // Estimate block frequencies based on IfNode probabilities. 1403 void PhaseCFG::estimate_block_frequency() { 1404 1405 // Force conditional branches leading to uncommon traps to be unlikely, 1406 // not because we get to the uncommon_trap with less relative frequency, 1407 // but because an uncommon_trap typically causes a deopt, so we only get 1408 // there once. 1409 if (C->do_freq_based_layout()) { 1410 Block_List worklist; 1411 Block* root_blk = get_block(0); 1412 for (uint i = 1; i < root_blk->num_preds(); i++) { 1413 Block *pb = get_block_for_node(root_blk->pred(i)); 1414 if (pb->has_uncommon_code()) { 1415 worklist.push(pb); 1416 } 1417 } 1418 while (worklist.size() > 0) { 1419 Block* uct = worklist.pop(); 1420 if (uct == get_root_block()) { 1421 continue; 1422 } 1423 for (uint i = 1; i < uct->num_preds(); i++) { 1424 Block *pb = get_block_for_node(uct->pred(i)); 1425 if (pb->_num_succs == 1) { 1426 worklist.push(pb); 1427 } else if (pb->num_fall_throughs() == 2) { 1428 pb->update_uncommon_branch(uct); 1429 } 1430 } 1431 } 1432 } 1433 1434 // Create the loop tree and calculate loop depth. 1435 _root_loop = create_loop_tree(); 1436 _root_loop->compute_loop_depth(0); 1437 1438 // Compute block frequency of each block, relative to a single loop entry. 1439 _root_loop->compute_freq(); 1440 1441 // Adjust all frequencies to be relative to a single method entry 1442 _root_loop->_freq = 1.0; 1443 _root_loop->scale_freq(); 1444 1445 // Save outmost loop frequency for LRG frequency threshold 1446 _outer_loop_frequency = _root_loop->outer_loop_freq(); 1447 1448 // force paths ending at uncommon traps to be infrequent 1449 if (!C->do_freq_based_layout()) { 1450 Block_List worklist; 1451 Block* root_blk = get_block(0); 1452 for (uint i = 1; i < root_blk->num_preds(); i++) { 1453 Block *pb = get_block_for_node(root_blk->pred(i)); 1454 if (pb->has_uncommon_code()) { 1455 worklist.push(pb); 1456 } 1457 } 1458 while (worklist.size() > 0) { 1459 Block* uct = worklist.pop(); 1460 uct->_freq = PROB_MIN; 1461 for (uint i = 1; i < uct->num_preds(); i++) { 1462 Block *pb = get_block_for_node(uct->pred(i)); 1463 if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { 1464 worklist.push(pb); 1465 } 1466 } 1467 } 1468 } 1469 1470 #ifdef ASSERT 1471 for (uint i = 0; i < number_of_blocks(); i++) { 1472 Block* b = get_block(i); 1473 assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); 1474 } 1475 #endif 1476 1477 #ifndef PRODUCT 1478 if (PrintCFGBlockFreq) { 1479 tty->print_cr("CFG Block Frequencies"); 1480 _root_loop->dump_tree(); 1481 if (Verbose) { 1482 tty->print_cr("PhaseCFG dump"); 1483 dump(); 1484 tty->print_cr("Node dump"); 1485 _root->dump(99999); 1486 } 1487 } 1488 #endif 1489 } 1490 1491 //----------------------------create_loop_tree-------------------------------- 1492 // Create a loop tree from the CFG 1493 CFGLoop* PhaseCFG::create_loop_tree() { 1494 1495 #ifdef ASSERT 1496 assert(get_block(0) == get_root_block(), "first block should be root block"); 1497 for (uint i = 0; i < number_of_blocks(); i++) { 1498 Block* block = get_block(i); 1499 // Check that _loop field are clear...we could clear them if not. 1500 assert(block->_loop == NULL, "clear _loop expected"); 1501 // Sanity check that the RPO numbering is reflected in the _blocks array. 1502 // It doesn't have to be for the loop tree to be built, but if it is not, 1503 // then the blocks have been reordered since dom graph building...which 1504 // may question the RPO numbering 1505 assert(block->_rpo == i, "unexpected reverse post order number"); 1506 } 1507 #endif 1508 1509 int idct = 0; 1510 CFGLoop* root_loop = new CFGLoop(idct++); 1511 1512 Block_List worklist; 1513 1514 // Assign blocks to loops 1515 for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block 1516 Block* block = get_block(i); 1517 1518 if (block->head()->is_Loop()) { 1519 Block* loop_head = block; 1520 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); 1521 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); 1522 Block* tail = get_block_for_node(tail_n); 1523 1524 // Defensively filter out Loop nodes for non-single-entry loops. 1525 // For all reasonable loops, the head occurs before the tail in RPO. 1526 if (i <= tail->_rpo) { 1527 1528 // The tail and (recursive) predecessors of the tail 1529 // are made members of a new loop. 1530 1531 assert(worklist.size() == 0, "nonempty worklist"); 1532 CFGLoop* nloop = new CFGLoop(idct++); 1533 assert(loop_head->_loop == NULL, "just checking"); 1534 loop_head->_loop = nloop; 1535 // Add to nloop so push_pred() will skip over inner loops 1536 nloop->add_member(loop_head); 1537 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this); 1538 1539 while (worklist.size() > 0) { 1540 Block* member = worklist.pop(); 1541 if (member != loop_head) { 1542 for (uint j = 1; j < member->num_preds(); j++) { 1543 nloop->push_pred(member, j, worklist, this); 1544 } 1545 } 1546 } 1547 } 1548 } 1549 } 1550 1551 // Create a member list for each loop consisting 1552 // of both blocks and (immediate child) loops. 1553 for (uint i = 0; i < number_of_blocks(); i++) { 1554 Block* block = get_block(i); 1555 CFGLoop* lp = block->_loop; 1556 if (lp == NULL) { 1557 // Not assigned to a loop. Add it to the method's pseudo loop. 1558 block->_loop = root_loop; 1559 lp = root_loop; 1560 } 1561 if (lp == root_loop || block != lp->head()) { // loop heads are already members 1562 lp->add_member(block); 1563 } 1564 if (lp != root_loop) { 1565 if (lp->parent() == NULL) { 1566 // Not a nested loop. Make it a child of the method's pseudo loop. 1567 root_loop->add_nested_loop(lp); 1568 } 1569 if (block == lp->head()) { 1570 // Add nested loop to member list of parent loop. 1571 lp->parent()->add_member(lp); 1572 } 1573 } 1574 } 1575 1576 return root_loop; 1577 } 1578 1579 //------------------------------push_pred-------------------------------------- 1580 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) { 1581 Node* pred_n = blk->pred(i); 1582 Block* pred = cfg->get_block_for_node(pred_n); 1583 CFGLoop *pred_loop = pred->_loop; 1584 if (pred_loop == NULL) { 1585 // Filter out blocks for non-single-entry loops. 1586 // For all reasonable loops, the head occurs before the tail in RPO. 1587 if (pred->_rpo > head()->_rpo) { 1588 pred->_loop = this; 1589 worklist.push(pred); 1590 } 1591 } else if (pred_loop != this) { 1592 // Nested loop. 1593 while (pred_loop->_parent != NULL && pred_loop->_parent != this) { 1594 pred_loop = pred_loop->_parent; 1595 } 1596 // Make pred's loop be a child 1597 if (pred_loop->_parent == NULL) { 1598 add_nested_loop(pred_loop); 1599 // Continue with loop entry predecessor. 1600 Block* pred_head = pred_loop->head(); 1601 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); 1602 assert(pred_head != head(), "loop head in only one loop"); 1603 push_pred(pred_head, LoopNode::EntryControl, worklist, cfg); 1604 } else { 1605 assert(pred_loop->_parent == this && _parent == NULL, "just checking"); 1606 } 1607 } 1608 } 1609 1610 //------------------------------add_nested_loop-------------------------------- 1611 // Make cl a child of the current loop in the loop tree. 1612 void CFGLoop::add_nested_loop(CFGLoop* cl) { 1613 assert(_parent == NULL, "no parent yet"); 1614 assert(cl != this, "not my own parent"); 1615 cl->_parent = this; 1616 CFGLoop* ch = _child; 1617 if (ch == NULL) { 1618 _child = cl; 1619 } else { 1620 while (ch->_sibling != NULL) { ch = ch->_sibling; } 1621 ch->_sibling = cl; 1622 } 1623 } 1624 1625 //------------------------------compute_loop_depth----------------------------- 1626 // Store the loop depth in each CFGLoop object. 1627 // Recursively walk the children to do the same for them. 1628 void CFGLoop::compute_loop_depth(int depth) { 1629 _depth = depth; 1630 CFGLoop* ch = _child; 1631 while (ch != NULL) { 1632 ch->compute_loop_depth(depth + 1); 1633 ch = ch->_sibling; 1634 } 1635 } 1636 1637 //------------------------------compute_freq----------------------------------- 1638 // Compute the frequency of each block and loop, relative to a single entry 1639 // into the dominating loop head. 1640 void CFGLoop::compute_freq() { 1641 // Bottom up traversal of loop tree (visit inner loops first.) 1642 // Set loop head frequency to 1.0, then transitively 1643 // compute frequency for all successors in the loop, 1644 // as well as for each exit edge. Inner loops are 1645 // treated as single blocks with loop exit targets 1646 // as the successor blocks. 1647 1648 // Nested loops first 1649 CFGLoop* ch = _child; 1650 while (ch != NULL) { 1651 ch->compute_freq(); 1652 ch = ch->_sibling; 1653 } 1654 assert (_members.length() > 0, "no empty loops"); 1655 Block* hd = head(); 1656 hd->_freq = 1.0f; 1657 for (int i = 0; i < _members.length(); i++) { 1658 CFGElement* s = _members.at(i); 1659 float freq = s->_freq; 1660 if (s->is_block()) { 1661 Block* b = s->as_Block(); 1662 for (uint j = 0; j < b->_num_succs; j++) { 1663 Block* sb = b->_succs[j]; 1664 update_succ_freq(sb, freq * b->succ_prob(j)); 1665 } 1666 } else { 1667 CFGLoop* lp = s->as_CFGLoop(); 1668 assert(lp->_parent == this, "immediate child"); 1669 for (int k = 0; k < lp->_exits.length(); k++) { 1670 Block* eb = lp->_exits.at(k).get_target(); 1671 float prob = lp->_exits.at(k).get_prob(); 1672 update_succ_freq(eb, freq * prob); 1673 } 1674 } 1675 } 1676 1677 // For all loops other than the outer, "method" loop, 1678 // sum and normalize the exit probability. The "method" loop 1679 // should keep the initial exit probability of 1, so that 1680 // inner blocks do not get erroneously scaled. 1681 if (_depth != 0) { 1682 // Total the exit probabilities for this loop. 1683 float exits_sum = 0.0f; 1684 for (int i = 0; i < _exits.length(); i++) { 1685 exits_sum += _exits.at(i).get_prob(); 1686 } 1687 1688 // Normalize the exit probabilities. Until now, the 1689 // probabilities estimate the possibility of exit per 1690 // a single loop iteration; afterward, they estimate 1691 // the probability of exit per loop entry. 1692 for (int i = 0; i < _exits.length(); i++) { 1693 Block* et = _exits.at(i).get_target(); 1694 float new_prob = 0.0f; 1695 if (_exits.at(i).get_prob() > 0.0f) { 1696 new_prob = _exits.at(i).get_prob() / exits_sum; 1697 } 1698 BlockProbPair bpp(et, new_prob); 1699 _exits.at_put(i, bpp); 1700 } 1701 1702 // Save the total, but guard against unreasonable probability, 1703 // as the value is used to estimate the loop trip count. 1704 // An infinite trip count would blur relative block 1705 // frequencies. 1706 if (exits_sum > 1.0f) exits_sum = 1.0; 1707 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN; 1708 _exit_prob = exits_sum; 1709 } 1710 } 1711 1712 //------------------------------succ_prob------------------------------------- 1713 // Determine the probability of reaching successor 'i' from the receiver block. 1714 float Block::succ_prob(uint i) { 1715 int eidx = end_idx(); 1716 Node *n = get_node(eidx); // Get ending Node 1717 1718 int op = n->Opcode(); 1719 if (n->is_Mach()) { 1720 if (n->is_MachNullCheck()) { 1721 // Can only reach here if called after lcm. The original Op_If is gone, 1722 // so we attempt to infer the probability from one or both of the 1723 // successor blocks. 1724 assert(_num_succs == 2, "expecting 2 successors of a null check"); 1725 // If either successor has only one predecessor, then the 1726 // probability estimate can be derived using the 1727 // relative frequency of the successor and this block. 1728 if (_succs[i]->num_preds() == 2) { 1729 return _succs[i]->_freq / _freq; 1730 } else if (_succs[1-i]->num_preds() == 2) { 1731 return 1 - (_succs[1-i]->_freq / _freq); 1732 } else { 1733 // Estimate using both successor frequencies 1734 float freq = _succs[i]->_freq; 1735 return freq / (freq + _succs[1-i]->_freq); 1736 } 1737 } 1738 op = n->as_Mach()->ideal_Opcode(); 1739 } 1740 1741 1742 // Switch on branch type 1743 switch( op ) { 1744 case Op_CountedLoopEnd: 1745 case Op_If: { 1746 assert (i < 2, "just checking"); 1747 // Conditionals pass on only part of their frequency 1748 float prob = n->as_MachIf()->_prob; 1749 assert(prob >= 0.0 && prob <= 1.0, "out of range probability"); 1750 // If succ[i] is the FALSE branch, invert path info 1751 if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) { 1752 return 1.0f - prob; // not taken 1753 } else { 1754 return prob; // taken 1755 } 1756 } 1757 1758 case Op_Jump: 1759 // Divide the frequency between all successors evenly 1760 return 1.0f/_num_succs; 1761 1762 case Op_Catch: { 1763 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); 1764 if (ci->_con == CatchProjNode::fall_through_index) { 1765 // Fall-thru path gets the lion's share. 1766 return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs; 1767 } else { 1768 // Presume exceptional paths are equally unlikely 1769 return PROB_UNLIKELY_MAG(5); 1770 } 1771 } 1772 1773 case Op_Root: 1774 case Op_Goto: 1775 // Pass frequency straight thru to target 1776 return 1.0f; 1777 1778 case Op_NeverBranch: 1779 return 0.0f; 1780 1781 case Op_TailCall: 1782 case Op_TailJump: 1783 case Op_Return: 1784 case Op_Halt: 1785 case Op_Rethrow: 1786 // Do not push out freq to root block 1787 return 0.0f; 1788 1789 default: 1790 ShouldNotReachHere(); 1791 } 1792 1793 return 0.0f; 1794 } 1795 1796 //------------------------------num_fall_throughs----------------------------- 1797 // Return the number of fall-through candidates for a block 1798 int Block::num_fall_throughs() { 1799 int eidx = end_idx(); 1800 Node *n = get_node(eidx); // Get ending Node 1801 1802 int op = n->Opcode(); 1803 if (n->is_Mach()) { 1804 if (n->is_MachNullCheck()) { 1805 // In theory, either side can fall-thru, for simplicity sake, 1806 // let's say only the false branch can now. 1807 return 1; 1808 } 1809 op = n->as_Mach()->ideal_Opcode(); 1810 } 1811 1812 // Switch on branch type 1813 switch( op ) { 1814 case Op_CountedLoopEnd: 1815 case Op_If: 1816 return 2; 1817 1818 case Op_Root: 1819 case Op_Goto: 1820 return 1; 1821 1822 case Op_Catch: { 1823 for (uint i = 0; i < _num_succs; i++) { 1824 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); 1825 if (ci->_con == CatchProjNode::fall_through_index) { 1826 return 1; 1827 } 1828 } 1829 return 0; 1830 } 1831 1832 case Op_Jump: 1833 case Op_NeverBranch: 1834 case Op_TailCall: 1835 case Op_TailJump: 1836 case Op_Return: 1837 case Op_Halt: 1838 case Op_Rethrow: 1839 return 0; 1840 1841 default: 1842 ShouldNotReachHere(); 1843 } 1844 1845 return 0; 1846 } 1847 1848 //------------------------------succ_fall_through----------------------------- 1849 // Return true if a specific successor could be fall-through target. 1850 bool Block::succ_fall_through(uint i) { 1851 int eidx = end_idx(); 1852 Node *n = get_node(eidx); // Get ending Node 1853 1854 int op = n->Opcode(); 1855 if (n->is_Mach()) { 1856 if (n->is_MachNullCheck()) { 1857 // In theory, either side can fall-thru, for simplicity sake, 1858 // let's say only the false branch can now. 1859 return get_node(i + eidx + 1)->Opcode() == Op_IfFalse; 1860 } 1861 op = n->as_Mach()->ideal_Opcode(); 1862 } 1863 1864 // Switch on branch type 1865 switch( op ) { 1866 case Op_CountedLoopEnd: 1867 case Op_If: 1868 case Op_Root: 1869 case Op_Goto: 1870 return true; 1871 1872 case Op_Catch: { 1873 const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); 1874 return ci->_con == CatchProjNode::fall_through_index; 1875 } 1876 1877 case Op_Jump: 1878 case Op_NeverBranch: 1879 case Op_TailCall: 1880 case Op_TailJump: 1881 case Op_Return: 1882 case Op_Halt: 1883 case Op_Rethrow: 1884 return false; 1885 1886 default: 1887 ShouldNotReachHere(); 1888 } 1889 1890 return false; 1891 } 1892 1893 //------------------------------update_uncommon_branch------------------------ 1894 // Update the probability of a two-branch to be uncommon 1895 void Block::update_uncommon_branch(Block* ub) { 1896 int eidx = end_idx(); 1897 Node *n = get_node(eidx); // Get ending Node 1898 1899 int op = n->as_Mach()->ideal_Opcode(); 1900 1901 assert(op == Op_CountedLoopEnd || op == Op_If, "must be a If"); 1902 assert(num_fall_throughs() == 2, "must be a two way branch block"); 1903 1904 // Which successor is ub? 1905 uint s; 1906 for (s = 0; s <_num_succs; s++) { 1907 if (_succs[s] == ub) break; 1908 } 1909 assert(s < 2, "uncommon successor must be found"); 1910 1911 // If ub is the true path, make the proability small, else 1912 // ub is the false path, and make the probability large 1913 bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse); 1914 1915 // Get existing probability 1916 float p = n->as_MachIf()->_prob; 1917 1918 if (invert) p = 1.0 - p; 1919 if (p > PROB_MIN) { 1920 p = PROB_MIN; 1921 } 1922 if (invert) p = 1.0 - p; 1923 1924 n->as_MachIf()->_prob = p; 1925 } 1926 1927 //------------------------------update_succ_freq------------------------------- 1928 // Update the appropriate frequency associated with block 'b', a successor of 1929 // a block in this loop. 1930 void CFGLoop::update_succ_freq(Block* b, float freq) { 1931 if (b->_loop == this) { 1932 if (b == head()) { 1933 // back branch within the loop 1934 // Do nothing now, the loop carried frequency will be 1935 // adjust later in scale_freq(). 1936 } else { 1937 // simple branch within the loop 1938 b->_freq += freq; 1939 } 1940 } else if (!in_loop_nest(b)) { 1941 // branch is exit from this loop 1942 BlockProbPair bpp(b, freq); 1943 _exits.append(bpp); 1944 } else { 1945 // branch into nested loop 1946 CFGLoop* ch = b->_loop; 1947 ch->_freq += freq; 1948 } 1949 } 1950 1951 //------------------------------in_loop_nest----------------------------------- 1952 // Determine if block b is in the receiver's loop nest. 1953 bool CFGLoop::in_loop_nest(Block* b) { 1954 int depth = _depth; 1955 CFGLoop* b_loop = b->_loop; 1956 int b_depth = b_loop->_depth; 1957 if (depth == b_depth) { 1958 return true; 1959 } 1960 while (b_depth > depth) { 1961 b_loop = b_loop->_parent; 1962 b_depth = b_loop->_depth; 1963 } 1964 return b_loop == this; 1965 } 1966 1967 //------------------------------scale_freq------------------------------------- 1968 // Scale frequency of loops and blocks by trip counts from outer loops 1969 // Do a top down traversal of loop tree (visit outer loops first.) 1970 void CFGLoop::scale_freq() { 1971 float loop_freq = _freq * trip_count(); 1972 _freq = loop_freq; 1973 for (int i = 0; i < _members.length(); i++) { 1974 CFGElement* s = _members.at(i); 1975 float block_freq = s->_freq * loop_freq; 1976 if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY) 1977 block_freq = MIN_BLOCK_FREQUENCY; 1978 s->_freq = block_freq; 1979 } 1980 CFGLoop* ch = _child; 1981 while (ch != NULL) { 1982 ch->scale_freq(); 1983 ch = ch->_sibling; 1984 } 1985 } 1986 1987 // Frequency of outer loop 1988 float CFGLoop::outer_loop_freq() const { 1989 if (_child != NULL) { 1990 return _child->_freq; 1991 } 1992 return _freq; 1993 } 1994 1995 #ifndef PRODUCT 1996 //------------------------------dump_tree-------------------------------------- 1997 void CFGLoop::dump_tree() const { 1998 dump(); 1999 if (_child != NULL) _child->dump_tree(); 2000 if (_sibling != NULL) _sibling->dump_tree(); 2001 } 2002 2003 //------------------------------dump------------------------------------------- 2004 void CFGLoop::dump() const { 2005 for (int i = 0; i < _depth; i++) tty->print(" "); 2006 tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n", 2007 _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq); 2008 for (int i = 0; i < _depth; i++) tty->print(" "); 2009 tty->print(" members:"); 2010 int k = 0; 2011 for (int i = 0; i < _members.length(); i++) { 2012 if (k++ >= 6) { 2013 tty->print("\n "); 2014 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2015 k = 0; 2016 } 2017 CFGElement *s = _members.at(i); 2018 if (s->is_block()) { 2019 Block *b = s->as_Block(); 2020 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq); 2021 } else { 2022 CFGLoop* lp = s->as_CFGLoop(); 2023 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq); 2024 } 2025 } 2026 tty->print("\n"); 2027 for (int i = 0; i < _depth; i++) tty->print(" "); 2028 tty->print(" exits: "); 2029 k = 0; 2030 for (int i = 0; i < _exits.length(); i++) { 2031 if (k++ >= 7) { 2032 tty->print("\n "); 2033 for (int j = 0; j < _depth+1; j++) tty->print(" "); 2034 k = 0; 2035 } 2036 Block *blk = _exits.at(i).get_target(); 2037 float prob = _exits.at(i).get_prob(); 2038 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100)); 2039 } 2040 tty->print("\n"); 2041 } 2042 #endif