/*
 * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
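  //
  // For instance (illustrative Java source, not part of this file), both of
  // the following create Macro nodes that make EA worthwhile:
  //
  //   Point p = new Point(x, y);    // Allocate node
  //   synchronized (p) { p.x++; }   // Lock/Unlock nodes on a local object
  //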
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only allocations and java static call results are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
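      // (Each scalar replaceable object gets its own alias class and
      // memory slice there, so its memory state can be separated from
      // the general memory graph.)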
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so if one of its inputs is non
      // escaping, we can record a unique type
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing())  return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing())  return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if(!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if(!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if(C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.
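  // Note: the Connection Graph is built in two passes. This method adds
  // "simple" edges whose inputs are known to be registered already; nodes
  // whose inputs may not be visited yet (e.g. Phi, CMoveP, call arguments)
  // are pushed on delayed_worklist and finished in add_final_edges().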

  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
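      // (For example, a CMoveP may use a Phi which has not been visited
      // and registered yet.)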
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                                 \
      /* Should not be called for non-pointer type. */  \
      n->dump(1);                                       \
      assert(false, name);                              \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
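// These are the edges deferred from add_node_to_connection_graph() via
// delayed_worklist: by now every input of interest has a registered
// PointsTo node, so the code below can assert registration.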
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         ((n_ptn != NULL) && (n_ptn->ideal_node() != NULL)),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back to this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back to this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
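        // (For example, in "Object id(Object o) { return o; }" whatever
        // flows into the Return becomes reachable by the caller and thus
        // globally escapes.)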
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      if (add_final_edges_unsafe_access(n, opcode)) {
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == NULL) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert (adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == NULL) {
    n->dump(1);
    assert(adr_type != NULL, "dead node should not be on list");
    return true;
  }
#endif

  if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
      opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
  }

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && is_captured_store_address(adr))) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape);
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointed to object arguments
    //      which are returned and do not escape during call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Another type of call, assume the worst case:
    // returned value is unknown and globally escapes.
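    // (For example, a virtual call "x.m()" may dispatch to any override
    // of m(), so nothing can be assumed about its oop result.)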
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
        call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls, objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument globally escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}


// Finish Graph construction.
bool ConnectionGraph::complete_connection_graph(
                         GrowableArray<PointsToNode*>&   ptnodes_worklist,
                         GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                         GrowableArray<JavaObjectNode*>& java_objects_worklist,
                         GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes needed to build Connection Graph depending
  // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations when something went wrong and
  // bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM), see the EscapeAnalysisTimeout flag.
#define CG_BUILD_ITER_LIMIT 20

  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes on _worklist
  // Field nodes which reference phantom_object.
  if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
    return false; // Nothing to do.
  }
  // Now propagate references to all JavaObject nodes.
  int java_objects_length = java_objects_worklist.length();
  elapsedTimer time;
  bool timeout = false;
  int new_edges = 1;
  int iterations = 0;
  do {
    while ((new_edges > 0) &&
           (iterations++ < CG_BUILD_ITER_LIMIT)) {
      double start_time = time.seconds();
      time.start();
      new_edges = 0;
      // Propagate references to phantom_object for nodes pushed on _worklist
      // by find_non_escaped_objects() and find_field_value().
      new_edges += add_java_object_edges(phantom_obj, false);
      for (int next = 0; next < java_objects_length; ++next) {
        JavaObjectNode* ptn = java_objects_worklist.at(next);
        new_edges += add_java_object_edges(ptn, true);

#define SAMPLE_SIZE 4
        if ((next % SAMPLE_SIZE) == 0) {
          // Every 4 iterations estimate how much time it will take
          // to complete graph construction.
          time.stop();
          // Poll for requests from shutdown mechanism to quiesce compiler
          // because Connection graph construction may take a long time.
          CompileBroker::maybe_block();
          double stop_time = time.seconds();
          double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
          double time_until_end = time_per_iter * (double)(java_objects_length - next);
          if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
            timeout = true;
            break; // Timeout
          }
          start_time = stop_time;
          time.start();
        }
#undef SAMPLE_SIZE

      }
      if (timeout) break;
      if (new_edges > 0) {
        // Update escape states on each iteration if graph was updated.
        if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) {
          return false; // Nothing to do.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    }
    if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) {
      time.start();
      // Find fields which have unknown value.
      int fields_length = oop_fields_worklist.length();
      for (int next = 0; next < fields_length; next++) {
        FieldNode* field = oop_fields_worklist.at(next);
        if (field->edge_count() == 0) {
          new_edges += find_field_value(field);
          // This code may add new edges to phantom_object.
          // Need another cycle to propagate references to phantom_object.
        }
      }
      time.stop();
      if (time.seconds() >= EscapeAnalysisTimeout) {
        timeout = true;
        break;
      }
    } else {
      new_edges = 0; // Bailout
    }
  } while (new_edges > 0);

  // Bailout if passed limits.
  if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) {
    Compile* C = _compile;
    if (C->log() != NULL) {
      C->log()->begin_elem("connectionGraph_bailout reason='reached ");
"time" : "iterations"); 1231 C->log()->end_elem(" limit'"); 1232 } 1233 assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", 1234 time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()); 1235 // Possible infinite build_connection_graph loop, 1236 // bailout (no changes to ideal graph were made). 1237 return false; 1238 } 1239 #ifdef ASSERT 1240 if (Verbose && PrintEscapeAnalysis) { 1241 tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", 1242 iterations, nodes_size(), ptnodes_worklist.length()); 1243 } 1244 #endif 1245 1246 #undef CG_BUILD_ITER_LIMIT 1247 1248 // Find fields initialized by NULL for non-escaping Allocations. 1249 int non_escaped_length = non_escaped_worklist.length(); 1250 for (int next = 0; next < non_escaped_length; next++) { 1251 JavaObjectNode* ptn = non_escaped_worklist.at(next); 1252 PointsToNode::EscapeState es = ptn->escape_state(); 1253 assert(es <= PointsToNode::ArgEscape, "sanity"); 1254 if (es == PointsToNode::NoEscape) { 1255 if (find_init_values(ptn, null_obj, _igvn) > 0) { 1256 // Adding references to NULL object does not change escape states 1257 // since it does not escape. Also no fields are added to NULL object. 1258 add_java_object_edges(null_obj, false); 1259 } 1260 } 1261 Node* n = ptn->ideal_node(); 1262 if (n->is_Allocate()) { 1263 // The object allocated by this Allocate node will never be 1264 // seen by an other thread. Mark it so that when it is 1265 // expanded no MemBarStoreStore is added. 1266 InitializeNode* ini = n->as_Allocate()->initialization(); 1267 if (ini != NULL) 1268 ini->set_does_not_escape(); 1269 } 1270 } 1271 return true; // Finished graph construction. 1272 } 1273 1274 // Propagate GlobalEscape and ArgEscape escape states to all nodes 1275 // and check that we still have non-escaping java objects. 1276 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, 1277 GrowableArray<JavaObjectNode*>& non_escaped_worklist) { 1278 GrowableArray<PointsToNode*> escape_worklist; 1279 // First, put all nodes with GlobalEscape and ArgEscape states on worklist. 1280 int ptnodes_length = ptnodes_worklist.length(); 1281 for (int next = 0; next < ptnodes_length; ++next) { 1282 PointsToNode* ptn = ptnodes_worklist.at(next); 1283 if (ptn->escape_state() >= PointsToNode::ArgEscape || 1284 ptn->fields_escape_state() >= PointsToNode::ArgEscape) { 1285 escape_worklist.push(ptn); 1286 } 1287 } 1288 // Set escape states to referenced nodes (edges list). 1289 while (escape_worklist.length() > 0) { 1290 PointsToNode* ptn = escape_worklist.pop(); 1291 PointsToNode::EscapeState es = ptn->escape_state(); 1292 PointsToNode::EscapeState field_es = ptn->fields_escape_state(); 1293 if (ptn->is_Field() && ptn->as_Field()->is_oop() && 1294 es >= PointsToNode::ArgEscape) { 1295 // GlobalEscape or ArgEscape state of field means it has unknown value. 1296 if (add_edge(ptn, phantom_obj)) { 1297 // New edge was added 1298 add_field_uses_to_worklist(ptn->as_Field()); 1299 } 1300 } 1301 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 1302 PointsToNode* e = i.get(); 1303 if (e->is_Arraycopy()) { 1304 assert(ptn->arraycopy_dst(), "sanity"); 1305 // Propagate only fields escape state through arraycopy edge. 
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          escape_worklist.push(e);
        }
      } else if (es >= field_es) {
        // fields_escape_state is also set to 'es' if it is less than 'es'.
        if (e->escape_state() < es) {
          set_escape_state(e, es);
          escape_worklist.push(e);
        }
      } else {
        // Propagate field escape state.
        bool es_changed = false;
        if (e->fields_escape_state() < field_es) {
          set_fields_escape_state(e, field_es);
          es_changed = true;
        }
        if ((e->escape_state() < field_es) &&
            e->is_Field() && ptn->is_JavaObject() &&
            e->as_Field()->is_oop()) {
          // Change escape state of referenced fields.
          set_escape_state(e, field_es);
          es_changed = true;
        } else if (e->escape_state() < es) {
          set_escape_state(e, es);
          es_changed = true;
        }
        if (es_changed) {
          escape_worklist.push(e);
        }
      }
    }
  }
  // Remove escaped objects from non_escaped list.
  for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
      non_escaped_worklist.delete_at(next);
    }
    if (ptn->escape_state() == PointsToNode::NoEscape) {
      // Find fields in non-escaped allocations which have unknown value.
      find_init_values(ptn, phantom_obj, NULL);
    }
  }
  return (non_escaped_worklist.length() > 0);
}

// Add all references to JavaObject node by walking over all uses.
int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
  int new_edges = 0;
  if (populate_worklist) {
    // Populate _worklist by uses of jobj's uses.
    for (UseIterator i(jobj); i.has_next(); i.next()) {
      PointsToNode* use = i.get();
      if (use->is_Arraycopy())
        continue;
      add_uses_to_worklist(use);
      if (use->is_Field() && use->as_Field()->is_oop()) {
        // Put on worklist all field's uses (loads) and
        // related field nodes (same base and offset).
        add_field_uses_to_worklist(use->as_Field());
      }
    }
  }
  for (int l = 0; l < _worklist.length(); l++) {
    PointsToNode* use = _worklist.at(l);
    if (PointsToNode::is_base_use(use)) {
      // Add reference from jobj to field and from field to jobj (field's base).
      use = PointsToNode::get_use_node(use)->as_Field();
      if (add_base(use->as_Field(), jobj)) {
        new_edges++;
      }
      continue;
    }
    assert(!use->is_JavaObject(), "sanity");
    if (use->is_Arraycopy()) {
      if (jobj == null_obj) // NULL object does not have field edges
        continue;
      // Added edge from Arraycopy node to arraycopy's source java object
      if (add_edge(use, jobj)) {
        jobj->set_arraycopy_src();
        new_edges++;
      }
      // and stop here.
      continue;
    }
    if (!add_edge(use, jobj))
      continue; // No new edge added, there was such edge already.
    new_edges++;
    if (use->is_LocalVar()) {
      add_uses_to_worklist(use);
      if (use->arraycopy_dst()) {
        for (EdgeIterator i(use); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_Arraycopy()) {
            if (jobj == null_obj) // NULL object does not have field edges
              continue;
            // Add edge from arraycopy's destination java object to Arraycopy node.
            if (add_edge(jobj, e)) {
              new_edges++;
              jobj->set_arraycopy_dst();
            }
          }
        }
      }
    } else {
      // Added a new edge to the field's stored values.
      // Put on worklist all field's uses (loads) and
      // related field nodes (same base and offset).
      add_field_uses_to_worklist(use->as_Field());
    }
  }
  _worklist.clear();
  _in_worklist.reset();
  return new_edges;
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
  assert(field->is_oop(), "sanity");
  int offset = field->offset();
  add_uses_to_worklist(field);
  // Loop over all bases of this field and push on worklist Field nodes
  // with the same offset and base (since they may reference the same field).
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    add_fields_to_worklist(field, base);
    // Check if the base was source object of arraycopy and go over arraycopy's
    // destination objects since values stored to a field of source object are
    // accessible by uses (loads) of fields of destination objects.
    if (base->arraycopy_src()) {
      for (UseIterator j(base); j.has_next(); j.next()) {
        PointsToNode* arycp = j.get();
        if (arycp->is_Arraycopy()) {
          for (UseIterator k(arycp); k.has_next(); k.next()) {
            PointsToNode* abase = k.get();
            if (abase->arraycopy_dst() && abase != base) {
              // Look for the same arraycopy reference.
              add_fields_to_worklist(field, abase);
            }
          }
        }
      }
    }
  }
}

// Put on worklist all related field nodes.
void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
  int offset = field->offset();
  if (base->is_LocalVar()) {
    for (UseIterator j(base); j.has_next(); j.next()) {
      PointsToNode* f = j.get();
      if (PointsToNode::is_base_use(f)) { // Field
        f = PointsToNode::get_use_node(f);
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  } else {
    assert(base->is_JavaObject(), "sanity");
    if (// Skip phantom_object since it is only used to indicate that
        // this field's content globally escapes.
        (base != phantom_obj) &&
        // NULL object node does not have fields.
        (base != null_obj)) {
      for (EdgeIterator i(base); i.has_next(); i.next()) {
        PointsToNode* f = i.get();
        // Skip arraycopy edge since store to destination object field
        // does not update value in source object field.
        if (f->is_Arraycopy()) {
          assert(base->arraycopy_dst(), "sanity");
          continue;
        }
        if (f == field || !f->as_Field()->is_oop())
          continue;
        int offs = f->as_Field()->offset();
        if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
          add_to_worklist(f);
        }
      }
    }
  }
}

// Find fields which have unknown value.
int ConnectionGraph::find_field_value(FieldNode* field) {
  // Escaped fields should have init value already.
  assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
  int new_edges = 0;
  for (BaseIterator i(field); i.has_next(); i.next()) {
    PointsToNode* base = i.get();
    if (base->is_JavaObject()) {
      // Skip Allocate's fields which will be processed later.
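      // (find_init_values() adds the missing NULL or phantom edges for them.)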
      if (base->ideal_node()->is_Allocate())
        return 0;
      assert(base == null_obj, "only NULL ptr base expected here");
    }
  }
  if (add_edge(field, phantom_obj)) {
    // New edge was added
    new_edges++;
    add_field_uses_to_worklist(field);
  }
  return new_edges;
}

// Find fields initializing values for allocations.
int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) {
  assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
  int new_edges = 0;
  Node* alloc = pta->ideal_node();
  if (init_val == phantom_obj) {
    // Do nothing for Allocate nodes since their field values are
    // "known" unless they are initialized by arraycopy/clone.
    if (alloc->is_Allocate() && !pta->arraycopy_dst())
      return 0;
    assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
#ifdef ASSERT
    if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) {
      const char* name = alloc->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
    }
#endif
    // Non-escaped allocations returned from Java or runtime calls have
    // unknown values in fields.
    for (EdgeIterator i(pta); i.has_next(); i.next()) {
      PointsToNode* field = i.get();
      if (field->is_Field() && field->as_Field()->is_oop()) {
        if (add_edge(field, phantom_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
        }
      }
    }
    return new_edges;
  }
  assert(init_val == null_obj, "sanity");
  // Do nothing for Call nodes since their field values are unknown.
  if (!alloc->is_Allocate())
    return 0;

  InitializeNode* ini = alloc->as_Allocate()->initialization();
  bool visited_bottom_offset = false;
  GrowableArray<int> offsets_worklist;

  // Check if an oop field's initializing value is recorded and add
  // a corresponding NULL if the field's value is not recorded.
  // Connection Graph does not record a default initialization by NULL
  // captured by Initialize node.
  //
  for (EdgeIterator i(pta); i.has_next(); i.next()) {
    PointsToNode* field = i.get(); // Field (AddP)
    if (!field->is_Field() || !field->as_Field()->is_oop())
      continue; // Not oop field
    int offset = field->as_Field()->offset();
    if (offset == Type::OffsetBot) {
      if (!visited_bottom_offset) {
        // OffsetBot is used to reference array's element,
        // always add reference to NULL to all Field nodes since we don't
        // know which element is referenced.
        if (add_edge(field, null_obj)) {
          // New edge was added
          new_edges++;
          add_field_uses_to_worklist(field->as_Field());
          visited_bottom_offset = true;
        }
      }
    } else {
      // Check only oop fields.
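      // For instance (illustrative Java source), after
      //   Point p = new Point();   // no store to p.next
      // the field p.next holds the default NULL, which the Initialize
      // node does not record, so the code below adds an explicit edge
      // from the field to null_obj.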
      const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
      if (adr_type->isa_rawptr()) {
#ifdef ASSERT
        // Raw pointers are used for initializing stores; skip this field
        // since its value should be recorded already.
        Node* base = get_addp_base(field->ideal_node());
        assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
#endif
        continue;
      }
      if (!offsets_worklist.contains(offset)) {
        offsets_worklist.append(offset);
        Node* value = NULL;
        if (ini != NULL) {
          // StoreP::memory_type() == T_ADDRESS
          BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
          Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure the initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which is not eliminated yet.
          if (store != NULL && store->is_Store() &&
              store->as_Store()->memory_type() == ft) {
            value = store->in(MemNode::ValueIn);
#ifdef ASSERT
            if (VerifyConnectionGraph) {
              // Verify that AddP already points to all objects the value points to.
              PointsToNode* val = ptnode_adr(value->_idx);
              assert((val != NULL), "should be processed already");
              PointsToNode* missed_obj = NULL;
              if (val->is_JavaObject()) {
                if (!field->points_to(val->as_JavaObject())) {
                  missed_obj = val;
                }
              } else {
                if (!val->is_LocalVar() || (val->edge_count() == 0)) {
                  tty->print_cr("----------init store has invalid value -----");
                  store->dump();
                  val->dump();
                  assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
                }
                for (EdgeIterator j(val); j.has_next(); j.next()) {
                  PointsToNode* obj = j.get();
                  if (obj->is_JavaObject()) {
                    if (!field->points_to(obj->as_JavaObject())) {
                      missed_obj = obj;
                      break;
                    }
                  }
                }
              }
              if (missed_obj != NULL) {
                tty->print_cr("----------field---------------------------------");
                field->dump();
                tty->print_cr("----------missed reference to object------------");
                missed_obj->dump();
                tty->print_cr("----------object referenced by init store ------");
                store->dump();
                val->dump();
                assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
              }
            }
#endif
          } else {
            // There could be initializing stores which follow the allocation.
            // For example, a volatile field store is not collected
            // by the Initialize node.
            //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add the initial value NULL so
            // that the compare pointers optimization works correctly.
          }
        }
        if (value == NULL) {
          // A field's initializing value was not recorded. Add NULL.
          if (add_edge(field, null_obj)) {
            // New edge was added
            new_edges++;
            add_field_uses_to_worklist(field->as_Field());
          }
        }
      }
    }
  }
  return new_edges;
}

// Adjust scalar_replaceable state after Connection Graph is built.
void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) {
  // Search for non-escaping objects which are not scalar replaceable
  // and mark them to propagate the state to referenced objects.

  // 1. An object is not scalar replaceable if the field into which it is
  //    stored has unknown offset (stored into an unknown element of an array).
  //
  for (UseIterator i(jobj); i.has_next(); i.next()) {
    PointsToNode* use = i.get();
    if (use->is_Arraycopy()) {
      continue;
    }
    if (use->is_Field()) {
      FieldNode* field = use->as_Field();
      assert(field->is_oop() && field->scalar_replaceable(), "sanity");
      if (field->offset() == Type::OffsetBot) {
        jobj->set_scalar_replaceable(false);
        return;
      }
      // 2. An object is not scalar replaceable if the field into which it is
      //    stored has multiple bases, one of which is null.
      if (field->base_count() > 1) {
        for (BaseIterator i(field); i.has_next(); i.next()) {
          PointsToNode* base = i.get();
          if (base == null_obj) {
            jobj->set_scalar_replaceable(false);
            return;
          }
        }
      }
    }
    assert(use->is_Field() || use->is_LocalVar(), "sanity");
    // 3. An object is not scalar replaceable if it is merged with other objects.
    for (EdgeIterator j(use); j.has_next(); j.next()) {
      PointsToNode* ptn = j.get();
      if (ptn->is_JavaObject() && ptn != jobj) {
        // Mark all objects.
        jobj->set_scalar_replaceable(false);
        ptn->set_scalar_replaceable(false);
      }
    }
    if (!jobj->scalar_replaceable()) {
      return;
    }
  }

  for (EdgeIterator j(jobj); j.has_next(); j.next()) {
    if (j.get()->is_Arraycopy()) {
      continue;
    }

    // A non-escaping object node should point only to field nodes.
    FieldNode* field = j.get()->as_Field();
    int offset = field->as_Field()->offset();

    // 4. An object is not scalar replaceable if it has a field with unknown
    //    offset (an array's element is accessed in a loop).
    if (offset == Type::OffsetBot) {
      jobj->set_scalar_replaceable(false);
      return;
    }
    // 5. Currently an object is not scalar replaceable if a LoadStore node
    //    accesses its field since the field value is unknown after it.
    //
    Node* n = field->ideal_node();

    // Test for an unsafe access that was parsed as maybe off heap
    // (with a CheckCastPP to raw memory).
    assert(n->is_AddP(), "expect an address computation");
    if (n->in(AddPNode::Base)->is_top() &&
        n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
      assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
      assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
      jobj->set_scalar_replaceable(false);
      return;
    }

    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* u = n->fast_out(i);
      if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
        jobj->set_scalar_replaceable(false);
        return;
      }
    }

    // 6. Or the address may point to more than one object. This may produce
    //    a false positive (marking the object not scalar replaceable)
    //    since the flow-insensitive escape analysis can't separate
    //    the case when stores overwrite the field's value from the case
    //    when stores happen on different control branches.
    //
    // Note: it will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    // but it will save us from incorrect optimizations in cases like the
    // following:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
    //
    if (field->base_count() > 1) {
      for (BaseIterator i(field); i.has_next(); i.next()) {
        PointsToNode* base = i.get();
        // Don't take into account LocalVar nodes which
        // may point to only one object, which should also be
        // this field's base by now.
        if (base->is_JavaObject() && base != jobj) {
          // Mark all bases.
          jobj->set_scalar_replaceable(false);
          base->set_scalar_replaceable(false);
        }
      }
    }
  }
}

#ifdef ASSERT
void ConnectionGraph::verify_connection_graph(
                 GrowableArray<PointsToNode*>&   ptnodes_worklist,
                 GrowableArray<JavaObjectNode*>& non_escaped_worklist,
                 GrowableArray<JavaObjectNode*>& java_objects_worklist,
                 GrowableArray<Node*>&           addp_worklist) {
  // Verify that graph is complete - no new edges could be added.
  int java_objects_length = java_objects_worklist.length();
  int non_escaped_length  = non_escaped_worklist.length();
  int new_edges = 0;
  for (int next = 0; next < java_objects_length; ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    new_edges += add_java_object_edges(ptn, true);
  }
  assert(new_edges == 0, "graph was not complete");
  // Verify that escape state is final.
  int length = non_escaped_worklist.length();
  find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist);
  assert((non_escaped_length == non_escaped_worklist.length()) &&
         (non_escaped_length == length) &&
         (_worklist.length() == 0), "escape state was not final");

  // Verify fields information.
  int addp_length = addp_worklist.length();
  for (int next = 0; next < addp_length; ++next) {
    Node* n = addp_worklist.at(next);
    FieldNode* field = ptnode_adr(n->_idx)->as_Field();
    if (field->is_oop()) {
      // Verify that field has all bases
      Node* base = get_addp_base(n);
      PointsToNode* ptn = ptnode_adr(base->_idx);
      if (ptn->is_JavaObject()) {
        assert(field->has_base(ptn->as_JavaObject()), "sanity");
      } else {
        assert(ptn->is_LocalVar(), "sanity");
        for (EdgeIterator i(ptn); i.has_next(); i.next()) {
          PointsToNode* e = i.get();
          if (e->is_JavaObject()) {
            assert(field->has_base(e->as_JavaObject()), "sanity");
          }
        }
      }
      // Verify that all fields have initializing values.
1828 if (field->edge_count() == 0) { 1829 tty->print_cr("----------field does not have references----------"); 1830 field->dump(); 1831 for (BaseIterator i(field); i.has_next(); i.next()) { 1832 PointsToNode* base = i.get(); 1833 tty->print_cr("----------field has next base---------------------"); 1834 base->dump(); 1835 if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { 1836 tty->print_cr("----------base has fields-------------------------"); 1837 for (EdgeIterator j(base); j.has_next(); j.next()) { 1838 j.get()->dump(); 1839 } 1840 tty->print_cr("----------base has references---------------------"); 1841 for (UseIterator j(base); j.has_next(); j.next()) { 1842 j.get()->dump(); 1843 } 1844 } 1845 } 1846 for (UseIterator i(field); i.has_next(); i.next()) { 1847 i.get()->dump(); 1848 } 1849 assert(field->edge_count() > 0, "sanity"); 1850 } 1851 } 1852 } 1853 } 1854 #endif 1855 1856 // Optimize ideal graph. 1857 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, 1858 GrowableArray<Node*>& storestore_worklist) { 1859 Compile* C = _compile; 1860 PhaseIterGVN* igvn = _igvn; 1861 if (EliminateLocks) { 1862 // Mark locks before changing ideal graph. 1863 int cnt = C->macro_count(); 1864 for( int i=0; i < cnt; i++ ) { 1865 Node *n = C->macro_node(i); 1866 if (n->is_AbstractLock()) { // Lock and Unlock nodes 1867 AbstractLockNode* alock = n->as_AbstractLock(); 1868 if (!alock->is_non_esc_obj()) { 1869 if (not_global_escape(alock->obj_node())) { 1870 assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); 1871 // The lock could be marked eliminated by lock coarsening 1872 // code during first IGVN before EA. Replace coarsened flag 1873 // to eliminate all associated locks/unlocks. 1874 #ifdef ASSERT 1875 alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); 1876 #endif 1877 alock->set_non_esc_obj(); 1878 } 1879 } 1880 } 1881 } 1882 } 1883 1884 if (OptimizePtrCompare) { 1885 // Add ConI(#CC_GT) and ConI(#CC_EQ). 1886 _pcmp_neq = igvn->makecon(TypeInt::CC_GT); 1887 _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); 1888 // Optimize objects compare. 1889 while (ptr_cmp_worklist.length() != 0) { 1890 Node *n = ptr_cmp_worklist.pop(); 1891 Node *res = optimize_ptr_compare(n); 1892 if (res != NULL) { 1893 #ifndef PRODUCT 1894 if (PrintOptimizePtrCompare) { 1895 tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); 1896 if (Verbose) { 1897 n->dump(1); 1898 } 1899 } 1900 #endif 1901 igvn->replace_node(n, res); 1902 } 1903 } 1904 // cleanup 1905 if (_pcmp_neq->outcnt() == 0) 1906 igvn->hash_delete(_pcmp_neq); 1907 if (_pcmp_eq->outcnt() == 0) 1908 igvn->hash_delete(_pcmp_eq); 1909 } 1910 1911 // For MemBarStoreStore nodes added in library_call.cpp, check 1912 // escape status of associated AllocateNode and optimize out 1913 // MemBarStoreStore node if the allocated object never escapes. 
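  // An illustrative sketch of the replacement performed below (shape
  // simplified):
  //
  //   Proj #memory (from Allocate)          Proj #memory (from Allocate)
  //        |                                     |
  //   MemBarStoreStore              ==>     MemBarCPUOrder
  //
  // The StoreStore barrier orders the object's field initialization
  // before the object reference becomes visible to other threads; if the
  // object never escapes, no other thread can observe it, so the weaker
  // CPU-order membar is sufficient.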
1914 while (storestore_worklist.length() != 0) { 1915 Node *n = storestore_worklist.pop(); 1916 MemBarStoreStoreNode *storestore = n ->as_MemBarStoreStore(); 1917 Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); 1918 if (alloc->is_Allocate() && not_global_escape(alloc)) { 1919 MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); 1920 mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); 1921 mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); 1922 igvn->register_new_node_with_optimizer(mb); 1923 igvn->replace_node(storestore, mb); 1924 } 1925 } 1926 } 1927 1928 // Optimize objects compare. 1929 Node* ConnectionGraph::optimize_ptr_compare(Node* n) { 1930 assert(OptimizePtrCompare, "sanity"); 1931 PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); 1932 PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); 1933 JavaObjectNode* jobj1 = unique_java_object(n->in(1)); 1934 JavaObjectNode* jobj2 = unique_java_object(n->in(2)); 1935 assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); 1936 assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); 1937 1938 // Check simple cases first. 1939 if (jobj1 != NULL) { 1940 if (jobj1->escape_state() == PointsToNode::NoEscape) { 1941 if (jobj1 == jobj2) { 1942 // Comparing the same not escaping object. 1943 return _pcmp_eq; 1944 } 1945 Node* obj = jobj1->ideal_node(); 1946 // Comparing not escaping allocation. 1947 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1948 !ptn2->points_to(jobj1)) { 1949 return _pcmp_neq; // This includes nullness check. 1950 } 1951 } 1952 } 1953 if (jobj2 != NULL) { 1954 if (jobj2->escape_state() == PointsToNode::NoEscape) { 1955 Node* obj = jobj2->ideal_node(); 1956 // Comparing not escaping allocation. 1957 if ((obj->is_Allocate() || obj->is_CallStaticJava()) && 1958 !ptn1->points_to(jobj2)) { 1959 return _pcmp_neq; // This includes nullness check. 1960 } 1961 } 1962 } 1963 if (jobj1 != NULL && jobj1 != phantom_obj && 1964 jobj2 != NULL && jobj2 != phantom_obj && 1965 jobj1->ideal_node()->is_Con() && 1966 jobj2->ideal_node()->is_Con()) { 1967 // Klass or String constants compare. Need to be careful with 1968 // compressed pointers - compare types of ConN and ConP instead of nodes. 1969 const Type* t1 = jobj1->ideal_node()->get_ptr_type(); 1970 const Type* t2 = jobj2->ideal_node()->get_ptr_type(); 1971 if (t1->make_ptr() == t2->make_ptr()) { 1972 return _pcmp_eq; 1973 } else { 1974 return _pcmp_neq; 1975 } 1976 } 1977 if (ptn1->meet(ptn2)) { 1978 return NULL; // Sets are not disjoint 1979 } 1980 1981 // Sets are disjoint. 1982 bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); 1983 bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); 1984 bool set1_has_null_ptr = ptn1->points_to(null_obj); 1985 bool set2_has_null_ptr = ptn2->points_to(null_obj); 1986 if ((set1_has_unknown_ptr && set2_has_null_ptr) || 1987 (set2_has_unknown_ptr && set1_has_null_ptr)) { 1988 // Check nullness of unknown object. 1989 return NULL; 1990 } 1991 1992 // Disjointness by itself is not sufficient since 1993 // alias analysis is not complete for escaped objects. 1994 // Disjoint sets are definitely unrelated only when 1995 // at least one set has only not escaping allocations. 
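  //
  // An illustrative Java example (hypothetical) of a compare that can be
  // folded by the checks below:
  //
  //   Foo a = new Foo();    // non-escaping allocation
  //   if (a == b) { ... }   // folds to NE when b's points-to set is
  //                         // disjoint from {a} and 'a' never escapes
  //
  // If 'a' escaped, a callee or another thread could have stored it
  // somewhere b is loaded from, so disjoint connection-graph sets alone
  // would not prove inequality.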
1996 if (!set1_has_unknown_ptr && !set1_has_null_ptr) { 1997 if (ptn1->non_escaping_allocation()) { 1998 return _pcmp_neq; 1999 } 2000 } 2001 if (!set2_has_unknown_ptr && !set2_has_null_ptr) { 2002 if (ptn2->non_escaping_allocation()) { 2003 return _pcmp_neq; 2004 } 2005 } 2006 return NULL; 2007 } 2008 2009 // Connection Graph constuction functions. 2010 2011 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { 2012 PointsToNode* ptadr = _nodes.at(n->_idx); 2013 if (ptadr != NULL) { 2014 assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); 2015 return; 2016 } 2017 Compile* C = _compile; 2018 ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); 2019 _nodes.at_put(n->_idx, ptadr); 2020 } 2021 2022 void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { 2023 PointsToNode* ptadr = _nodes.at(n->_idx); 2024 if (ptadr != NULL) { 2025 assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); 2026 return; 2027 } 2028 Compile* C = _compile; 2029 ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); 2030 _nodes.at_put(n->_idx, ptadr); 2031 } 2032 2033 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { 2034 PointsToNode* ptadr = _nodes.at(n->_idx); 2035 if (ptadr != NULL) { 2036 assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); 2037 return; 2038 } 2039 bool unsafe = false; 2040 bool is_oop = is_oop_field(n, offset, &unsafe); 2041 if (unsafe) { 2042 es = PointsToNode::GlobalEscape; 2043 } 2044 Compile* C = _compile; 2045 FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); 2046 _nodes.at_put(n->_idx, field); 2047 } 2048 2049 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, 2050 PointsToNode* src, PointsToNode* dst) { 2051 assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); 2052 assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); 2053 PointsToNode* ptadr = _nodes.at(n->_idx); 2054 if (ptadr != NULL) { 2055 assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); 2056 return; 2057 } 2058 Compile* C = _compile; 2059 ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); 2060 _nodes.at_put(n->_idx, ptadr); 2061 // Add edge from arraycopy node to source object. 2062 (void)add_edge(ptadr, src); 2063 src->set_arraycopy_src(); 2064 // Add edge from destination object to arraycopy node. 2065 (void)add_edge(dst, ptadr); 2066 dst->set_arraycopy_dst(); 2067 } 2068 2069 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { 2070 const Type* adr_type = n->as_AddP()->bottom_type(); 2071 BasicType bt = T_INT; 2072 if (offset == Type::OffsetBot) { 2073 // Check only oop fields. 2074 if (!adr_type->isa_aryptr() || 2075 (adr_type->isa_aryptr()->klass() == NULL) || 2076 adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { 2077 // OffsetBot is used to reference array's element. Ignore first AddP. 
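      // For example (illustrative): "a[i] = x" with a non-constant index
      // produces an AddP whose offset is not a compile-time constant, so
      // the corresponding Field node gets Type::OffsetBot and stands for
      // all elements of the array at once.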
2078 if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { 2079 bt = T_OBJECT; 2080 } 2081 } 2082 } else if (offset != oopDesc::klass_offset_in_bytes()) { 2083 if (adr_type->isa_instptr()) { 2084 ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); 2085 if (field != NULL) { 2086 bt = field->layout_type(); 2087 } else { 2088 // Check for unsafe oop field access 2089 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2090 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2091 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2092 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2093 bt = T_OBJECT; 2094 (*unsafe) = true; 2095 } 2096 } 2097 } else if (adr_type->isa_aryptr()) { 2098 if (offset == arrayOopDesc::length_offset_in_bytes()) { 2099 // Ignore array length load. 2100 } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { 2101 // Ignore first AddP. 2102 } else { 2103 const Type* elemtype = adr_type->isa_aryptr()->elem(); 2104 bt = elemtype->array_element_basic_type(); 2105 } 2106 } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { 2107 // Allocation initialization, ThreadLocal field access, unsafe access 2108 if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || 2109 n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || 2110 n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || 2111 BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { 2112 bt = T_OBJECT; 2113 } 2114 } 2115 } 2116 // Note: T_NARROWOOP is not classed as a real reference type 2117 return (is_reference_type(bt) || bt == T_NARROWOOP); 2118 } 2119 2120 // Returns unique pointed java object or NULL. 2121 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { 2122 assert(!_collecting, "should not call when contructed graph"); 2123 // If the node was created after the escape computation we can't answer. 2124 uint idx = n->_idx; 2125 if (idx >= nodes_size()) { 2126 return NULL; 2127 } 2128 PointsToNode* ptn = ptnode_adr(idx); 2129 if (ptn == NULL) { 2130 return NULL; 2131 } 2132 if (ptn->is_JavaObject()) { 2133 return ptn->as_JavaObject(); 2134 } 2135 assert(ptn->is_LocalVar(), "sanity"); 2136 // Check all java objects it points to. 2137 JavaObjectNode* jobj = NULL; 2138 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2139 PointsToNode* e = i.get(); 2140 if (e->is_JavaObject()) { 2141 if (jobj == NULL) { 2142 jobj = e->as_JavaObject(); 2143 } else if (jobj != e) { 2144 return NULL; 2145 } 2146 } 2147 } 2148 return jobj; 2149 } 2150 2151 // Return true if this node points only to non-escaping allocations. 2152 bool PointsToNode::non_escaping_allocation() { 2153 if (is_JavaObject()) { 2154 Node* n = ideal_node(); 2155 if (n->is_Allocate() || n->is_CallStaticJava()) { 2156 return (escape_state() == PointsToNode::NoEscape); 2157 } else { 2158 return false; 2159 } 2160 } 2161 assert(is_LocalVar(), "sanity"); 2162 // Check all java objects it points to. 
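  // For example (illustrative): the LocalVar for "p = cond ? new Foo() : q"
  // points both to the Foo allocation and to whatever q points to; it
  // qualifies here only if every pointed-to object is a NoEscape
  // allocation (an Allocate or a Java static call result).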
2163 for (EdgeIterator i(this); i.has_next(); i.next()) { 2164 PointsToNode* e = i.get(); 2165 if (e->is_JavaObject()) { 2166 Node* n = e->ideal_node(); 2167 if ((e->escape_state() != PointsToNode::NoEscape) || 2168 !(n->is_Allocate() || n->is_CallStaticJava())) { 2169 return false; 2170 } 2171 } 2172 } 2173 return true; 2174 } 2175 2176 // Return true if we know the node does not escape globally. 2177 bool ConnectionGraph::not_global_escape(Node *n) { 2178 assert(!_collecting, "should not call during graph construction"); 2179 // If the node was created after the escape computation we can't answer. 2180 uint idx = n->_idx; 2181 if (idx >= nodes_size()) { 2182 return false; 2183 } 2184 PointsToNode* ptn = ptnode_adr(idx); 2185 if (ptn == NULL) { 2186 return false; // not in congraph (e.g. ConI) 2187 } 2188 PointsToNode::EscapeState es = ptn->escape_state(); 2189 // If we have already computed a value, return it. 2190 if (es >= PointsToNode::GlobalEscape) 2191 return false; 2192 if (ptn->is_JavaObject()) { 2193 return true; // (es < PointsToNode::GlobalEscape); 2194 } 2195 assert(ptn->is_LocalVar(), "sanity"); 2196 // Check all java objects it points to. 2197 for (EdgeIterator i(ptn); i.has_next(); i.next()) { 2198 if (i.get()->escape_state() >= PointsToNode::GlobalEscape) 2199 return false; 2200 } 2201 return true; 2202 } 2203 2204 2205 // Helper functions 2206 2207 // Return true if this node points to specified node or nodes it points to. 2208 bool PointsToNode::points_to(JavaObjectNode* ptn) const { 2209 if (is_JavaObject()) { 2210 return (this == ptn); 2211 } 2212 assert(is_LocalVar() || is_Field(), "sanity"); 2213 for (EdgeIterator i(this); i.has_next(); i.next()) { 2214 if (i.get() == ptn) 2215 return true; 2216 } 2217 return false; 2218 } 2219 2220 // Return true if one node points to an other. 2221 bool PointsToNode::meet(PointsToNode* ptn) { 2222 if (this == ptn) { 2223 return true; 2224 } else if (ptn->is_JavaObject()) { 2225 return this->points_to(ptn->as_JavaObject()); 2226 } else if (this->is_JavaObject()) { 2227 return ptn->points_to(this->as_JavaObject()); 2228 } 2229 assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); 2230 int ptn_count = ptn->edge_count(); 2231 for (EdgeIterator i(this); i.has_next(); i.next()) { 2232 PointsToNode* this_e = i.get(); 2233 for (int j = 0; j < ptn_count; j++) { 2234 if (this_e == ptn->edge(j)) 2235 return true; 2236 } 2237 } 2238 return false; 2239 } 2240 2241 #ifdef ASSERT 2242 // Return true if bases point to this java object. 2243 bool FieldNode::has_base(JavaObjectNode* jobj) const { 2244 for (BaseIterator i(this); i.has_next(); i.next()) { 2245 if (i.get() == jobj) 2246 return true; 2247 } 2248 return false; 2249 } 2250 #endif 2251 2252 bool ConnectionGraph::is_captured_store_address(Node* addp) { 2253 // Handle simple case first. 
2254 assert(_igvn->type(addp)->isa_oopptr() == NULL, "should be raw access"); 2255 if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) { 2256 return true; 2257 } else if (addp->in(AddPNode::Address)->is_Phi()) { 2258 for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { 2259 Node* addp_use = addp->fast_out(i); 2260 if (addp_use->is_Store()) { 2261 for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) { 2262 if (addp_use->fast_out(j)->is_Initialize()) { 2263 return true; 2264 } 2265 } 2266 } 2267 } 2268 } 2269 return false; 2270 } 2271 2272 int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { 2273 const Type *adr_type = phase->type(adr); 2274 if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && is_captured_store_address(adr)) { 2275 // We are computing a raw address for a store captured by an Initialize 2276 // compute an appropriate address type. AddP cases #3 and #5 (see below). 2277 int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); 2278 assert(offs != Type::OffsetBot || 2279 adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), 2280 "offset must be a constant or it is initialization of array"); 2281 return offs; 2282 } 2283 const TypePtr *t_ptr = adr_type->isa_ptr(); 2284 assert(t_ptr != NULL, "must be a pointer type"); 2285 return t_ptr->offset(); 2286 } 2287 2288 Node* ConnectionGraph::get_addp_base(Node *addp) { 2289 assert(addp->is_AddP(), "must be AddP"); 2290 // 2291 // AddP cases for Base and Address inputs: 2292 // case #1. Direct object's field reference: 2293 // Allocate 2294 // | 2295 // Proj #5 ( oop result ) 2296 // | 2297 // CheckCastPP (cast to instance type) 2298 // | | 2299 // AddP ( base == address ) 2300 // 2301 // case #2. Indirect object's field reference: 2302 // Phi 2303 // | 2304 // CastPP (cast to instance type) 2305 // | | 2306 // AddP ( base == address ) 2307 // 2308 // case #3. Raw object's field reference for Initialize node: 2309 // Allocate 2310 // | 2311 // Proj #5 ( oop result ) 2312 // top | 2313 // \ | 2314 // AddP ( base == top ) 2315 // 2316 // case #4. Array's element reference: 2317 // {CheckCastPP | CastPP} 2318 // | | | 2319 // | AddP ( array's element offset ) 2320 // | | 2321 // AddP ( array's offset ) 2322 // 2323 // case #5. Raw object's field reference for arraycopy stub call: 2324 // The inline_native_clone() case when the arraycopy stub is called 2325 // after the allocation before Initialize and CheckCastPP nodes. 2326 // Allocate 2327 // | 2328 // Proj #5 ( oop result ) 2329 // | | 2330 // AddP ( base == address ) 2331 // 2332 // case #6. Constant Pool, ThreadLocal, CastX2P or 2333 // Raw object's field reference: 2334 // {ConP, ThreadLocal, CastX2P, raw Load} 2335 // top | 2336 // \ | 2337 // AddP ( base == top ) 2338 // 2339 // case #7. Klass's field reference. 2340 // LoadKlass 2341 // | | 2342 // AddP ( base == address ) 2343 // 2344 // case #8. narrow Klass's field reference. 2345 // LoadNKlass 2346 // | 2347 // DecodeN 2348 // | | 2349 // AddP ( base == address ) 2350 // 2351 // case #9. Mixed unsafe access 2352 // {instance} 2353 // | 2354 // CheckCastPP (raw) 2355 // top | 2356 // \ | 2357 // AddP ( base == top ) 2358 // 2359 Node *base = addp->in(AddPNode::Base); 2360 if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. 2361 base = addp->in(AddPNode::Address); 2362 while (base->is_AddP()) { 2363 // Case #6 (unsafe access) may have several chained AddP nodes. 
      assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
      base = base->in(AddPNode::Address);
    }
    if (base->Opcode() == Op_CheckCastPP &&
        base->bottom_type()->isa_rawptr() &&
        _igvn->type(base->in(1))->isa_oopptr()) {
      base = base->in(1); // Case #9
    } else {
      Node* uncast_base = base->uncast();
      int opcode = uncast_base->Opcode();
      assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
             opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
             (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
             is_captured_store_address(addp), "sanity");
    }
  }
  return base;
}

Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
  assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
  Node* addp2 = addp->raw_out(0);
  if (addp->outcnt() == 1 && addp2->is_AddP() &&
      addp2->in(AddPNode::Base) == n &&
      addp2->in(AddPNode::Address) == addp) {
    assert(addp->in(AddPNode::Base) == n, "expecting the same base");
    //
    // Find the array's offset to push it on the worklist first and
    // as a result process the array's element offset first (pushed second)
    // to avoid a CastPP for the array's offset.
    // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to, which would be wrong since
    // the algorithm expects the CastPP to have the same points-to set
    // as AddP's base CheckCastPP (LocalVar).
    //
    // ArrayAllocation
    //   |
    // CheckCastPP
    //   |
    // memProj (from ArrayAllocation CheckCastPP)
    //   |  ||
    //   |  ||   Int (element index)
    //   |  ||    |   ConI (log(element size))
    //   |  ||    |   /
    //   |  ||   LShift
    //   |  ||  /
    //   |  AddP (array's element offset)
    //   |  |
    //   |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
    //   | / /
    //   AddP (array's offset)
    //   |
    //   Load/Store (memory operation on array's element)
    //
    return addp2;
  }
  return NULL;
}

//
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
  assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
  const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
  if (t == NULL) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type (cases #3 and #5).
    assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
    assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
    intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
    assert(offs != Type::OffsetBot, "offset must be a constant");
    t = base_t->add_offset(offs)->is_oopptr();
  }
  int inst_id = base_t->instance_id();
  assert(!t->is_known_instance() || t->instance_id() == inst_id,
         "old type must be non-instance or match new type");

  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size, which will
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies correctness of the offset.
  //
  // It could happen on a subclass's branch (from the type profiling
  // inlining) which was not eliminated during parsing since the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' altogether.
  // It could happen when the CHA type is different from the MDO type on a
  // dead path (for example, from an instanceof check) which is not collapsed
  // during parsing.
  //
  // Do nothing for such an AddP node and don't process its users since
  // this code branch will go away.
  //
  if (!t->is_known_instance() &&
      !base_t->klass()->is_subtype_of(t->klass())) {
    return false; // bail out
  }
  const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
  // Do NOT remove the next line: ensure a new alias index is allocated
  // for the instance type. Note: C++ will not remove it since the call
  // has a side effect.
  int alias_idx = _compile->get_alias_index(tinst);
  igvn->set_type(addp, tinst);
  // record the allocation in the node map
  set_map(addp, get_map(base->_idx));
  // Set addp's Base and Address to 'base'.
  Node *abase = addp->in(AddPNode::Base);
  Node *adr   = addp->in(AddPNode::Address);
  if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
      adr->in(0)->_idx == (uint)inst_id) {
    // Skip AddP cases #3 and #5.
  } else {
    assert(!abase->is_top(), "sanity"); // AddP case #3
    if (abase != base) {
      igvn->hash_delete(addp);
      addp->set_req(AddPNode::Base, base);
      if (abase == adr) {
        addp->set_req(AddPNode::Address, base);
      } else {
        // AddP case #4 (adr is array's element offset AddP node)
#ifdef ASSERT
        const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
        assert(adr->is_AddP() && atype != NULL &&
               atype->instance_id() == inst_id, "array's element offset should be processed first");
#endif
      }
      igvn->hash_insert(addp);
    }
  }
  // Put on IGVN worklist since at least addp's type was changed above.
  record_for_optimizer(addp);
  return true;
}

//
// Create a new version of orig_phi if necessary. Returns either the newly
// created phi or an existing phi. Sets new_created to indicate whether a new
// phi was created. Cache the last newly created phi in the node map.
//
PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, bool &new_created) {
  Compile *C = _compile;
  PhaseGVN* igvn = _igvn;
  new_created = false;
  int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
  // nothing to do if orig_phi is bottom memory or matches alias_idx
  if (phi_alias_idx == alias_idx) {
    return orig_phi;
  }
  // Have we recently created a Phi for this alias index?
  PhiNode *result = get_map_phi(orig_phi->_idx);
  if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
    return result;
  }
  // The previous check may fail when the same wide memory Phi was split into
  // Phis for different memory slices. Search all Phis for this region.
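  // For example (illustrative): a bottom-memory Phi merging stores to two
  // instance fields, Foo+12 (alias 4) and Foo+16 (alias 5), is split once
  // per slice; the node map remembers only the most recently created
  // split, hence the scan over all Phis of the region below.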
2523 if (result != NULL) { 2524 Node* region = orig_phi->in(0); 2525 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { 2526 Node* phi = region->fast_out(i); 2527 if (phi->is_Phi() && 2528 C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { 2529 assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); 2530 return phi->as_Phi(); 2531 } 2532 } 2533 } 2534 if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { 2535 if (C->do_escape_analysis() == true && !C->failing()) { 2536 // Retry compilation without escape analysis. 2537 // If this is the first failure, the sentinel string will "stick" 2538 // to the Compile object, and the C2Compiler will see it and retry. 2539 C->record_failure(C2Compiler::retry_no_escape_analysis()); 2540 } 2541 return NULL; 2542 } 2543 orig_phi_worklist.append_if_missing(orig_phi); 2544 const TypePtr *atype = C->get_adr_type(alias_idx); 2545 result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); 2546 C->copy_node_notes_to(result, orig_phi); 2547 igvn->set_type(result, result->bottom_type()); 2548 record_for_optimizer(result); 2549 set_map(orig_phi, result); 2550 new_created = true; 2551 return result; 2552 } 2553 2554 // 2555 // Return a new version of Memory Phi "orig_phi" with the inputs having the 2556 // specified alias index. 2557 // 2558 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist) { 2559 assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); 2560 Compile *C = _compile; 2561 PhaseGVN* igvn = _igvn; 2562 bool new_phi_created; 2563 PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); 2564 if (!new_phi_created) { 2565 return result; 2566 } 2567 GrowableArray<PhiNode *> phi_list; 2568 GrowableArray<uint> cur_input; 2569 PhiNode *phi = orig_phi; 2570 uint idx = 1; 2571 bool finished = false; 2572 while(!finished) { 2573 while (idx < phi->req()) { 2574 Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); 2575 if (mem != NULL && mem->is_Phi()) { 2576 PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); 2577 if (new_phi_created) { 2578 // found an phi for which we created a new split, push current one on worklist and begin 2579 // processing new one 2580 phi_list.push(phi); 2581 cur_input.push(idx); 2582 phi = mem->as_Phi(); 2583 result = newphi; 2584 idx = 1; 2585 continue; 2586 } else { 2587 mem = newphi; 2588 } 2589 } 2590 if (C->failing()) { 2591 return NULL; 2592 } 2593 result->set_req(idx++, mem); 2594 } 2595 #ifdef ASSERT 2596 // verify that the new Phi has an input for each input of the original 2597 assert( phi->req() == result->req(), "must have same number of inputs."); 2598 assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); 2599 #endif 2600 // Check if all new phi's inputs have specified alias index. 2601 // Otherwise use old phi. 
2602 for (uint i = 1; i < phi->req(); i++) { 2603 Node* in = result->in(i); 2604 assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); 2605 } 2606 // we have finished processing a Phi, see if there are any more to do 2607 finished = (phi_list.length() == 0 ); 2608 if (!finished) { 2609 phi = phi_list.pop(); 2610 idx = cur_input.pop(); 2611 PhiNode *prev_result = get_map_phi(phi->_idx); 2612 prev_result->set_req(idx++, result); 2613 result = prev_result; 2614 } 2615 } 2616 return result; 2617 } 2618 2619 // 2620 // The next methods are derived from methods in MemNode. 2621 // 2622 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { 2623 Node *mem = mmem; 2624 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally 2625 // means an array I have not precisely typed yet. Do not do any 2626 // alias stuff with it any time soon. 2627 if (toop->base() != Type::AnyPtr && 2628 !(toop->klass() != NULL && 2629 toop->klass()->is_java_lang_Object() && 2630 toop->offset() == Type::OffsetBot)) { 2631 mem = mmem->memory_at(alias_idx); 2632 // Update input if it is progress over what we have now 2633 } 2634 return mem; 2635 } 2636 2637 // 2638 // Move memory users to their memory slices. 2639 // 2640 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phis) { 2641 Compile* C = _compile; 2642 PhaseGVN* igvn = _igvn; 2643 const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); 2644 assert(tp != NULL, "ptr type"); 2645 int alias_idx = C->get_alias_index(tp); 2646 int general_idx = C->get_general_index(alias_idx); 2647 2648 // Move users first 2649 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 2650 Node* use = n->fast_out(i); 2651 if (use->is_MergeMem()) { 2652 MergeMemNode* mmem = use->as_MergeMem(); 2653 assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); 2654 if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { 2655 continue; // Nothing to do 2656 } 2657 // Replace previous general reference to mem node. 2658 uint orig_uniq = C->unique(); 2659 Node* m = find_inst_mem(n, general_idx, orig_phis); 2660 assert(orig_uniq == C->unique(), "no new nodes"); 2661 mmem->set_memory_at(general_idx, m); 2662 --imax; 2663 --i; 2664 } else if (use->is_MemBar()) { 2665 assert(!use->is_Initialize(), "initializing stores should not be moved"); 2666 if (use->req() > MemBarNode::Precedent && 2667 use->in(MemBarNode::Precedent) == n) { 2668 // Don't move related membars. 2669 record_for_optimizer(use); 2670 continue; 2671 } 2672 tp = use->as_MemBar()->adr_type()->isa_ptr(); 2673 if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || 2674 alias_idx == general_idx) { 2675 continue; // Nothing to do 2676 } 2677 // Move to general memory slice. 2678 uint orig_uniq = C->unique(); 2679 Node* m = find_inst_mem(n, general_idx, orig_phis); 2680 assert(orig_uniq == C->unique(), "no new nodes"); 2681 igvn->hash_delete(use); 2682 imax -= use->replace_edge(n, m); 2683 igvn->hash_insert(use); 2684 record_for_optimizer(use); 2685 --i; 2686 #ifdef ASSERT 2687 } else if (use->is_Mem()) { 2688 if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { 2689 // Don't move related cardmark. 2690 continue; 2691 } 2692 // Memory nodes should have new memory input. 
      tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(get_map(use->_idx) != NULL || idx == alias_idx,
             "Following memory nodes should have new memory input or be on the same memory slice");
    } else if (use->is_Phi()) {
      // Phi nodes should be split and moved already.
      tp = use->as_Phi()->adr_type()->isa_ptr();
      assert(tp != NULL, "ptr type");
      int idx = C->get_alias_index(tp);
      assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
    } else {
      use->dump();
      assert(false, "should not be here");
#endif
    }
  }
}

//
// Search the memory chain of "mem" to find a MemNode whose address
// matches the specified alias index.
//
Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *> &orig_phis) {
  if (orig_mem == NULL)
    return orig_mem;
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;
  const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
  bool is_instance = (toop != NULL) && toop->is_known_instance();
  Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
  Node *prev = NULL;
  Node *result = orig_mem;
  while (prev != result) {
    prev = result;
    if (result == start_mem)
      break; // hit one of our sentinels
    if (result->is_Mem()) {
      const Type *at = igvn->type(result->in(MemNode::Address));
      if (at == Type::TOP)
        break; // Dead
      assert (at->isa_ptr() != NULL, "pointer type required.");
      int idx = C->get_alias_index(at->is_ptr());
      if (idx == alias_idx)
        break; // Found
      if (!is_instance && (at->isa_oopptr() == NULL ||
                           !at->is_oopptr()->is_known_instance())) {
        break; // Do not skip store to general memory slice.
      }
      result = result->in(MemNode::Memory);
    }
    if (!is_instance)
      continue; // don't search further for non-instance types
    // skip over a call which does not affect this memory slice
    if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
      Node *proj_in = result->in(0);
      if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
        break; // hit one of our sentinels
      } else if (proj_in->is_Call()) {
        // ArrayCopy node processed here as well
        CallNode *call = proj_in->as_Call();
        if (!call->may_modify(toop, igvn)) {
          result = call->in(TypeFunc::Memory);
        }
      } else if (proj_in->is_Initialize()) {
        AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice; otherwise skip over it.
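        // For example (illustrative): while walking the slice of instance
        // iid=24, the Initialize of an unrelated allocation (iid=30)
        // cannot store into this slice, so the walk continues through its
        // memory input.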
2761 if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { 2762 result = proj_in->in(TypeFunc::Memory); 2763 } 2764 } else if (proj_in->is_MemBar()) { 2765 // Check if there is an array copy for a clone 2766 // Step over GC barrier when ReduceInitialCardMarks is disabled 2767 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); 2768 Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0)); 2769 2770 if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) { 2771 // Stop if it is a clone 2772 ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy(); 2773 if (ac->may_modify(toop, igvn)) { 2774 break; 2775 } 2776 } 2777 result = proj_in->in(TypeFunc::Memory); 2778 } 2779 } else if (result->is_MergeMem()) { 2780 MergeMemNode *mmem = result->as_MergeMem(); 2781 result = step_through_mergemem(mmem, alias_idx, toop); 2782 if (result == mmem->base_memory()) { 2783 // Didn't find instance memory, search through general slice recursively. 2784 result = mmem->memory_at(C->get_general_index(alias_idx)); 2785 result = find_inst_mem(result, alias_idx, orig_phis); 2786 if (C->failing()) { 2787 return NULL; 2788 } 2789 mmem->set_memory_at(alias_idx, result); 2790 } 2791 } else if (result->is_Phi() && 2792 C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { 2793 Node *un = result->as_Phi()->unique_input(igvn); 2794 if (un != NULL) { 2795 orig_phis.append_if_missing(result->as_Phi()); 2796 result = un; 2797 } else { 2798 break; 2799 } 2800 } else if (result->is_ClearArray()) { 2801 if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { 2802 // Can not bypass initialization of the instance 2803 // we are looking for. 2804 break; 2805 } 2806 // Otherwise skip it (the call updated 'result' value). 2807 } else if (result->Opcode() == Op_SCMemProj) { 2808 Node* mem = result->in(0); 2809 Node* adr = NULL; 2810 if (mem->is_LoadStore()) { 2811 adr = mem->in(MemNode::Address); 2812 } else { 2813 assert(mem->Opcode() == Op_EncodeISOArray || 2814 mem->Opcode() == Op_StrCompressedCopy, "sanity"); 2815 adr = mem->in(3); // Memory edge corresponds to destination array 2816 } 2817 const Type *at = igvn->type(adr); 2818 if (at != Type::TOP) { 2819 assert(at->isa_ptr() != NULL, "pointer type required."); 2820 int idx = C->get_alias_index(at->is_ptr()); 2821 if (idx == alias_idx) { 2822 // Assert in debug mode 2823 assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); 2824 break; // In product mode return SCMemProj node 2825 } 2826 } 2827 result = mem->in(MemNode::Memory); 2828 } else if (result->Opcode() == Op_StrInflatedCopy) { 2829 Node* adr = result->in(3); // Memory edge corresponds to destination array 2830 const Type *at = igvn->type(adr); 2831 if (at != Type::TOP) { 2832 assert(at->isa_ptr() != NULL, "pointer type required."); 2833 int idx = C->get_alias_index(at->is_ptr()); 2834 if (idx == alias_idx) { 2835 // Assert in debug mode 2836 assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); 2837 break; // In product mode return SCMemProj node 2838 } 2839 } 2840 result = result->in(MemNode::Memory); 2841 } 2842 } 2843 if (result->is_Phi()) { 2844 PhiNode *mphi = result->as_Phi(); 2845 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); 2846 const TypePtr *t = mphi->adr_type(); 2847 if (!is_instance) { 2848 // Push all non-instance Phis on the orig_phis worklist to update inputs 2849 // during Phase 4 if needed. 
      orig_phis.append_if_missing(mphi);
    } else if (C->get_alias_index(t) != alias_idx) {
      // Create a new Phi with the specified alias index type.
      result = split_memory_phi(mphi, alias_idx, orig_phis);
    }
  }
  // the result is either a MemNode, PhiNode or InitializeNode.
  return result;
}

//
// Convert the types of unescaped objects to instance types where possible,
// propagate the new type information through the graph, and update memory
// edges and MergeMem inputs to reflect the new type.
//
// We start with allocations (and calls which may be allocations) on alloc_worklist.
// The processing is done in 4 phases:
//
// Phase 1:  Process possible allocations from alloc_worklist. Create instance
//           types for the CheckCastPP for allocations where possible.
//           Propagate the new types through users as follows:
//              casts and Phi:  push users on alloc_worklist
//              AddP:  cast Base and Address inputs to the instance type
//                     push any AddP users on alloc_worklist and push any memnode
//                     users onto memnode_worklist.
// Phase 2:  Process MemNodes from memnode_worklist. Compute a new address type
//           and search the Memory chain for a store with the appropriate address
//           type. If a Phi is found, create a new version with the appropriate
//           memory slices from each of the Phi inputs.
//           For stores, process the users as follows:
//              MemNode:   push on memnode_worklist
//              MergeMem:  push on mergemem_worklist
// Phase 3:  Process MergeMem nodes from mergemem_worklist. Walk each memory slice
//           moving the first node encountered of each instance type to the
//           input corresponding to its alias index (the appropriate memory slice).
// Phase 4:  Update the inputs of non-instance memory Phis and the Memory input
//           of memnodes.
//
// In the following example, the CheckCastPP nodes are the cast of allocation
// results and the allocation of node 29 is unescaped and eligible to be an
// instance type.
//
// We start with:
//
//     7 Parm #memory
//    10 ConI "12"
//    19 CheckCastPP "Foo"
//    20 AddP _ 19 19 10  Foo+12  alias_index=4
//    29 CheckCastPP "Foo"
//    30 AddP _ 29 29 10  Foo+12  alias_index=4
//
//    40 StoreP 25  7 20 ... alias_index=4
//    50 StoreP 35 40 30 ... alias_index=4
//    60 StoreP 45 50 20 ... alias_index=4
//    70 LoadP _ 60 30 ... alias_index=4
//    80 Phi 75 50 60 Memory alias_index=4
//    90 LoadP _ 80 30 ... alias_index=4
//   100 LoadP _ 80 20 ... alias_index=4
//
// Phase 1 creates an instance type for node 29 assigning it an instance id of 24
// and creating a new alias index for node 30. This gives:
//
//     7 Parm #memory
//    10 ConI "12"
//    19 CheckCastPP "Foo"
//    20 AddP _ 19 19 10  Foo+12  alias_index=4
//    29 CheckCastPP "Foo"  iid=24
//    30 AddP _ 29 29 10  Foo+12  alias_index=6  iid=24
//
//    40 StoreP 25  7 20 ... alias_index=4
//    50 StoreP 35 40 30 ... alias_index=6
//    60 StoreP 45 50 20 ... alias_index=4
//    70 LoadP _ 60 30 ... alias_index=6
//    80 Phi 75 50 60 Memory alias_index=4
//    90 LoadP _ 80 30 ... alias_index=6
//   100 LoadP _ 80 20 ... alias_index=4
//
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.
In phase 4, the inputs to 2930 // node 80 are updated and then the memory nodes are updated with the 2931 // values computed in phase 2. This results in: 2932 // 2933 // 7 Parm #memory 2934 // 10 ConI "12" 2935 // 19 CheckCastPP "Foo" 2936 // 20 AddP _ 19 19 10 Foo+12 alias_index=4 2937 // 29 CheckCastPP "Foo" iid=24 2938 // 30 AddP _ 29 29 10 Foo+12 alias_index=6 iid=24 2939 // 2940 // 40 StoreP 25 7 20 ... alias_index=4 2941 // 50 StoreP 35 7 30 ... alias_index=6 2942 // 60 StoreP 45 40 20 ... alias_index=4 2943 // 70 LoadP _ 50 30 ... alias_index=6 2944 // 80 Phi 75 40 60 Memory alias_index=4 2945 // 120 Phi 75 50 50 Memory alias_index=6 2946 // 90 LoadP _ 120 30 ... alias_index=6 2947 // 100 LoadP _ 80 20 ... alias_index=4 2948 // 2949 void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) { 2950 GrowableArray<Node *> memnode_worklist; 2951 GrowableArray<PhiNode *> orig_phis; 2952 PhaseIterGVN *igvn = _igvn; 2953 uint new_index_start = (uint) _compile->num_alias_types(); 2954 VectorSet visited; 2955 ideal_nodes.clear(); // Reset for use with set_map/get_map. 2956 uint unique_old = _compile->unique(); 2957 2958 // Phase 1: Process possible allocations from alloc_worklist. 2959 // Create instance types for the CheckCastPP for allocations where possible. 2960 // 2961 // (Note: don't forget to change the order of the second AddP node on 2962 // the alloc_worklist if the order of the worklist processing is changed, 2963 // see the comment in find_second_addp().) 2964 // 2965 while (alloc_worklist.length() != 0) { 2966 Node *n = alloc_worklist.pop(); 2967 uint ni = n->_idx; 2968 if (n->is_Call()) { 2969 CallNode *alloc = n->as_Call(); 2970 // copy escape information to call node 2971 PointsToNode* ptn = ptnode_adr(alloc->_idx); 2972 PointsToNode::EscapeState es = ptn->escape_state(); 2973 // We have an allocation or call which returns a Java object, 2974 // see if it is unescaped. 2975 if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) 2976 continue; 2977 // Find CheckCastPP for the allocate or for the return value of a call 2978 n = alloc->result_cast(); 2979 if (n == NULL) { // No uses except Initialize node 2980 if (alloc->is_Allocate()) { 2981 // Set the scalar_replaceable flag for allocation 2982 // so it could be eliminated if it has no uses. 2983 alloc->as_Allocate()->_is_scalar_replaceable = true; 2984 } 2985 if (alloc->is_CallStaticJava()) { 2986 // Set the scalar_replaceable flag for boxing method 2987 // so it could be eliminated if it has no uses. 2988 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 2989 } 2990 continue; 2991 } 2992 if (!n->is_CheckCastPP()) { // not unique CheckCastPP. 2993 // we could reach here for allocate case if one init is associated with many allocs. 2994 if (alloc->is_Allocate()) { 2995 alloc->as_Allocate()->_is_scalar_replaceable = false; 2996 } 2997 continue; 2998 } 2999 3000 // The inline code for Object.clone() casts the allocation result to 3001 // java.lang.Object and then to the actual type of the allocated 3002 // object. Detect this case and use the second cast. 3003 // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when 3004 // the allocation result is cast to java.lang.Object and then 3005 // to the actual Array type. 
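      // Illustrative source-level shapes of the two patterns (hypothetical):
      //
      //   Foo f = (Foo) obj.clone();                         // Object -> Foo
      //   int[] a = (int[]) Array.newInstance(int.class, n); // Object -> int[]
      //
      // In both, the ideal graph has a first CheckCastPP to java.lang.Object
      // and a second CheckCastPP to the precise type; the second cast is
      // used below.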
3006 if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL 3007 && (alloc->is_AllocateArray() || 3008 igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { 3009 Node *cast2 = NULL; 3010 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 3011 Node *use = n->fast_out(i); 3012 if (use->is_CheckCastPP()) { 3013 cast2 = use; 3014 break; 3015 } 3016 } 3017 if (cast2 != NULL) { 3018 n = cast2; 3019 } else { 3020 // Non-scalar replaceable if the allocation type is unknown statically 3021 // (reflection allocation), the object can't be restored during 3022 // deoptimization without precise type. 3023 continue; 3024 } 3025 } 3026 3027 const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); 3028 if (t == NULL) 3029 continue; // not a TypeOopPtr 3030 if (!t->klass_is_exact()) 3031 continue; // not an unique type 3032 3033 if (alloc->is_Allocate()) { 3034 // Set the scalar_replaceable flag for allocation 3035 // so it could be eliminated. 3036 alloc->as_Allocate()->_is_scalar_replaceable = true; 3037 } 3038 if (alloc->is_CallStaticJava()) { 3039 // Set the scalar_replaceable flag for boxing method 3040 // so it could be eliminated. 3041 alloc->as_CallStaticJava()->_is_scalar_replaceable = true; 3042 } 3043 set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state 3044 // in order for an object to be scalar-replaceable, it must be: 3045 // - a direct allocation (not a call returning an object) 3046 // - non-escaping 3047 // - eligible to be a unique type 3048 // - not determined to be ineligible by escape analysis 3049 set_map(alloc, n); 3050 set_map(n, alloc); 3051 const TypeOopPtr* tinst = t->cast_to_instance_id(ni); 3052 igvn->hash_delete(n); 3053 igvn->set_type(n, tinst); 3054 n->raise_bottom_type(tinst); 3055 igvn->hash_insert(n); 3056 record_for_optimizer(n); 3057 // Allocate an alias index for the header fields. Accesses to 3058 // the header emitted during macro expansion wouldn't have 3059 // correct memory state otherwise. 3060 _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes())); 3061 _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes())); 3062 if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { 3063 3064 // First, put on the worklist all Field edges from Connection Graph 3065 // which is more accurate than putting immediate users from Ideal Graph. 3066 for (EdgeIterator e(ptn); e.has_next(); e.next()) { 3067 PointsToNode* tgt = e.get(); 3068 if (tgt->is_Arraycopy()) { 3069 continue; 3070 } 3071 Node* use = tgt->ideal_node(); 3072 assert(tgt->is_Field() && use->is_AddP(), 3073 "only AddP nodes are Field edges in CG"); 3074 if (use->outcnt() > 0) { // Don't process dead nodes 3075 Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); 3076 if (addp2 != NULL) { 3077 assert(alloc->is_AllocateArray(),"array allocation was expected"); 3078 alloc_worklist.append_if_missing(addp2); 3079 } 3080 alloc_worklist.append_if_missing(use); 3081 } 3082 } 3083 3084 // An allocation may have an Initialize which has raw stores. Scan 3085 // the users of the raw allocation result and push AddP users 3086 // on alloc_worklist. 
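        // Illustrative shape of such a raw store (cf. AddP case #3 in
        // get_addp_base()):
        //
        //   Allocate
        //      |
        //   Proj #Parms (raw oop result)
        //    top  |
        //      \  |
        //      AddP (base == top)
        //       |
        //     StoreP (captured by the Initialize node)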
        Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
        assert(raw_result != NULL, "must have an allocation result");
        for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
          Node *use = raw_result->fast_out(i);
          if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
            Node* addp2 = find_second_addp(use, raw_result);
            if (addp2 != NULL) {
              assert(alloc->is_AllocateArray(), "array allocation was expected");
              alloc_worklist.append_if_missing(addp2);
            }
            alloc_worklist.append_if_missing(use);
          } else if (use->is_MemBar()) {
            memnode_worklist.append_if_missing(use);
          }
        }
      }
    } else if (n->is_AddP()) {
      JavaObjectNode* jobj = unique_java_object(get_addp_base(n));
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(get_addp_base(n)->_idx)->dump();
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      }
      Node *base = get_map(jobj->idx());  // CheckCastPP node
      if (!split_AddP(n, base)) continue; // wrong type from dead path
    } else if (n->is_Phi() ||
               n->is_CheckCastPP() ||
               n->is_EncodeP() ||
               n->is_DecodeN() ||
               (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
      if (visited.test_set(n->_idx)) {
        assert(n->is_Phi(), "loops only through Phi's");
        continue;  // already processed
      }
      JavaObjectNode* jobj = unique_java_object(n);
      if (jobj == NULL || jobj == phantom_obj) {
#ifdef ASSERT
        ptnode_adr(n->_idx)->dump();
        assert(jobj != NULL && jobj != phantom_obj, "escaped allocation");
#endif
        _compile->record_failure(C2Compiler::retry_no_escape_analysis());
        return;
      } else {
        Node *val = get_map(jobj->idx());  // CheckCastPP node
        TypeNode *tn = n->as_Type();
        const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
        assert(tinst != NULL && tinst->is_known_instance() &&
               tinst->instance_id() == jobj->idx(), "instance type expected.");

        const Type *tn_type = igvn->type(tn);
        const TypeOopPtr *tn_t;
        if (tn_type->isa_narrowoop()) {
          tn_t = tn_type->make_ptr()->isa_oopptr();
        } else {
          tn_t = tn_type->isa_oopptr();
        }
        if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) {
          if (tn_type->isa_narrowoop()) {
            tn_type = tinst->make_narrowoop();
          } else {
            tn_type = tinst;
          }
          igvn->hash_delete(tn);
          igvn->set_type(tn, tn_type);
          tn->set_type(tn_type);
          igvn->hash_insert(tn);
          record_for_optimizer(n);
        } else {
          assert(tn_type == TypePtr::NULL_PTR ||
                 (tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass())),
                 "unexpected type");
          continue; // Skip dead path with different type
        }
      }
    } else {
      debug_only(n->dump();)
      assert(false, "EA: unexpected node");
      continue;
    }
    // push allocation's users on appropriate worklist
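    // (Pointer users that may carry the new instance type - Phi, CheckCastPP,
    // Encode/DecodeNarrowPtr and CastPP - go back on alloc_worklist so this
    // same loop retypes them; memory users go to memnode_worklist for Phase 2.)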
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Mem() && use->in(MemNode::Address) == n) {
        // Load/store to the instance's field
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
      } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
        Node* addp2 = find_second_addp(use, n);
        if (addp2 != NULL) {
          alloc_worklist.append_if_missing(addp2);
        }
        alloc_worklist.append_if_missing(use);
      } else if (use->is_Phi() ||
                 use->is_CheckCastPP() ||
                 use->is_EncodeNarrowPtr() ||
                 use->is_DecodeNarrowPtr() ||
                 (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
        alloc_worklist.append_if_missing(use);
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->is_SafePoint()) {
        // Look for MergeMem nodes for calls which reference a unique allocation
        // (through CheckCastPP nodes) even for debug info.
        Node* m = use->in(TypeFunc::Memory);
        if (m->is_MergeMem()) {
          assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
        }
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array.
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
            (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(op == Op_CmpP || op == Op_Conv2B ||
                     op == Op_CastP2X || op == Op_StoreCM ||
                     op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
                     op == Op_SubTypeCheck ||
                     BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
          n->dump();
          use->dump();
          assert(false, "EA: missing allocation reference path");
        }
#endif
      }
    }

  }

  // Go over all ArrayCopy nodes, and if one of the inputs has a unique
  // type, record it in the ArrayCopy node so we know what memory this
  // node uses/modifies.
  for (int next = 0; next < arraycopy_worklist.length(); next++) {
    ArrayCopyNode* ac = arraycopy_worklist.at(next);
    Node* dest = ac->in(ArrayCopyNode::Dest);
    if (dest->is_AddP()) {
      dest = get_addp_base(dest);
    }
    JavaObjectNode* jobj = unique_java_object(dest);
    if (jobj != NULL) {
      Node *base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_dest_type = base_t;
      }
    }
    Node* src = ac->in(ArrayCopyNode::Src);
    if (src->is_AddP()) {
      src = get_addp_base(src);
    }
    jobj = unique_java_object(src);
    if (jobj != NULL) {
      Node* base = get_map(jobj->idx());
      if (base != NULL) {
        const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
        ac->_src_type = base_t;
      }
    }
  }
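  // (The recorded _src_type/_dest_type are what later phases consult, e.g.
  // when deciding whether an ArrayCopy can read or modify a particular
  // memory slice.)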
  // New alias types were created in split_AddP().
  uint new_index_end = (uint) _compile->num_alias_types();
  assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1");

  // Phase 2:  Process MemNodes from memnode_worklist: compute new address
  //           types and new values for the Memory inputs (the Memory inputs
  //           are not actually updated until Phase 4).
  if (memnode_worklist.length() == 0)
    return;  // nothing to do
  while (memnode_worklist.length() != 0) {
    Node *n = memnode_worklist.pop();
    if (visited.test_set(n->_idx))
      continue;
    if (n->is_Phi() || n->is_ClearArray()) {
      // we don't need to do anything, but the users must be pushed
    } else if (n->is_MemBar()) { // Initialize, MemBar nodes
      // we don't need to do anything, but the users must be pushed
      n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
      if (n == NULL)
        continue;
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
    } else {
      assert(n->is_Mem(), "memory node required.");
      Node *addr = n->in(MemNode::Address);
      const Type *addr_t = igvn->type(addr);
      if (addr_t == Type::TOP)
        continue;
      assert(addr_t->isa_ptr() != NULL, "pointer type required.");
      int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
      assert((uint)alias_idx < new_index_end, "wrong alias index");
      Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != n->in(MemNode::Memory)) {
        // We delay the memory edge update since we need the old one in
        // the MergeMem code below, when instance memory slices are separated.
        set_map(n, mem);
      }
      if (n->is_Load()) {
        continue;  // don't push users
      } else if (n->is_LoadStore()) {
        // get the memory projection
        n = n->find_out_with(Op_SCMemProj);
        assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required");
      }
    }
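    // (Note: for stores the new memory input computed above is only recorded
    // via set_map(); the actual Memory edge is rewritten in Phase 4, after
    // Phase 3 has walked the MergeMems using the old graph shape.)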
    // push user on appropriate worklist
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node *use = n->fast_out(i);
      if (use->is_Phi() || use->is_ClearArray()) {
        memnode_worklist.append_if_missing(use);
      } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
        if (use->Opcode() == Op_StoreCM)  // Ignore cardmark stores
          continue;
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }
#ifdef ASSERT
      } else if (use->is_Mem()) {
        assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
      } else if (use->is_MergeMem()) {
        assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
      } else if (use->Opcode() == Op_EncodeISOArray) {
        if (use->in(MemNode::Memory) == n || use->in(3) == n) {
          // EncodeISOArray overwrites the destination array.
          memnode_worklist.append_if_missing(use);
        }
      } else {
        uint op = use->Opcode();
        if ((use->in(MemNode::Memory) == n) &&
            (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
          memnode_worklist.append_if_missing(use);
        } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
                     op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives ||
                     op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
                     op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
          n->dump();
          use->dump();
          assert(false, "EA: missing memory path");
        }
#endif
      }
    }
  }

  // Phase 3:  Process MergeMem nodes from mergemem_worklist.
  //   Walk each memory slice moving the first node encountered of each
  //   instance type to the input corresponding to its alias index.
  uint length = _mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
    MergeMemNode* nmm = _mergemem_worklist.at(next);
    assert(!visited.test_set(nmm->_idx), "should not be visited before");
    // Note: we don't want to use MergeMemStream here because we only want to
    // scan the inputs which exist at the start, not ones we add during processing.
    // Note 2: the MergeMem may already contain instance memory slices added
    // during the find_inst_mem() calls when memory nodes were processed above.
    igvn->hash_delete(nmm);
    uint nslices = MIN2(nmm->req(), new_index_start);
    for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
      Node* mem = nmm->in(i);
      Node* cur = NULL;
      if (mem == NULL || mem->is_top())
        continue;
      // First, update the mergemem by moving memory nodes to the corresponding
      // slices if their type became more precise since this mergemem was created.
      while (mem->is_Mem()) {
        const Type *at = igvn->type(mem->in(MemNode::Address));
        if (at != Type::TOP) {
          assert(at->isa_ptr() != NULL, "pointer type required.");
          uint idx = (uint)_compile->get_alias_index(at->is_ptr());
          if (idx == i) {
            if (cur == NULL)
              cur = mem;
          } else {
            if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
              nmm->set_memory_at(idx, mem);
            }
          }
        }
        mem = mem->in(MemNode::Memory);
      }
      nmm->set_memory_at(i, (cur != NULL) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
      for (uint ni = new_index_start; ni < new_index_end; ni++) {
        if ((uint)_compile->get_general_index(ni) == i) {
          Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
          if (nmm->is_empty_memory(m)) {
            Node* result = find_inst_mem(mem, ni, orig_phis);
            if (_compile->failing()) {
              return;
            }
            nmm->set_memory_at(ni, result);
          }
        }
      }
    }
    // Find the rest of the instance values.
    for (uint ni = new_index_start; ni < new_index_end; ni++) {
      const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
      Node* result = step_through_mergemem(nmm, ni, tinst);
      if (result == nmm->base_memory()) {
        // Didn't find instance memory, search through the general slice recursively.
        result = nmm->memory_at(_compile->get_general_index(ni));
        result = find_inst_mem(result, ni, orig_phis);
        if (_compile->failing()) {
          return;
        }
        nmm->set_memory_at(ni, result);
      }
    }
    igvn->hash_insert(nmm);
    record_for_optimizer(nmm);
  }

  // Phase 4:  Update the inputs of non-instance memory Phis and
  //           the Memory input of memnodes.
  // First update the inputs of any non-instance Phis from
  // which we split out an instance Phi.  Note we don't have
  // to recursively process Phis encountered on the input memory
  // chains as is done in split_memory_phi() since they will
  // also be processed here.
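  // (orig_phis can grow while this loop runs: find_inst_mem() may append
  // more original Phis, and re-reading length() each iteration picks them up.)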
  for (int j = 0; j < orig_phis.length(); j++) {
    PhiNode *phi = orig_phis.at(j);
    int alias_idx = _compile->get_alias_index(phi->adr_type());
    igvn->hash_delete(phi);
    for (uint i = 1; i < phi->req(); i++) {
      Node *mem = phi->in(i);
      Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
      if (_compile->failing()) {
        return;
      }
      if (mem != new_mem) {
        phi->set_req(i, new_mem);
      }
    }
    igvn->hash_insert(phi);
    record_for_optimizer(phi);
  }

  // Update the memory inputs of MemNodes with the value we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
  // Disable memory split verification code until the fix for 6984348.
  // Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
  visited.Reset();
  Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif
  for (uint i = 0; i < ideal_nodes.size(); i++) {
    Node*    n = ideal_nodes.at(i);
    Node* nmem = get_map(n->_idx);
    assert(nmem != NULL, "sanity");
    if (n->is_Mem()) {
#if 0 // ifdef ASSERT
      Node* old_mem = n->in(MemNode::Memory);
      if (!visited.test_set(old_mem->_idx)) {
        old_mems.push(old_mem, old_mem->outcnt());
      }
#endif
      assert(n->in(MemNode::Memory) != nmem, "sanity");
      if (!n->is_Load()) {
        // Move the memory users of a store first.
        move_inst_mem(n, orig_phis);
      }
      // Now update the memory input.
      igvn->hash_delete(n);
      n->set_req(MemNode::Memory, nmem);
      igvn->hash_insert(n);
      record_for_optimizer(n);
    } else {
      assert(n->is_Allocate() || n->is_CheckCastPP() ||
             n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
    }
  }
#if 0 // ifdef ASSERT
  // Verify that memory was split correctly
  while (old_mems.is_nonempty()) {
    Node* old_mem = old_mems.node();
    uint  old_cnt = old_mems.index();
    old_mems.pop();
    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
  }
#endif
}

#ifndef PRODUCT
static const char *node_type_names[] = {
  "UnknownType",
  "JavaObject",
  "LocalVar",
  "Field",
  "Arraycopy"
};

static const char *esc_names[] = {
  "UnknownEscape",
  "NoEscape",
  "ArgEscape",
  "GlobalEscape"
};
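// PointsToNode::dump() prints the node's edges and then its uses in nested
// brackets; each entry is "<idx>" suffixed with "P" for JavaObject edges,
// "F" for Field edges, "b" for base uses and "cp" for Arraycopy nodes.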
"cp" : ""); 3539 } 3540 tty->print(" ["); 3541 for (UseIterator i(this); i.has_next(); i.next()) { 3542 PointsToNode* u = i.get(); 3543 bool is_base = false; 3544 if (PointsToNode::is_base_use(u)) { 3545 is_base = true; 3546 u = PointsToNode::get_use_node(u)->as_Field(); 3547 } 3548 tty->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : ""); 3549 } 3550 tty->print(" ]] "); 3551 if (_node == NULL) 3552 tty->print_cr("<null>"); 3553 else 3554 _node->dump(); 3555 } 3556 3557 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { 3558 bool first = true; 3559 int ptnodes_length = ptnodes_worklist.length(); 3560 for (int i = 0; i < ptnodes_length; i++) { 3561 PointsToNode *ptn = ptnodes_worklist.at(i); 3562 if (ptn == NULL || !ptn->is_JavaObject()) 3563 continue; 3564 PointsToNode::EscapeState es = ptn->escape_state(); 3565 if ((es != PointsToNode::NoEscape) && !Verbose) { 3566 continue; 3567 } 3568 Node* n = ptn->ideal_node(); 3569 if (n->is_Allocate() || (n->is_CallStaticJava() && 3570 n->as_CallStaticJava()->is_boxing_method())) { 3571 if (first) { 3572 tty->cr(); 3573 tty->print("======== Connection graph for "); 3574 _compile->method()->print_short_name(); 3575 tty->cr(); 3576 first = false; 3577 } 3578 ptn->dump(); 3579 // Print all locals and fields which reference this allocation 3580 for (UseIterator j(ptn); j.has_next(); j.next()) { 3581 PointsToNode* use = j.get(); 3582 if (use->is_LocalVar()) { 3583 use->dump(Verbose); 3584 } else if (Verbose) { 3585 use->dump(); 3586 } 3587 } 3588 tty->cr(); 3589 } 3590 } 3591 } 3592 #endif 3593 3594 void ConnectionGraph::record_for_optimizer(Node *n) { 3595 _igvn->_worklist.push(n); 3596 _igvn->add_users_to_worklist(n); 3597 }