/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/valuetypenode.hpp"
#include "runtime/sharedRuntime.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
bool StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ }
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}
//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print("%s", names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}

void ParmNode::dump_compact_spec(outputStream *st) const {
  if (_con < TypeFunc::Parms) {
    st->print("%s", names[_con]);
  } else {
    st->print("%d:", _con-TypeFunc::Parms);
    // unconditionally dump bottom_type
    bottom_type()->dump_on(st);
  }
}

// For a ParmNode, all immediate inputs and outputs are considered relevant
// both in compact and standard representation.
void ParmNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  this->collect_nodes(in_rel, 1, false, false);
  this->collect_nodes(out_rel, -1, false, false);
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* ReturnNode::Value(PhaseGVN* phase) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type* RethrowNode::Value(PhaseGVN* phase) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif
//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != NULL, "must be valid call site");
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(NULL) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)        return false;
    if (p->_method == NULL)              return true;   // bci is irrelevant
    if (p->_bci != q->_bci)              return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                          return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}
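
// Illustrative sketch, not part of the original sources: per JVMState, the
// debug info edges of the owning SafePointNode are laid out as
//
//   [ locals      : locoff() .. stkoff() )
//   [ expressions : stkoff() .. monoff() )
//   [ monitors    : monoff() .. scloff() )
//   [ scalarized  : scloff() .. endoff() )
//
// with caller states placed before the youngest state, which is why
// debug_start()/debug_end() above use of_depth(1)->locoff() and this
// state's endoff() to bracket the debug info of the whole inlining chain.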
#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop()));
      break;
    case Type::KlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass()));
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata()));
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop()));
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr()));
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con()));
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if (_method) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                   i,
                   OptoReg::regname(OptoReg::c_frame_pointer),
                   regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->cr();
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = NULL;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      } else if (cik->is_value_array_klass()) {
        ciKlass* cie = cik->as_value_array_klass()->base_element_klass();
        cie->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != NULL) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != NULL) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->cr();
  if (caller() != NULL) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                   ((caller() == NULL) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
    p->set_map(map);
  }
}

// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

// Mirror the stack size calculation in the deopt code
// How much stack space would we need at this point in the program in
// case of deoptimization?
int JVMState::interpreter_frame_size() const {
  const JVMState* jvms = this;
  int size = 0;
  int callee_parameters = 0;
  int callee_locals = 0;
  int extra_args = method()->max_stack() - stk_size();

  while (jvms != NULL) {
    int locks = jvms->nof_monitors();
    int temps = jvms->stk_size();
    bool is_top_frame = (jvms == this);
    ciMethod* method = jvms->method();

    int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(),
                                                                 temps + callee_parameters,
                                                                 extra_args,
                                                                 locks,
                                                                 callee_parameters,
                                                                 callee_locals,
                                                                 is_top_frame);
    size += frame_size;

    callee_parameters = method->size_of_parameters();
    callee_locals = method->max_locals();
    extra_args = 0;
    jvms = jvms->caller();
  }
  return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord;
}
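
// Illustrative note, not part of the original sources: the loop above walks
// from the youngest inlined frame to the root caller, summing one interpreter
// activation per JVMState. The previously visited frame's parameter and local
// counts are fed into the caller's size_activation() call because, after
// deoptimization, a caller's outgoing arguments overlap the callee's incoming
// locals in the interpreter frame layout, so the space must not be counted
// twice.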
//=============================================================================
bool CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  if (tf() != NULL)  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range_cc(); }
const Type* CallNode::Value(PhaseGVN* phase) const {
  if (!in(0) || phase->type(in(0)) == Type::TOP) {
    return Type::TOP;
  }
  return tf()->range_cc();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt) const {
  if (_entry_point == StubRoutines::store_value_type_fields_to_buf()) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match(const ProjNode *proj, const Matcher *match, const RegMask* mask) {
  uint con = proj->_con;
  const TypeTuple *range_cc = tf()->range_cc();
  if (con >= TypeFunc::Parms) {
    if (is_CallRuntime()) {
      if (con == TypeFunc::Parms) {
        uint ideal_reg = range_cc->field_at(TypeFunc::Parms)->ideal_reg();
        OptoRegPair regs = match->c_return_value(ideal_reg,true);
        RegMask rm = RegMask(regs.first());
        if (OptoReg::is_valid(regs.second())) {
          rm.Insert(regs.second());
        }
        return new MachProjNode(this,con,rm,ideal_reg);
      } else {
        assert(con == TypeFunc::Parms+1, "only one return value");
        assert(range_cc->field_at(TypeFunc::Parms+1) == Type::HALF, "");
        return new MachProjNode(this,con, RegMask::Empty, (uint)OptoReg::Bad);
      }
    } else {
      // The Call may return multiple values (value type fields): we
      // create one projection per returned value.
      assert(con <= TypeFunc::Parms+1 || ValueTypeReturnedAsFields, "only for multi value return");
      uint ideal_reg = range_cc->field_at(con)->ideal_reg();
      return new MachProjNode(this, con, mask[con-TypeFunc::Parms], ideal_reg);
    }
  }

  switch (con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  assert((t_oop != NULL), "sanity");
  if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy") != 0) {
    const TypeTuple* args = _tf->domain_sig();
    Node* dest = NULL;
    // Stubs that can be called once an ArrayCopyNode is expanded have
    // different signatures. Look for the second pointer argument,
    // that is the destination of the copy.
    for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
      if (args->field_at(i)->isa_ptr()) {
        j++;
        if (j == 2) {
          dest = in(i);
          break;
        }
      }
    }
    guarantee(dest != NULL, "Call had only one ptr in, broken IR!");
    if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) {
      return true;
    }
    return false;
  }
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out_or_null(TypeFunc::Parms);
      if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != NULL) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_getter()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL;
      if (proj != NULL) {
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain_cc();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain_cc();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

bool CallNode::has_debug_use(Node *n) {
  assert(jvms() != NULL, "jvms should not be null");
  for (uint i = jvms()->debug_start(); i < jvms()->debug_end(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call
// or 'this' if there are several CheckCastPP or unexpected uses
// or returns NULL if there is no one.
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out_or_null(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    } else if (!use->is_Initialize() &&
               !use->is_AddP() &&
               use->Opcode() != Op_MemBarStoreStore) {
      // Expected uses are restricted to a CheckCastPP, an Initialize
      // node, a MemBarStoreStore (clone) and AddP nodes. If we
      // encounter any other use (a Phi node can be seen in rare
      // cases) return this to prevent incorrect optimizations.
      return this;
    }
  }
  return cast;
}


CallProjections* CallNode::extract_projections(bool separate_io_proj, bool do_asserts) {
  uint max_res = TypeFunc::Parms-1;
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    max_res = MAX2(max_res, pn->_con);
  }

  assert(max_res < _tf->range_cc()->cnt(), "result out of bounds");

  uint projs_size = sizeof(CallProjections);
  if (max_res > TypeFunc::Parms) {
    projs_size += (max_res-TypeFunc::Parms)*sizeof(Node*);
  }
  char* projs_storage = resource_allocate_bytes(projs_size);
  CallProjections* projs = new(projs_storage)CallProjections(max_res - TypeFunc::Parms + 1);

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj[0] = pn;
      break;
    default:
      assert(pn->_con <= max_res, "unexpected projection from allocation node.");
      projs->resproj[pn->_con-TypeFunc::Parms] = pn;
      break;
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  do_asserts = do_asserts && !Compile::current()->inlining_incrementally();
  assert(!do_asserts || projs->fallthrough_proj      != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_memproj   != NULL, "must be found");
  assert(!do_asserts || projs->fallthrough_ioproj    != NULL, "must be found");
  assert(!do_asserts || projs->catchall_catchproj    != NULL, "must be found");
  if (separate_io_proj) {
    assert(!do_asserts || projs->catchall_memproj    != NULL, "must be found");
    assert(!do_asserts || projs->catchall_ioproj     != NULL, "must be found");
  }
  return projs;
}
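
// Illustrative sketch, not part of the original sources: for a Java call
// that can throw, the projection shape walked by extract_projections() is
// roughly
//
//          Call
//         /  |  \
//   Proj(Control)  Proj(I_O)  Proj(Memory)  Proj(Parms..)  <- result(s)
//        |
//      Catch
//      /     \
//   CatchProj(fall_through_index)   CatchProj(catch_all_index)
//
// The _is_io_use flag distinguishes the exceptional (catch-all) I/O and
// memory projections from the fall-through ones.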
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this method handle call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}

bool CallNode::is_call_to_arraycopystub() const {
  if (_name != NULL && strstr(_name, "arraycopy") != 0) {
    return true;
  }
  return false;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
bool CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method &&
         _override_symbolic_info == call._override_symbolic_info;
}

void CallJavaNode::copy_call_debug_info(PhaseIterGVN* phase, CallNode *oldcall) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = oldcall->tf()->domain_sig()->cnt();
  uint new_dbg_start = tf()->domain_sig()->cnt();
  int jvms_adj = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == req(), "argument count mismatch");
  Compile* C = phase->C;

  // SafePointScalarObject node could be referenced several times in debug info.
  // Use Dict to record cloned nodes.
  Dict* sosn_map = new Dict(cmpkey,hashkey);
  for (uint i = old_dbg_start; i < oldcall->req(); i++) {
    Node* old_in = oldcall->in(i);
    // Clone old SafePointScalarObjectNodes, adjusting their field contents.
    if (old_in != NULL && old_in->is_SafePointScalarObject()) {
      SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
      uint old_unique = C->unique();
      Node* new_in = old_sosn->clone(sosn_map);
      if (old_unique != C->unique()) { // New node?
        new_in->set_req(0, C->root()); // reset control edge
        new_in = phase->transform(new_in); // Register new node.
      }
      old_in = new_in;
    }
    add_req(old_in);
  }

  // JVMS may be shared so clone it before we modify it
  set_jvms(oldcall->jvms() != NULL ? oldcall->jvms()->clone_deep(C) : NULL);
  for (JVMState *jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(this);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_scloff(jvms->scloff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}

#ifdef ASSERT
bool CallJavaNode::validate_symbolic_info() const {
  if (method() == NULL) {
    return true; // call into runtime or uncommon trap
  }
  Bytecodes::Code bc = jvms()->method()->java_code_at_bci(_bci);
  if (EnableValhalla && (bc == Bytecodes::_if_acmpeq || bc == Bytecodes::_if_acmpne)) {
    return true;
  }
  ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci);
  ciMethod* callee = method();
  if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) {
    assert(override_symbolic_info(), "should be set");
  }
  assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info");
  return true;
}
#endif

#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}

void CallJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
bool CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con() &&
        call->in(TypeFunc::Parms)->bottom_type()->isa_int())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
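
// Illustrative note, not part of the original sources: an "uncommon_trap"
// call carries its trap request as a constant int in the first parameter
// slot (TypeFunc::Parms), which is why extract_uncommon_trap_request() can
// simply read the constant back out of that input's type. The request is
// decoded elsewhere with Deoptimization::trap_request_reason() and
// format_trap_request(), as seen in the uses below.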
bool CallStaticJavaNode::remove_useless_allocation(PhaseGVN *phase, Node* ctl, Node* mem, Node* unc_arg) {
  // A split-if can cause the flattened array branch of an array load to
  // end in an uncommon trap. In that case, the allocation of the loaded
  // value and its initialization are useless. Eliminate them: use the
  // JVM state of the allocation to create a new uncommon trap call at
  // the load.
  if (ctl == NULL || ctl->is_top() || mem == NULL || mem->is_top() || !mem->is_MergeMem()) {
    return false;
  }
  PhaseIterGVN* igvn = phase->is_IterGVN();
  if (ctl->is_Region()) {
    bool res = false;
    for (uint i = 1; i < ctl->req(); i++) {
      MergeMemNode* mm = mem->clone()->as_MergeMem();
      for (MergeMemStream mms(mm); mms.next_non_empty(); ) {
        Node* m = mms.memory();
        if (m->is_Phi() && m->in(0) == ctl) {
          mms.set_memory(m->in(i));
        }
      }
      if (remove_useless_allocation(phase, ctl->in(i), mm, unc_arg)) {
        res = true;
        if (!ctl->in(i)->is_Region()) {
          igvn->replace_input_of(ctl, i, phase->C->top());
        }
      }
      igvn->remove_dead_node(mm);
    }
    return res;
  }
  // verify the control flow is ok
  Node* c = ctl;
  Node* copy = NULL;
  Node* alloc = NULL;
  for (;;) {
    if (c == NULL || c->is_top()) {
      return false;
    }
    if (c->is_Proj() || c->is_Catch() || c->is_MemBar()) {
      c = c->in(0);
    } else if (c->Opcode() == Op_CallLeaf &&
               c->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value)) {
      copy = c;
      c = c->in(0);
    } else if (c->is_Allocate()) {
      Node* new_obj = c->as_Allocate()->result_cast();
      if (copy == NULL || new_obj == NULL) {
        return false;
      }
      Node* copy_dest = copy->in(TypeFunc::Parms + 2);
      if (copy_dest != new_obj) {
        return false;
      }
      alloc = c;
      break;
    } else {
      return false;
    }
  }

  JVMState* jvms = alloc->jvms();
  if (phase->C->too_many_traps(jvms->method(), jvms->bci(), Deoptimization::trap_request_reason(uncommon_trap_request()))) {
    return false;
  }

  Node* alloc_mem = alloc->in(TypeFunc::Memory);
  if (alloc_mem == NULL || alloc_mem->is_top()) {
    return false;
  }
  if (!alloc_mem->is_MergeMem()) {
    alloc_mem = MergeMemNode::make(alloc_mem);
  }

  // and that there's no unexpected side effect
  for (MergeMemStream mms2(mem->as_MergeMem(), alloc_mem->as_MergeMem()); mms2.next_non_empty2(); ) {
    Node* m1 = mms2.is_empty() ? mms2.base_memory() : mms2.memory();
    Node* m2 = mms2.memory2();

    for (uint i = 0; i < 100; i++) {
      if (m1 == m2) {
        break;
      } else if (m1->is_Proj()) {
        m1 = m1->in(0);
      } else if (m1->is_MemBar()) {
        m1 = m1->in(TypeFunc::Memory);
      } else if (m1->Opcode() == Op_CallLeaf &&
                 m1->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, OptoRuntime::load_unknown_value)) {
        if (m1 != copy) {
          return false;
        }
        m1 = m1->in(TypeFunc::Memory);
      } else if (m1->is_Allocate()) {
        if (m1 != alloc) {
          return false;
        }
        break;
      } else if (m1->is_MergeMem()) {
        MergeMemNode* mm = m1->as_MergeMem();
        int idx = mms2.alias_idx();
        if (idx == Compile::AliasIdxBot) {
          m1 = mm->base_memory();
        } else {
          m1 = mm->memory_at(idx);
        }
      } else {
        return false;
      }
    }
  }
  if (alloc_mem->outcnt() == 0) {
    igvn->remove_dead_node(alloc_mem);
  }

  address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point();
  CallNode* unc = new CallStaticJavaNode(OptoRuntime::uncommon_trap_Type(), call_addr, "uncommon_trap",
                                         jvms->bci(), NULL);
  unc->init_req(TypeFunc::Control, alloc->in(0));
  unc->init_req(TypeFunc::I_O, alloc->in(TypeFunc::I_O));
  unc->init_req(TypeFunc::Memory, alloc->in(TypeFunc::Memory));
  unc->init_req(TypeFunc::FramePtr,  alloc->in(TypeFunc::FramePtr));
  unc->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr));
  unc->init_req(TypeFunc::Parms+0, unc_arg);
  unc->set_cnt(PROB_UNLIKELY_MAG(4));
  unc->copy_call_debug_info(igvn, alloc->as_Allocate());

  igvn->replace_input_of(alloc, 0, phase->C->top());

  igvn->register_new_node_with_optimizer(unc);

  Node* ctrl = phase->transform(new ProjNode(unc, TypeFunc::Control));
  Node* halt = phase->transform(new HaltNode(ctrl, alloc->in(TypeFunc::FramePtr), "uncommon trap returned which should never happen"));
  phase->C->root()->add_req(halt);

  return true;
}


Node* CallStaticJavaNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (can_reshape && uncommon_trap_request() != 0) {
    if (remove_useless_allocation(phase, in(0), in(TypeFunc::Memory), in(TypeFunc::Parms))) {
      if (!in(0)->is_Region()) {
        PhaseIterGVN* igvn = phase->is_IterGVN();
        igvn->replace_input_of(this, 0, phase->C->top());
      }
      return this;
    }
  }
  return CallNode::Ideal(phase, can_reshape);
}


#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}

void CallStaticJavaNode::dump_compact_spec(outputStream* st) const {
  if (_method) {
    _method->print_short_name(st);
  } else if (_name) {
    st->print("%s", _name);
  } else {
    st->print("<?>");
  }
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
bool CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
bool CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  if (_entry_point == NULL) {
    // The call to that stub is a special case: its inputs are
    // multiple values returned from a call and so it should follow
    // the return convention.
    SharedRuntime::java_return_convention(sig_bt, parm_regs, argcnt);
    return;
  }
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

uint CallLeafNoFPNode::match_edge(uint idx) const {
  // Null entry point is a special case for which the target is in a
  // register. Need to match that edge.
  return entry_point() == NULL && idx == TypeFunc::Parms;
}

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc - 1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
bool SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}
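
// Illustrative note, not part of the original sources: the "next exception"
// link is threaded through the first precedence edge (the slot at req(),
// just past the required inputs), so a map with a pending exception state
// satisfies len() > req(). JVMState::dump_on() above walks this chain the
// same way via _map->in(_map->req()).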
//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node* SafePointNode::Identity(PhaseGVN* phase) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode.
      // If the loop dies, they will be removed together.
      if (has_out_with(Op_OuterStripMinedLoopEnd)) {
        return this;
      }
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type* SafePointNode::Value(PhaseGVN* phase) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
  _replaced_nodes.dump(st);
}

// The related nodes of a SafepointNode are all data inputs, excluding the
// control boundary, as well as all outputs till level 2 (to include projection
// nodes and targets). In compact mode, just include inputs till level 1 and
// outputs as before.
void SafePointNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, false);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) {
  assert(Opcode() == Op_SafePoint, "only value for safepoint in loops");
  int nb = igvn->C->root()->find_prec_edge(this);
  if (nb != -1) {
    igvn->C->root()->rm_prec(nb);
  }
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
  _first_index(first_index),
  _n_fields(n_fields)
#ifdef ASSERT
  , _alloc(alloc)
#endif
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
bool SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
             first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node,
                           Node* initial_test,
                           ValueTypeBaseNode* value_node)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  _is_allocation_MemBar_redundant = false;
  _larval = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  init_req( ValueNode          , value_node);
  // DefaultValue defaults to NULL
  // RawDefaultValue defaults to NULL
  // StorageProperties defaults to NULL
  C->add_macro_node(this);
}
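
// Illustrative note, not part of the original sources: an AllocateNode is a
// macro node (Flag_is_macro, add_macro_node above). It stays in this compact
// call-like form through the optimizer and is only expanded into the actual
// allocation/slow-path subgraph late, during macro expansion. The extra edges
// beyond the standard TypeFunc slots (AllocSize, KlassNode, InitialTest,
// ALength, ValueNode, ...) carry the operands that expansion will need.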
void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer)
{
  assert(initializer != NULL &&
         initializer->is_object_constructor_or_class_initializer(),
         "unexpected initializer method");
  BCEscapeAnalyzer* analyzer = initializer->get_bcea();
  if (analyzer == NULL) {
    return;
  }

  // Allocation node is first parameter in its initializer
  if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) {
    _is_allocation_MemBar_redundant = true;
  }
}

Node* AllocateNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  // Check for unused value type allocation
  if (can_reshape && in(AllocateNode::ValueNode) != NULL &&
      outcnt() != 0 && result_cast() == NULL) {
    // Remove allocation by replacing the projection nodes with its inputs
    InitializeNode* init = initialization();
    PhaseIterGVN* igvn = phase->is_IterGVN();
    CallProjections* projs = extract_projections(true, false);
    assert(projs->nb_resproj <= 1, "unexpected number of results");
    if (projs->fallthrough_catchproj != NULL) {
      igvn->replace_node(projs->fallthrough_catchproj, in(TypeFunc::Control));
    }
    if (projs->fallthrough_memproj != NULL) {
      igvn->replace_node(projs->fallthrough_memproj, in(TypeFunc::Memory));
    }
    if (projs->catchall_memproj != NULL) {
      igvn->replace_node(projs->catchall_memproj, phase->C->top());
    }
    if (projs->fallthrough_ioproj != NULL) {
      igvn->replace_node(projs->fallthrough_ioproj, in(TypeFunc::I_O));
    }
    if (projs->catchall_ioproj != NULL) {
      igvn->replace_node(projs->catchall_ioproj, phase->C->top());
    }
    if (projs->catchall_catchproj != NULL) {
      igvn->replace_node(projs->catchall_catchproj, phase->C->top());
    }
    if (projs->resproj[0] != NULL) {
      // Remove MemBarStoreStore user as well
      for (DUIterator_Fast imax, i = projs->resproj[0]->fast_outs(imax); i < imax; i++) {
        MemBarStoreStoreNode* mb = projs->resproj[0]->fast_out(i)->isa_MemBarStoreStore();
        if (mb != NULL && mb->outcnt() == 2) {
          mb->remove(igvn);
          --i; --imax;
        }
      }
      igvn->replace_node(projs->resproj[0], phase->C->top());
    }
    igvn->replace_node(this, phase->C->top());
    if (init != NULL) {
      Node* ctrl_proj = init->proj_out_or_null(TypeFunc::Control);
      Node* mem_proj = init->proj_out_or_null(TypeFunc::Memory);
      if (ctrl_proj != NULL) {
        igvn->replace_node(ctrl_proj, init->in(TypeFunc::Control));
      }
      if (mem_proj != NULL) {
        igvn->replace_node(mem_proj, init->in(TypeFunc::Memory));
      }
    }
    return NULL;
  }

  return CallNode::Ideal(phase, can_reshape);
}

Node* AllocateNode::make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem) {
  Node* mark_node = NULL;
  // For now only enable fast locking for non-array types
  if ((EnableValhalla || UseBiasedLocking) && Opcode() == Op_Allocate) {
    Node* klass_node = in(AllocateNode::KlassNode);
    Node* proto_adr = phase->transform(new AddPNode(klass_node, klass_node, phase->MakeConX(in_bytes(Klass::prototype_header_offset()))));
    mark_node = LoadNode::make(*phase, control, mem, proto_adr, TypeRawPtr::BOTTOM, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  } else {
    mark_node = phase->MakeConX(markWord::prototype().value());
  }
  mark_node = phase->transform(mark_node);
  // Avoid returning a constant (old node) here because this method is used by LoadNode::Ideal
  return new OrXNode(mark_node, phase->MakeConX(_larval ? markWord::larval_state_pattern : 0));
}

//=============================================================================
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  Node* res = SafePointNode::Ideal(phase, can_reshape);
  if (res != NULL) {
    return res;
  }
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out_or_null(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node* halt = new HaltNode(nproj, frame, "unexpected negative array length");
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}

// Retrieve the length from the AllocateArrayNode.  Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != NULL, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != NULL && length_type != NULL) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             (length_type->is_con() && narrow_length_type->is_con() &&
              (narrow_length_type->_hi <= length_type->_lo)) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return NULL if new nodes are not allowed
      if (!allow_new_nodes) return NULL;
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
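      // (Added illustrative note: for 'new int[n]' this fall-through path
      // is only reached when the negative-length check passes, so the
      // length can be narrowed to a non-negative range; pinning the CastII
      // on the initialization control below keeps that fact from floating
      // above the check.)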
      InitializeNode* init = initialization();
      assert(init != NULL, "initialization not found");
      length = new CastIINode(length, narrow_length_type);
      length->set_req(0, init->proj_out_or_null(0));
    }
  }

  return length;
}

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant, meaning that we don't introduce new lock operations on
// some paths so as to be able to eliminate it on others, a la PRE.
// This would probably require some more extensive graph manipulation
// to guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method, consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//     f();
//     s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.
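// For example (an illustrative sketch, not from the original comments),
// a shape like
//
//   s();
//   if (q)
//     f();
//   s();
//
// is left alone: control flow diverges between the unlock of the first
// call and the lock of the second, so the pair is not treated as
// redundant.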
// A formulation similar to partial redundancy elimination, computing
// the availability of unlocking and the anticipatability of locking
// at a program point, would allow detection of fully redundant locking
// with some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                | \\------+
//                |  \       \
//                |  BoxLock   \
//                |  |   |      \
//                |  |    \      \
//                |  |    FastLock
//                |  |   /
//                |  |  /
//                |  |  |
//
//               Lock
//                |
//            Proj #0
//                |
//            MembarAcquire
//                |
//            Proj #0
//
//            MembarRelease
//                |
//            Proj #0
//                |
//             Unlock
//                |
//            Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to turn the operation
// into a NOP.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
      Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
      if (lock_obj->eqv_uncast(unlock_obj) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
    Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node());
    if (lock_obj->eqv_uncast(unlock_obj) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
        Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node());
        Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node());
        if (lock_obj->eqv_uncast(lock1_obj) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

void AbstractLockNode::dump_spec(outputStream* st) const {
  st->print("%s ", _kind_names[_kind]);
  CallNode::dump_spec(st);
}

void AbstractLockNode::dump_compact_spec(outputStream* st) const {
  st->print("%s", _kind_names[_kind]);
}

// The related set of lock nodes includes the control boundary.
void AbstractLockNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
  if (compact) {
    this->collect_nodes(in_rel, 1, false, false);
  } else {
    this->collect_nodes_in_all_data(in_rel, true);
  }
  this->collect_nodes(out_rel, -2, false, false);
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
      !obj_type->isa_valuetype() && !obj_type->is_valuetypeptr()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA.  Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc1");
#endif
      this->set_non_esc_obj();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if and the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
#ifdef ASSERT
          lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened");
#endif
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  return is_nested_lock_region(NULL);
}

// c is used for access to the compilation log; no logging if NULL
bool LockNode::is_nested_lock_region(Compile * c) {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_1");
#endif
    return false; // External lock or it is not Box (Phi node).
  }

  // Ignore complex cases: merged locks or multiple locks.
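  // (Added note, roughly: "merged" means the object or box reaches this
  // lock through a Phi because different locks were taken on different
  // control paths and joined before this point.)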
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj)) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2a");
#endif
    return false;
  }
  if (unique_lock != this) {
#ifdef ASSERT
    this->log_lock_optimization(c, "eliminate_lock_INLR_2b");
#endif
    return false;
  }

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  obj = bs->step_over_gc_barrier(obj);
  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      obj_node = bs->step_over_gc_barrier(obj_node);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
#ifdef ASSERT
  this->log_lock_optimization(c, "eliminate_lock_INLR_3");
#endif
  return false;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  const Type* obj_type = phase->type(obj_node());
  if (can_reshape && EliminateLocks && !is_non_esc_obj() &&
      !obj_type->isa_valuetype() && !obj_type->is_valuetypeptr()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA.  Replace coarsened flag
      // to eliminate all associated locks/unlocks.
#ifdef ASSERT
      this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2");
#endif
      this->set_non_esc_obj();
    }
  }
  return result;
}

const char * AbstractLockNode::kind_as_string() const {
  return is_coarsened()    ? "coarsened" :
         is_nested()       ? "nested" :
         is_non_esc_obj()  ?
"non_escaping" : 2348 "?"; 2349 } 2350 2351 void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag) const { 2352 if (C == NULL) { 2353 return; 2354 } 2355 CompileLog* log = C->log(); 2356 if (log != NULL) { 2357 log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'", 2358 tag, is_Lock(), C->compile_id(), 2359 is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?", 2360 kind_as_string()); 2361 log->stamp(); 2362 log->end_head(); 2363 JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms(); 2364 while (p != NULL) { 2365 log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); 2366 p = p->caller(); 2367 } 2368 log->tail(tag); 2369 } 2370 } 2371 2372 bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase) { 2373 if (dest_t->is_known_instance() && t_oop->is_known_instance()) { 2374 return dest_t->instance_id() == t_oop->instance_id(); 2375 } 2376 2377 if (dest_t->isa_instptr() && !dest_t->klass()->equals(phase->C->env()->Object_klass())) { 2378 // clone 2379 if (t_oop->isa_aryptr()) { 2380 return false; 2381 } 2382 if (!t_oop->isa_instptr()) { 2383 return true; 2384 } 2385 if (dest_t->klass()->is_subtype_of(t_oop->klass()) || t_oop->klass()->is_subtype_of(dest_t->klass())) { 2386 return true; 2387 } 2388 // unrelated 2389 return false; 2390 } 2391 2392 if (dest_t->isa_aryptr()) { 2393 // arraycopy or array clone 2394 if (t_oop->isa_instptr()) { 2395 return false; 2396 } 2397 if (!t_oop->isa_aryptr()) { 2398 return true; 2399 } 2400 2401 const Type* elem = dest_t->is_aryptr()->elem(); 2402 if (elem == Type::BOTTOM) { 2403 // An array but we don't know what elements are 2404 return true; 2405 } 2406 2407 dest_t = dest_t->is_aryptr()->with_field_offset(Type::OffsetBot)->add_offset(Type::OffsetBot)->is_oopptr(); 2408 t_oop = t_oop->is_aryptr()->with_field_offset(Type::OffsetBot); 2409 uint dest_alias = phase->C->get_alias_index(dest_t); 2410 uint t_oop_alias = phase->C->get_alias_index(t_oop); 2411 2412 return dest_alias == t_oop_alias; 2413 } 2414 2415 return true; 2416 }