/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/replacednodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
 public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};
//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
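//
// A minimal sketch of walking the scope chain (illustrative; the iteration
// idiom is standard, the printing is ours):
//
//   for (JVMState* s = youngest_jvms; s != NULL; s = s->caller()) {
//     if (s->has_method()) {
//       tty->print_cr("depth %d, bci %d", (int)s->depth(), s->bci());
//     }
//   }
//
// The youngest state (the deepest inlinee) is visited first; the root
// caller, whose depth() is 1, is visited last.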
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }
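
  // Worked example of the layout above (values illustrative): with
  // locoff() == 10 and three locals, stkoff() == 13.  With sp() == 2,
  // argoff() == stkoff() + sp() == 15, i.e. outgoing arguments are the
  // live top of the expression stack.  With a four-slot stack area,
  // monoff() == 17, and one monitor occupies two consecutive edges:
  //
  //   monitor_box_offset(0) == 17   // the BoxLockNode edge
  //   monitor_obj_offset(0) == 18   // the locked-object edge
  //   is_monitor_box(17) == true,   is_monitor_box(18) == false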

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in in-array after adding an edge.
  int       interpreter_frame_size() const;

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?
  ReplacedNodes   _replaced_nodes; // During parsing: list of pairs of nodes from calls to GraphKit::replace_in_map()

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.
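  //
  // Illustrative examples of that convention (the type values are real;
  // the pairing with node kinds below is typical, not an exhaustive rule):
  //
  //   adr_type == NULL                -- no memory effects at all (a plain
  //                                      safepoint, the constructor default)
  //   adr_type == TypeRawPtr::BOTTOM  -- clobbers only raw memory (used by
  //                                      AbstractLockNode below)
  //   adr_type == TypePtr::BOTTOM     -- clobbers all of memory (used by
  //                                      CallJavaNode below)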

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

 private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

 public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }
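
  // A minimal sketch of draining that list (illustrative; GraphKit has its
  // own helpers for this):
  //
  //   for (SafePointNode* ex = map->next_exception(); ex != NULL; ) {
  //     SafePointNode* next = ex->next_exception();
  //     ex->set_next_exception(NULL);   // detach before processing
  //     ... merge 'ex' into the caller's exception state ...
  //     ex = next;
  //   }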

  // Helper methods to operate on replaced nodes
  ReplacedNodes replaced_nodes() const {
    return _replaced_nodes;
  }

  void set_replaced_nodes(ReplacedNodes replaced_nodes) {
    _replaced_nodes = replaced_nodes;
  }

  void clone_replaced_nodes() {
    _replaced_nodes.clone();
  }
  void record_replaced_node(Node* initial, Node* improved) {
    _replaced_nodes.record(initial, improved);
  }
  void transfer_replaced_nodes_from(SafePointNode* sfpt, uint idx = 0) {
    _replaced_nodes.transfer_from(sfpt->_replaced_nodes, idx);
  }
  void delete_replaced_nodes() {
    _replaced_nodes.reset();
  }
  void apply_replaced_nodes() {
    _replaced_nodes.apply(this);
  }
  void merge_replaced_nodes_with(SafePointNode* sfpt) {
    _replaced_nodes.merge_with(sfpt->_replaced_nodes);
  }
  bool has_replaced_nodes() const {
    return !_replaced_nodes.is_empty();
  }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;
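
// A minimal usage sketch (illustrative): CallNode::extract_projections,
// declared below, fills in this container; the caller then rewires the
// individual projections, e.g. during macro expansion:
//
//   CallProjections projs;
//   call->extract_projections(&projs, true /* separate_io_proj */);
//   if (projs.fallthrough_catchproj != NULL) {
//     ... redirect uses of the normal control path ...
//   }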

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};
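
// A minimal sketch of retrieving a call's oop result via the two helpers
// above (illustrative):
//
//   if (call->returns_pointer()) {
//     Node* res = call->result_cast();  // unique CheckCastPP or result
//     if (res != NULL) {                // projection; NULL if ambiguous
//       ... treat 'res' as the object returned by the call ...
//     }
//   }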

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name;      // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Late inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};
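
// A minimal sketch of inspecting an uncommon trap (illustrative; the
// Deoptimization helpers live in the runtime, not in this header):
//
//   int req = call->uncommon_trap_request();
//   if (req != 0) {
//     Deoptimization::DeoptReason reason =
//         Deoptimization::trap_request_reason(req);
//     ... decide how to treat the trap based on 'reason' ...
//   }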

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool  guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
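// A minimal sketch of telling the doubled projections apart (illustrative;
// ProjNode::_is_io_use is the flag mentioned above):
//
//   for (DUIterator_Fast imax, i = alloc->fast_outs(imax); i < imax; i++) {
//     ProjNode* pn = alloc->fast_out(i)->isa_Proj();
//     if (pn == NULL)  continue;
//     if (pn->_con == TypeFunc::I_O) {
//       if (pn->_is_io_use) { ... exception-path i_o ... }
//       else                { ... normal-path i_o ... }
//     }
//   }
//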
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }
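
  // A minimal usage sketch of the pattern-matchers above (illustrative):
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   if (alloc != NULL) {
  //     // 'ptr' points 'offset' bytes into the object created by 'alloc'
  //     Node* klass_node = alloc->in(KlassNode);
  //   }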

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *  obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *  box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *  fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void    set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC;}

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase){ return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};
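
// Illustrative correspondence between the three lock "parameters" above and
// the AbstractLockNode accessors:
//
//   lock->obj_node()      == lock->in(TypeFunc::Parms + 0)  // object to lock
//   lock->box_node()      == lock->in(TypeFunc::Parms + 1)  // BoxLockNode
//   lock->fastlock_node() == lock->in(TypeFunc::Parms + 2)  // FastLockNode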

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

class GraphKit;

class ArrayCopyNode : public CallNode {
private:

  // What kind of arraycopy variant is this?
  enum {
    None,            // not set yet
    ArrayCopy,       // System.arraycopy()
    CloneBasic,      // A clone that can be copied by 64 bit chunks
    CloneOop,        // An oop array clone
    CopyOf,          // Arrays.copyOf()
    CopyOfRange      // Arrays.copyOfRange()
  } _kind;

#ifndef PRODUCT
  static const char* _kind_names[CopyOfRange+1];
#endif
  // Is the alloc obtained with
  // AllocateArrayNode::Ideal_array_allocation() tightly coupled
  // (i.e. does the arraycopy immediately follow the allocation)?
  // We cache the result of LibraryCallKit::tightly_coupled_allocation
  // here because it's much easier to find whether there's a tightly
  // coupled allocation at parse time than at macro expansion time. At
  // macro expansion time, for every use of the allocation node we
  // would need to figure out whether it happens after the arraycopy (and
  // can be ignored) or between the allocation and the arraycopy. At
  // parse time, it's straightforward because whatever happens after
  // the arraycopy is not parsed yet, so it doesn't exist when
  // LibraryCallKit::tightly_coupled_allocation() is called.
  bool _alloc_tightly_coupled;

  bool _arguments_validated;
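
  // A minimal sketch of the tightly coupled shape, in terms of the Java
  // source rather than nodes (illustrative):
  //
  //   int[] dest = new int[n];                // AllocateArrayNode
  //   System.arraycopy(src, 0, dest, 0, n);   // this ArrayCopyNode; no
  //                                           // other use of 'dest' in
  //                                           // between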

  static const TypeFunc* arraycopy_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[Src]       = TypeInstPtr::BOTTOM;
    fields[SrcPos]    = TypeInt::INT;
    fields[Dest]      = TypeInstPtr::BOTTOM;
    fields[DestPos]   = TypeInt::INT;
    fields[Length]    = TypeInt::INT;
    fields[SrcLen]    = TypeInt::INT;
    fields[DestLen]   = TypeInt::INT;
    fields[SrcKlass]  = TypeKlassPtr::BOTTOM;
    fields[DestKlass] = TypeKlassPtr::BOTTOM;
    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  ArrayCopyNode(Compile* C, bool alloc_tightly_coupled);

  int get_count(PhaseGVN *phase) const;
  static const TypePtr* get_address_type(PhaseGVN *phase, Node* n);

  Node* try_clone_instance(PhaseGVN *phase, bool can_reshape, int count);
  bool finish_transform(PhaseGVN *phase, bool can_reshape,
                        Node* ctl, Node *mem);

public:

  enum {
    Src = TypeFunc::Parms,
    SrcPos,
    Dest,
    DestPos,
    Length,
    SrcLen,
    DestLen,
    SrcKlass,
    DestKlass,
    ParmLimit
  };

  static ArrayCopyNode* make(GraphKit* kit, bool may_throw,
                             Node* src, Node* src_offset,
                             Node* dest, Node* dest_offset,
                             Node* length,
                             bool alloc_tightly_coupled,
                             Node* src_klass = NULL, Node* dest_klass = NULL,
                             Node* src_length = NULL, Node* dest_length = NULL);

  void connect_outputs(GraphKit* kit);

  bool is_arraycopy()           const { assert(_kind != None, "should be set"); return _kind == ArrayCopy; }
  bool is_arraycopy_validated() const { assert(_kind != None, "should be set"); return _kind == ArrayCopy && _arguments_validated; }
  bool is_clonebasic()          const { assert(_kind != None, "should be set"); return _kind == CloneBasic; }
  bool is_cloneoop()            const { assert(_kind != None, "should be set"); return _kind == CloneOop; }
  bool is_copyof()              const { assert(_kind != None, "should be set"); return _kind == CopyOf; }
  bool is_copyofrange()         const { assert(_kind != None, "should be set"); return _kind == CopyOfRange; }

  void set_arraycopy(bool validated) { assert(_kind == None, "shouldn't be set yet"); _kind = ArrayCopy; _arguments_validated = validated; }
  void set_clonebasic()              { assert(_kind == None, "shouldn't be set yet"); _kind = CloneBasic; }
  void set_cloneoop()                { assert(_kind == None, "shouldn't be set yet"); _kind = CloneOop; }
  void set_copyof()                  { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOf; _arguments_validated = false; }
  void set_copyofrange()             { assert(_kind == None, "shouldn't be set yet"); _kind = CopyOfRange; _arguments_validated = false; }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual bool guaranteed_safepoint() { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  bool is_alloc_tightly_coupled() const { return _alloc_tightly_coupled; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
#endif // SHARE_VM_OPTO_CALLNODE_HPP