
src/hotspot/share/opto/subnode.cpp

 698 
 699 //------------------------------Idealize---------------------------------------
 700 Node *CmpINode::Ideal( PhaseGVN *phase, bool can_reshape ) {
 701   if (phase->type(in(2))->higher_equal(TypeInt::ZERO)) {
 702     switch (in(1)->Opcode()) {
 703     case Op_CmpL3:              // Collapse a CmpL3/CmpI into a CmpL
 704       return new CmpLNode(in(1)->in(1),in(1)->in(2));
 705     case Op_CmpF3:              // Collapse a CmpF3/CmpI into a CmpF
 706       return new CmpFNode(in(1)->in(1),in(1)->in(2));
 707     case Op_CmpD3:              // Collapse a CmpD3/CmpI into a CmpD
 708       return new CmpDNode(in(1)->in(1),in(1)->in(2));
 709     //case Op_SubI:
 710       // If (x - y) cannot overflow, then ((x - y) <?> 0)
 711       // can be turned into (x <?> y).
 712       // This is handled (with more general cases) by Ideal_sub_algebra.
 713     }
 714   }
 715   return NULL;                  // No change
 716 }
 717 
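The commented-out Op_SubI case is guarded by an overflow condition for a reason: under wrap-around 32-bit arithmetic, ((x - y) <?> 0) and (x <?> y) can disagree, so the rewrite is only legal when the subtraction provably cannot overflow. A minimal standalone sketch of the hazard (not part of subnode.cpp; the wrap is modeled with unsigned arithmetic because signed overflow is undefined behavior in C++):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t x = INT32_MIN;   // -2147483648
      int32_t y = 1;
      // Java int subtraction wraps around; model the wrap explicitly.
      int32_t diff = (int32_t)((uint32_t)x - (uint32_t)y);   // wraps to 2147483647
      std::printf("x < y       : %d\n", x < y);       // 1 (true)
      std::printf("(x - y) < 0 : %d\n", diff < 0);    // 0 (false): overflow flipped the sign
      return 0;
    }
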
 718 //------------------------------Ideal------------------------------------------
 719 Node* CmpLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
 720   if (in(1)->Opcode() == Op_OrL && in(1)->in(1)->Opcode() == Op_CastP2X && in(1)->in(2)->Opcode() == Op_CastP2X) {
 721     Node* a = in(1)->in(1)->in(1);
 722     Node* b = in(1)->in(2)->in(1);
 723     const Type* ta = phase->type(a);
 724     const Type* tb = phase->type(b);
 725     if (ta->is_zero_type() || tb->is_zero_type()) {
 726       if (Verbose) tty->print_cr("\n# NULL CHECK (CmpLNode::Ideal)");
 727       return new CmpPNode(a, b);
 728     } else if (!TypePtr::NULL_PTR->higher_equal(ta) || !TypePtr::NULL_PTR->higher_equal(tb)) {
 729       // One operand is never NULL, emit constant false
 730       if (Verbose) tty->print_cr("\n# CONSTANT FALSE (CmpLNode::Ideal)");
 731       set_req(1, phase->longcon(0));
 732       set_req(2, phase->longcon(1));
 733       return this;
 734     }
 735   }
 736   return NULL;
 737 }
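The Ideal rule above recognizes a CmpL of (CastP2X(a) | CastP2X(b)) against zero. The reasoning behind its two branches: OR-ing the raw bits of two oops yields zero exactly when both are null, so if one input is a known null constant the test reduces to a plain pointer comparison, and if either input is known non-null the test can never succeed. A standalone sketch of that reasoning (not part of subnode.cpp; it assumes the usual all-zero-bits representation of a null pointer):

    #include <cassert>
    #include <cstdint>

    // The OrL/CmpL-with-zero shape: true iff both pointer bit patterns are zero.
    static bool or_is_zero(const void* a, const void* b) {
      return (((uintptr_t)a) | ((uintptr_t)b)) == 0;
    }

    int main() {
      int x = 0;
      const void* p = &x;
      assert(or_is_zero(nullptr, nullptr));               // both null -> true
      assert(or_is_zero(nullptr, p) == (p == nullptr));   // left known null -> same as a == b
      assert(!or_is_zero(p, nullptr));                    // one side known non-null -> never true
      assert(!or_is_zero(p, p));
      return 0;
    }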
 738 
 739 
 740 //=============================================================================
 741 // Simplify a CmpL (compare 2 longs) node, based on local information.
 742 // If both inputs are constants, compare them.
 743 const Type *CmpLNode::sub( const Type *t1, const Type *t2 ) const {
 744   const TypeLong *r0 = t1->is_long(); // Handy access
 745   const TypeLong *r1 = t2->is_long();
 746 
 747   if( r0->_hi < r1->_lo )       // Range is always low?
 748     return TypeInt::CC_LT;
 749   else if( r0->_lo > r1->_hi )  // Range is always high?
 750     return TypeInt::CC_GT;
 751 
 752   else if( r0->is_con() && r1->is_con() ) { // comparing constants?
 753     assert(r0->get_con() == r1->get_con(), "must be equal");
 754     return TypeInt::CC_EQ;      // Equal results.
 755   } else if( r0->_hi == r1->_lo ) // Range is never high?
 756     return TypeInt::CC_LE;
 757   else if( r0->_lo == r1->_hi ) // Range is never low?
 758     return TypeInt::CC_GE;
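The range logic above reads the comparison result directly off the [_lo, _hi] bounds of the two inputs. A standalone restatement as a plain function over intervals (not part of subnode.cpp; Outcome, classify and UNKNOWN are illustrative names, with UNKNOWN standing in for the overlapping-ranges fall-through that the excerpt truncates):

    #include <cstdint>
    #include <cstdio>

    enum Outcome { LT, GT, EQ, LE, GE, UNKNOWN };

    static Outcome classify(int64_t lo0, int64_t hi0, int64_t lo1, int64_t hi1) {
      if (hi0 < lo1) return LT;                 // ranges disjoint, left entirely below
      if (lo0 > hi1) return GT;                 // ranges disjoint, left entirely above
      if (lo0 == hi0 && lo1 == hi1) return EQ;  // both constants; the disjoint cases are
                                                // already excluded, so they must be equal
      if (hi0 == lo1) return LE;                // ranges touch only at the left's upper end
      if (lo0 == hi1) return GE;                // ranges touch only at the left's lower end
      return UNKNOWN;                           // ranges overlap: any outcome is possible
    }

    int main() {
      std::printf("%d\n", classify(0, 5, 10, 20));   // 0 = LT
      std::printf("%d\n", classify(7, 7, 7, 7));     // 2 = EQ
      std::printf("%d\n", classify(0, 10, 10, 20));  // 3 = LE: left can never be greater
      return 0;
    }

The EQ case can assert that the two constants are equal because the disjoint-range cases were tested first, which mirrors the assert in the original.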


 936 
 937   // x.getClass() == int.class can never be true (for all primitive types)
 938   // Return a ConP(NULL) node for this case.
 939   if (mirror_type->is_classless()) {
 940     return phase->makecon(TypePtr::NULL_PTR);
 941   }
 942 
 943   // return the ConP(Foo.klass)
 944   assert(mirror_type->is_klass(), "mirror_type should represent a Klass*");
 945   return phase->makecon(TypeKlassPtr::make(mirror_type->as_klass()));
 946 }
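The helper above turns a constant java.lang.Class mirror into something the enclosing CmpP can fold on: a primitive mirror becomes a null constant, so a test like x.getClass() == int.class can only evaluate to false, while a reference mirror becomes its Klass* constant. A toy model of that mapping (not part of subnode.cpp; Klass and constant_mirror_to_klass here are illustrative stand-ins, not HotSpot types):

    #include <cstdio>

    struct Klass { const char* name; };        // stand-in for a Klass* constant
    static Klass string_klass{"java/lang/String"};

    // Primitive mirrors have no Klass*, so they map to null; reference mirrors
    // map to their Klass* constant.
    static const Klass* constant_mirror_to_klass(bool is_primitive, const Klass* k) {
      return is_primitive ? nullptr : k;
    }

    int main() {
      const Klass* x_klass = &string_klass;    // what loading x's klass would produce
      // x.getClass() == int.class: a non-null klass compared against null -> always false.
      std::printf("%d\n", x_klass == constant_mirror_to_klass(true, nullptr));         // 0
      // x.getClass() == String.class: an ordinary klass pointer comparison.
      std::printf("%d\n", x_klass == constant_mirror_to_klass(false, &string_klass));  // 1
      return 0;
    }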
 947 
 948 //------------------------------Ideal------------------------------------------
 949 // Normalize comparisons between Java mirror loads to compare the klass instead.
 950 //
 951 // Also check for the case of comparing an unknown klass loaded from the primary
 952 // super-type array vs a known klass with no subtypes.  This amounts to
 953 // checking to see if an unknown klass subtypes a known klass with no subtypes;
 954 // this only happens on an exact match.  We can shorten this test by 1 load.
 955 Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
 956   Node* pert = has_perturbed_operand();
 957   if (pert != NULL) {
 958     // Optimize new acmp
 959     Node* a = pert->in(AddPNode::Base); // unperturbed a
 960     Node* b = in(2);
 961     Node* cmp = phase->C->optimize_acmp(phase, a, b);
 962     if (cmp != NULL) {
 963       return cmp;
 964     }
 965     if ( TypePtr::NULL_PTR->higher_equal(phase->type(a)) &&
 966         !TypePtr::NULL_PTR->higher_equal(phase->type(b))) {
 967       // Operand 'b' is never null, swap operands to avoid null check
 968       Node* is_value = phase->C->load_is_value_bit(phase, b);
 969       set_req(1, phase->transform(new AddPNode(b, b, is_value)));
 970       set_req(2, a);
 971       return this;
 972     }
 973   } else {
 974     // Optimize old acmp with value type operands
 975     const TypeInstPtr* ta = phase->type(in(1))->isa_instptr();
 976     const TypeInstPtr* tb = phase->type(in(2))->isa_instptr();
 977     if (((ta != NULL && ta->is_loaded() && ta->is_value_based()) || (tb != NULL && tb->is_loaded() && tb->is_value_based())) &&
 978         (!TypePtr::NULL_PTR->higher_equal(phase->type(in(1))) || !TypePtr::NULL_PTR->higher_equal(phase->type(in(2))))) {
 979       // One operand is a value type and one operand is never null, fold to constant false
 980       if (Verbose) tty->print_cr("\n# CONSTANT FALSE");
 981       return new CmpINode(phase->intcon(0), phase->intcon(1));
 982     }
 983   }
 984 
 985   // Normalize comparisons between Java mirrors into comparisons of the low-
 986   // level klass, where a dependent load could be shortened.
 987   //
 988   // The new pattern has the nice effect of matching the same pattern used in the
 989   // fast path of instanceof/checkcast/Class.isInstance(), which allows a
 990   // redundant exact type check to be optimized away by GVN.
 991   // For example, in
 992   //   if (x.getClass() == Foo.class) {
 993   //     Foo foo = (Foo) x;
 994   //     // ... use foo ...
 995   //   }
 996   // a CmpPNode could be shared between if_acmpne and checkcast
 997   {
 998     Node* k1 = isa_java_mirror_load(phase, in(1));
 999     Node* k2 = isa_java_mirror_load(phase, in(2));
1000     Node* conk2 = isa_const_java_mirror(phase, in(2));
1001 
1002     if (k1 && (k2 || conk2)) {
1003       Node* lhs = k1;
1004       Node* rhs = (k2 != NULL) ? k2 : conk2;


1063   // %%% Do this after we fix TypeOopPtr:  Deps are expressive enough now.
1064 
1065   // Object arrays must have a base element type with no subtypes
1066   while (superklass->is_obj_array_klass()) {
1067     ciType* elem = superklass->as_obj_array_klass()->element_type();
1068     superklass = elem->as_klass();
1069   }
1070   if (superklass->is_instance_klass()) {
1071     ciInstanceKlass* ik = superklass->as_instance_klass();
1072     if (ik->has_subklass() || ik->is_interface())  return NULL;
1073     // Add a dependency if there is a chance that a subclass will be added later.
1074     if (!ik->is_final()) {
1075       phase->C->dependencies()->assert_leaf_type(ik);
1076     }
1077   }
1078 
1079   // Bypass the dependent load, and compare directly
1080   this->set_req(1,ldk2);
1081 
1082   return this;
1083 }
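The tail of CmpPNode::Ideal implements the shortcut announced in the header comment: when the known superklass has no subclasses, "X is a subtype of K" can hold only for X == K, so the dependent load from the primary super-type array can be bypassed and the klass pointers compared directly. A standalone model of that equivalence (not part of subnode.cpp; the Klass struct and is_subtype_of are illustrative, not the HotSpot implementation):

    #include <cassert>

    // Illustrative single-inheritance hierarchy.
    struct Klass {
      const Klass* super;
      explicit Klass(const Klass* s = nullptr) : super(s) {}
    };

    static bool is_subtype_of(const Klass* x, const Klass* k) {
      for (const Klass* c = x; c != nullptr; c = c->super) {
        if (c == k) return true;
      }
      return false;
    }

    int main() {
      Klass object;                // has subclasses in this model
      Klass leaf(&object);         // a klass with no subclasses (a "leaf type")
      Klass other(&object);
      // For a leaf klass, subtyping is exactly pointer equality, so the
      // subtype walk (the dependent load in the IR) can be skipped.
      assert(is_subtype_of(&leaf,  &leaf) == (&leaf  == &leaf));
      assert(is_subtype_of(&other, &leaf) == (&other == &leaf));
      assert(!is_subtype_of(&object, &leaf));
      return 0;
    }

The assert_leaf_type dependency recorded above is what keeps the shortcut valid: if a subclass of ik is loaded later, the dependency is violated and the compiled code is invalidated.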
1084 
1085 // Checks if one operand is perturbed and returns it
1086 Node* CmpPNode::has_perturbed_operand() const {
1087   // We always perturb the first operand
1088   AddPNode* addP = in(1)->isa_AddP();
1089   if (addP != NULL) {
1090     Node* base = addP->in(AddPNode::Base);
1091     if (base->is_top()) {
1092       // RawPtr comparison
1093       return NULL;
1094     }
1095     assert(UseNewAcmp, "unexpected perturbed oop");
1096     return in(1);
1097   }
1098   return NULL;
1099 }
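has_perturbed_operand recognizes the AddP that the new-acmp path installs on the first input; an AddP whose base is top is an ordinary raw-pointer address computation, not a perturbation. The idea behind the perturbation is to bias an oop by its is-value bit so that a value object can never pass the fast pointer-equality check, which is also why CmpPNode::Ideal above swaps operands: the provably non-null operand is the one whose bit gets loaded. A conceptual standalone sketch (not part of subnode.cpp; Object, is_value and the bit value 1 are illustrative assumptions, not the real mark-word encoding):

    #include <cstdint>
    #include <cstdio>

    struct Object {
      bool is_value;   // stand-in for the bit that load_is_value_bit() extracts
    };

    static uintptr_t is_value_bit(const Object* o) {
      return o->is_value ? 1u : 0u;   // any non-zero perturbation works for the sketch
    }

    // Models the perturbed acmp: the first operand is biased by its is-value bit
    // (the AddP recognized above), then compared against the plain second oop.
    // Note that the perturbed operand must be non-null for its bit to be loaded.
    static bool new_acmp_fast_path(const Object* a, const Object* b) {
      return ((uintptr_t)a + is_value_bit(a)) == (uintptr_t)b;
    }

    int main() {
      Object identity_obj{false};
      Object value_obj{true};
      std::printf("%d\n", new_acmp_fast_path(&identity_obj, &identity_obj));  // 1: same identity object
      std::printf("%d\n", new_acmp_fast_path(&value_obj, &value_obj));        // 0: value object never fast-path equal
      std::printf("%d\n", new_acmp_fast_path(&identity_obj, &value_obj));     // 0: different objects
      return 0;
    }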
1100 
1101 //=============================================================================
1102 //------------------------------sub--------------------------------------------
1103 // Simplify a CmpN (compare 2 pointers) node, based on local information.
1104 // If both inputs are constants, compare them.
1105 const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
1106   const TypePtr *r0 = t1->make_ptr(); // Handy access
1107   const TypePtr *r1 = t2->make_ptr();
1108 
1109   // Undefined inputs make for an undefined result
1110   if ((r0 == NULL) || (r1 == NULL) ||
1111       TypePtr::above_centerline(r0->_ptr) ||
1112       TypePtr::above_centerline(r1->_ptr)) {
1113     return Type::TOP;
1114   }
1115   if (r0 == r1 && r0->singleton()) {
1116     // Equal pointer constants (klasses, nulls, etc.)
1117     return TypeInt::CC_EQ;
1118   }

