
src/hotspot/share/c1/c1_LIRGenerator.cpp

Old version:

1642                    obj_item, LIR_OprFact::intConst(obj_offset), temp,
1643                    NULL, NULL);
1644     access_store_at(decorators, field_type,
1645                     elm_item, LIR_OprFact::intConst(elm_offset), temp,
1646                     NULL, NULL);
1647     }
1648   }
1649 }
1650 
1651 void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) {
1652   LIR_Opr array_klass_reg = new_register(T_METADATA);
1653 
1654   __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
1655   LIR_Opr layout = new_register(T_INT);
1656   __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1657   __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
1658   __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
1659   __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
1660 }
1661 
1662 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1663   assert(x->is_pinned(),"");
1664   bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
1665   bool needs_range_check = x->compute_needs_range_check();
1666   bool use_length = x->length() != NULL;
1667   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1668   bool needs_store_check = obj_store && !is_loaded_flattened_array &&
1669                                         (x->value()->as_Constant() == NULL ||
1670                                          !get_jobject_constant(x->value())->is_null_object() ||
1671                                          x->should_profile());
1672 
1673   LIRItem array(x->array(), this);
1674   LIRItem index(x->index(), this);
1675   LIRItem value(x->value(), this);
1676   LIRItem length(this);
1677 
1678   array.load_item();
1679   index.load_nonconstant();
1680 
1681   if (use_length && needs_range_check) {
1682     length.set_instruction(x->length());
1683     length.load_item();
1684   }
1685 
1686   if (needs_store_check || x->check_boolean()
1687       || is_loaded_flattened_array || x->array()->maybe_flattened_array()) {
1688     value.load_item();
1689   } else {
1690     value.load_for_store(x->elt_type());
1691   }
1692 
1693   set_no_result(x);
1694 
1695   // the CodeEmitInfo must be duplicated for each different
1696   // LIR-instruction because spilling can occur anywhere between two
1697   // instructions and so the debug information must be different
1698   CodeEmitInfo* range_check_info = state_for(x);
1699   CodeEmitInfo* null_check_info = NULL;
1700   if (x->needs_null_check()) {
1701     null_check_info = new CodeEmitInfo(range_check_info);
1702   }
1703 
1704   if (GenerateRangeChecks && needs_range_check) {
1705     if (use_length) {
1706       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1707       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));


1712     }
1713   }
1714 
1715   if (GenerateArrayStoreCheck && needs_store_check) {
1716     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1717     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1718   }
1719 
1720   if (is_loaded_flattened_array) {
1721     if (!x->is_exact_flattened_array_store()) {
1722       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1723       ciKlass* element_klass = x->array()->declared_type()->as_value_array_klass()->element_klass();
1724       flattened_array_store_check(value.result(), element_klass, info);
1725     } else if (!x->value()->is_never_null()) {
1726       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1727     }
1728     access_flattened_array(false, array, index, value);
1729   } else {
1730     StoreFlattenedArrayStub* slow_path = NULL;
1731 
1732     if (x->array()->maybe_flattened_array()) {
1733       // Check if we indeed have a flattened array
1734       index.load_item();
1735       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
1736       check_flattened_array(array, slow_path);
1737     }
1738 
1739     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1740     if (x->check_boolean()) {
1741       decorators |= C1_MASK_BOOLEAN;
1742     }
1743 
1744     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1745                     NULL, null_check_info);
1746     if (slow_path != NULL) {
1747       __ branch_destination(slow_path->continuation());
1748     }
1749   }
1750 }
1751 
1752 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,


2029       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2030       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
2031     } else {
2032       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2033       // The range check performs the null check, so clear it out for the load
2034       null_check_info = NULL;
2035     }
2036   }
2037 
2038   if (x->array()->is_loaded_flattened_array()) {
2039     // Find the destination address (of the NewValueTypeInstance)
2040     LIR_Opr obj = x->vt()->operand();
2041     LIRItem obj_item(x->vt(), this);
2042 
2043     access_flattened_array(true, array, index, obj_item);
2044     set_no_result(x);
2045   } else {
2046     LIR_Opr result = rlock_result(x, x->elt_type());
2047     LoadFlattenedArrayStub* slow_path = NULL;
2048 
2049     if (x->array()->maybe_flattened_array()) {
2050       index.load_item();
2051       // Check if we indeed have a flattened array
2052       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
2053       check_flattened_array(array, slow_path);
2054     }
2055 
2056     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2057     access_load_at(decorators, x->elt_type(),
2058                    array, index.result(), result,
2059                    NULL, null_check_info);
2060 
2061     if (slow_path != NULL) {
2062       __ branch_destination(slow_path->continuation());
2063     }
2064   }
2065 }
2066 
2067 
2068 void LIRGenerator::do_NullCheck(NullCheck* x) {
2069   if (x->can_trap()) {
2070     LIRItem value(x->obj(), this);
2071     value.load_item();

New version:

1642                    obj_item, LIR_OprFact::intConst(obj_offset), temp,
1643                    NULL, NULL);
1644     access_store_at(decorators, field_type,
1645                     elm_item, LIR_OprFact::intConst(elm_offset), temp,
1646                     NULL, NULL);
1647     }
1648   }
1649 }
1650 
1651 void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) {
1652   LIR_Opr array_klass_reg = new_register(T_METADATA);
1653 
1654   __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
1655   LIR_Opr layout = new_register(T_INT);
1656   __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
1657   __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
1658   __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
1659   __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
1660 }
1661 
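For reference, the branch emitted by check_flattened_array() above amounts to the following test on the array klass's layout helper. This is a minimal standalone sketch, assuming the signed arithmetic shift and the Klass::_lh_array_tag_shift / Klass::_lh_array_tag_vt_value constants used in the LIR; it is illustrative only and not part of the change:

    // Standalone sketch (not part of the patch): the predicate that the LIR in
    // check_flattened_array() implements, expressed over a raw layout-helper value.
    #include <cstdint>

    static bool is_flattened_value_array(int32_t layout_helper,
                                         int     array_tag_shift,   // Klass::_lh_array_tag_shift
                                         int32_t vt_array_tag) {    // Klass::_lh_array_tag_vt_value
      // Arithmetic right shift mirrors the signed T_INT shift_right above; if the
      // extracted tag equals the value-type array tag, the slow path is taken.
      return (layout_helper >> array_tag_shift) == vt_array_tag;
    }
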
1662 bool LIRGenerator::needs_flattened_array_store_check(StoreIndexed* x) {
1663   if (ValueArrayFlatten && x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
1664     ciType* type = x->value()->declared_type();
1665     if (type != NULL && type->is_klass()) {
1666       ciKlass* klass = type->as_klass();
1667       if (klass->is_loaded() &&
1668           !(klass->is_valuetype() && klass->as_value_klass()->flatten_array()) &&
1669           !klass->is_java_lang_Object() &&
1670           !klass->is_interface()) {
1671         // This is known to be a non-flattenable object. If the array is flattened,
1672         // it will be caught by the code generated by array_store_check().
1673         return false;
1674       }
1675     }
1676     // We're not 100% sure, so let's do the flattened_array_store_check.
1677     return true;
1678   }
1679   return false;
1680 }
1681 
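The intent of needs_flattened_array_store_check() can be summarized by the following simplified restatement, with plain booleans standing in for the ciType/ciKlass queries above; the names are illustrative and not part of the patch:

    // Sketch: when does an aastore need the runtime flattened-array store check?
    static bool needs_flattened_store_check(bool value_arrays_flattened,      // ValueArrayFlatten
                                            bool storing_object,              // elt_type() == T_OBJECT
                                            bool array_maybe_flattened,
                                            bool value_known_non_flattenable) {
      if (!value_arrays_flattened || !storing_object || !array_maybe_flattened) {
        return false;   // the store can never hit a flattened array
      }
      // A value whose declared type is loaded and known not to be flattenable can
      // only fail via the ordinary array store check, so the extra check is skipped.
      return !value_known_non_flattenable;
    }
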
1682 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1683   assert(x->is_pinned(),"");
1684   bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
1685   bool needs_range_check = x->compute_needs_range_check();
1686   bool use_length = x->length() != NULL;
1687   bool obj_store = x->elt_type() == T_OBJECT; assert(x->elt_type() != T_ARRAY, "never used");
1688   bool needs_store_check = obj_store && !is_loaded_flattened_array &&
1689                                         (x->value()->as_Constant() == NULL ||
1690                                          !get_jobject_constant(x->value())->is_null_object() ||
1691                                          x->should_profile());
1692 
1693   LIRItem array(x->array(), this);
1694   LIRItem index(x->index(), this);
1695   LIRItem value(x->value(), this);
1696   LIRItem length(this);
1697 
1698   array.load_item();
1699   index.load_nonconstant();
1700 
1701   if (use_length && needs_range_check) {
1702     length.set_instruction(x->length());
1703     length.load_item();
1704   }
1705 
1706   if (needs_store_check || x->check_boolean()
1707       || is_loaded_flattened_array || needs_flattened_array_store_check(x)) {
1708     value.load_item();
1709   } else {
1710     value.load_for_store(x->elt_type());
1711   }
1712 
1713   set_no_result(x);
1714 
1715   // the CodeEmitInfo must be duplicated for each different
1716   // LIR-instruction because spilling can occur anywhere between two
1717   // instructions and so the debug information must be different
1718   CodeEmitInfo* range_check_info = state_for(x);
1719   CodeEmitInfo* null_check_info = NULL;
1720   if (x->needs_null_check()) {
1721     null_check_info = new CodeEmitInfo(range_check_info);
1722   }
1723 
1724   if (GenerateRangeChecks && needs_range_check) {
1725     if (use_length) {
1726       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1727       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));


1732     }
1733   }
1734 
1735   if (GenerateArrayStoreCheck && needs_store_check) {
1736     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1737     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1738   }
1739 
1740   if (is_loaded_flattened_array) {
1741     if (!x->is_exact_flattened_array_store()) {
1742       CodeEmitInfo* info = new CodeEmitInfo(range_check_info);
1743       ciKlass* element_klass = x->array()->declared_type()->as_value_array_klass()->element_klass();
1744       flattened_array_store_check(value.result(), element_klass, info);
1745     } else if (!x->value()->is_never_null()) {
1746       __ null_check(value.result(), new CodeEmitInfo(range_check_info));
1747     }
1748     access_flattened_array(false, array, index, value);
1749   } else {
1750     StoreFlattenedArrayStub* slow_path = NULL;
1751 
1752     if (needs_flattened_array_store_check(x)) {
1753       // Check if we indeed have a flattened array
1754       index.load_item();
1755       slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
1756       check_flattened_array(array, slow_path);
1757     }
1758 
1759     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1760     if (x->check_boolean()) {
1761       decorators |= C1_MASK_BOOLEAN;
1762     }
1763 
1764     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1765                     NULL, null_check_info);
1766     if (slow_path != NULL) {
1767       __ branch_destination(slow_path->continuation());
1768     }
1769   }
1770 }
1771 
1772 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,


2049       __ cmp(lir_cond_belowEqual, length.result(), index.result());
2050       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
2051     } else {
2052       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2053       // The range check performs the null check, so clear it out for the load
2054       null_check_info = NULL;
2055     }
2056   }
2057 
2058   if (x->array()->is_loaded_flattened_array()) {
2059     // Find the destination address (of the NewValueTypeInstance)
2060     LIR_Opr obj = x->vt()->operand();
2061     LIRItem obj_item(x->vt(), this);
2062 
2063     access_flattened_array(true, array, index, obj_item);
2064     set_no_result(x);
2065   } else {
2066     LIR_Opr result = rlock_result(x, x->elt_type());
2067     LoadFlattenedArrayStub* slow_path = NULL;
2068 
2069     if (x->elt_type() == T_OBJECT && x->array()->maybe_flattened_array()) {
2070       index.load_item();
2071       // If we are loading from a flattened array, load it using a runtime call
2072       slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
2073       check_flattened_array(array, slow_path);
2074     }
2075 
2076     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
2077     access_load_at(decorators, x->elt_type(),
2078                    array, index.result(), result,
2079                    NULL, null_check_info);
2080 
2081     if (slow_path != NULL) {
2082       __ branch_destination(slow_path->continuation());
2083     }
2084   }
2085 }
2086 
2087 
2088 void LIRGenerator::do_NullCheck(NullCheck* x) {
2089   if (x->can_trap()) {
2090     LIRItem value(x->obj(), this);
2091     value.load_item();

