    // Tail of access_flattened_array: per-field copy between the array element
    // (elm_item) and the heap-buffered value object (obj_item); is_load selects
    // the direction of the copy.
    DecoratorSet decorators = IN_HEAP;
    if (is_load) {
      access_load_at(decorators, field_type,
                     elm_item, LIR_OprFact::intConst(elm_offset), temp,
                     NULL, NULL);
      access_store_at(decorators, field_type,
                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
                      NULL, NULL);
    } else {
      access_load_at(decorators, field_type,
                     obj_item, LIR_OprFact::intConst(obj_offset), temp,
                     NULL, NULL);
      access_store_at(decorators, field_type,
                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
                      NULL, NULL);
    }
  }
}

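// Loads the array's klass and its layout helper, extracts the array tag, and
// branches to slow_path when the tag identifies a flattened value type array.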
void LIRGenerator::check_flattened_array(LIRItem& array, CodeStub* slow_path) {
  LIR_Opr array_klass_reg = new_register(T_METADATA);

  __ move(new LIR_Address(array.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), array_klass_reg);
  LIR_Opr layout = new_register(T_INT);
  __ move(new LIR_Address(array_klass_reg, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
  __ shift_right(layout, Klass::_lh_array_tag_shift, layout);
  __ cmp(lir_cond_equal, layout, LIR_OprFact::intConst(Klass::_lh_array_tag_vt_value));
  __ branch(lir_cond_equal, T_ILLEGAL, slow_path);
}

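// Indexed array store. A statically known flattened value type array is
// written with a field-by-field copy (access_flattened_array); an array that
// merely might be flattened gets a StoreFlattenedArrayStub slow path guarded
// by check_flattened_array; everything else is a regular array store.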
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool is_loaded_flattened_array = x->array()->is_loaded_flattened_array();
  bool needs_range_check = x->compute_needs_range_check();
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && !is_loaded_flattened_array &&
                           (x->value()->as_Constant() == NULL ||
                            !get_jobject_constant(x->value())->is_null_object() ||
                            x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length && needs_range_check) {
    length.set_instruction(x->length());
    length.load_item();
  }

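  // The value must be in a register whenever it is looked at again after this
  // point: for the store check, for boolean masking, and for the flattened
  // (or possibly flattened) paths that copy it or hand it to a slow-path stub.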
  if (needs_store_check || x->check_boolean()
      || is_loaded_flattened_array || x->array()->maybe_flattened_array()) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // The CodeEmitInfo must be duplicated for each different LIR instruction
  // because spilling can occur anywhere between two instructions, so the
  // debug information must be different.
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
  }

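  // For the maybe-flattened case the fast path below is the normal store; the
  // StoreFlattenedArrayStub only runs when check_flattened_array branches to
  // it, and both paths rejoin at the stub's continuation label.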
  if (is_loaded_flattened_array) {
    index.load_item();
    access_flattened_array(false, array, index, value);
  } else {
    StoreFlattenedArrayStub* slow_path = NULL;

    if (x->array()->maybe_flattened_array()) {
      // Check if we indeed have a flattened array
      index.load_item();
      slow_path = new StoreFlattenedArrayStub(array.result(), index.result(), value.result(), state_for(x));
      check_flattened_array(array, slow_path);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    if (x->check_boolean()) {
      decorators |= C1_MASK_BOOLEAN;
    }

    access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
                    NULL, null_check_info);
    if (slow_path != NULL) {
      __ branch_destination(slow_path->continuation());
    }
  }
}

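// Loads base + offset through the GC barrier set; accesses classified as raw
// by LIRAccess::is_raw() skip barrier-specific code and use the plain
// BarrierSetC1 implementation directly.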
void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
                                  LIRItem& base, LIR_Opr offset, LIR_Opr result,
                                  CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load_at(access, result);
  } else {
    _barrier_set->load_at(access, result);
  }
}

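// Same barrier dispatch as access_load_at, but for an already resolved
// address rather than a base/offset pair.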
void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
                               LIR_Opr addr, LIR_Opr result) {
  decorators |= ACCESS_READ;
  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
  access.set_resolved_addr(addr);
  if (access.is_raw()) {
    _barrier_set->BarrierSetC1::load(access, result);
  } else {
    _barrier_set->load(access, result);
  }
}
// ...

// Tail of LIRGenerator::do_LoadIndexed: fall-through of the range check,
// followed by the flattened and non-flattened load paths.
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

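  // A load from a known flattened array copies the element's fields into the
  // buffered value object (x->vt()) and produces no separate result; otherwise
  // a normal element load is emitted, preceded by a runtime flattened-array
  // check and a LoadFlattenedArrayStub slow path when flattening is only a
  // possibility.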
  if (x->array()->is_loaded_flattened_array()) {
    // Find the destination address (of the NewValueTypeInstance)
    LIR_Opr obj = x->vt()->operand();
    LIRItem obj_item(x->vt(), this);

    access_flattened_array(true, array, index, obj_item);
    set_no_result(x);
  } else {
    LIR_Opr result = rlock_result(x, x->elt_type());
    LoadFlattenedArrayStub* slow_path = NULL;

    if (x->array()->maybe_flattened_array()) {
      // Check if we indeed have a flattened array
      slow_path = new LoadFlattenedArrayStub(array.result(), index.result(), result, state_for(x));
      check_flattened_array(array, slow_path);
    }

    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
    access_load_at(decorators, x->elt_type(),
                   array, index.result(), result,
                   NULL, null_check_info);

    if (slow_path != NULL) {
      __ branch_destination(slow_path->continuation());
    }
  }
}

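// Explicit null check: code is emitted only when the check can actually trap;
// otherwise nothing needs to be generated here.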
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}