< prev index next >

src/share/vm/c1/c1_LIRGenerator.cpp

Print this page




   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"

  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_FrameMap.hpp"
  28 #include "c1/c1_Instruction.hpp"
  29 #include "c1/c1_LIRAssembler.hpp"
  30 #include "c1/c1_LIRGenerator.hpp"
  31 #include "c1/c1_ValueStack.hpp"
  32 #include "ci/ciArrayKlass.hpp"
  33 #include "ci/ciInstance.hpp"
  34 #include "ci/ciObjArray.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/stubRoutines.hpp"
  37 #include "utilities/bitMap.inline.hpp"
  38 #include "utilities/macros.hpp"
  39 #if INCLUDE_ALL_GCS
  40 #include "gc_implementation/g1/heapRegion.hpp"
  41 #endif // INCLUDE_ALL_GCS
  42 
  43 #ifdef ASSERT
  44 #define __ gen()->lir(__FILE__, __LINE__)->
  45 #else
  46 #define __ gen()->lir()->
  47 #endif
  48 
  49 // TODO: ARM - Use some recognizable constant which still fits architectural constraints
  50 #ifdef ARM
  51 #define PATCHED_ADDR  (204)
  52 #else
  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
// Reset the phi-resolver scratch state so it can be reused for another
// resolution pass.  Each backing array is grown in one step by touching
// index max_vregs - 1 (at_put_grow NULL-fills the gap) and then logically
// emptied with trunc_to(0).  Presumably the grow/truncate pair pre-reserves
// capacity so later appends do not reallocate -- confirm against
// GrowableArray semantics.
  56 void PhiResolverState::reset(int max_vregs) {
  57   // Initialize array sizes
  58   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  59   _virtual_operands.trunc_to(0);
  60   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  61   _other_operands.trunc_to(0);
  62   _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  63   _vreg_table.trunc_to(0);
  64 }
  65 
  66 
  67 
  68 //--------------------------------------------------------------
  69 // PhiResolver
  70 
  71 // Resolves cycles:
  72 //


1582 
// Card-table post-barrier for a reference store: mark the card covering the
// stored-to location as dirty so a subsequent GC remembered-set scan will
// examine it.
//   addr    - the store destination; may be a LIR_Address expression or a
//             register holding the address.
//   new_val - the value that was stored; unused here (card marking depends
//             only on the destination address).
1583 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1584 
       // The card table is a byte map -- the stores below write single bytes.
1585   assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1586   LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
       // If addr is a full address expression, flatten it into a plain
       // pointer register first (a simple move when it is just a base
       // register, a lea otherwise) so it can be shifted below.
1587   if (addr->is_address()) {
1588     LIR_Address* address = addr->as_address_ptr();
1589     // ptr cannot be an object because we use this barrier for array card marks
1590     // and addr can point in the middle of an array.
1591     LIR_Opr ptr = new_pointer_register();
1592     if (!address->index()->is_valid() && address->disp() == 0) {
1593       __ move(address->base(), ptr);
1594     } else {
           // max_jint is the PATCHED_ADDR placeholder displacement (non-ARM);
           // a yet-to-be-patched address cannot be lea'd.
1595       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1596       __ leal(addr, ptr);
1597     }
1598     addr = ptr;
1599   }
1600   assert(addr->is_register(), "must be a register at this point");
1601 
1602 #ifdef ARM
1603   // TODO: ARM - move to platform-dependent code
       // Load the card table base into R14, either as an immediate (movw)
       // or from the copy kept in the JavaThread.
1604   LIR_Opr tmp = FrameMap::R14_opr;
1605   if (VM_Version::supports_movw()) {
1606     __ move((LIR_Opr)card_table_base, tmp);
1607   } else {
1608     __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
1609   }
1610 
1611   CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
       // Card address = base + (addr >> card_shift); the negative scale
       // encodes a right shift of the index register.
1612   LIR_Address *card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
       // If the low byte of the table base is zero, the low byte of tmp is
       // zero as well, so storing tmp as a T_BYTE writes 0 (the dirty-card
       // value) without needing an extra zero register.
1613   if(((int)ct->byte_map_base & 0xff) == 0) {
1614     __ move(tmp, card_addr);
1615   } else {
1616     LIR_Opr tmp_zero = new_register(T_INT);
1617     __ move(LIR_OprFact::intConst(0), tmp_zero);
1618     __ move(tmp_zero, card_addr);
1619   }
1620 #else // ARM
       // Generic path: card index = addr >> card_shift, then store 0
       // (the dirty-card value) at card_table_base + index.
1621   LIR_Opr tmp = new_pointer_register();
1622   if (TwoOperandLIRForm) {
1623     __ move(addr, tmp);
1624     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1625   } else {
1626     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1627   }
       // Use the table base as an immediate displacement when the platform
       // allows it; otherwise materialize it in a register first.
1628   if (can_inline_as_constant(card_table_base)) {
1629     __ move(LIR_OprFact::intConst(0),
1630               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1631   } else {
1632     __ move(LIR_OprFact::intConst(0),
1633               new LIR_Address(tmp, load_constant(card_table_base),
1634                               T_BYTE));
1635   }
1636 #endif // ARM
1637 }
1638 
1639 
1640 //------------------------field access--------------------------------------
1641 
1642 // Comment copied from templateTable_i486.cpp
1643 // ----------------------------------------------------------------------------
1644 // Volatile variables demand their effects be made known to all CPU's in
1645 // order.  Store buffers on most chips allow reads & writes to reorder; the
1646 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1647 // memory barrier (i.e., it's not sufficient that the interpreter does not
1648 // reorder volatile references, the hardware also must not reorder them).
1649 //
1650 // According to the new Java Memory Model (JMM):
1651 // (1) All volatiles are serialized wrt to each other.
1652 // ALSO reads & writes act as acquire & release, so:
1653 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1654 // the read float up to before the read.  It's OK for non-volatile memory refs
1655 // that happen before the volatile read to float down below it.
1655 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs


2104   // At this point base is a long non-constant
2105   // Index is a long register or a int constant.
2106   // We allow the constant to stay an int because that would allow us a more compact encoding by
2107   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2108   // move it into a register first.
2109   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2110   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2111                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2112 #endif
2113 
2114   BasicType dst_type = x->basic_type();
2115 
2116   LIR_Address* addr;
2117   if (index_op->is_constant()) {
2118     assert(log2_scale == 0, "must not have a scale");
2119     assert(index_op->type() == T_INT, "only int constants supported");
2120     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2121   } else {
2122 #ifdef X86
2123     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2124 #elif defined(ARM)
2125     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2126 #else
2127     if (index_op->is_illegal() || log2_scale == 0) {
2128       addr = new LIR_Address(base_op, index_op, dst_type);
2129     } else {
2130       LIR_Opr tmp = new_pointer_register();
2131       __ shift_left(index_op, log2_scale, tmp);
2132       addr = new LIR_Address(base_op, tmp, dst_type);
2133     }
2134 #endif
2135   }
2136 
2137   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2138     __ unaligned_move(addr, reg);
2139   } else {
2140     if (dst_type == T_OBJECT && x->is_wide()) {
2141       __ move_wide(addr, reg);
2142     } else {
2143       __ move(addr, reg);
2144     }


2158   LIRItem value(x->value(), this);
2159   LIRItem idx(this);
2160 
2161   base.load_item();
2162   if (x->has_index()) {
2163     idx.set_instruction(x->index());
2164     idx.load_item();
2165   }
2166 
2167   if (type == T_BYTE || type == T_BOOLEAN) {
2168     value.load_byte_item();
2169   } else {
2170     value.load_item();
2171   }
2172 
2173   set_no_result(x);
2174 
2175   LIR_Opr base_op = base.result();
2176   LIR_Opr index_op = idx.result();
2177 



2178 #ifndef _LP64
2179   if (base_op->type() == T_LONG) {
2180     base_op = new_register(T_INT);
2181     __ convert(Bytecodes::_l2i, base.result(), base_op);
2182   }
2183   if (x->has_index()) {
2184     if (index_op->type() == T_LONG) {
2185       index_op = new_register(T_INT);
2186       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2187     }
2188   }
2189   // At this point base and index should be all ints and not constants
2190   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2191   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
2192 #else
2193   if (x->has_index()) {
2194     if (index_op->type() == T_INT) {
2195       index_op = new_register(T_LONG);
2196       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2197     }
2198   }
2199   // At this point base and index are long and non-constant
2200   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2201   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2202 #endif
2203 
2204   if (log2_scale != 0) {
2205     // temporary fix (platform dependent code without shift on Intel would be better)
2206     // TODO: ARM also allows embedded shift in the address
2207     __ shift_left(index_op, log2_scale, index_op);
2208   }
2209 
2210   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());

2211   __ move(value.result(), addr);
2212 }
2213 
2214 
2215 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2216   BasicType type = x->basic_type();
2217   LIRItem src(x->object(), this);
2218   LIRItem off(x->offset(), this);
2219 
2220   off.load_item();
2221   src.load_item();
2222 
2223   LIR_Opr value = rlock_result(x, x->basic_type());
2224 
2225   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2226 
2227 #if INCLUDE_ALL_GCS
2228   // We might be reading the value of the referent field of a
2229   // Reference object in order to attach it back to the live
2230   // object graph. If G1 is enabled then we need to record




   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Defs.hpp"
  27 #include "c1/c1_Compilation.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "runtime/sharedRuntime.hpp"
  37 #include "runtime/stubRoutines.hpp"
  38 #include "utilities/bitMap.inline.hpp"
  39 #include "utilities/macros.hpp"
  40 #if INCLUDE_ALL_GCS
  41 #include "gc_implementation/g1/heapRegion.hpp"
  42 #endif // INCLUDE_ALL_GCS
  43 
  44 #ifdef ASSERT
  45 #define __ gen()->lir(__FILE__, __LINE__)->
  46 #else
  47 #define __ gen()->lir()->
  48 #endif
  49 
  50 #ifndef PATCHED_ADDR



  51 #define PATCHED_ADDR  (max_jint)
  52 #endif
  53 
// Reset the phi-resolver scratch state so it can be reused for another
// resolution pass.  Each backing array is grown in one step by touching
// index max_vregs - 1 (at_put_grow NULL-fills the gap) and then logically
// emptied with trunc_to(0).  Presumably the grow/truncate pair pre-reserves
// capacity so later appends do not reallocate -- confirm against
// GrowableArray semantics.
  54 void PhiResolverState::reset(int max_vregs) {
  55   // Initialize array sizes
  56   _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  57   _virtual_operands.trunc_to(0);
  58   _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  59   _other_operands.trunc_to(0);
  60   _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  61   _vreg_table.trunc_to(0);
  62 }
  63 
  64 
  65 
  66 //--------------------------------------------------------------
  67 // PhiResolver
  68 
  69 // Resolves cycles:
  70 //


1580 
// Card-table post-barrier for a reference store: mark the card covering the
// stored-to location as dirty so a subsequent GC remembered-set scan will
// examine it.
//   addr    - the store destination; may be a LIR_Address expression or a
//             register holding the address.
//   new_val - the value that was stored; unused here (card marking depends
//             only on the destination address).
1581 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1582 
       // The card table is a byte map -- the stores below write single bytes.
1583   assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1584   LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
       // If addr is a full address expression, flatten it into a plain
       // pointer register first (a simple move when it is just a base
       // register, a lea otherwise) so it can be shifted below.
1585   if (addr->is_address()) {
1586     LIR_Address* address = addr->as_address_ptr();
1587     // ptr cannot be an object because we use this barrier for array card marks
1588     // and addr can point in the middle of an array.
1589     LIR_Opr ptr = new_pointer_register();
1590     if (!address->index()->is_valid() && address->disp() == 0) {
1591       __ move(address->base(), ptr);
1592     } else {
           // NOTE(review): this checks max_jint, the default PATCHED_ADDR,
           // even on platforms that override PATCHED_ADDR above -- confirm
           // that is intended for those platforms.
1593       assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1594       __ leal(addr, ptr);
1595     }
1596     addr = ptr;
1597   }
1598   assert(addr->is_register(), "must be a register at this point");
1599 
1600 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
       // Platforms that define this hook emit their own platform-dependent
       // card-marking sequence.
1601   CardTableModRef_post_barrier_helper(addr, card_table_base);
1602 #else
       // Generic path: card index = addr >> card_shift, then store 0
       // (the dirty-card value) at card_table_base + index.
1603   LIR_Opr tmp = new_pointer_register();
1604   if (TwoOperandLIRForm) {
1605     __ move(addr, tmp);
1606     __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1607   } else {
1608     __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1609   }
       // Use the table base as an immediate displacement when the platform
       // allows it; otherwise materialize it in a register first.
1610   if (can_inline_as_constant(card_table_base)) {
1611     __ move(LIR_OprFact::intConst(0),
1612               new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1613   } else {
1614     __ move(LIR_OprFact::intConst(0),
1615               new LIR_Address(tmp, load_constant(card_table_base),
1616                               T_BYTE));
1617   }
1618 #endif
1619 }
1620 
1621 
1622 //------------------------field access--------------------------------------
1623 
1624 // Comment copied from templateTable_i486.cpp
1625 // ----------------------------------------------------------------------------
1626 // Volatile variables demand their effects be made known to all CPU's in
1627 // order.  Store buffers on most chips allow reads & writes to reorder; the
1628 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1629 // memory barrier (i.e., it's not sufficient that the interpreter does not
1630 // reorder volatile references, the hardware also must not reorder them).
1631 //
1632 // According to the new Java Memory Model (JMM):
1633 // (1) All volatiles are serialized wrt to each other.
1634 // ALSO reads & writes act as acquire & release, so:
1635 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1636 // the read float up to before the read.  It's OK for non-volatile memory refs
1637 // that happen before the volatile read to float down below it.
1638 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs


2086   // At this point base is a long non-constant
2087   // Index is a long register or a int constant.
2088   // We allow the constant to stay an int because that would allow us a more compact encoding by
2089   // embedding an immediate offset in the address expression. If we have a long constant, we have to
2090   // move it into a register first.
2091   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2092   assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2093                             (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2094 #endif
2095 
2096   BasicType dst_type = x->basic_type();
2097 
2098   LIR_Address* addr;
2099   if (index_op->is_constant()) {
2100     assert(log2_scale == 0, "must not have a scale");
2101     assert(index_op->type() == T_INT, "only int constants supported");
2102     addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2103   } else {
2104 #ifdef X86
2105     addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2106 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2107     addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2108 #else
2109     if (index_op->is_illegal() || log2_scale == 0) {
2110       addr = new LIR_Address(base_op, index_op, dst_type);
2111     } else {
2112       LIR_Opr tmp = new_pointer_register();
2113       __ shift_left(index_op, log2_scale, tmp);
2114       addr = new LIR_Address(base_op, tmp, dst_type);
2115     }
2116 #endif
2117   }
2118 
2119   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2120     __ unaligned_move(addr, reg);
2121   } else {
2122     if (dst_type == T_OBJECT && x->is_wide()) {
2123       __ move_wide(addr, reg);
2124     } else {
2125       __ move(addr, reg);
2126     }


2140   LIRItem value(x->value(), this);
2141   LIRItem idx(this);
2142 
2143   base.load_item();
2144   if (x->has_index()) {
2145     idx.set_instruction(x->index());
2146     idx.load_item();
2147   }
2148 
2149   if (type == T_BYTE || type == T_BOOLEAN) {
2150     value.load_byte_item();
2151   } else {
2152     value.load_item();
2153   }
2154 
2155   set_no_result(x);
2156 
2157   LIR_Opr base_op = base.result();
2158   LIR_Opr index_op = idx.result();
2159 
2160 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2161   LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2162 #else
2163 #ifndef _LP64
2164   if (base_op->type() == T_LONG) {
2165     base_op = new_register(T_INT);
2166     __ convert(Bytecodes::_l2i, base.result(), base_op);
2167   }
2168   if (x->has_index()) {
2169     if (index_op->type() == T_LONG) {
2170       index_op = new_register(T_INT);
2171       __ convert(Bytecodes::_l2i, idx.result(), index_op);
2172     }
2173   }
2174   // At this point base and index should be all ints and not constants
2175   assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
2176   assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
2177 #else
2178   if (x->has_index()) {
2179     if (index_op->type() == T_INT) {
2180       index_op = new_register(T_LONG);
2181       __ convert(Bytecodes::_i2l, idx.result(), index_op);
2182     }
2183   }
2184   // At this point base and index are long and non-constant
2185   assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2186   assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2187 #endif
2188 
2189   if (log2_scale != 0) {
2190     // temporary fix (platform dependent code without shift on Intel would be better)
2191     // TODO: ARM also allows embedded shift in the address
2192     __ shift_left(index_op, log2_scale, index_op);
2193   }
2194 
2195   LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2196 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2197   __ move(value.result(), addr);
2198 }
2199 
2200 
2201 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2202   BasicType type = x->basic_type();
2203   LIRItem src(x->object(), this);
2204   LIRItem off(x->offset(), this);
2205 
2206   off.load_item();
2207   src.load_item();
2208 
2209   LIR_Opr value = rlock_result(x, x->basic_type());
2210 
2211   get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2212 
2213 #if INCLUDE_ALL_GCS
2214   // We might be reading the value of the referent field of a
2215   // Reference object in order to attach it back to the live
2216   // object graph. If G1 is enabled then we need to record


< prev index next >