src/hotspot/share/c1/c1_LIRGenerator.cpp

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "gc/shared/barrierSet.hpp"
  38 #include "gc/shared/c1/barrierSetC1.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/sharedRuntime.hpp"
  41 #include "runtime/stubRoutines.hpp"
  42 #include "runtime/vm_version.hpp"
  43 #include "utilities/bitMap.inline.hpp"
  44 #include "utilities/macros.hpp"
  45 
  46 #ifdef ASSERT
  47 #define __ gen()->lir(__FILE__, __LINE__)->
  48 #else
  49 #define __ gen()->lir()->
  50 #endif
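
The __ shorthand defined above simply forwards to the current LIR list, so __ move(src, dst) expands to gen()->lir()->move(src, dst); the ASSERT flavor also threads the call site through, which helps when tracing emitted LIR back to the generator. A minimal, self-contained sketch of the same idiom (LirList and Gen below are illustrative stand-ins, not HotSpot types):

#include <cstdio>

struct LirList {                              // stand-in for LIR_List
  void move(int src, int dst) { std::printf("move r%d -> r%d\n", src, dst); }
};

struct Gen {                                  // stand-in for the LIRGenerator
  LirList _lir;
  LirList* lir() { return &_lir; }
  LirList* lir(const char* file, int line) {  // ASSERT flavor records the call site
    std::printf("[%s:%d] ", file, line);
    return &_lir;
  }
};

static Gen* gen() { static Gen g; return &g; }

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

int main() {
  __ move(1, 2);                              // expands to gen()->lir()->move(1, 2)
  return 0;
}
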
  51 
  52 #ifndef PATCHED_ADDR
  53 #define PATCHED_ADDR  (max_jint)
  54 #endif
  55 
  56 void PhiResolverState::reset(int max_vregs) {


1531       (needs_patching ||
1532        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1533     // Emit an explicit null check because the offset is too large.
1534     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1535     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1536     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1537   }
1538 
1539   DecoratorSet decorators = IN_HEAP;
1540   if (is_volatile) {
1541     decorators |= MO_SEQ_CST;
1542   }
1543   if (needs_patching) {
1544     decorators |= C1_NEEDS_PATCHING;
1545   }
1546 
1547   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1548                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1549 }
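
Some context on the explicit null check above: implicit null checks work because a load or store at an address near NULL faults and the signal handler turns that into the exception, which only holds while NULL + offset still falls inside the unmapped guard region; once the field offset is large (or may be patched to something large), an explicit check must be emitted instead. A rough sketch of the decision, assuming the guard is a single unmapped page (the real MacroAssembler::needs_explicit_null_check also has to account for compressed-oops heap bases):

#include <cstdint>
#include <cstdio>

// Assumed guard-region size; HotSpot derives this from os::vm_page_size().
const intptr_t kGuardBytes = 4096;

// True if an access at NULL + offset might NOT fault, in which case the
// compiler has to emit an explicit null check instead of relying on SIGSEGV.
bool needs_explicit_null_check_sketch(intptr_t offset) {
  return offset < 0 || offset >= kGuardBytes;
}

int main() {
  std::printf("%d %d\n",
              needs_explicit_null_check_sketch(16),     // small offset: implicit check suffices
              needs_explicit_null_check_sketch(8192));  // beyond the guard page: explicit check
  return 0;
}
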
1550 
1551 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1552   assert(x->is_pinned(),"");

1553   bool needs_range_check = x->compute_needs_range_check();
1554   bool use_length = x->length() != NULL;
1555   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1556   bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||

1557                                          !get_jobject_constant(x->value())->is_null_object() ||
1558                                          x->should_profile());
1559 
1560   LIRItem array(x->array(), this);
1561   LIRItem index(x->index(), this);
1562   LIRItem value(x->value(), this);
1563   LIRItem length(this);
1564 
1565   array.load_item();
1566   index.load_nonconstant();
1567 
1568   if (use_length && needs_range_check) {
1569     length.set_instruction(x->length());
1570     length.load_item();
1571 
1572   }
1573   if (needs_store_check || x->check_boolean()) {

1574     value.load_item();
1575   } else {
1576     value.load_for_store(x->elt_type());
1577   }
1578 
1579   set_no_result(x);
1580 
1581   // the CodeEmitInfo must be duplicated for each different
1582   // LIR-instruction because spilling can occur anywhere between two
1583   // instructions and so the debug information must be different
1584   CodeEmitInfo* range_check_info = state_for(x);
1585   CodeEmitInfo* null_check_info = NULL;
1586   if (x->needs_null_check()) {
1587     null_check_info = new CodeEmitInfo(range_check_info);
1588   }
1589 
1590   if (GenerateRangeChecks && needs_range_check) {
1591     if (use_length) {
1592       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1593       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1594     } else {
1595       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1596       // range_check also does the null check
1597       null_check_info = NULL;
1598     }
1599   }
1600 
1601   if (GenerateArrayStoreCheck && needs_store_check) {
1602     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1603     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1604   }
1605 
1606   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1607   if (x->check_boolean()) {
1608     decorators |= C1_MASK_BOOLEAN;
1609   }
1610 
1611   access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1612                   NULL, null_check_info);

1613 }
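
A note on the check_boolean()/C1_MASK_BOOLEAN path above: a bastore into a boolean[] may only ever write 0 or 1, so when the boolean treatment is requested the generated access narrows the incoming int to its low bit before the store. A minimal sketch of just the masking (the real code emits the equivalent logical AND in LIR rather than calling a helper):

#include <cstdio>

typedef signed char jbyte;

// A boolean array element holds only 0 or 1, so an int value headed for a
// boolean[] store is masked down to its low bit.
jbyte mask_boolean_for_store(int value) {
  return (jbyte)(value & 1);
}

int main() {
  std::printf("%d %d %d\n",
              mask_boolean_for_store(0),
              mask_boolean_for_store(1),
              mask_boolean_for_store(0x102));  // any even value masks to 0
  return 0;
}
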
1614 
1615 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1616                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1617                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1618   decorators |= ACCESS_READ;
1619   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1620   if (access.is_raw()) {
1621     _barrier_set->BarrierSetC1::load_at(access, result);
1622   } else {
1623     _barrier_set->load_at(access, result);
1624   }
1625 }
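
The is_raw() branch above uses a statically qualified call, _barrier_set->BarrierSetC1::load_at(...), which bypasses virtual dispatch so that a raw access always takes the plain base-class implementation even when a collector-specific BarrierSetC1 subclass overrides load_at to add barriers. A small self-contained sketch of that C++ idiom, with illustrative class names:

#include <cstdio>

struct BarrierSetC1Like {                       // stand-in for BarrierSetC1
  virtual ~BarrierSetC1Like() {}
  virtual void load_at() { std::printf("base (raw) load, no barriers\n"); }
};

struct GCBarrierSetC1Like : BarrierSetC1Like {  // stand-in for a GC-specific subclass
  void load_at() override { std::printf("load with GC barriers\n"); }
};

int main() {
  BarrierSetC1Like* bs = new GCBarrierSetC1Like();
  bs->load_at();                            // virtual dispatch: subclass barriers
  bs->BarrierSetC1Like::load_at();          // qualified call: base version, no barriers
  delete bs;
  return 0;
}
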
1626 
1627 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1628                                LIR_Opr addr, LIR_Opr result) {
1629   decorators |= ACCESS_READ;
1630   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1631   access.set_resolved_addr(addr);
1632   if (access.is_raw()) {


1853       __ move(LIR_OprFact::oopConst(NULL), obj);
1854       __ null_check(obj, new CodeEmitInfo(null_check_info));
1855     }
1856   }
1857 
1858   if (GenerateRangeChecks && needs_range_check) {
1859     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1860       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1861     } else if (use_length) {
1862       // TODO: use a (modified) version of array_range_check that does not require a
1863       //       constant length to be loaded to a register
1864       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1865       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1866     } else {
1867       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1868       // The range check performs the null check, so clear it out for the load
1869       null_check_info = NULL;
1870     }
1871   }
1872 
1873   DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1874 
1875   LIR_Opr result = rlock_result(x, x->elt_type());
1876   access_load_at(decorators, x->elt_type(),
1877                  array, index.result(), result,
1878                  NULL, null_check_info);

1879 }
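
The bounds check above branches on lir_cond_belowEqual, i.e. an unsigned length <= index comparison, which folds the negative-index case into the same single test: reinterpreted as unsigned, a negative index becomes a huge value and therefore always compares >= length. A standalone sketch of that trick, independent of LIR:

#include <cstdint>
#include <cstdio>

// One unsigned comparison covers both "index too large" and "index negative":
// a negative int32_t reinterprets as a very large uint32_t, never < length.
bool array_index_out_of_bounds(int32_t index, int32_t length) {
  return (uint32_t)index >= (uint32_t)length;
}

int main() {
  std::printf("%d %d %d\n",
              array_index_out_of_bounds(3, 10),    // in bounds  -> 0
              array_index_out_of_bounds(10, 10),   // too large  -> 1
              array_index_out_of_bounds(-1, 10));  // negative   -> 1
  return 0;
}
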
1880 
1881 
1882 void LIRGenerator::do_NullCheck(NullCheck* x) {
1883   if (x->can_trap()) {
1884     LIRItem value(x->obj(), this);
1885     value.load_item();
1886     CodeEmitInfo* info = state_for(x);
1887     __ null_check(value.result(), info);
1888   }
1889 }
1890 
1891 
1892 void LIRGenerator::do_TypeCast(TypeCast* x) {
1893   LIRItem value(x->obj(), this);
1894   value.load_item();
1895   // the result is the same as from the node we are casting
1896   set_result(x, value.result());
1897 }
1898 


2718 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2719   // construct our frame and model the production of incoming pointer
2720   // to the OSR buffer.
2721   __ osr_entry(LIR_Assembler::osrBufferPointer());
2722   LIR_Opr result = rlock_result(x);
2723   __ move(LIR_Assembler::osrBufferPointer(), result);
2724 }
2725 
2726 
2727 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2728   assert(args->length() == arg_list->length(),
2729          "args=%d, arg_list=%d", args->length(), arg_list->length());
2730   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2731     LIRItem* param = args->at(i);
2732     LIR_Opr loc = arg_list->at(i);
2733     if (loc->is_register()) {
2734       param->load_item_force(loc);
2735     } else {
2736       LIR_Address* addr = loc->as_address_ptr();
2737       param->load_for_store(addr->type());

2738       if (addr->type() == T_OBJECT) {
2739         __ move_wide(param->result(), addr);
2740       } else
2741         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2742           __ unaligned_move(param->result(), addr);
2743         } else {
2744           __ move(param->result(), addr);
2745         }
2746     }
2747   }
2748 
2749   if (x->has_receiver()) {
2750     LIRItem* receiver = args->at(0);
2751     LIR_Opr loc = arg_list->at(0);
2752     if (loc->is_register()) {
2753       receiver->load_item_force(loc);
2754     } else {
2755       assert(loc->is_address(), "just checking");
2756       receiver->load_for_store(T_OBJECT);
2757       __ move_wide(receiver->result(), loc->as_address_ptr());


src/hotspot/share/c1/c1_LIRGenerator.cpp (new version, with the value-type / flattened-array changes; the listing above is the file before the change)

  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "c1/c1_Compilation.hpp"
  27 #include "c1/c1_Defs.hpp"
  28 #include "c1/c1_FrameMap.hpp"
  29 #include "c1/c1_Instruction.hpp"
  30 #include "c1/c1_LIRAssembler.hpp"
  31 #include "c1/c1_LIRGenerator.hpp"
  32 #include "c1/c1_ValueStack.hpp"
  33 #include "ci/ciArrayKlass.hpp"
  34 #include "ci/ciInstance.hpp"
  35 #include "ci/ciObjArray.hpp"
  36 #include "ci/ciUtilities.hpp"
  37 #include "ci/ciValueArrayKlass.hpp"
  38 #include "ci/ciValueKlass.hpp"
  39 #include "gc/shared/barrierSet.hpp"
  40 #include "gc/shared/c1/barrierSetC1.hpp"
  41 #include "runtime/arguments.hpp"
  42 #include "runtime/sharedRuntime.hpp"
  43 #include "runtime/stubRoutines.hpp"
  44 #include "runtime/vm_version.hpp"
  45 #include "utilities/bitMap.inline.hpp"
  46 #include "utilities/macros.hpp"
  47 
  48 #ifdef ASSERT
  49 #define __ gen()->lir(__FILE__, __LINE__)->
  50 #else
  51 #define __ gen()->lir()->
  52 #endif
  53 
  54 #ifndef PATCHED_ADDR
  55 #define PATCHED_ADDR  (max_jint)
  56 #endif
  57 
  58 void PhiResolverState::reset(int max_vregs) {


1533       (needs_patching ||
1534        MacroAssembler::needs_explicit_null_check(x->offset()))) {
1535     // Emit an explicit null check because the offset is too large.
1536     // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1537     // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1538     __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1539   }
1540 
1541   DecoratorSet decorators = IN_HEAP;
1542   if (is_volatile) {
1543     decorators |= MO_SEQ_CST;
1544   }
1545   if (needs_patching) {
1546     decorators |= C1_NEEDS_PATCHING;
1547   }
1548 
1549   access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
1550                   value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
1551 }
1552 
1553 // FIXME -- I can't find any other way to pass an address to access_load_at().
1554 class TempResolvedAddress: public Instruction {
1555  public:
1556   TempResolvedAddress(ValueType* type, LIR_Opr addr) : Instruction(type) {
1557     set_operand(addr);
1558   }
1559   virtual void input_values_do(ValueVisitor*) {}
1560   virtual void visit(InstructionVisitor* v)   {}
1561   virtual const char* name() const  { return "TempResolvedAddress"; }
1562 };
1563 
1564 void LIRGenerator::access_flattened_array(bool is_load, LIRItem& array, LIRItem& index, LIRItem& obj_item) {
1565   // Find the starting address of the source (inside the array)
1566   ciType* array_type = array.value()->declared_type();
1567   ciValueArrayKlass* value_array_klass = array_type->as_value_array_klass();
1568   ciValueKlass* elem_klass = value_array_klass->element_klass()->as_value_klass();
1569   int array_header_size = value_array_klass->array_header_in_bytes();
1570 
1571 #ifndef _LP64
1572   LIR_Opr index_op = index.result();
1573 #else
1574   LIR_Opr index_op = new_register(T_LONG);
1575   __ convert(Bytecodes::_i2l, index.result(), index_op);
1576 #endif
1577   // Need to shift manually, as LIR_Address can scale only up to 3.
1578   __ shift_left(index_op, value_array_klass->log2_element_size(), index_op);
1579 
1580   LIR_Opr elm_op = new_pointer_register();
1581   LIR_Address* elm_address = new LIR_Address(array.result(), index_op, array_header_size, T_ADDRESS);
1582   __ leal(LIR_OprFact::address(elm_address), elm_op);
1583 
1584   for (int i = 0; i < elem_klass->nof_nonstatic_fields(); i++) {
1585     ciField* inner_field = elem_klass->nonstatic_field_at(i);
1586     int obj_offset = inner_field->offset();
1587     int elm_offset = obj_offset - elem_klass->first_field_offset(); // object header is not stored in array.
1588 
1589     BasicType field_type = inner_field->type()->basic_type();
1590     switch (field_type) {
1591     case T_BYTE:
1592     case T_BOOLEAN:
1593     case T_SHORT:
1594     case T_CHAR:
1595       field_type = T_INT;
1596       break;
1597     default:
1598       break;
1599     }
1600 
1601     LIR_Opr temp = new_register(field_type);
1602     TempResolvedAddress* elm_resolved_addr = new TempResolvedAddress(as_ValueType(field_type), elm_op);
1603     LIRItem elm_item(elm_resolved_addr, this);
1604 
1605     DecoratorSet decorators = IN_HEAP;
1606     if (is_load) {
1607       access_load_at(decorators, field_type,
1608                      elm_item, LIR_OprFact::intConst(elm_offset), temp,
1609                      NULL, NULL);
1610       access_store_at(decorators, field_type,
1611                       obj_item, LIR_OprFact::intConst(obj_offset), temp,
1612                       NULL, NULL);
1613     } else {
1614       access_load_at(decorators, field_type,
1615                      obj_item, LIR_OprFact::intConst(obj_offset), temp,
1616                      NULL, NULL);
1617       access_store_at(decorators, field_type,
1618                       elm_item, LIR_OprFact::intConst(elm_offset), temp,
1619                       NULL, NULL);
1620     }
1621   }
1622 }
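
To make the address arithmetic in access_flattened_array concrete: the element start is array base + array header + (index << log2 element size), and each field of the element is then at obj_offset - first_field_offset from there, because flattened elements are stored without an object header. A standalone sketch of that computation with made-up layout constants (the real values come from ciValueArrayKlass/ciValueKlass):

#include <cstdint>
#include <cstdio>

// Illustrative constants only; not taken from a real layout.
const int kArrayHeaderInBytes = 16;   // assumed array header size
const int kLog2ElementSize    = 4;    // assumed 16-byte flattened elements
const int kFirstFieldOffset   = 8;    // assumed offset of the first field in a boxed instance

// Address of one field of element `index` inside a flattened value-type array.
uintptr_t flattened_field_addr(uintptr_t array_base, int index, int field_offset_in_object) {
  uintptr_t elem = array_base + kArrayHeaderInBytes + ((uintptr_t)index << kLog2ElementSize);
  return elem + (field_offset_in_object - kFirstFieldOffset);  // no per-element header
}

int main() {
  uintptr_t base = 0x1000;
  std::printf("field address: %#zx\n", (size_t)flattened_field_addr(base, 3, 12));
  return 0;
}
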
1623 
1624 void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
1625   assert(x->is_pinned(),"");
1626   bool is_flattened = x->array()->is_flattened_array();
1627   bool needs_range_check = x->compute_needs_range_check();
1628   bool use_length = x->length() != NULL;
1629   bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
1630   bool needs_store_check = obj_store && !is_flattened &&
1631                                         (x->value()->as_Constant() == NULL ||
1632                                          !get_jobject_constant(x->value())->is_null_object() ||
1633                                          x->should_profile());
1634 
1635   LIRItem array(x->array(), this);
1636   LIRItem index(x->index(), this);
1637   LIRItem value(x->value(), this);
1638   LIRItem length(this);
1639 
1640   array.load_item();
1641   index.load_nonconstant();
1642 
1643   if (use_length && needs_range_check) {
1644     length.set_instruction(x->length());
1645     length.load_item();
1646 
1647   }
1648   
1649   if (needs_store_check || x->check_boolean() || is_flattened) {
1650     value.load_item();
1651   } else {
1652     value.load_for_store(x->elt_type());
1653   }
1654 
1655   set_no_result(x);
1656 
1657   // the CodeEmitInfo must be duplicated for each different
1658   // LIR-instruction because spilling can occur anywhere between two
1659   // instructions and so the debug information must be different
1660   CodeEmitInfo* range_check_info = state_for(x);
1661   CodeEmitInfo* null_check_info = NULL;
1662   if (x->needs_null_check()) {
1663     null_check_info = new CodeEmitInfo(range_check_info);
1664   }
1665 
1666   if (GenerateRangeChecks && needs_range_check) {
1667     if (use_length) {
1668       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1669       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1670     } else {
1671       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1672       // range_check also does the null check
1673       null_check_info = NULL;
1674     }
1675   }
1676 
1677   if (GenerateArrayStoreCheck && needs_store_check) {
1678     CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
1679     array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
1680   }
1681 
1682   if (is_flattened) {
1683     index.load_item();
1684     access_flattened_array(false, array, index, value);
1685   } else {
1686     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1687     if (x->check_boolean()) {
1688       decorators |= C1_MASK_BOOLEAN;
1689     }
1690 
1691     access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
1692                     NULL, null_check_info);
1693   }
1694 }
1695 
1696 void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
1697                                   LIRItem& base, LIR_Opr offset, LIR_Opr result,
1698                                   CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
1699   decorators |= ACCESS_READ;
1700   LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
1701   if (access.is_raw()) {
1702     _barrier_set->BarrierSetC1::load_at(access, result);
1703   } else {
1704     _barrier_set->load_at(access, result);
1705   }
1706 }
1707 
1708 void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
1709                                LIR_Opr addr, LIR_Opr result) {
1710   decorators |= ACCESS_READ;
1711   LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
1712   access.set_resolved_addr(addr);
1713   if (access.is_raw()) {


1934       __ move(LIR_OprFact::oopConst(NULL), obj);
1935       __ null_check(obj, new CodeEmitInfo(null_check_info));
1936     }
1937   }
1938 
1939   if (GenerateRangeChecks && needs_range_check) {
1940     if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
1941       __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result(), array.result()));
1942     } else if (use_length) {
1943       // TODO: use a (modified) version of array_range_check that does not require a
1944       //       constant length to be loaded to a register
1945       __ cmp(lir_cond_belowEqual, length.result(), index.result());
1946       __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result(), array.result()));
1947     } else {
1948       array_range_check(array.result(), index.result(), null_check_info, range_check_info);
1949       // The range check performs the null check, so clear it out for the load
1950       null_check_info = NULL;
1951     }
1952   }
1953 
1954   if (x->array()->is_flattened_array()) {
1955     // Find the destination address (of the NewValueTypeInstance)
1956     LIR_Opr obj = x->vt()->operand();
1957     LIRItem obj_item(x->vt(), this);
1958 
1959     access_flattened_array(true, array, index, obj_item);
1960     set_no_result(x);
1961   } else {
1962     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1963     LIR_Opr result = rlock_result(x, x->elt_type());
1964     access_load_at(decorators, x->elt_type(),
1965                    array, index.result(), result,
1966                    NULL, null_check_info);
1967   }
1968 }
1969 
1970 
1971 void LIRGenerator::do_NullCheck(NullCheck* x) {
1972   if (x->can_trap()) {
1973     LIRItem value(x->obj(), this);
1974     value.load_item();
1975     CodeEmitInfo* info = state_for(x);
1976     __ null_check(value.result(), info);
1977   }
1978 }
1979 
1980 
1981 void LIRGenerator::do_TypeCast(TypeCast* x) {
1982   LIRItem value(x->obj(), this);
1983   value.load_item();
1984   // the result is the same as from the node we are casting
1985   set_result(x, value.result());
1986 }
1987 


2807 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
2808   // construct our frame and model the production of incoming pointer
2809   // to the OSR buffer.
2810   __ osr_entry(LIR_Assembler::osrBufferPointer());
2811   LIR_Opr result = rlock_result(x);
2812   __ move(LIR_Assembler::osrBufferPointer(), result);
2813 }
2814 
2815 
2816 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
2817   assert(args->length() == arg_list->length(),
2818          "args=%d, arg_list=%d", args->length(), arg_list->length());
2819   for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
2820     LIRItem* param = args->at(i);
2821     LIR_Opr loc = arg_list->at(i);
2822     if (loc->is_register()) {
2823       param->load_item_force(loc);
2824     } else {
2825       LIR_Address* addr = loc->as_address_ptr();
2826       param->load_for_store(addr->type());
2827       assert(addr->type() != T_VALUETYPE, "not supported yet");
2828       if (addr->type() == T_OBJECT) {
2829         __ move_wide(param->result(), addr);
2830       } else
2831         if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2832           __ unaligned_move(param->result(), addr);
2833         } else {
2834           __ move(param->result(), addr);
2835         }
2836     }
2837   }
2838 
2839   if (x->has_receiver()) {
2840     LIRItem* receiver = args->at(0);
2841     LIR_Opr loc = arg_list->at(0);
2842     if (loc->is_register()) {
2843       receiver->load_item_force(loc);
2844     } else {
2845       assert(loc->is_address(), "just checking");
2846       receiver->load_for_store(T_OBJECT);
2847       __ move_wide(receiver->result(), loc->as_address_ptr());