
src/hotspot/share/opto/superword.cpp

--- old

2725       if (t != NULL && t->is_con()) {
2726         juint shift = t->get_con();
2727         if (shift > mask) { // Unsigned cmp
2728           cnt = ConNode::make(TypeInt::make(shift & mask));
2729         }
2730       } else {
2731         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
2732           cnt = ConNode::make(TypeInt::make(mask));
2733           _igvn.register_new_node_with_optimizer(cnt);
2734           cnt = new AndINode(opd, cnt);
2735           _igvn.register_new_node_with_optimizer(cnt);
2736           _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2737         }
2738         assert(opd->bottom_type()->isa_int(), "int type only");
2739         if (!opd->bottom_type()->isa_int()) {
2740           NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should be int type only");})
2741           return NULL;
2742         }
2743       }
2744       // Move shift count into vector register.
2745       cnt = VectorNode::shift_count(p0, cnt, vlen, velt_basic_type(p0));
2746       _igvn.register_new_node_with_optimizer(cnt);
2747       _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2748       return cnt;
2749     }
2750     assert(!opd->is_StoreVector(), "such vector is not expected here");
2751     if (opd->is_StoreVector()) {
2752       NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("StoreVector is not expected here");})
2753       return NULL;
2754     }
2755     // Convert scalar input to vector with the same number of elements as
2756     // p0's vector. Use p0's type because size of operand's container in
2757     // vector should match p0's size regardless operand's size.
2758     const Type* p0_t = velt_type(p0);
2759     VectorNode* vn = VectorNode::scalar2vector(opd, vlen, p0_t);
2760 
2761     _igvn.register_new_node_with_optimizer(vn);
2762     _phase->set_ctrl(vn, _phase->get_ctrl(opd));
2763 #ifdef ASSERT
2764     if (TraceNewVectors) {
2765       tty->print("new Vector node: ");

+++ new

2725       if (t != NULL && t->is_con()) {
2726         juint shift = t->get_con();
2727         if (shift > mask) { // Unsigned cmp
2728           cnt = ConNode::make(TypeInt::make(shift & mask));
2729         }
2730       } else {
2731         if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
2732           cnt = ConNode::make(TypeInt::make(mask));
2733           _igvn.register_new_node_with_optimizer(cnt);
2734           cnt = new AndINode(opd, cnt);
2735           _igvn.register_new_node_with_optimizer(cnt);
2736           _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2737         }
2738         assert(opd->bottom_type()->isa_int(), "int type only");
2739         if (!opd->bottom_type()->isa_int()) {
2740           NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("Should be int type only");})
2741           return NULL;
2742         }
2743       }
2744       // Move shift count into vector register.
2745       cnt = VectorNode::shift_count(p0->Opcode(), cnt, vlen, velt_basic_type(p0));
2746       _igvn.register_new_node_with_optimizer(cnt);
2747       _phase->set_ctrl(cnt, _phase->get_ctrl(opd));
2748       return cnt;
2749     }
2750     assert(!opd->is_StoreVector(), "such vector is not expected here");
2751     if (opd->is_StoreVector()) {
2752       NOT_PRODUCT(if(is_trace_loop_reverse() || TraceLoopOpts) {tty->print_cr("StoreVector is not expected here");})
2753       return NULL;
2754     }
2755     // Convert scalar input to vector with the same number of elements as
2756     // p0's vector. Use p0's type because size of operand's container in
2757     // vector should match p0's size regardless operand's size.
2758     const Type* p0_t = velt_type(p0);
2759     VectorNode* vn = VectorNode::scalar2vector(opd, vlen, p0_t);
2760 
2761     _igvn.register_new_node_with_optimizer(vn);
2762     _phase->set_ctrl(vn, _phase->get_ctrl(opd));
2763 #ifdef ASSERT
2764     if (TraceNewVectors) {
2765       tty->print("new Vector node: ");
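
Note on the change at line 2745 (not part of the webrev): the new revision passes
the scalar shift opcode, p0->Opcode(), to VectorNode::shift_count() instead of the
pack head node itself; presumably the helper now only needs the opcode to choose
the right shift-count node. For context on the masking above: Java masks an int
shift count to its low five bits (six for long), while the vector shift takes its
count from a shift-count register that must already hold an in-range value, so a
non-constant count whose type cannot prove the range gets an explicit AndI with
the mask before it is broadcast. Below is a minimal standalone C++ sketch of that
rule, using hypothetical names (java_ishl, needs_count_mask) that do not come from
the source.

  #include <cstdint>

  // Not HotSpot code: illustration only.
  // Java defines x << s for ints as x << (s & 0x1f) (JLS 15.19), so a scalar
  // shift never sees an out-of-range count; the vector shift-count register
  // offers no such implicit masking, hence the fix-up above.
  static uint32_t java_ishl(uint32_t x, int32_t count) {
    return x << (count & 0x1f);  // scalar semantics the vectorized loop must keep
  }

  // Hypothetical predicate mirroring the range check above: an explicit AND with
  // the mask is needed only when the count's type does not prove it already lies
  // in [0, mask] (t == NULL || t->_lo < 0 || t->_hi > (int)mask).
  static bool needs_count_mask(bool has_type, int lo, int hi, int mask) {
    return !has_type || lo < 0 || hi > mask;
  }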

