rev 61244 : Refactor vector operations in aarch64 backend
Summary: move all vector operations which are not in jdk master to
aarch64_neon_ad.m4 and place generated instructions to the end of aarch64.ad.
This change minimizes merge conflicts when the Vector API is merged into
JDK master. In reduction operations, the scalar and vector inputs are
named isrc and vsrc respectively to make the code clearer; JDK master
uses the same naming style.

   1 dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
   2 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3 dnl
   4 dnl This code is free software; you can redistribute it and/or modify it
   5 dnl under the terms of the GNU General Public License version 2 only, as
   6 dnl published by the Free Software Foundation.
   7 dnl
   8 dnl This code is distributed in the hope that it will be useful, but WITHOUT
   9 dnl ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10 dnl FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11 dnl version 2 for more details (a copy is included in the LICENSE file that
  12 dnl accompanied this code).
  13 dnl
  14 dnl You should have received a copy of the GNU General Public License version
  15 dnl 2 along with this work; if not, write to the Free Software Foundation,
  16 dnl Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17 dnl
  18 dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19 dnl or visit www.oracle.com if you need additional information or have any
  20 dnl questions.
  21 dnl
  22 dnl 
  23 dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
  24 dnl and shift patterns used in aarch64.ad.


  25 dnl
  26 // BEGIN This section of the file is automatically generated. Do not edit --------------

  27 dnl ORL2I(mode): expands to `orL2I' when mode is I and to nothing
dnl otherwise, so iReg$1`'ORL2I($1) names the operand class iRegIorL2I
dnl for int (also accepting L2I-converted values) and plain iRegL for long.
  28 define(`ORL2I', `ifelse($1,I,orL2I)')
  29 dnl
  30 define(`BASE_SHIFT_INSN',
  31 `
dnl BASE_SHIFT_INSN(mode, op, insn, shift_node, shift_kind)
dnl Emits a rule matching (op src1 (shift src2 src3)) as a single ALU
dnl instruction with a shifted-register operand.  The shift constant is
dnl masked to 0x1f (int) or 0x3f (long) to match hardware semantics.
  32 instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
  33                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
  34                          immI src3, rFlagsReg cr) %{
  35   match(Set dst ($2$1 src1 ($4$1 src2 src3)));
  36 
  37   ins_cost(1.9 * INSN_COST);
  38   format %{ "$3  $dst, $src1, $src2, $5 $src3" %}
  39 
  40   ins_encode %{
  41     __ $3(as_Register($dst$$reg),
  42               as_Register($src1$$reg),
  43               as_Register($src2$$reg),
  44               Assembler::$5,
  45               $src3$$constant & ifelse($1,I,0x1f,0x3f));
  46   %}
  47 
  48   ins_pipe(ialu_reg_reg_shift);
  49 %}')dnl
  50 define(`BASE_INVERTED_INSN',
  51 `
dnl BASE_INVERTED_INSN(mode, op, insn)
dnl Emits a rule matching (op src1 (xor src2 -1)), i.e. an operation on
dnl a bitwise-inverted operand, as a single bic/orn/eon instruction.
  52 instruct $2$1_reg_not_reg(iReg$1NoSp dst,
  53                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
  54                          rFlagsReg cr) %{
  55 dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
  56 dnl into this canonical form.
  57   ifelse($2,Xor,
  58     match(Set dst (Xor$1 m1 (Xor$1 src2 src1)));,
  59     match(Set dst ($2$1 src1 (Xor$1 src2 m1)));)
  60   ins_cost(INSN_COST);
  61   format %{ "$3  $dst, $src1, $src2" %}
  62 
  63   ins_encode %{
  64     __ $3(as_Register($dst$$reg),
  65               as_Register($src1$$reg),
  66               as_Register($src2$$reg),
  67               Assembler::LSL, 0);
  68   %}
  69 
  70   ins_pipe(ialu_reg_reg);
  71 %}')dnl
  72 define(`INVERTED_SHIFT_INSN',
  73 `
dnl INVERTED_SHIFT_INSN(mode, op, insn, shift_node, shift_kind, ...)
dnl As BASE_INVERTED_INSN, but the inverted operand is itself shifted:
dnl matches (op src1 (xor (shift src2 src3) -1)) in one instruction.
dnl Note: the trailing ~0/type arguments passed by the callers are not
dnl referenced in this template.
  74 instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
  75                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
  76                          immI src3, imm$1_M1 src4, rFlagsReg cr) %{
  77 dnl This ifelse is because hotspot reassociates (xor (xor ..)..)
  78 dnl into this canonical form.
  79   ifelse($2,Xor,
  80     match(Set dst ($2$1 src4 (Xor$1($4$1 src2 src3) src1)));,
  81     match(Set dst ($2$1 src1 (Xor$1($4$1 src2 src3) src4)));)
  82   ins_cost(1.9 * INSN_COST);
  83   format %{ "$3  $dst, $src1, $src2, $5 $src3" %}
  84 
  85   ins_encode %{
  86     __ $3(as_Register($dst$$reg),
  87               as_Register($src1$$reg),
  88               as_Register($src2$$reg),
  89               Assembler::$5,
  90               $src3$$constant & ifelse($1,I,0x1f,0x3f));
  91   %}
  92 
  93   ins_pipe(ialu_reg_reg_shift);
  94 %}')dnl
  95 define(`NOT_INSN',
  96 `instruct reg$1_not_reg(iReg$1NoSp dst,
dnl NOT_INSN(mode, insn): bitwise NOT, matched as (Xor src1 -1) and
dnl emitted as eon/eonw of src1 with the zero register.
  97                          iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
  98                          rFlagsReg cr) %{
  99   match(Set dst (Xor$1 src1 m1));
 100   ins_cost(INSN_COST);
 101   format %{ "$2  $dst, $src1, zr" %}
 102 
 103   ins_encode %{
 104     __ $2(as_Register($dst$$reg),
 105               as_Register($src1$$reg),
 106               zr,
 107               Assembler::LSL, 0);
 108   %}
 109 
 110   ins_pipe(ialu_reg);
 111 %}')dnl
 112 dnl Pairwise wrappers: each instantiates its template for both int and
dnl long.  The int variant uses the w-form mnemonic ($2w), except `andr'
dnl whose 32-bit mnemonic is `andw' rather than `andrw'.
 113 define(`BOTH_SHIFT_INSNS',
 114 `BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
 115 BASE_SHIFT_INSN(L, $1, $2, $3, $4)')dnl
 116 dnl
 117 define(`BOTH_INVERTED_INSNS',
 118 `BASE_INVERTED_INSN(I, $1, $2w, $3, $4)
 119 BASE_INVERTED_INSN(L, $1, $2, $3, $4)')dnl
 120 dnl
 121 define(`BOTH_INVERTED_SHIFT_INSNS',
 122 `INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
 123 INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
 124 dnl
 125 define(`ALL_SHIFT_KINDS',
 126 `BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
 127 BOTH_SHIFT_INSNS($1, $2, RShift, ASR)
 128 BOTH_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
 129 dnl ALL_SHIFT_KINDS / ALL_INVERTED_SHIFT_KINDS instantiate a rule for
dnl every shift kind (LSR/ASR/LSL), each in int and long flavours.
 130 define(`ALL_INVERTED_SHIFT_KINDS',
 131 `BOTH_INVERTED_SHIFT_INSNS($1, $2, URShift, LSR)
 132 BOTH_INVERTED_SHIFT_INSNS($1, $2, RShift, ASR)
 133 BOTH_INVERTED_SHIFT_INSNS($1, $2, LShift, LSL)')dnl
 134 dnl
 135 NOT_INSN(L, eon)
 136 NOT_INSN(I, eonw)
dnl Logical ops with an inverted second operand (bic/orn/eon), plain and
dnl with a shifted inverted operand:
 137 BOTH_INVERTED_INSNS(And, bic)
 138 BOTH_INVERTED_INSNS(Or, orn)
 139 BOTH_INVERTED_INSNS(Xor, eon)
 140 ALL_INVERTED_SHIFT_KINDS(And, bic)
 141 ALL_INVERTED_SHIFT_KINDS(Xor, eon)
 142 ALL_INVERTED_SHIFT_KINDS(Or, orn)
dnl Plain logical/arithmetic ops with a shifted-register second operand:
 143 ALL_SHIFT_KINDS(And, andr)
 144 ALL_SHIFT_KINDS(Xor, eor)
 145 ALL_SHIFT_KINDS(Or, orr)
 146 ALL_SHIFT_KINDS(Add, add)
 147 ALL_SHIFT_KINDS(Sub, sub)
 148 dnl
 149 dnl EXTEND(mode, rshift_op, src, lshift_count, rshift_count): expands to
dnl the shift-left-then-shift-right subtree the compiler generates for
dnl sub-word sign/zero extension.
 150 define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
 151 define(`BFM_INSN',`
dnl BFM_INSN(mode, mask, rshift_op, insn): mask is 31 (int) / 63 (long).
dnl The shift pair is folded into one sbfm/ubfm bitfield move whose
dnl immr (r) and imms (s) fields are derived from the two shift counts.
 152 // Shift Left followed by Shift Right.
 153 // This idiom is used by the compiler for the i2b bytecode etc.
 154 instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
 155 %{
 156   match(Set dst EXTEND($1, $3, src, lshift_count, rshift_count));
 157   ins_cost(INSN_COST * 2);
 158   format %{ "$4  $dst, $src, $rshift_count - $lshift_count, #$2 - $lshift_count" %}
 159   ins_encode %{
 160     int lshift = $lshift_count$$constant & $2;
 161     int rshift = $rshift_count$$constant & $2;
 162     int s = $2 - lshift;
 163     int r = (rshift - lshift) & $2;
 164     __ $4(as_Register($dst$$reg),
 165             as_Register($src$$reg),
 166             r, s);
 167   %}
 168 
 169   ins_pipe(ialu_reg_shift);
 170 %}')
dnl Signed (sbfm) and unsigned (ubfm) variants for long and int:
 171 BFM_INSN(L, 63, RShift, sbfm)
 172 BFM_INSN(I, 31, RShift, sbfmw)
 173 BFM_INSN(L, 63, URShift, ubfm)
 174 BFM_INSN(I, 31, URShift, ubfmw)
 175 dnl
 176 // Bitfield extract with shift & mask
 177 define(`BFX_INSN',
 178 `instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
dnl BFX_INSN(mode, rshift_op, insn, mask, type, log2_suffix):
dnl (shift right, then mask) becomes a single ubfx/ubfxw; the predicate
dnl rejects combinations whose bitfield would exceed the register width.
 179 %{
 180   match(Set dst (And$1 ($2$1 src rshift) mask));
 181   // Make sure we are not going to exceed what $3 can do.
 182   predicate((exact_log2$6(n->in(2)->get_$5() + 1) + (n->in(1)->in(2)->get_int() & $4)) <= ($4 + 1));
 183 
 184   ins_cost(INSN_COST);
 185   format %{ "$3 $dst, $src, $rshift, $mask" %}
 186   ins_encode %{
 187     int rshift = $rshift$$constant & $4;
 188     long mask = $mask$$constant;
 189     int width = exact_log2$6(mask+1);
 190     __ $3(as_Register($dst$$reg),
 191             as_Register($src$$reg), rshift, width);
 192   %}
 193   ins_pipe(ialu_reg_shift);
 194 %}')
 195 BFX_INSN(I, URShift, ubfxw, 31, int)
 196 BFX_INSN(L, URShift, ubfx,  63, long, _long)
 197 
 198 // We can use ubfx when extending an And with a mask when we know mask
 199 // is positive.  We know that because immI_bitmask guarantees it.
 200 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 201 %{
 202   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
 203   // Make sure we are not going to exceed what ubfxw can do.
 204   predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
 205 
 206   ins_cost(INSN_COST * 2);
 207   format %{ "ubfx $dst, $src, $rshift, $mask" %}
 208   ins_encode %{
 209     int rshift = $rshift$$constant & 31;
 210     long mask = $mask$$constant;
 211     int width = exact_log2(mask+1);
 212     // mask is 2^width - 1 (immI_bitmask), so ubfx of `width' bits
 213     // reproduces the And; the 64-bit form zero-extends, making the
 214     // ConvI2L free.
 215     __ ubfx(as_Register($dst$$reg),
 216             as_Register($src$$reg), rshift, width);
 217   %}
 218   ins_pipe(ialu_reg_shift);
 219 %}
 217 
 218 define(`UBFIZ_INSN',
dnl UBFIZ_INSN(mode, insn, mask, type, log2_suffix).  The unquoted //
dnl comment below is part of the macro body, so it is re-emitted with
dnl each instantiation in the generated aarch64.ad.
 219 // We can use ubfiz when masking by a positive number and then left shifting the result.
 220 // We know that the mask is positive because imm$1_bitmask guarantees it.
 221 `instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
 222 %{
 223   match(Set dst (LShift$1 (And$1 src mask) lshift));
 224   predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));
 225 
 226   ins_cost(INSN_COST);
 227   format %{ "$2 $dst, $src, $lshift, $mask" %}
 228   ins_encode %{
 229     int lshift = $lshift$$constant & $3;
 230     long mask = $mask$$constant;
 231     int width = exact_log2$5(mask+1);
 232     __ $2(as_Register($dst$$reg),
 233           as_Register($src$$reg), lshift, width);
 234   %}
 235   ins_pipe(ialu_reg_shift);
 236 %}')
 237 UBFIZ_INSN(I, ubfizw, 31, int)
 238 UBFIZ_INSN(L, ubfiz,  63, long, _long)
 239 
 240 // If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
 241 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 242 %{
 243   match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
 244   predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
 245 
 246   ins_cost(INSN_COST);
 247   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
 248   ins_encode %{
 249     int lshift = $lshift$$constant & 63;
 250     long mask = $mask$$constant;
 251     int width = exact_log2(mask+1);
 252     // mask is 2^width - 1 (immI_bitmask), so a ubfiz of `width' bits
 253     // at `lshift' covers both the And and the left shift.
 254     __ ubfiz(as_Register($dst$$reg),
 255              as_Register($src$$reg), lshift, width);
 256   %}
 257   ins_pipe(ialu_reg_shift);
 258 %}
 259 
 258 // Rotations
 259 
 260 define(`EXTRACT_INSN',
 261 `instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
dnl EXTRACT_INSN(mode, mask, or_op, insn): an Or/Add of opposite shifts
dnl whose counts are complementary (the predicate checks that their sum
dnl is 0 mod the word size) is an extract, emitted as extr/extrw.
 262 %{
 263   match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
 264   predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));
 265 
 266   ins_cost(INSN_COST);
 267   format %{ "extr $dst, $src1, $src2, #$rshift" %}
 268 
 269   ins_encode %{
 270     __ $4(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
 271             $rshift$$constant & $2);
 272   %}
 273   ins_pipe(ialu_reg_reg_extr);
 274 %}
 275 ')dnl
 276 EXTRACT_INSN(L, 63, Or, extr)
 277 EXTRACT_INSN(I, 31, Or, extrw)
 278 EXTRACT_INSN(L, 63, Add, extr)
 279 EXTRACT_INSN(I, 31, Add, extrw)
 280 define(`ROL_EXPAND', `
dnl ROL_EXPAND(mode, name, insn): expander only (no match rule).  AArch64
dnl has no rotate-left instruction, so rotate-left is a variable
dnl rotate-right (rorv/rorvw) by the negated shift count, which is
dnl computed into rscratch1 with subw from zr.
 281 // $2 expander
 282 
 283 instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
 284 %{
 285   effect(DEF dst, USE src, USE shift);
 286 
 287   format %{ "$2    $dst, $src, $shift" %}
 288   ins_cost(INSN_COST * 3);
 289   ins_encode %{
 290     __ subw(rscratch1, zr, as_Register($shift$$reg));
 291     __ $3(as_Register($dst$$reg), as_Register($src$$reg),
 292             rscratch1);
 293     %}
 294   ins_pipe(ialu_reg_reg_vshift);
 295 %}')dnl
 296 define(`ROR_EXPAND', `
dnl ROR_EXPAND(mode, name, insn): expander only.  Rotate-right maps
dnl directly onto rorv/rorvw with the shift register, so no scratch
dnl negation is needed (hence the lower ins_cost than ROL).
 297 // $2 expander
 298 
 299 instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
 300 %{
 301   effect(DEF dst, USE src, USE shift);
 302 
 303   format %{ "$2    $dst, $src, $shift" %}
 304   ins_cost(INSN_COST);
 305   ins_encode %{
 306     __ $3(as_Register($dst$$reg), as_Register($src$$reg),
 307             as_Register($shift$$reg));
 308     %}
 309   ins_pipe(ialu_reg_reg_vshift);
 310 %}')dnl
 311 define(ROL_INSN, `
dnl ROL_INSN(mode, C-suffix, name): match rule for a variable rotate-left
dnl written as (x << s) | (x >>> (C - s)), with C either the word size
dnl or 0; it expands into the corresponding _rReg expander above.
 312 instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 313 %{
 314   match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
 315 
 316   expand %{
 317     $3$1_rReg(dst, src, shift, cr);
 318   %}
 319 %}')dnl
 320 define(ROR_INSN, `
dnl ROR_INSN: mirror image of ROL_INSN for rotate-right, with the shift
dnl directions of the two matched sub-expressions exchanged.
 321 instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 322 %{
 323   match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
 324 
 325   expand %{
 326     $3$1_rReg(dst, src, shift, cr);
 327   %}
 328 %}')dnl
 329 ROL_EXPAND(L, rol, rorv)
 330 ROL_EXPAND(I, rol, rorvw)
dnl Each rotate is matched for both the (wordsize - s) and (0 - s)
dnl spellings of the complementary shift count; the hardware masks the
dnl count, so both expand to the same rorv-based form.
 331 ROL_INSN(L, _64, rol)
 332 ROL_INSN(L, 0, rol)
 333 ROL_INSN(I, _32, rol)
 334 ROL_INSN(I, 0, rol)
 335 ROR_EXPAND(L, ror, rorv)
 336 ROR_EXPAND(I, ror, rorvw)
 337 ROR_INSN(L, _64, ror)
 338 ROR_INSN(L, 0, ror)
 339 ROR_INSN(I, _32, ror)
 340 ROR_INSN(I, 0, ror)
 341 
 342 // Add/subtract (extended)
 343 dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize)
dnl (parameter key for the ADD_SUB_EXTENDED template defined further below)
dnl ADD_SUB_CONV(src mode, dst mode, node, insn, ext type): long add/sub
dnl whose second operand is a sign-extended (ConvI2L) int, folded into
dnl the ext::sxtw register-extend form of add/sub.
 344 define(`ADD_SUB_CONV', `
 345 instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
 346 %{
 347   match(Set dst ($3$2 src1 (ConvI2L src2)));
 348   ins_cost(INSN_COST);
 349   format %{ "$4  $dst, $src1, $src2, $5" %}
 350 
 351    ins_encode %{
 352      __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
 353             as_Register($src2$$reg), ext::$5);
 354    %}
 355   ins_pipe(ialu_reg_reg);
 356 %}')dnl
 357 ADD_SUB_CONV(I,L,Add,add,sxtw);
 358 ADD_SUB_CONV(I,L,Sub,sub,sxtw);
 359 dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, ext type,
dnl wordsize): add/sub where one operand is sign/zero-extended from
dnl `size' bits via the shift-pair idiom (see EXTEND above), folded into
dnl the register-extend form of add/sub.
 360 define(`ADD_SUB_EXTENDED', `
 361 instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
 362 %{
 363   match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
 364   ins_cost(INSN_COST);
 365   format %{ "$5  $dst, $src1, $src2, $6" %}
 366 
 367    ins_encode %{
 368      __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
 369             as_Register($src2$$reg), ext::$6);
 370    %}
 371   ins_pipe(ialu_reg_reg);
 372 %}')
 373 ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
 374 ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
 375 ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
 376 ADD_SUB_EXTENDED(L,16,Add,RShift,add,sxth,64)
 377 ADD_SUB_EXTENDED(L,32,Add,RShift,add,sxtw,64)
 378 ADD_SUB_EXTENDED(L,8,Add,RShift,add,sxtb,64)
 379 ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
 380 dnl
 381 dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, ext type)
dnl Add/sub whose second operand is zero-extended with an And mask; the
dnl masks 255 / 65535 / 4294967295 select ext::uxtb / uxth / uxtw.
 382 define(`ADD_SUB_ZERO_EXTEND', `
 383 instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
 384 %{
 385   match(Set dst ($3$1 src1 (And$1 src2 mask)));
 386   ins_cost(INSN_COST);
 387   format %{ "$4  $dst, $src1, $src2, $5" %}
 388 
 389    ins_encode %{
 390      __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
 391             as_Register($src2$$reg), ext::$5);
 392    %}
 393   ins_pipe(ialu_reg_reg);
 394 %}')
 395 dnl
 396 ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
 397 ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
 398 ADD_SUB_ZERO_EXTEND(L,255,Add,add,uxtb)
 399 ADD_SUB_ZERO_EXTEND(L,65535,Add,add,uxth)
 400 ADD_SUB_ZERO_EXTEND(L,4294967295,Add,add,uxtw)
 401 dnl
 402 ADD_SUB_ZERO_EXTEND(I,255,Sub,subw,uxtb)
 403 ADD_SUB_ZERO_EXTEND(I,65535,Sub,subw,uxth)
 404 ADD_SUB_ZERO_EXTEND(L,255,Sub,sub,uxtb)
 405 ADD_SUB_ZERO_EXTEND(L,65535,Sub,sub,uxth)
 406 ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
 407 dnl
 408 dnl ADD_SUB_EXTENDED_SHIFT(mode, size, add node, shift node, insn, ext type, wordsize)
dnl As ADD_SUB_EXTENDED, but the extended operand is additionally
dnl left-shifted by lshift2, folded into add/sub with extend-and-shift.
 409 define(`ADD_SUB_EXTENDED_SHIFT', `
 410 instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
 411 %{
 412   match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
 413   ins_cost(1.9 * INSN_COST);
 414   format %{ "$5  $dst, $src1, $src2, $6 #lshift2" %}
 415 
 416    ins_encode %{
 417      __ $5(as_Register($dst$$reg), as_Register($src1$$reg),
 418             as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
 419    %}
 420   ins_pipe(ialu_reg_reg_shift);
 421 %}')
 422 dnl                   $1 $2 $3   $4   $5   $6  $7
 423 ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
 424 ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
 425 ADD_SUB_EXTENDED_SHIFT(L,32,Add,RShift,add,sxtw,64)
 426 dnl
 427 ADD_SUB_EXTENDED_SHIFT(L,8,Sub,RShift,sub,sxtb,64)
 428 ADD_SUB_EXTENDED_SHIFT(L,16,Sub,RShift,sub,sxth,64)
 429 ADD_SUB_EXTENDED_SHIFT(L,32,Sub,RShift,sub,sxtw,64)
 430 dnl
 431 ADD_SUB_EXTENDED_SHIFT(I,8,Add,RShift,addw,sxtb,32)
 432 ADD_SUB_EXTENDED_SHIFT(I,16,Add,RShift,addw,sxth,32)
 433 dnl
 434 ADD_SUB_EXTENDED_SHIFT(I,8,Sub,RShift,subw,sxtb,32)
 435 ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
 436 dnl
 437 dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
dnl Long add/sub of a sign-extended (ConvI2L) int operand that is then
dnl left-shifted, folded into add/sub with sxtw-extend and shift.
 438 define(`ADD_SUB_CONV_SHIFT', `
 439 instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
 440 %{
 441   match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
 442   ins_cost(1.9 * INSN_COST);
 443   format %{ "$3  $dst, $src1, $src2, $4 #lshift" %}
 444 
 445    ins_encode %{
 446      __ $3(as_Register($dst$$reg), as_Register($src1$$reg),
 447             as_Register($src2$$reg), ext::$4, ($lshift$$constant));
 448    %}
 449   ins_pipe(ialu_reg_reg_shift);
 450 %}')
 451 dnl
 452 ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
 453 ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
 454 dnl
 455 dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
dnl As ADD_SUB_ZERO_EXTEND, but the masked (zero-extended) operand is
dnl additionally left-shifted, folded into add/sub with extend-and-shift.
 456 define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
 457 instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
 458 %{
 459   match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
 460   ins_cost(1.9 * INSN_COST);
 461   format %{ "$4  $dst, $src1, $src2, $5 #lshift" %}
 462 
 463    ins_encode %{
 464      __ $4(as_Register($dst$$reg), as_Register($src1$$reg),
 465             as_Register($src2$$reg), ext::$5, ($lshift$$constant));
 466    %}
 467   ins_pipe(ialu_reg_reg_shift);
 468 %}')
 469 dnl
 470 dnl                       $1 $2  $3  $4  $5
 471 ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
 472 ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
 473 ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Add,add,uxtw)
 474 dnl
 475 ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Sub,sub,uxtb)
 476 ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Sub,sub,uxth)
 477 ADD_SUB_ZERO_EXTEND_SHIFT(L,4294967295,Sub,sub,uxtw)
 478 dnl
 479 ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Add,addw,uxtb)
 480 ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Add,addw,uxth)
 481 dnl
 482 ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
 483 ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
 484 dnl
 485 // END This section of the file is automatically generated. Do not edit --------------
--- EOF ---