# HG changeset patch
# User enevill
# Date 1435052650 0
#      Tue Jun 23 09:44:10 2015 +0000
# Node ID e62919fa7fb787705083aa5cf6d2683abb15fe72
# Parent  8672e9264db30c21504063932dbc374eabc287a1
8129551: aarch64: some regressions introduced by addition of vectorisation code
Summary: Fix regressions
Reviewed-by: duke

diff --git a/src/cpu/aarch64/vm/assembler_aarch64.hpp b/src/cpu/aarch64/vm/assembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/assembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/assembler_aarch64.hpp
@@ -491,6 +491,11 @@
     i->rf(_index, 16);
     i->f(_ext.option(), 15, 13);
     unsigned size = i->get(31, 30);
+    if (i->get(26, 26) && i->get(23, 23)) {
+      // SIMD Q Type - Size = 128 bits
+      assert(size == 0, "bad size");
+      size = 0b100;
+    }
     if (size == 0) // It's a byte
       i->f(_ext.shift() >= 0, 12);
     else {
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp
@@ -1408,6 +1408,38 @@
   movk(r, imm64 & 0xffff, 32);
 }
 
+void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
+  assert(T != T1D && T != T2D, "invalid arrangement");
+  if (T == T8B || T == T16B) {
+    movi(Vd, T, imm32 & 0xff, 0);
+    return;
+  }
+  u_int32_t nimm32 = ~imm32;
+  if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; }
+  u_int32_t x = imm32;
+  int movi_cnt = 0;
+  int movn_cnt = 0;
+  while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
+  x = nimm32;
+  while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
+  if (movn_cnt < movi_cnt) imm32 = nimm32;
+  unsigned lsl = 0;
+  while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
+  if (movn_cnt < movi_cnt)
+    mvni(Vd, T, imm32 & 0xff, lsl);
+  else
+    movi(Vd, T, imm32 & 0xff, lsl);
+  imm32 >>= 8; lsl += 8;
+  while (imm32) {
+    while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
+    if (movn_cnt < movi_cnt)
+      bici(Vd, T, imm32 & 0xff, lsl);
+    else
+      orri(Vd, T, imm32 & 0xff, lsl);
+    lsl += 8; imm32 >>= 8;
+  }
+}
+
 void MacroAssembler::mov_immediate64(Register dst, u_int64_t imm64)
 {
 #ifndef PRODUCT
diff --git a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.hpp
@@ -475,34 +475,7 @@
   // T2S: Vd = abcdefghabcdefgh
   // T4S: Vd = abcdefghabcdefghabcdefghabcdefgh
   // T1D/T2D: invalid
-  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32) {
-    assert(T != T1D && T != T2D, "invalid arrangement");
-    u_int32_t nimm32 = ~imm32;
-    if (T == T8B || T == T16B) { imm32 &= 0xff; nimm32 &= 0xff; }
-    if (T == T4H || T == T8H) { imm32 &= 0xffff; nimm32 &= 0xffff; }
-    u_int32_t x = imm32;
-    int movi_cnt = 0;
-    int movn_cnt = 0;
-    while (x) { if (x & 0xff) movi_cnt++; x >>= 8; }
-    x = nimm32;
-    while (x) { if (x & 0xff) movn_cnt++; x >>= 8; }
-    if (movn_cnt < movi_cnt) imm32 = nimm32;
-    unsigned lsl = 0;
-    while (imm32 && (imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
-    if (movn_cnt < movi_cnt)
-      mvni(Vd, T, imm32 & 0xff, lsl);
-    else
-      movi(Vd, T, imm32 & 0xff, lsl);
-    imm32 >>= 8; lsl += 8;
-    while (imm32) {
-      while ((imm32 & 0xff) == 0) { lsl += 8; imm32 >>= 8; }
-      if (movn_cnt < movi_cnt)
-        bici(Vd, T, imm32 & 0xff, lsl);
-      else
-        orri(Vd, T, imm32 & 0xff, lsl);
-      lsl += 8; imm32 >>= 8;
-    }
-  }
+  void mov(FloatRegister Vd, SIMD_Arrangement T, u_int32_t imm32);
 
   // macro instructions for accessing and updating floating point
   // status register
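
For reference only (not part of the changeset): a minimal standalone sketch of the byte-wise immediate synthesis that MacroAssembler::mov(FloatRegister, SIMD_Arrangement, u_int32_t) performs after this fix. The helper names simulate_simd_mov and nonzero_bytes are hypothetical, introduced here purely for illustration; instead of emitting MOVI/MVNI/ORRI/BICI the sketch prints the sequence the macro would choose, picking the complemented form when ~imm32 has fewer non-zero bytes than imm32.

// simulate_simd_mov.cpp -- illustrative sketch, not HotSpot code.
#include <cstdint>
#include <cstdio>

// Count the non-zero bytes of x (how many MOVI/ORRI or MVNI/BICI steps
// would be needed to build it byte by byte).
static int nonzero_bytes(uint32_t x) {
  int n = 0;
  while (x) { if (x & 0xff) n++; x >>= 8; }
  return n;
}

// element_bits: 8, 16 or 32, mirroring T8B/T16B, T4H/T8H, T2S/T4S.
static void simulate_simd_mov(uint32_t imm32, int element_bits) {
  if (element_bits == 8) {               // byte lanes: only the low byte matters
    printf("movi #0x%02x, lsl 0\n", imm32 & 0xff);
    return;
  }
  uint32_t nimm32 = ~imm32;
  if (element_bits == 16) { imm32 &= 0xffff; nimm32 &= 0xffff; }
  // Use the inverted value if it needs fewer byte insertions.
  bool use_not = nonzero_bytes(nimm32) < nonzero_bytes(imm32);
  uint32_t v = use_not ? nimm32 : imm32;
  unsigned lsl = 0;
  // Skip leading zero bytes, then emit the first (widening) move.
  while (v && (v & 0xff) == 0) { lsl += 8; v >>= 8; }
  printf("%s #0x%02x, lsl %u\n", use_not ? "mvni" : "movi", v & 0xff, lsl);
  v >>= 8; lsl += 8;
  // Merge each remaining non-zero byte with ORRI (or BICI for the inverted form).
  while (v) {
    while ((v & 0xff) == 0) { lsl += 8; v >>= 8; }
    printf("%s #0x%02x, lsl %u\n", use_not ? "bici" : "orri", v & 0xff, lsl);
    lsl += 8; v >>= 8;
  }
}

int main() {
  simulate_simd_mov(0xffffff00, 32);  // one instruction: mvni #0xff, lsl 0
  simulate_simd_mov(0x00ab0000, 32);  // one instruction: movi #0xab, lsl 16
  simulate_simd_mov(0x1234, 16);      // movi #0x34, lsl 0 then orri #0x12, lsl 8
  return 0;
}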