From 71313f88cb9ad3b30eaede19cbbb4ef1be1ff882 Mon Sep 17 00:00:00 2001
From: Tim Northover <tnorthover@apple.com>
Date: Mon, 10 Feb 2014 16:20:29 +0000
Subject: ARM: use natural LLVM IR for vshll instructions

Similarly to the vshrn instructions, these are simple zext/sext + shl
operations. Using normal LLVM IR should allow for better code, and more
sharing with the AArch64 backend.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201093 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/ARM/vshll.ll | 99 +++++++++++++++++++++++++++++++----------------
 1 file changed, 66 insertions(+), 33 deletions(-)

diff --git a/test/CodeGen/ARM/vshll.ll b/test/CodeGen/ARM/vshll.ll
index ae80664148..8faee118ff 100644
--- a/test/CodeGen/ARM/vshll.ll
+++ b/test/CodeGen/ARM/vshll.ll
@@ -3,49 +3,55 @@
 define <8 x i16> @vshlls8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlls8:
 ;CHECK: vshll.s8
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
-        ret <8 x i16> %tmp2
+  %tmp1 = load <8 x i8>* %A
+  %sext = sext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %sext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  ret <8 x i16> %shift
 }
 
 define <4 x i32> @vshlls16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlls16:
 ;CHECK: vshll.s16
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
-        ret <4 x i32> %tmp2
+  %tmp1 = load <4 x i16>* %A
+  %sext = sext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %sext, <i32 15, i32 15, i32 15, i32 15>
+  ret <4 x i32> %shift
 }
 
 define <2 x i64> @vshlls32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlls32:
 ;CHECK: vshll.s32
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
-        ret <2 x i64> %tmp2
+  %tmp1 = load <2 x i32>* %A
+  %sext = sext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %sext, <i64 31, i64 31>
+  ret <2 x i64> %shift
 }
 
 define <8 x i16> @vshllu8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshllu8:
 ;CHECK: vshll.u8
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
-        ret <8 x i16> %tmp2
+  %tmp1 = load <8 x i8>* %A
+  %zext = zext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %zext, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  ret <8 x i16> %shift
 }
 
 define <4 x i32> @vshllu16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshllu16:
 ;CHECK: vshll.u16
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
-        ret <4 x i32> %tmp2
+  %tmp1 = load <4 x i16>* %A
+  %zext = zext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %zext, <i32 15, i32 15, i32 15, i32 15>
+  ret <4 x i32> %shift
 }
 
 define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshllu32:
 ;CHECK: vshll.u32
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
-        ret <2 x i64> %tmp2
+  %tmp1 = load <2 x i32>* %A
+  %zext = zext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %zext, <i64 31, i64 31>
+  ret <2 x i64> %shift
 }
 
 ; The following tests use the maximum shift count, so the signedness is
@@ -53,31 +59,58 @@ define <2 x i64> @vshllu32(<2 x i32>* %A) nounwind {
 define <8 x i16> @vshlli8(<8 x i8>* %A) nounwind {
 ;CHECK-LABEL: vshlli8:
 ;CHECK: vshll.i8
-        %tmp1 = load <8 x i8>* %A
-        %tmp2 = call <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8> %tmp1, <8 x i8> < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >)
-        ret <8 x i16> %tmp2
+  %tmp1 = load <8 x i8>* %A
+  %sext = sext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %sext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  ret <8 x i16> %shift
 }
 
 define <4 x i32> @vshlli16(<4 x i16>* %A) nounwind {
 ;CHECK-LABEL: vshlli16:
 ;CHECK: vshll.i16
-        %tmp1 = load <4 x i16>* %A
-        %tmp2 = call <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16> %tmp1, <4 x i16> < i16 16, i16 16, i16 16, i16 16 >)
-        ret <4 x i32> %tmp2
+  %tmp1 = load <4 x i16>* %A
+  %zext = zext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %zext, <i32 16, i32 16, i32 16, i32 16>
+  ret <4 x i32> %shift
 }
 
 define <2 x i64> @vshlli32(<2 x i32>* %A) nounwind {
 ;CHECK-LABEL: vshlli32:
 ;CHECK: vshll.i32
-        %tmp1 = load <2 x i32>* %A
-        %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
-        ret <2 x i64> %tmp2
+  %tmp1 = load <2 x i32>* %A
+  %zext = zext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %zext, <i64 32, i64 32>
+  ret <2 x i64> %shift
 }
 
-declare <8 x i16> @llvm.arm.neon.vshiftls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+; And these have a shift just out of range so separate vmovl and vshl
+; instructions are needed.
+define <8 x i16> @vshllu8_bad(<8 x i8>* %A) nounwind {
+; CHECK-LABEL: vshllu8_bad:
+; CHECK: vmovl.u8
+; CHECK: vshl.i16
+  %tmp1 = load <8 x i8>* %A
+  %zext = zext <8 x i8> %tmp1 to <8 x i16>
+  %shift = shl <8 x i16> %zext, <i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9, i16 9>
+  ret <8 x i16> %shift
+}
+
+define <4 x i32> @vshlls16_bad(<4 x i16>* %A) nounwind {
+; CHECK-LABEL: vshlls16_bad:
+; CHECK: vmovl.s16
+; CHECK: vshl.i32
+  %tmp1 = load <4 x i16>* %A
+  %sext = sext <4 x i16> %tmp1 to <4 x i32>
+  %shift = shl <4 x i32> %sext, <i32 17, i32 17, i32 17, i32 17>
+  ret <4 x i32> %shift
+}
 
-declare <8 x i16> @llvm.arm.neon.vshiftlu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
-declare <4 x i32> @llvm.arm.neon.vshiftlu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
-declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
+define <2 x i64> @vshllu32_bad(<2 x i32>* %A) nounwind {
+; CHECK-LABEL: vshllu32_bad:
+; CHECK: vmovl.u32
+; CHECK: vshl.i64
+  %tmp1 = load <2 x i32>* %A
+  %zext = zext <2 x i32> %tmp1 to <2 x i64>
+  %shift = shl <2 x i64> %zext, <i64 33, i64 33>
+  ret <2 x i64> %shift
+}
--
cgit v1.2.3
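
Postscript: a minimal standalone sketch of the signedness argument the
maximum-shift tests rely on. The file name and the llc invocation below are
assumptions for illustration (vshll.ll's own RUN line sits outside this diff),
and the IR deliberately uses the 2014-era typed-pointer load syntax to match
the file. Widening an i8 lane to i16 and then shifting left by 8 pushes every
extension bit above bit 15, where it is discarded, so sext and zext produce
bit-identical results and the backend is free to select vshll.i8 for either
function:

; max_shift_sketch.ll - hypothetical file, not part of the patch.
; Both functions return the same value for every input: after the shift, each
; i16 lane holds the original 8 payload bits in bits 8-15 and zeros in bits
; 0-7, so the sign/zero extension bits never survive.
define <8 x i16> @max_shift_sext(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
  %ext = sext <8 x i8> %tmp1 to <8 x i16>
  %shift = shl <8 x i16> %ext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  ret <8 x i16> %shift
}

define <8 x i16> @max_shift_zext(<8 x i8>* %A) nounwind {
  %tmp1 = load <8 x i8>* %A
  %ext = zext <8 x i8> %tmp1 to <8 x i16>
  %shift = shl <8 x i16> %ext, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  ret <8 x i16> %shift
}

Run through llc -march=arm -mattr=+neon, either function should compile to a
single vshll.i8; raising the shift count to 9 would instead produce the
vmovl.s8/vmovl.u8 plus vshl.i16 pair that the *_bad tests above check for.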