author    Bob Wilson <bob.wilson@apple.com>  2009-08-12 20:51:55 +0000
committer Bob Wilson <bob.wilson@apple.com>  2009-08-12 20:51:55 +0000
commit    114a266c942ac2dc1e98047a00337ecbc81f7380 (patch)
tree      fb1cb57e06ad381eac79ca6afa4cc01f4fda93c9 /test
parent    a6340628c2c1a271f7074836a7484ad2d90dd62e (diff)
Generate Neon VTBL and VTBX instructions from the corresponding intrinsics.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@78835 91177308-0d34-0410-b5e6-96231b3b80d8
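For context only (not part of this commit): the @llvm.arm.neon.vtbl1 and @llvm.arm.neon.vtbx1 intrinsics exercised by the test below are what Clang emits for the standard arm_neon.h table-lookup intrinsics, and with this change llc selects them as the NEON VTBL/VTBX machine instructions. A minimal C sketch, assuming an ARM compile with NEON enabled; the wrapper function names are made up for illustration:

#include <arm_neon.h>

/* Illustration only. vtbl1_u8 does an 8-entry byte table lookup:
 * out-of-range indices produce 0 in the result lane. */
uint8x8_t table_lookup(uint8x8_t table, uint8x8_t indices) {
    return vtbl1_u8(table, indices);
}

/* vtbx1_u8 is the "extended" form: out-of-range indices leave the
 * corresponding byte of 'fallback' unchanged instead of zeroing it. */
uint8x8_t table_lookup_ext(uint8x8_t fallback, uint8x8_t table,
                           uint8x8_t indices) {
    return vtbx1_u8(fallback, table, indices);
}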
Diffstat (limited to 'test')
 test/CodeGen/ARM/vtbl.ll | 109
 1 file changed, 109 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/ARM/vtbl.ll b/test/CodeGen/ARM/vtbl.ll
new file mode 100644
index 0000000000..6e3e50083e
--- /dev/null
+++ b/test/CodeGen/ARM/vtbl.ll
@@ -0,0 +1,109 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+neon | FileCheck %s
+
+%struct.__builtin_neon_v8qi2 = type { <8 x i8>, <8 x i8> }
+%struct.__builtin_neon_v8qi3 = type { <8 x i8>, <8 x i8>, <8 x i8> }
+%struct.__builtin_neon_v8qi4 = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> }
+
+define <8 x i8> @vtbl1(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vtbl1:
+;CHECK: vtbl.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %tmp1, <8 x i8> %tmp2)
+ ret <8 x i8> %tmp3
+}
+
+define <8 x i8> @vtbl2(<8 x i8>* %A, %struct.__builtin_neon_v8qi2* %B) nounwind {
+;CHECK: vtbl2:
+;CHECK: vtbl.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__builtin_neon_v8qi2* %B
+ %tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
+ %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+ %tmp5 = call <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4)
+ ret <8 x i8> %tmp5
+}
+
+define <8 x i8> @vtbl3(<8 x i8>* %A, %struct.__builtin_neon_v8qi3* %B) nounwind {
+;CHECK: vtbl3:
+;CHECK: vtbl.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__builtin_neon_v8qi3* %B
+ %tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
+ %tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
+ %tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+ %tmp6 = call <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
+ ret <8 x i8> %tmp6
+}
+
+define <8 x i8> @vtbl4(<8 x i8>* %A, %struct.__builtin_neon_v8qi4* %B) nounwind {
+;CHECK: vtbl4:
+;CHECK: vtbl.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__builtin_neon_v8qi4* %B
+ %tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
+ %tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
+ %tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
+ %tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+ %tmp7 = call <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
+ ret <8 x i8> %tmp7
+}
+
+define <8 x i8> @vtbx1(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
+;CHECK: vtbx1:
+;CHECK: vtbx.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = load <8 x i8>* %C
+ %tmp4 = call <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
+ ret <8 x i8> %tmp4
+}
+
+define <8 x i8> @vtbx2(<8 x i8>* %A, %struct.__builtin_neon_v8qi2* %B, <8 x i8>* %C) nounwind {
+;CHECK: vtbx2:
+;CHECK: vtbx.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__builtin_neon_v8qi2* %B
+ %tmp3 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 0
+ %tmp4 = extractvalue %struct.__builtin_neon_v8qi2 %tmp2, 1
+ %tmp5 = load <8 x i8>* %C
+ %tmp6 = call <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5)
+ ret <8 x i8> %tmp6
+}
+
+define <8 x i8> @vtbx3(<8 x i8>* %A, %struct.__builtin_neon_v8qi3* %B, <8 x i8>* %C) nounwind {
+;CHECK: vtbx3:
+;CHECK: vtbx.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__builtin_neon_v8qi3* %B
+ %tmp3 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 0
+ %tmp4 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 1
+ %tmp5 = extractvalue %struct.__builtin_neon_v8qi3 %tmp2, 2
+ %tmp6 = load <8 x i8>* %C
+ %tmp7 = call <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6)
+ ret <8 x i8> %tmp7
+}
+
+define <8 x i8> @vtbx4(<8 x i8>* %A, %struct.__builtin_neon_v8qi4* %B, <8 x i8>* %C) nounwind {
+;CHECK: vtbx4:
+;CHECK: vtbx.8
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load %struct.__builtin_neon_v8qi4* %B
+ %tmp3 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 0
+ %tmp4 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 1
+ %tmp5 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 2
+ %tmp6 = extractvalue %struct.__builtin_neon_v8qi4 %tmp2, 3
+ %tmp7 = load <8 x i8>* %C
+ %tmp8 = call <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8> %tmp1, <8 x i8> %tmp3, <8 x i8> %tmp4, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
+ ret <8 x i8> %tmp8
+}
+
+declare <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8>, <8 x i8>) nounwind readnone
+declare <8 x i8> @llvm.arm.neon.vtbl2(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare <8 x i8> @llvm.arm.neon.vtbl3(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare <8 x i8> @llvm.arm.neon.vtbl4(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+
+declare <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare <8 x i8> @llvm.arm.neon.vtbx2(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare <8 x i8> @llvm.arm.neon.vtbx3(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
+declare <8 x i8> @llvm.arm.neon.vtbx4(<8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
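A note on the %struct.__builtin_neon_v8qi2/3/4 types used above: they model the 2-, 3-, and 4-register table operands of the multi-register lookups, which at the C level are the uint8x8x2_t/uint8x8x3_t/uint8x8x4_t aggregates taken by vtbl2_u8 and friends. A minimal sketch of the two-register case, again for illustration only and not part of the commit (the function name pair_lookup is hypothetical):

#include <arm_neon.h>

/* The two d-registers in 'table' form one contiguous 16-byte table;
 * indices 16-255 produce 0 in the result lane. */
uint8x8_t pair_lookup(uint8x8x2_t table, uint8x8_t indices) {
    return vtbl2_u8(table, indices);
}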