author    Bob Wilson <bob.wilson@apple.com>	2009-06-22 23:27:02 +0000
committer Bob Wilson <bob.wilson@apple.com>	2009-06-22 23:27:02 +0000
commit    5bafff36c798608a189c517d37527e4a38863071 (patch)
tree      79bd2abbc5253e6f00db07023cf7d829cbcdee5a /test/CodeGen/ARM/vpadal.ll
parent    5de83afcdc3f4f0edf8caacba523f5d05ee48048 (diff)
Add support for ARM's Advanced SIMD (NEON) instruction set.
This is still a work in progress, but most of the NEON instruction set is supported.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@73919 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/ARM/vpadal.ll')
-rw-r--r--  test/CodeGen/ARM/vpadal.ll  |  107
1 file changed, 107 insertions, 0 deletions
diff --git a/test/CodeGen/ARM/vpadal.ll b/test/CodeGen/ARM/vpadal.ll
new file mode 100644
index 0000000000..c41c532988
--- /dev/null
+++ b/test/CodeGen/ARM/vpadal.ll
@@ -0,0 +1,107 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
+; RUN: grep {vpadal\\.s8} %t | count 2
+; RUN: grep {vpadal\\.s16} %t | count 2
+; RUN: grep {vpadal\\.s32} %t | count 2
+; RUN: grep {vpadal\\.u8} %t | count 2
+; RUN: grep {vpadal\\.u16} %t | count 2
+; RUN: grep {vpadal\\.u32} %t | count 2
+
+define <4 x i16> @vpadals8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @vpadals16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @vpadals32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
+ %tmp1 = load <1 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
+ ret <1 x i64> %tmp3
+}
+
+define <4 x i16> @vpadalu8(<4 x i16>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <8 x i8>* %B
+ %tmp3 = call <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16> %tmp1, <8 x i8> %tmp2)
+ ret <4 x i16> %tmp3
+}
+
+define <2 x i32> @vpadalu16(<2 x i32>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <4 x i16>* %B
+ %tmp3 = call <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32> %tmp1, <4 x i16> %tmp2)
+ ret <2 x i32> %tmp3
+}
+
+define <1 x i64> @vpadalu32(<1 x i64>* %A, <2 x i32>* %B) nounwind {
+ %tmp1 = load <1 x i64>* %A
+ %tmp2 = load <2 x i32>* %B
+ %tmp3 = call <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64> %tmp1, <2 x i32> %tmp2)
+ ret <1 x i64> %tmp3
+}
+
+define <8 x i16> @vpadalQs8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @vpadalQs16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @vpadalQs32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+define <8 x i16> @vpadalQu8(<8 x i16>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <16 x i8>* %B
+ %tmp3 = call <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16> %tmp1, <16 x i8> %tmp2)
+ ret <8 x i16> %tmp3
+}
+
+define <4 x i32> @vpadalQu16(<4 x i32>* %A, <8 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <8 x i16>* %B
+ %tmp3 = call <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32> %tmp1, <8 x i16> %tmp2)
+ ret <4 x i32> %tmp3
+}
+
+define <2 x i64> @vpadalQu32(<2 x i64>* %A, <4 x i32>* %B) nounwind {
+ %tmp1 = load <2 x i64>* %A
+ %tmp2 = load <4 x i32>* %B
+ %tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
+ ret <2 x i64> %tmp3
+}
+
+declare <4 x i16> @llvm.arm.neon.vpadals.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vpadals.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
+declare <1 x i64> @llvm.arm.neon.vpadals.v1i64.v2i32(<1 x i64>, <2 x i32>) nounwind readnone
+
+declare <4 x i16> @llvm.arm.neon.vpadalu.v4i16.v8i8(<4 x i16>, <8 x i8>) nounwind readnone
+declare <2 x i32> @llvm.arm.neon.vpadalu.v2i32.v4i16(<2 x i32>, <4 x i16>) nounwind readnone
+declare <1 x i64> @llvm.arm.neon.vpadalu.v1i64.v2i32(<1 x i64>, <2 x i32>) nounwind readnone
+
+declare <8 x i16> @llvm.arm.neon.vpadals.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vpadals.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
+declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
+
+declare <8 x i16> @llvm.arm.neon.vpadalu.v8i16.v16i8(<8 x i16>, <16 x i8>) nounwind readnone
+declare <4 x i32> @llvm.arm.neon.vpadalu.v4i32.v8i16(<4 x i32>, <8 x i16>) nounwind readnone
+declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
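
For readers unfamiliar with these intrinsics, the test above exercises VPADAL (Vector Pairwise Add and Accumulate Long): adjacent pairs of elements in the narrow source vector are added together, widened, and accumulated into the corresponding wider lanes of the destination. The scalar C model below is only an illustrative sketch of the signed 8-bit, 64-bit-register case checked by @vpadals8; the function name and layout are ours and are not part of the commit.

    #include <stdint.h>

    /* Reference model for vpadal.s8 on a 64-bit D register: each adjacent
     * pair of signed 8-bit source elements is summed, sign-extended to
     * 16 bits, and accumulated into the matching 16-bit lane of the
     * accumulator (results wrap on overflow, as on the hardware). */
    void vpadal_s8_ref(int16_t acc[4], const int8_t src[8]) {
        for (int i = 0; i < 4; ++i)
            acc[i] += (int16_t)src[2 * i] + (int16_t)src[2 * i + 1];
    }

The unsigned variants (vpadal.u8 and friends) differ only in zero- rather than sign-extending the source elements before accumulation; the Q-register cases operate on 128-bit vectors in the same way.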