From 5bafff36c798608a189c517d37527e4a38863071 Mon Sep 17 00:00:00 2001
From: Bob Wilson
Date: Mon, 22 Jun 2009 23:27:02 +0000
Subject: Add support for ARM's Advanced SIMD (NEON) instruction set.

This is still a work in progress but most of the NEON instruction set
is supported.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@73919 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/ARM/vicmp.ll | 85 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)
 create mode 100644 test/CodeGen/ARM/vicmp.ll

diff --git a/test/CodeGen/ARM/vicmp.ll b/test/CodeGen/ARM/vicmp.ll
new file mode 100644
index 0000000000..86858f9293
--- /dev/null
+++ b/test/CodeGen/ARM/vicmp.ll
@@ -0,0 +1,85 @@
+; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t