author    Chad Rosier <mcrosier@codeaurora.org>    2013-10-14 14:37:20 +0000
committer Chad Rosier <mcrosier@codeaurora.org>    2013-10-14 14:37:20 +0000
commit    942827b1139c432239648ef54d1df5074eac36ec (patch)
tree      b7b5fa3a806a4e0fff855feb7367a28a394f19ff /test/CodeGen
parent    0d1e2aebe641fc26bba5d895bbcadcac6f23aaec (diff)
[AArch64] Add support for NEON scalar integer compare instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192596 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-compare.ll  |  128
1 file changed, 128 insertions, 0 deletions
diff --git a/test/CodeGen/AArch64/neon-scalar-compare.ll b/test/CodeGen/AArch64/neon-scalar-compare.ll
new file mode 100644
index 0000000000..831c10bf4b
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-scalar-compare.ll
@@ -0,0 +1,128 @@
+; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon < %s | FileCheck %s
+
+;; Scalar Integer Compare
+
+define i64 @test_vceqd(i64 %a, i64 %b) {
+; CHECK: test_vceqd
+; CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vceq.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vceq1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq(<1 x i64> %vceq.i, <1 x i64> %vceq1.i)
+ %0 = extractelement <1 x i64> %vceq2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vceqzd(i64 %a) {
+; CHECK: test_vceqzd
+; CHECK: cmeq {{d[0-9]+}}, {{d[0-9]+}}, #0x0
+entry:
+ %vceqz.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vceqz1.i = call <1 x i64> @llvm.aarch64.neon.vceq(<1 x i64> %vceqz.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vceqz1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcged(i64 %a, i64 %b) {
+; CHECK: test_vcged
+; CHECK: cmge {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vcge.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcge1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64> %vcge.i, <1 x i64> %vcge1.i)
+ %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcgezd(i64 %a) {
+; CHECK: test_vcgezd
+; CHECK: cmge {{d[0-9]+}}, {{d[0-9]+}}, #0x0
+entry:
+ %vcgez.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgez1.i = call <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64> %vcgez.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vcgez1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcgtd(i64 %a, i64 %b) {
+; CHECK: test_vcgtd
+; CHECK: cmgt {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vcgt.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgt1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i)
+ %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcgtzd(i64 %a) {
+; CHECK: test_vcgtzd
+; CHECK: cmgt {{d[0-9]+}}, {{d[0-9]+}}, #0x0
+entry:
+ %vcgtz.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgtz1.i = call <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64> %vcgtz.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vcgtz1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcled(i64 %a, i64 %b) {
+; CHECK: test_vcled
+; CHECK: cmgt {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vcgt.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcgt1.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64> %vcgt.i, <1 x i64> %vcgt1.i)
+ %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vclezd(i64 %a) {
+; CHECK: test_vclezd
+; CHECK: cmle {{d[0-9]+}}, {{d[0-9]+}}, #0x0
+entry:
+ %vclez.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vclez1.i = call <1 x i64> @llvm.aarch64.neon.vclez(<1 x i64> %vclez.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vclez1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcltd(i64 %a, i64 %b) {
+; CHECK: test_vcltd
+; CHECK: cmge {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vcge.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vcge1.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64> %vcge.i, <1 x i64> %vcge1.i)
+ %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vcltzd(i64 %a) {
+; CHECK: test_vcltzd
+; CHECK: cmlt {{d[0-9]+}}, {{d[0-9]+}}, #0x0
+entry:
+ %vcltz.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcltz1.i = call <1 x i64> @llvm.aarch64.neon.vcltz(<1 x i64> %vcltz.i, <1 x i64> zeroinitializer)
+ %0 = extractelement <1 x i64> %vcltz1.i, i32 0
+ ret i64 %0
+}
+
+define i64 @test_vtstd(i64 %a, i64 %b) {
+; CHECK: test_vtstd
+; CHECK: cmtst {{d[0-9]+}}, {{d[0-9]+}}, {{d[0-9]+}}
+entry:
+ %vtst.i = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vtst1.i = insertelement <1 x i64> undef, i64 %b, i32 0
+ %vtst2.i = call <1 x i64> @llvm.aarch64.neon.vtstd(<1 x i64> %vtst.i, <1 x i64> %vtst1.i)
+ %0 = extractelement <1 x i64> %vtst2.i, i32 0
+ ret i64 %0
+}
+
+declare <1 x i64> @llvm.aarch64.neon.vtstd(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vcltz(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vchs(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vcge(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vclez(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vchi(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vcgt(<1 x i64>, <1 x i64>)
+declare <1 x i64> @llvm.aarch64.neon.vceq(<1 x i64>, <1 x i64>)
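
For context, each of these scalar compares writes an all-ones (true) or all-zeros (false) 64-bit mask to the destination D register, which is why every test simply extracts and returns the single i64 lane. A minimal C sketch of how the same instructions surface at the source level, assuming an AArch64 toolchain of this era whose <arm_neon.h> provides the ACLE scalar-compare intrinsics (the wrapper names below are illustrative, not part of this patch):

#include <arm_neon.h>
#include <stdint.h>

/* Each intrinsic returns ~0ULL when the compare is true, 0 otherwise. */
uint64_t eq_mask(int64_t a, int64_t b)  { return vceqd_s64(a, b); } /* cmeq          */
uint64_t ge_mask(int64_t a, int64_t b)  { return vcged_s64(a, b); } /* cmge          */
uint64_t gt_mask(int64_t a, int64_t b)  { return vcgtd_s64(a, b); } /* cmgt          */
uint64_t le_mask(int64_t a, int64_t b)  { return vcled_s64(a, b); } /* cmgt, swapped */
uint64_t lt_mask(int64_t a, int64_t b)  { return vcltd_s64(a, b); } /* cmge, swapped */
uint64_t tst_mask(int64_t a, int64_t b) { return vtstd_s64(a, b); } /* cmtst         */
uint64_t ltz_mask(int64_t a)            { return vcltzd_s64(a);   } /* cmlt ..., #0  */

Note the le/lt cases: cmle and cmlt exist only in the compare-against-zero form, so for register-register operands the backend emits cmgt/cmge with the operands swapped, exactly as test_vcled and test_vcltd above check.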