author     Chad Rosier <mcrosier@codeaurora.org>   2013-12-11 21:03:46 +0000
committer  Chad Rosier <mcrosier@codeaurora.org>   2013-12-11 21:03:46 +0000
commit     87b627d88e14c32d1dd9909a89f00c612cb476a7 (patch)
tree       e09fbff08d7573616d9b768e5e60384ec0e51e21 /test/CodeGen/AArch64
parent     73f468218fc56a0acf9cb9c38eb74b138e955ff5 (diff)
[AArch64] Add NEON scalar floating-point compare LLVM AArch64 intrinsics that
use f32/f64 types, rather than their vector equivalents.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@197068 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-fp-compare.ll  |  252
1 file changed, 103 insertions, 149 deletions
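
In practice, the change means the tests pass scalar operands to the compare intrinsics directly, instead of first widening them to <1 x float>/<1 x double> vectors. The before/after sketch below is lifted from the test_vceqs_f32 case in the diff that follows:

  ; Before: scalar operands wrapped in <1 x float> vectors for the intrinsic call
  %vceq.i  = insertelement <1 x float> undef, float %a, i32 0
  %vceq1.i = insertelement <1 x float> undef, float %b, i32 0
  %vceq2.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> %vceq1.i)
  %0       = extractelement <1 x i32> %vceq2.i, i32 0

  ; After: the intrinsic takes float/double operands directly
  %fceq2.i = call <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float %a, float %b)
  %0       = extractelement <1 x i32> %fceq2.i, i32 0
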
diff --git a/test/CodeGen/AArch64/neon-scalar-fp-compare.ll b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
index a6e58599ac..99a9a3a5e0 100644
--- a/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
+++ b/test/CodeGen/AArch64/neon-scalar-fp-compare.ll
@@ -3,29 +3,25 @@
;; Scalar Floating-point Compare
define i32 @test_vceqs_f32(float %a, float %b) {
-; CHECK: test_vceqs_f32
+; CHECK-LABEL: test_vceqs_f32
; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vceq.i = insertelement <1 x float> undef, float %a, i32 0
- %vceq1.i = insertelement <1 x float> undef, float %b, i32 0
- %vceq2.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> %vceq1.i)
- %0 = extractelement <1 x i32> %vceq2.i, i32 0
+ %fceq2.i = call <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fceq2.i, i32 0
ret i32 %0
}
define i64 @test_vceqd_f64(double %a, double %b) {
-; CHECK: test_vceqd_f64
+; CHECK-LABEL: test_vceqd_f64
; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vceq.i = insertelement <1 x double> undef, double %a, i32 0
- %vceq1.i = insertelement <1 x double> undef, double %b, i32 0
- %vceq2.i = call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double> %vceq.i, <1 x double> %vceq1.i)
- %0 = extractelement <1 x i64> %vceq2.i, i32 0
+ %fceq2.i = call <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fceq2.i, i32 0
ret i64 %0
}
-define <1 x i64> @test_vceqz_f64(<1 x double> %a) #0 {
-; CHECK: test_vceqz_f64
+define <1 x i64> @test_vceqz_f64(<1 x double> %a) {
+; CHECK-LABEL: test_vceqz_f64
; CHECK: fcmeq {{d[0-9]+}}, {{d[0-9]+}}, #0.0
entry:
%0 = fcmp oeq <1 x double> %a, zeroinitializer
@@ -34,295 +30,253 @@ entry:
}
define i32 @test_vceqzs_f32(float %a) {
-; CHECK: test_vceqzs_f32
+; CHECK-LABEL: test_vceqzs_f32
; CHECK: fcmeq {{s[0-9]}}, {{s[0-9]}}, #0.0
entry:
- %vceq.i = insertelement <1 x float> undef, float %a, i32 0
- %vceq1.i = call <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float> %vceq.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vceq1.i, i32 0
+ %fceq1.i = call <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float %a, float 0.0)
+ %0 = extractelement <1 x i32> %fceq1.i, i32 0
ret i32 %0
}
define i64 @test_vceqzd_f64(double %a) {
-; CHECK: test_vceqzd_f64
+; CHECK-LABEL: test_vceqzd_f64
; CHECK: fcmeq {{d[0-9]}}, {{d[0-9]}}, #0.0
entry:
- %vceq.i = insertelement <1 x double> undef, double %a, i32 0
- %vceq1.i = tail call <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double> %vceq.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vceq1.i, i32 0
+ %fceq1.i = call <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f32(double %a, float 0.0)
+ %0 = extractelement <1 x i64> %fceq1.i, i32 0
ret i64 %0
}
define i32 @test_vcges_f32(float %a, float %b) {
-; CHECK: test_vcges_f32
+; CHECK-LABEL: test_vcges_f32
; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcge.i = insertelement <1 x float> undef, float %a, i32 0
- %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
- %0 = extractelement <1 x i32> %vcge2.i, i32 0
+ %fcge2.i = call <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcge2.i, i32 0
ret i32 %0
}
define i64 @test_vcged_f64(double %a, double %b) {
-; CHECK: test_vcged_f64
+; CHECK-LABEL: test_vcged_f64
; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcge.i = insertelement <1 x double> undef, double %a, i32 0
- %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
- %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ %fcge2.i = call <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcge2.i, i32 0
ret i64 %0
}
define i32 @test_vcgezs_f32(float %a) {
-; CHECK: test_vcgezs_f32
+; CHECK-LABEL: test_vcgezs_f32
; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, #0.0
entry:
- %vcge.i = insertelement <1 x float> undef, float %a, i32 0
- %vcge1.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vcge1.i, i32 0
+ %fcge1.i = call <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float %a, float 0.0)
+ %0 = extractelement <1 x i32> %fcge1.i, i32 0
ret i32 %0
}
define i64 @test_vcgezd_f64(double %a) {
-; CHECK: test_vcgezd_f64
+; CHECK-LABEL: test_vcgezd_f64
; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, #0.0
entry:
- %vcge.i = insertelement <1 x double> undef, double %a, i32 0
- %vcge1.i = tail call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double> %vcge.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vcge1.i, i32 0
+ %fcge1.i = call <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f32(double %a, float 0.0)
+ %0 = extractelement <1 x i64> %fcge1.i, i32 0
ret i64 %0
}
define i32 @test_vcgts_f32(float %a, float %b) {
-; CHECK: test_vcgts_f32
+; CHECK-LABEL: test_vcgts_f32
; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
- %vcgt1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
- %0 = extractelement <1 x i32> %vcgt2.i, i32 0
+ %fcgt2.i = call <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcgt2.i, i32 0
ret i32 %0
}
define i64 @test_vcgtd_f64(double %a, double %b) {
-; CHECK: test_vcgtd_f64
+; CHECK-LABEL: test_vcgtd_f64
; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
- %vcgt1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
- %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ %fcgt2.i = call <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcgt2.i, i32 0
ret i64 %0
}
define i32 @test_vcgtzs_f32(float %a) {
-; CHECK: test_vcgtzs_f32
+; CHECK-LABEL: test_vcgtzs_f32
; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, #0.0
entry:
- %vcgt.i = insertelement <1 x float> undef, float %a, i32 0
- %vcgt1.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vcgt1.i, i32 0
+ %fcgt1.i = call <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float %a, float 0.0)
+ %0 = extractelement <1 x i32> %fcgt1.i, i32 0
ret i32 %0
}
define i64 @test_vcgtzd_f64(double %a) {
-; CHECK: test_vcgtzd_f64
+; CHECK-LABEL: test_vcgtzd_f64
; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, #0.0
entry:
- %vcgt.i = insertelement <1 x double> undef, double %a, i32 0
- %vcgt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double> %vcgt.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vcgt1.i, i32 0
+ %fcgt1.i = call <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f32(double %a, float 0.0)
+ %0 = extractelement <1 x i64> %fcgt1.i, i32 0
ret i64 %0
}
define i32 @test_vcles_f32(float %a, float %b) {
-; CHECK: test_vcles_f32
+; CHECK-LABEL: test_vcles_f32
; CHECK: fcmge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcge.i = insertelement <1 x float> undef, float %a, i32 0
- %vcge1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcge2.i = call <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float> %vcge.i, <1 x float> %vcge1.i)
- %0 = extractelement <1 x i32> %vcge2.i, i32 0
+ %fcge2.i = call <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcge2.i, i32 0
ret i32 %0
}
define i64 @test_vcled_f64(double %a, double %b) {
-; CHECK: test_vcled_f64
+; CHECK-LABEL: test_vcled_f64
; CHECK: fcmge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcge.i = insertelement <1 x double> undef, double %a, i32 0
- %vcge1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcge2.i = call <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double> %vcge.i, <1 x double> %vcge1.i)
- %0 = extractelement <1 x i64> %vcge2.i, i32 0
+ %fcge2.i = call <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcge2.i, i32 0
ret i64 %0
}
define i32 @test_vclezs_f32(float %a) {
-; CHECK: test_vclezs_f32
+; CHECK-LABEL: test_vclezs_f32
; CHECK: fcmle {{s[0-9]}}, {{s[0-9]}}, #0.0
entry:
- %vcle.i = insertelement <1 x float> undef, float %a, i32 0
- %vcle1.i = call <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float> %vcle.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vcle1.i, i32 0
+ %fcle1.i = call <1 x i32> @llvm.aarch64.neon.fclez.v1i32.f32.f32(float %a, float 0.0)
+ %0 = extractelement <1 x i32> %fcle1.i, i32 0
ret i32 %0
}
define i64 @test_vclezd_f64(double %a) {
-; CHECK: test_vclezd_f64
+; CHECK-LABEL: test_vclezd_f64
; CHECK: fcmle {{d[0-9]}}, {{d[0-9]}}, #0.0
entry:
- %vcle.i = insertelement <1 x double> undef, double %a, i32 0
- %vcle1.i = tail call <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double> %vcle.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vcle1.i, i32 0
+ %fcle1.i = call <1 x i64> @llvm.aarch64.neon.fclez.v1i64.f64.f32(double %a, float 0.0)
+ %0 = extractelement <1 x i64> %fcle1.i, i32 0
ret i64 %0
}
define i32 @test_vclts_f32(float %a, float %b) {
-; CHECK: test_vclts_f32
+; CHECK-LABEL: test_vclts_f32
; CHECK: fcmgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcgt.i = insertelement <1 x float> undef, float %b, i32 0
- %vcgt1.i = insertelement <1 x float> undef, float %a, i32 0
- %vcgt2.i = call <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float> %vcgt.i, <1 x float> %vcgt1.i)
- %0 = extractelement <1 x i32> %vcgt2.i, i32 0
+ %fcgt2.i = call <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcgt2.i, i32 0
ret i32 %0
}
define i64 @test_vcltd_f64(double %a, double %b) {
-; CHECK: test_vcltd_f64
+; CHECK-LABEL: test_vcltd_f64
; CHECK: fcmgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcgt.i = insertelement <1 x double> undef, double %b, i32 0
- %vcgt1.i = insertelement <1 x double> undef, double %a, i32 0
- %vcgt2.i = call <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double> %vcgt.i, <1 x double> %vcgt1.i)
- %0 = extractelement <1 x i64> %vcgt2.i, i32 0
+ %fcgt2.i = call <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcgt2.i, i32 0
ret i64 %0
}
define i32 @test_vcltzs_f32(float %a) {
-; CHECK: test_vcltzs_f32
+; CHECK-LABEL: test_vcltzs_f32
; CHECK: fcmlt {{s[0-9]}}, {{s[0-9]}}, #0.0
entry:
- %vclt.i = insertelement <1 x float> undef, float %a, i32 0
- %vclt1.i = call <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float> %vclt.i, <1 x float> zeroinitializer)
- %0 = extractelement <1 x i32> %vclt1.i, i32 0
+ %fclt1.i = call <1 x i32> @llvm.aarch64.neon.fcltz.v1i32.f32.f32(float %a, float 0.0)
+ %0 = extractelement <1 x i32> %fclt1.i, i32 0
ret i32 %0
}
define i64 @test_vcltzd_f64(double %a) {
-; CHECK: test_vcltzd_f64
+; CHECK-LABEL: test_vcltzd_f64
; CHECK: fcmlt {{d[0-9]}}, {{d[0-9]}}, #0.0
entry:
- %vclt.i = insertelement <1 x double> undef, double %a, i32 0
- %vclt1.i = tail call <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double> %vclt.i, <1 x float> zeroinitializer) #5
- %0 = extractelement <1 x i64> %vclt1.i, i32 0
+ %fclt1.i = call <1 x i64> @llvm.aarch64.neon.fcltz.v1i64.f64.f32(double %a, float 0.0)
+ %0 = extractelement <1 x i64> %fclt1.i, i32 0
ret i64 %0
}
define i32 @test_vcages_f32(float %a, float %b) {
-; CHECK: test_vcages_f32
+; CHECK-LABEL: test_vcages_f32
; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcage.i = insertelement <1 x float> undef, float %a, i32 0
- %vcage1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
- %0 = extractelement <1 x i32> %vcage2.i, i32 0
+ %fcage2.i = call <1 x i32> @llvm.aarch64.neon.fcage.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcage2.i, i32 0
ret i32 %0
}
define i64 @test_vcaged_f64(double %a, double %b) {
-; CHECK: test_vcaged_f64
+; CHECK-LABEL: test_vcaged_f64
; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcage.i = insertelement <1 x double> undef, double %a, i32 0
- %vcage1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
- %0 = extractelement <1 x i64> %vcage2.i, i32 0
+ %fcage2.i = call <1 x i64> @llvm.aarch64.neon.fcage.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcage2.i, i32 0
ret i64 %0
}
define i32 @test_vcagts_f32(float %a, float %b) {
-; CHECK: test_vcagts_f32
+; CHECK-LABEL: test_vcagts_f32
; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcagt.i = insertelement <1 x float> undef, float %a, i32 0
- %vcagt1.i = insertelement <1 x float> undef, float %b, i32 0
- %vcagt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcagt.i, <1 x float> %vcagt1.i)
- %0 = extractelement <1 x i32> %vcagt2.i, i32 0
+ %fcagt2.i = call <1 x i32> @llvm.aarch64.neon.fcagt.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcagt2.i, i32 0
ret i32 %0
}
define i64 @test_vcagtd_f64(double %a, double %b) {
-; CHECK: test_vcagtd_f64
+; CHECK-LABEL: test_vcagtd_f64
; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcagt.i = insertelement <1 x double> undef, double %a, i32 0
- %vcagt1.i = insertelement <1 x double> undef, double %b, i32 0
- %vcagt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcagt.i, <1 x double> %vcagt1.i)
- %0 = extractelement <1 x i64> %vcagt2.i, i32 0
+ %fcagt2.i = call <1 x i64> @llvm.aarch64.neon.fcagt.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcagt2.i, i32 0
ret i64 %0
}
define i32 @test_vcales_f32(float %a, float %b) {
-; CHECK: test_vcales_f32
+; CHECK-LABEL: test_vcales_f32
; CHECK: facge {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcage.i = insertelement <1 x float> undef, float %b, i32 0
- %vcage1.i = insertelement <1 x float> undef, float %a, i32 0
- %vcage2.i = call <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float> %vcage.i, <1 x float> %vcage1.i)
- %0 = extractelement <1 x i32> %vcage2.i, i32 0
+ %fcage2.i = call <1 x i32> @llvm.aarch64.neon.fcage.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcage2.i, i32 0
ret i32 %0
}
define i64 @test_vcaled_f64(double %a, double %b) {
-; CHECK: test_vcaled_f64
+; CHECK-LABEL: test_vcaled_f64
; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcage.i = insertelement <1 x double> undef, double %b, i32 0
- %vcage1.i = insertelement <1 x double> undef, double %a, i32 0
- %vcage2.i = call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %vcage.i, <1 x double> %vcage1.i)
- %0 = extractelement <1 x i64> %vcage2.i, i32 0
+ %fcage2.i = call <1 x i64> @llvm.aarch64.neon.fcage.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcage2.i, i32 0
ret i64 %0
}
define i32 @test_vcalts_f32(float %a, float %b) {
-; CHECK: test_vcalts_f32
+; CHECK-LABEL: test_vcalts_f32
; CHECK: facgt {{s[0-9]}}, {{s[0-9]}}, {{s[0-9]}}
entry:
- %vcalt.i = insertelement <1 x float> undef, float %b, i32 0
- %vcalt1.i = insertelement <1 x float> undef, float %a, i32 0
- %vcalt2.i = call <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float> %vcalt.i, <1 x float> %vcalt1.i)
- %0 = extractelement <1 x i32> %vcalt2.i, i32 0
+ %fcalt2.i = call <1 x i32> @llvm.aarch64.neon.fcagt.v1i32.f32.f32(float %a, float %b)
+ %0 = extractelement <1 x i32> %fcalt2.i, i32 0
ret i32 %0
}
define i64 @test_vcaltd_f64(double %a, double %b) {
-; CHECK: test_vcaltd_f64
+; CHECK-LABEL: test_vcaltd_f64
; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}}
entry:
- %vcalt.i = insertelement <1 x double> undef, double %b, i32 0
- %vcalt1.i = insertelement <1 x double> undef, double %a, i32 0
- %vcalt2.i = call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %vcalt.i, <1 x double> %vcalt1.i)
- %0 = extractelement <1 x i64> %vcalt2.i, i32 0
+ %fcalt2.i = call <1 x i64> @llvm.aarch64.neon.fcagt.v1i64.f64.f64(double %a, double %b)
+ %0 = extractelement <1 x i64> %fcalt2.i, i32 0
ret i64 %0
}
-declare <1 x i32> @llvm.aarch64.neon.vceq.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vceq.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcge.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcge.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vclez.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vclez.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i32> @llvm.aarch64.neon.vcgt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcgt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcltz.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1f64.v1f32(<1 x double>, <1 x float>)
-declare <1 x i32> @llvm.aarch64.neon.vcage.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i32> @llvm.aarch64.neon.vcagt.v1i32.v1f32.v1f32(<1 x float>, <1 x float>)
-declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
+declare <1 x i32> @llvm.aarch64.neon.fceq.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f32(double, float)
+declare <1 x i64> @llvm.aarch64.neon.fceq.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fcge.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f32(double, float)
+declare <1 x i64> @llvm.aarch64.neon.fcge.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fclez.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fclez.v1i64.f64.f32(double, float)
+declare <1 x i32> @llvm.aarch64.neon.fcgt.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f32(double, float)
+declare <1 x i64> @llvm.aarch64.neon.fcgt.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fcltz.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcltz.v1i64.f64.f32(double, float)
+declare <1 x i32> @llvm.aarch64.neon.fcage.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcage.v1i64.f64.f64(double, double)
+declare <1 x i32> @llvm.aarch64.neon.fcagt.v1i32.f32.f32(float, float)
+declare <1 x i64> @llvm.aarch64.neon.fcagt.v1i64.f64.f64(double, double)