author    Chad Rosier <mcrosier@codeaurora.org>  2013-12-11 21:03:40 +0000
committer Chad Rosier <mcrosier@codeaurora.org>  2013-12-11 21:03:40 +0000
commit    c3e5d72ba83f607d7e1409027f7593c689fc70d0
tree      2446844292ae908b9f82cc4f824ab359966b7317  /test/CodeGen/AArch64
parent    7dc7cc49a58aeed45e208a9f64f0e47a4fcc6fd6
[AArch64] Refactor the NEON scalar floating-point reciprocal estimate, floating-point reciprocal exponent, and floating-point reciprocal square root estimate LLVM AArch64 intrinsics to use f32/f64 types rather than their vector equivalents.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@197066 91177308-0d34-0410-b5e6-96231b3b80d8
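For context, a minimal sketch of the shape the tests take after this change, using the scalar intrinsic declarations from the updated test file (the function name recpe_example is illustrative only, not part of the commit):

declare float @llvm.aarch64.neon.vrecpe.f32(float)

define float @recpe_example(float %a) {
entry:
  ; The refactored intrinsic operates directly on f32, so the old
  ; insertelement / <1 x float> intrinsic call / extractelement wrapping
  ; removed in the diff below is no longer needed.
  %0 = call float @llvm.aarch64.neon.vrecpe.f32(float %a)
  ret float %0
}

The f64 variants and the vrecpx/vrsqrte intrinsics follow the same pattern, as the diff shows.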
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-recip.ll | 36
1 file changed, 12 insertions(+), 24 deletions(-)
diff --git a/test/CodeGen/AArch64/neon-scalar-recip.ll b/test/CodeGen/AArch64/neon-scalar-recip.ll
index f21c27bee4..bd549a86a4 100644
--- a/test/CodeGen/AArch64/neon-scalar-recip.ll
+++ b/test/CodeGen/AArch64/neon-scalar-recip.ll
@@ -50,9 +50,7 @@ define float @test_vrecpes_f32(float %a) {
; CHECK: test_vrecpes_f32
; CHECK: frecpe {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vrecpe.i = insertelement <1 x float> undef, float %a, i32 0
- %vrecpe1.i = tail call <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float> %vrecpe.i)
- %0 = extractelement <1 x float> %vrecpe1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vrecpe.f32(float %a)
ret float %0
}
@@ -60,22 +58,18 @@ define double @test_vrecped_f64(double %a) {
; CHECK: test_vrecped_f64
; CHECK: frecpe {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %vrecpe.i = insertelement <1 x double> undef, double %a, i32 0
- %vrecpe1.i = tail call <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double> %vrecpe.i)
- %0 = extractelement <1 x double> %vrecpe1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vrecpe.f64(double %a)
ret double %0
}
-declare <1 x float> @llvm.arm.neon.vrecpe.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrecpe.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrecpe.f32(float)
+declare double @llvm.aarch64.neon.vrecpe.f64(double)
define float @test_vrecpxs_f32(float %a) {
; CHECK: test_vrecpxs_f32
; CHECK: frecpx {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vrecpx.i = insertelement <1 x float> undef, float %a, i32 0
- %vrecpx1.i = tail call <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float> %vrecpx.i)
- %0 = extractelement <1 x float> %vrecpx1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vrecpx.f32(float %a)
ret float %0
}
@@ -83,22 +77,18 @@ define double @test_vrecpxd_f64(double %a) {
; CHECK: test_vrecpxd_f64
; CHECK: frecpx {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %vrecpx.i = insertelement <1 x double> undef, double %a, i32 0
- %vrecpx1.i = tail call <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double> %vrecpx.i)
- %0 = extractelement <1 x double> %vrecpx1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vrecpx.f64(double %a)
ret double %0
}
-declare <1 x float> @llvm.aarch64.neon.vrecpx.v1f32(<1 x float>)
-declare <1 x double> @llvm.aarch64.neon.vrecpx.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrecpx.f32(float)
+declare double @llvm.aarch64.neon.vrecpx.f64(double)
define float @test_vrsqrtes_f32(float %a) {
; CHECK: test_vrsqrtes_f32
; CHECK: frsqrte {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vrsqrte.i = insertelement <1 x float> undef, float %a, i32 0
- %vrsqrte1.i = tail call <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float> %vrsqrte.i)
- %0 = extractelement <1 x float> %vrsqrte1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vrsqrte.f32(float %a)
ret float %0
}
@@ -106,11 +96,9 @@ define double @test_vrsqrted_f64(double %a) {
; CHECK: test_vrsqrted_f64
; CHECK: frsqrte {{d[0-9]+}}, {{d[0-9]+}}
entry:
- %vrsqrte.i = insertelement <1 x double> undef, double %a, i32 0
- %vrsqrte1.i = tail call <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double> %vrsqrte.i)
- %0 = extractelement <1 x double> %vrsqrte1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vrsqrte.f64(double %a)
ret double %0
}
-declare <1 x float> @llvm.arm.neon.vrsqrte.v1f32(<1 x float>)
-declare <1 x double> @llvm.arm.neon.vrsqrte.v1f64(<1 x double>)
+declare float @llvm.aarch64.neon.vrsqrte.f32(float)
+declare double @llvm.aarch64.neon.vrsqrte.f64(double)