path: root/test/CodeGen/AArch64/neon-scalar-cvt.ll
author     Chad Rosier <mcrosier@codeaurora.org>  2013-11-15 21:28:10 +0000
committer  Chad Rosier <mcrosier@codeaurora.org>  2013-11-15 21:28:10 +0000
commit     5758c3c832daf4c0b37042684f822fa1896966ac (patch)
tree       fdb5661257eb8180b9590a4561baff057f2c6d3e /test/CodeGen/AArch64/neon-scalar-cvt.ll
parent     8a631b2cbe2f8621eb3679a4898205da577453b7 (diff)
[AArch64] Fix the scalar NEON ACLE functions so that they return float/double
rather than the vector equivalent.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@194853 91177308-0d34-0410-b5e6-96231b3b80d8
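
The change is mechanical: each intrinsic's return type drops its <1 x ...> wrapper, so the extractelement that unwrapped the one-element vector result disappears. A before/after fragment distilled from the first hunk below (not a complete module; value names shortened for illustration):

    ; Before: the intrinsic returned a one-element vector that had to be unwrapped.
    %vec = call <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %v)
    %res = extractelement <1 x float> %vec, i32 0

    ; After: the intrinsic returns the scalar float directly.
    %res = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %v)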
Diffstat (limited to 'test/CodeGen/AArch64/neon-scalar-cvt.ll')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-cvt.ll  40
1 file changed, 16 insertions(+), 24 deletions(-)
diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
index a7f0ac0965..2fe25b8c19 100644
--- a/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ b/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -5,96 +5,88 @@ define float @test_vcvts_f32_s32(i32 %a) {
; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
- %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32> %vcvtf.i)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
+declare float @llvm.aarch64.neon.vcvtf32.s32(<1 x i32>)
define double @test_vcvtd_f64_s64(i64 %a) {
; CHECK: test_vcvtd_f64_s64
; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
- %0 = extractelement <1 x double> %vcvtf1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64> %vcvtf.i)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
+declare double @llvm.aarch64.neon.vcvtf64.s64(<1 x i64>)
define float @test_vcvts_f32_u32(i32 %a) {
; CHECK: test_vcvts_f32_u32
; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1.i = call <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
- %0 = extractelement <1 x float> %vcvtf1.i, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32> %vcvtf.i)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
+declare float @llvm.aarch64.neon.vcvtf32.u32(<1 x i32>)
define double @test_vcvtd_f64_u64(i64 %a) {
; CHECK: test_vcvtd_f64_u64
; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}
entry:
%vcvtf.i = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1.i = call <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
- %0 = extractelement <1 x double> %vcvtf1.i, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64> %vcvtf.i)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
+declare double @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
define float @test_vcvts_n_f32_s32(i32 %a) {
; CHECK: test_vcvts_n_f32_s32
; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
- %0 = extractelement <1 x float> %vcvtf1, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
define double @test_vcvtd_n_f64_s64(i64 %a) {
; CHECK: test_vcvtd_n_f64_s64
; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
- %0 = extractelement <1 x double> %vcvtf1, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
define float @test_vcvts_n_f32_u32(i32 %a) {
; CHECK: test_vcvts_n_f32_u32
; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
- %vcvtf1 = call <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
- %0 = extractelement <1 x float> %vcvtf1, i32 0
+ %0 = call float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
ret float %0
}
-declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+declare float @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
define double @test_vcvtd_n_f64_u64(i64 %a) {
; CHECK: test_vcvtd_n_f64_u64
; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
entry:
%vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
- %vcvtf1 = call <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
- %0 = extractelement <1 x double> %vcvtf1, i32 0
+ %0 = call double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
ret double %0
}
-declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
+declare double @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
define i32 @test_vcvts_n_s32_f32(float %a) {
; CHECK: test_vcvts_n_s32_f32