Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-cvt.ll | 48
1 file changed, 48 insertions, 0 deletions
diff --git a/test/CodeGen/AArch64/neon-scalar-cvt.ll b/test/CodeGen/AArch64/neon-scalar-cvt.ll
index 0d9fdf3c97..056504a67e 100644
--- a/test/CodeGen/AArch64/neon-scalar-cvt.ll
+++ b/test/CodeGen/AArch64/neon-scalar-cvt.ll
@@ -47,3 +47,51 @@ entry:
}
declare <1 x double> @llvm.aarch64.neon.vcvtf64.u64(<1 x i64>)
+
+define float @test_vcvts_n_f32_s32(i32 %a) {
+; CHECK: test_vcvts_n_f32_s32
+; CHECK: scvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vcvtf1 = tail call <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32> %vcvtf, i32 1)
+ %0 = extractelement <1 x float> %vcvtf1, i32 0
+ ret float %0
+}
+
+declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.s32(<1 x i32>, i32)
+
+define double @test_vcvtd_n_f64_s64(i64 %a) {
+; CHECK: test_vcvtd_n_f64_s64
+; CHECK: scvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcvtf1 = tail call <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64> %vcvtf, i32 1)
+ %0 = extractelement <1 x double> %vcvtf1, i32 0
+ ret double %0
+}
+
+declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.s64(<1 x i64>, i32)
+
+define float @test_vcvts_n_f32_u32(i32 %a) {
+; CHECK: test_vcvts_n_f32_u32
+; CHECK: ucvtf {{s[0-9]+}}, {{s[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vcvtf1 = tail call <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32> %vcvtf, i32 1)
+ %0 = extractelement <1 x float> %vcvtf1, i32 0
+ ret float %0
+}
+
+declare <1 x float> @llvm.aarch64.neon.vcvtf32.n.u32(<1 x i32>, i32)
+
+define double @test_vcvtd_n_f64_u64(i64 %a) {
+; CHECK: test_vcvtd_n_f64_u64
+; CHECK: ucvtf {{d[0-9]+}}, {{d[0-9]+}}, #1
+entry:
+ %vcvtf = insertelement <1 x i64> undef, i64 %a, i32 0
+ %vcvtf1 = tail call <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64> %vcvtf, i32 1)
+ %0 = extractelement <1 x double> %vcvtf1, i32 0
+ ret double %0
+}
+
+declare <1 x double> @llvm.aarch64.neon.vcvtf64.n.u64(<1 x i64>, i32)
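
The test functions above are named after the ACLE scalar fixed-point conversion intrinsics of the same names in arm_neon.h. As a minimal C sketch of the usage these IR tests correspond to (assumed illustration, not part of the patch; build with an AArch64 toolchain, e.g. clang --target=aarch64-linux-gnu -O2):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    /* scvtf/ucvtf with an #fbits operand convert a fixed-point integer to
       floating point by dividing it by 2^fbits, so with 1 fractional bit
       the result is the input halved. */
    float  f32 = vcvts_n_f32_s32(3, 1);    /* 3 / 2 = 1.5f, expected to lower to scvtf s, s, #1 */
    double f64 = vcvtd_n_f64_u64(7ULL, 1); /* 7 / 2 = 3.5,  expected to lower to ucvtf d, d, #1 */
    printf("%f %f\n", f32, f64);
    return 0;
}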