author    Chad Rosier <mcrosier@codeaurora.org>    2013-12-09 22:47:31 +0000
committer Chad Rosier <mcrosier@codeaurora.org>    2013-12-09 22:47:31 +0000
commit    6c6344e6a95130b61ba8491b5db262e8a171f30b (patch)
tree      83b1ddfdaf1f132c6b68346bfebd1a0d5e8d0525 /test/CodeGen
parent    89d8470356c66cd964829b21892255f440a3e833 (diff)
[AArch64] Remove q and non-q intrinsic definitions in the NEON scalar reduce
pairwise implementation, using an overloaded definition instead.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196831 91177308-0d34-0410-b5e6-96231b3b80d8
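
At the IR level, the overload is resolved by mangling the concrete return and operand types into the intrinsic name, which is what the updated declarations in the test below exercise. A minimal illustration, drawn from the faddp declarations in this diff:

    ; Before: separate non-q and q intrinsics, one per vector width.
    declare <1 x float>  @llvm.aarch64.neon.vpfadd(<2 x float>)
    declare <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double>)

    ; After: a single overloaded intrinsic, mangled with the concrete types.
    declare <1 x float>  @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float>)
    declare <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double>)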
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll | 87
1 file changed, 43 insertions, 44 deletions
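
The rewritten CHECK lines also drop the hard-coded registers (s0, d0, v0) in favour of FileCheck regex patterns, so the test no longer depends on which registers the allocator picks. A sketch of what the updated patterns accept:

    ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
    ; Matches "faddp s0, v0.2s" as well as "faddp s3, v7.2s": anything inside
    ; {{...}} is treated by FileCheck as a regular expression.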
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
index 80e8dc339d..401ceec255 100644
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -4,101 +4,100 @@ declare <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64>)
define <1 x i64> @test_addp_v1i64(<2 x i64> %a) {
; CHECK: test_addp_v1i64:
- %val = call <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64> %a)
-; CHECK: addp d0, v0.2d
- ret <1 x i64> %val
+; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %val = call <1 x i64> @llvm.aarch64.neon.vpadd(<2 x i64> %a)
+ ret <1 x i64> %val
}
-declare <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float>)
+declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float>)
define <1 x float> @test_faddp_v1f32(<2 x float> %a) {
; CHECK: test_faddp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpfadd(<2 x float> %a)
-; CHECK: faddp s0, v0.2s
- ret <1 x float> %val
+; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %val = call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
+ ret <1 x float> %val
}
-declare <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double>)
+declare <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double>)
define <1 x double> @test_faddp_v1f64(<2 x double> %a) {
; CHECK: test_faddp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpfaddq(<2 x double> %a)
-; CHECK: faddp d0, v0.2d
- ret <1 x double> %val
+; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %val = call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
+ ret <1 x double> %val
}
-declare <1 x float> @llvm.aarch64.neon.vpmax(<2 x float>)
+declare <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float>)
define <1 x float> @test_fmaxp_v1f32(<2 x float> %a) {
; CHECK: test_fmaxp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpmax(<2 x float> %a)
-; CHECK: fmaxp s0, v0.2s
- ret <1 x float> %val
+; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %val = call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
+ ret <1 x float> %val
}
-declare <1 x double> @llvm.aarch64.neon.vpmaxq(<2 x double>)
+declare <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double>)
define <1 x double> @test_fmaxp_v1f64(<2 x double> %a) {
; CHECK: test_fmaxp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpmaxq(<2 x double> %a)
-; CHECK: fmaxp d0, v0.2d
- ret <1 x double> %val
+; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %val = call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
+ ret <1 x double> %val
}
-
-declare <1 x float> @llvm.aarch64.neon.vpmin(<2 x float>)
+declare <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float>)
define <1 x float> @test_fminp_v1f32(<2 x float> %a) {
; CHECK: test_fminp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpmin(<2 x float> %a)
-; CHECK: fminp s0, v0.2s
- ret <1 x float> %val
+; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %val = call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
+ ret <1 x float> %val
}
-declare <1 x double> @llvm.aarch64.neon.vpminq(<2 x double>)
+declare <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double>)
define <1 x double> @test_fminp_v1f64(<2 x double> %a) {
; CHECK: test_fminp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpminq(<2 x double> %a)
-; CHECK: fminp d0, v0.2d
- ret <1 x double> %val
+; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %val = call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
+ ret <1 x double> %val
}
-declare <1 x float> @llvm.aarch64.neon.vpfmaxnm(<2 x float>)
+declare <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float>)
define <1 x float> @test_fmaxnmp_v1f32(<2 x float> %a) {
; CHECK: test_fmaxnmp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpfmaxnm(<2 x float> %a)
-; CHECK: fmaxnmp s0, v0.2s
- ret <1 x float> %val
+; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %val = call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
+ ret <1 x float> %val
}
-declare <1 x double> @llvm.aarch64.neon.vpfmaxnmq(<2 x double>)
+declare <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double>)
define <1 x double> @test_fmaxnmp_v1f64(<2 x double> %a) {
; CHECK: test_fmaxnmp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpfmaxnmq(<2 x double> %a)
-; CHECK: fmaxnmp d0, v0.2d
- ret <1 x double> %val
+; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %val = call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
+ ret <1 x double> %val
}
-declare <1 x float> @llvm.aarch64.neon.vpfminnm(<2 x float>)
+declare <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float>)
define <1 x float> @test_fminnmp_v1f32(<2 x float> %a) {
; CHECK: test_fminnmp_v1f32:
- %val = call <1 x float> @llvm.aarch64.neon.vpfminnm(<2 x float> %a)
-; CHECK: fminnmp s0, v0.2s
- ret <1 x float> %val
+; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
+ %val = call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
+ ret <1 x float> %val
}
-declare <1 x double> @llvm.aarch64.neon.vpfminnmq(<2 x double>)
+declare <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double>)
define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
; CHECK: test_fminnmp_v1f64:
- %val = call <1 x double> @llvm.aarch64.neon.vpfminnmq(<2 x double> %a)
-; CHECK: fminnmp d0, v0.2d
- ret <1 x double> %val
+; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
+ %val = call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
+ ret <1 x double> %val
}
define float @test_vaddv_f32(<2 x float> %a) {