author    Chad Rosier <mcrosier@codeaurora.org>    2013-12-09 22:47:34 +0000
committer Chad Rosier <mcrosier@codeaurora.org>    2013-12-09 22:47:34 +0000
commit    97eda18693aeecacd996441c61b3842a228c1f1c (patch)
tree      b43971b3bf415d6efcaccd7884f729420a137617 /test/CodeGen/AArch64
parent    6c6344e6a95130b61ba8491b5db262e8a171f30b (diff)
[AArch64] Refactor NEON scalar reduce pairwise front-end codegen to remove
unnecessary patterns in tablegen.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196832 91177308-0d34-0410-b5e6-96231b3b80d8
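In short, the test updates below switch each floating-point reduction from a dedicated per-reduction intrinsic (vaddv, vmaxv, vminv, vmaxnmv, vminnmv) to the shared pairwise intrinsics (vpfadd, vpmax, vpmin, vpfmaxnm, vpfminnm), which is what allows the duplicate tablegen patterns to be removed. A minimal sketch of the rename in LLVM IR, using only the declarations and call shapes that appear in this test (the function name @vaddv_sketch is illustrative):

; Old front-end output: a dedicated reduce intrinsic, matched by its own
; tablegen pattern.
declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)

; New front-end output: the generic pairwise-add intrinsic, reusing the
; pattern that already selects faddp.
declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float>)

define float @vaddv_sketch(<2 x float> %a) {
  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
  %2 = extractelement <1 x float> %1, i32 0
  ret float %2
}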
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll  44
1 file changed, 12 insertions(+), 32 deletions(-)
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
index 401ceec255..3da90365d6 100644
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -103,7 +103,7 @@ define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
define float @test_vaddv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vaddv_f32
; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
%2 = extractelement <1 x float> %1, i32 0
ret float %2
}
@@ -112,7 +112,7 @@ define float @test_vaddvq_f32(<4 x float> %a) {
; CHECK-LABEL: test_vaddvq_f32
; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float> %a)
%2 = extractelement <1 x float> %1, i32 0
ret float %2
}
@@ -120,7 +120,7 @@ define float @test_vaddvq_f32(<4 x float> %a) {
define double @test_vaddvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vaddvq_f64
; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
%2 = extractelement <1 x double> %1, i32 0
ret double %2
}
@@ -128,7 +128,7 @@ define double @test_vaddvq_f64(<2 x double> %a) {
define float @test_vmaxv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vmaxv_f32
; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
%2 = extractelement <1 x float> %1, i32 0
ret float %2
}
@@ -136,7 +136,7 @@ define float @test_vmaxv_f32(<2 x float> %a) {
define double @test_vmaxvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vmaxvq_f64
; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
%2 = extractelement <1 x double> %1, i32 0
ret double %2
}
@@ -144,7 +144,7 @@ define double @test_vmaxvq_f64(<2 x double> %a) {
define float @test_vminv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vminv_f32
; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
%2 = extractelement <1 x float> %1, i32 0
ret float %2
}
@@ -152,7 +152,7 @@ define float @test_vminv_f32(<2 x float> %a) {
define double @test_vminvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vminvq_f64
; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
%2 = extractelement <1 x double> %1, i32 0
ret double %2
}
@@ -160,7 +160,7 @@ define double @test_vminvq_f64(<2 x double> %a) {
define double @test_vmaxnmvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vmaxnmvq_f64
; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
%2 = extractelement <1 x double> %1, i32 0
ret double %2
}
@@ -168,7 +168,7 @@ define double @test_vmaxnmvq_f64(<2 x double> %a) {
define float @test_vmaxnmv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vmaxnmv_f32
; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
%2 = extractelement <1 x float> %1, i32 0
ret float %2
}
@@ -176,7 +176,7 @@ define float @test_vmaxnmv_f32(<2 x float> %a) {
define double @test_vminnmvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vminnmvq_f64
; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+ %1 = tail call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
%2 = extractelement <1 x double> %1, i32 0
ret double %2
}
@@ -184,7 +184,7 @@ define double @test_vminnmvq_f64(<2 x double> %a) {
define float @test_vminnmv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vminnmv_f32
; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+ %1 = tail call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
%2 = extractelement <1 x float> %1, i32 0
ret float %2
}
@@ -223,24 +223,4 @@ declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)

declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)

-declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
+declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float>)
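For reference, the CHECK lines above are exercised by the test's RUN line, which sits outside the hunks shown. A minimal sketch, assuming the llc/FileCheck invocation conventional for the AArch64 NEON tests of this period (not confirmed by the diff itself):

; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s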