author     Chad Rosier <mcrosier@codeaurora.org>  2013-12-09 22:47:38 +0000
committer  Chad Rosier <mcrosier@codeaurora.org>  2013-12-09 22:47:38 +0000
commit  e02fa056d91064c00182e8fed8c585f992e4100b (patch)
tree    24236762bd550685579649dd4b3b3affdb06e4b7 /test/CodeGen
parent  97eda18693aeecacd996441c61b3842a228c1f1c (diff)
[AArch64] Refactor the NEON scalar reduce pairwise intrinsics so that they use
float/double rather than the vector equivalents when appropriate.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196833 91177308-0d34-0410-b5e6-96231b3b80d8
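In short: each of these pairwise-reduction intrinsics previously returned a one-element vector (<1 x float> or <1 x double>), forcing callers to extractelement the scalar out; after the refactor they return the scalar directly. A minimal sketch distilled from the patch below (the function name @faddp_example is illustrative, not from the test file):

    ; New scalar-returning declaration, as introduced by this patch:
    declare float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float>)

    ; Old form: return a one-element vector, then extract lane 0:
    ;   %v = call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
    ;   %s = extractelement <1 x float> %v, i32 0

    ; New form: the intrinsic yields the scalar directly:
    define float @faddp_example(<2 x float> %a) {
      %s = call float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float> %a)
      ret float %s
    }

This should lower to a single faddp on an s register, which is what the CHECK lines in the updated tests verify.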
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll  165
1 file changed, 77 insertions, 88 deletions
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
index 3da90365d6..33ce5cf6ce 100644
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -9,204 +9,193 @@ define <1 x i64> @test_addp_v1i64(<2 x i64> %a) {
ret <1 x i64> %val
}
-declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float>)
-define <1 x float> @test_faddp_v1f32(<2 x float> %a) {
-; CHECK: test_faddp_v1f32:
+define float @test_faddp_f32(<2 x float> %a) {
+; CHECK: test_faddp_f32:
; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %val = call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
- ret <1 x float> %val
+ %val = call float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float> %a)
+ ret float %val
}
-declare <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpfadd.f64.v2f64(<2 x double>)
-define <1 x double> @test_faddp_v1f64(<2 x double> %a) {
-; CHECK: test_faddp_v1f64:
+define double @test_faddp_f64(<2 x double> %a) {
+; CHECK: test_faddp_f64:
; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %val = call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
- ret <1 x double> %val
+ %val = call double @llvm.aarch64.neon.vpfadd.f64.v2f64(<2 x double> %a)
+ ret double %val
}
-declare <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpmax.f32.v2f32(<2 x float>)
-define <1 x float> @test_fmaxp_v1f32(<2 x float> %a) {
-; CHECK: test_fmaxp_v1f32:
+define float @test_fmaxp_f32(<2 x float> %a) {
+; CHECK: test_fmaxp_f32:
; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %val = call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
- ret <1 x float> %val
+ %val = call float @llvm.aarch64.neon.vpmax.f32.v2f32(<2 x float> %a)
+ ret float %val
}
-declare <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpmax.f64.v2f64(<2 x double>)
-define <1 x double> @test_fmaxp_v1f64(<2 x double> %a) {
-; CHECK: test_fmaxp_v1f64:
+define double @test_fmaxp_f64(<2 x double> %a) {
+; CHECK: test_fmaxp_f64:
; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %val = call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
- ret <1 x double> %val
+ %val = call double @llvm.aarch64.neon.vpmax.f64.v2f64(<2 x double> %a)
+ ret double %val
}
-declare <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpmin.f32.v2f32(<2 x float>)
-define <1 x float> @test_fminp_v1f32(<2 x float> %a) {
-; CHECK: test_fminp_v1f32:
+define float @test_fminp_f32(<2 x float> %a) {
+; CHECK: test_fminp_f32:
; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %val = call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
- ret <1 x float> %val
+ %val = call float @llvm.aarch64.neon.vpmin.f32.v2f32(<2 x float> %a)
+ ret float %val
}
-declare <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpmin.f64.v2f64(<2 x double>)
-define <1 x double> @test_fminp_v1f64(<2 x double> %a) {
-; CHECK: test_fminp_v1f64:
+define double @test_fminp_f64(<2 x double> %a) {
+; CHECK: test_fminp_f64:
; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %val = call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
- ret <1 x double> %val
+ %val = call double @llvm.aarch64.neon.vpmin.f64.v2f64(<2 x double> %a)
+ ret double %val
}
-declare <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpfmaxnm.f32.v2f32(<2 x float>)
-define <1 x float> @test_fmaxnmp_v1f32(<2 x float> %a) {
-; CHECK: test_fmaxnmp_v1f32:
+define float @test_fmaxnmp_f32(<2 x float> %a) {
+; CHECK: test_fmaxnmp_f32:
; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %val = call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
- ret <1 x float> %val
+ %val = call float @llvm.aarch64.neon.vpfmaxnm.f32.v2f32(<2 x float> %a)
+ ret float %val
}
-declare <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpfmaxnm.f64.v2f64(<2 x double>)
-define <1 x double> @test_fmaxnmp_v1f64(<2 x double> %a) {
-; CHECK: test_fmaxnmp_v1f64:
+define double @test_fmaxnmp_f64(<2 x double> %a) {
+; CHECK: test_fmaxnmp_f64:
; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %val = call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
- ret <1 x double> %val
+ %val = call double @llvm.aarch64.neon.vpfmaxnm.f64.v2f64(<2 x double> %a)
+ ret double %val
}
-declare <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float>)
+declare float @llvm.aarch64.neon.vpfminnm.f32.v2f32(<2 x float>)
-define <1 x float> @test_fminnmp_v1f32(<2 x float> %a) {
-; CHECK: test_fminnmp_v1f32:
+define float @test_fminnmp_f32(<2 x float> %a) {
+; CHECK: test_fminnmp_f32:
; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %val = call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
- ret <1 x float> %val
+ %val = call float @llvm.aarch64.neon.vpfminnm.f32.v2f32(<2 x float> %a)
+ ret float %val
}
-declare <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double>)
+declare double @llvm.aarch64.neon.vpfminnm.f64.v2f64(<2 x double>)
-define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
-; CHECK: test_fminnmp_v1f64:
+define double @test_fminnmp_f64(<2 x double> %a) {
+; CHECK: test_fminnmp_f64:
; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %val = call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
- ret <1 x double> %val
+ %val = call double @llvm.aarch64.neon.vpfminnm.f64.v2f64(<2 x double> %a)
+ ret double %val
}
define float @test_vaddv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vaddv_f32
; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
+ %1 = call float @llvm.aarch64.neon.vpfadd.f32.v2f32(<2 x float> %a)
+ ret float %1
}
define float @test_vaddvq_f32(<4 x float> %a) {
; CHECK-LABEL: test_vaddvq_f32
; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
+ %1 = call float @llvm.aarch64.neon.vpfadd.f32.v4f32(<4 x float> %a)
+ ret float %1
}
define double @test_vaddvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vaddvq_f64
; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
+ %1 = call double @llvm.aarch64.neon.vpfadd.f64.v2f64(<2 x double> %a)
+ ret double %1
}
define float @test_vmaxv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vmaxv_f32
; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
+ %1 = call float @llvm.aarch64.neon.vpmax.f32.v2f32(<2 x float> %a)
+ ret float %1
}
define double @test_vmaxvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vmaxvq_f64
; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
+ %1 = call double @llvm.aarch64.neon.vpmax.f64.v2f64(<2 x double> %a)
+ ret double %1
}
define float @test_vminv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vminv_f32
; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
+ %1 = call float @llvm.aarch64.neon.vpmin.f32.v2f32(<2 x float> %a)
+ ret float %1
}
define double @test_vminvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vminvq_f64
; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
+ %1 = call double @llvm.aarch64.neon.vpmin.f64.v2f64(<2 x double> %a)
+ ret double %1
}
define double @test_vmaxnmvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vmaxnmvq_f64
; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
+ %1 = call double @llvm.aarch64.neon.vpfmaxnm.f64.v2f64(<2 x double> %a)
+ ret double %1
}
define float @test_vmaxnmv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vmaxnmv_f32
; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
+ %1 = call float @llvm.aarch64.neon.vpfmaxnm.f32.v2f32(<2 x float> %a)
+ ret float %1
}
define double @test_vminnmvq_f64(<2 x double> %a) {
; CHECK-LABEL: test_vminnmvq_f64
; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
- %2 = extractelement <1 x double> %1, i32 0
- ret double %2
+ %1 = call double @llvm.aarch64.neon.vpfminnm.f64.v2f64(<2 x double> %a)
+ ret double %1
}
define float @test_vminnmv_f32(<2 x float> %a) {
; CHECK-LABEL: test_vminnmv_f32
; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
- %1 = tail call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
- %2 = extractelement <1 x float> %1, i32 0
- ret float %2
+ %1 = call float @llvm.aarch64.neon.vpfminnm.f32.v2f32(<2 x float> %a)
+ ret float %1
}
define <2 x i64> @test_vpaddq_s64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vpaddq_s64
; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+ %1 = call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
ret <2 x i64> %1
}
define <2 x i64> @test_vpaddq_u64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: test_vpaddq_u64
; CHECK: addp {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %1 = tail call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
+ %1 = call <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64> %a, <2 x i64> %b)
ret <2 x i64> %1
}
define i64 @test_vaddvq_s64(<2 x i64> %a) {
; CHECK-LABEL: test_vaddvq_s64
; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+ %1 = call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
%2 = extractelement <1 x i64> %1, i32 0
ret i64 %2
}
@@ -214,7 +203,7 @@ define i64 @test_vaddvq_s64(<2 x i64> %a) {
define i64 @test_vaddvq_u64(<2 x i64> %a) {
; CHECK-LABEL: test_vaddvq_u64
; CHECK: addp {{d[0-9]+}}, {{v[0-9]+}}.2d
- %1 = tail call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
+ %1 = call <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64> %a)
%2 = extractelement <1 x i64> %1, i32 0
ret i64 %2
}
@@ -223,4 +212,4 @@ declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
-declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float>)
+declare float @llvm.aarch64.neon.vpfadd.f32.v4f32(<4 x float>)