diff options
author | Tim Northover <tnorthover@apple.com> | 2014-02-04 14:55:42 +0000 |
---|---|---|
committer | Tim Northover <tnorthover@apple.com> | 2014-02-04 14:55:42 +0000 |
commit | f9ced85e49f530c1cc3559b881c4b28a29408985 (patch) | |
tree | ade9a3397acf4b9fe634632ece72756d0a1fedff /test/CodeGen/AArch64/neon-scalar-compare.ll | |
parent | 8753ba91d28cd0fe7e6767466dec320b1fe2af86 (diff) | |
download | llvm-f9ced85e49f530c1cc3559b881c4b28a29408985.tar.gz llvm-f9ced85e49f530c1cc3559b881c4b28a29408985.tar.bz2 llvm-f9ced85e49f530c1cc3559b881c4b28a29408985.tar.xz |
ARM & AArch64: merge NEON absolute compare intrinsics
There was an extremely confusing proliferation of LLVM intrinsics to implement
the vacge & vacgt instructions. This combines them all into two polymorphic
intrinsics, shared across both backends.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200768 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/AArch64/neon-scalar-compare.ll')
-rw-r--r-- | test/CodeGen/AArch64/neon-scalar-compare.ll | 12 |
1 files changed, 6 insertions, 6 deletions
diff --git a/test/CodeGen/AArch64/neon-scalar-compare.ll b/test/CodeGen/AArch64/neon-scalar-compare.ll index d1b5f9c254..e1f39645f1 100644 --- a/test/CodeGen/AArch64/neon-scalar-compare.ll +++ b/test/CodeGen/AArch64/neon-scalar-compare.ll @@ -122,28 +122,28 @@ entry: define <1 x i64> @test_vcage_f64(<1 x double> %a, <1 x double> %b) #0 { ; CHECK: test_vcage_f64 ; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} - %vcage2.i = tail call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %a, <1 x double> %b) #2 + %vcage2.i = tail call <1 x i64> @llvm.arm.neon.vacge.v1i64.v1f64(<1 x double> %a, <1 x double> %b) #2 ret <1 x i64> %vcage2.i } define <1 x i64> @test_vcagt_f64(<1 x double> %a, <1 x double> %b) #0 { ; CHECK: test_vcagt_f64 ; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} - %vcagt2.i = tail call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %a, <1 x double> %b) #2 + %vcagt2.i = tail call <1 x i64> @llvm.arm.neon.vacgt.v1i64.v1f64(<1 x double> %a, <1 x double> %b) #2 ret <1 x i64> %vcagt2.i } define <1 x i64> @test_vcale_f64(<1 x double> %a, <1 x double> %b) #0 { ; CHECK: test_vcale_f64 ; CHECK: facge {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} - %vcage2.i = tail call <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double> %b, <1 x double> %a) #2 + %vcage2.i = tail call <1 x i64> @llvm.arm.neon.vacge.v1i64.v1f64(<1 x double> %b, <1 x double> %a) #2 ret <1 x i64> %vcage2.i } define <1 x i64> @test_vcalt_f64(<1 x double> %a, <1 x double> %b) #0 { ; CHECK: test_vcalt_f64 ; CHECK: facgt {{d[0-9]}}, {{d[0-9]}}, {{d[0-9]}} - %vcagt2.i = tail call <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double> %b, <1 x double> %a) #2 + %vcagt2.i = tail call <1 x i64> @llvm.arm.neon.vacgt.v1i64.v1f64(<1 x double> %b, <1 x double> %a) #2 ret <1 x i64> %vcagt2.i } @@ -331,8 +331,8 @@ define <1 x i64> @test_vcltz_s64(<1 x i64> %a) #0 { ret <1 x i64> %vcltz.i } -declare <1 x i64> @llvm.aarch64.neon.vcagt.v1i64.v1f64.v1f64(<1 x double>, <1 x double>)
-declare <1 x i64> @llvm.aarch64.neon.vcage.v1i64.v1f64.v1f64(<1 x double>, <1 x double>) +declare <1 x i64> @llvm.arm.neon.vacgt.v1i64.v1f64(<1 x double>, <1 x double>) +declare <1 x i64> @llvm.arm.neon.vacge.v1i64.v1f64(<1 x double>, <1 x double>) declare <1 x i64> @llvm.aarch64.neon.vtstd.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) declare <1 x i64> @llvm.aarch64.neon.vcltz.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>) declare <1 x i64> @llvm.aarch64.neon.vchs.v1i64.v1i64.v1i64(<1 x i64>, <1 x i64>)