author     Tim Northover <tnorthover@apple.com>    2014-05-16 09:42:04 +0000
committer  Tim Northover <tnorthover@apple.com>    2014-05-16 09:42:04 +0000
commit     a9a94ce83924290417762594def7088bcf31d782
tree       436f499d2f6f51b21413288d8653af238307a5c9 /test/CodeGen/AArch64
parent     fbf4058e7a94b08d9d2f1b994f2d1bef1cd80115
TableGen: fix operand counting for aliases
TableGen has a fairly dubious heuristic to decide whether an alias should be
printed: does the alias have fewer operands than the real instruction? This is
bad enough (particularly with no way to override it), but it should at least be
calculated consistently for both strings.
This patch implements that logic: first get the *correct* string for the
variant, in the same way as the Matcher, without guessing; then count the
number of whitespace chars.
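As a rough sketch of that heuristic (hypothetical helper names, not the actual
TableGen AsmWriter emitter code), the comparison amounts to something like
this, in C++:

    #include <cassert>
    #include <string>

    // Hypothetical sketch of the heuristic described above: approximate an
    // asm string's operand count by counting the whitespace in the
    // variant-resolved string.
    static unsigned countWhitespace(const std::string &AsmString) {
      unsigned Count = 0;
      for (char C : AsmString)
        if (C == ' ' || C == '\t')
          ++Count;
      return Count;
    }

    // Print the alias only if it appears to have fewer operands than the
    // canonical instruction it expands to.
    static bool shouldPrintAlias(const std::string &AliasString,
                                 const std::string &InstString) {
      return countWhitespace(AliasString) < countWhitespace(InstString);
    }

    int main() {
      // "neg x0, x1" contains two whitespace characters, "sub x0, xzr, x1"
      // three, so the alias wins and "neg" is printed.
      assert(shouldPrintAlias("neg x0, x1", "sub x0, xzr, x1"));
      return 0;
    }

Because both strings are now resolved for the same variant before counting,
the two counts are finally computed on a consistent basis.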
There are basically four changes this brings about after the previous
commits; all of them appear to be improvements, so I have changed the tests
(a brief note on why each alias wins follows the list):
+ ARM64: we print "neg X, Y" instead of "sub X, xzr, Y".
+ ARM64: we skip implicit "uxtx" and "uxtw" modifiers.
+ Sparc: we print "mov A, B" instead of "or %g0, A, B".
+ Sparc: we print "fcmpX A, B" instead of "fcmpX %fcc0, A, B".
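In each case the printed alias has fewer whitespace-separated operands than
the canonical instruction it replaces (two in "mov A, B" versus three in
"or %g0, A, B", for instance), which is exactly the comparison sketched above.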
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208969 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/addsub-shifted.ll | 28 ++++++++++------------------
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/test/CodeGen/AArch64/addsub-shifted.ll b/test/CodeGen/AArch64/addsub-shifted.ll
index b7e5c9af80..f3fdbefb47 100644
--- a/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/test/CodeGen/AArch64/addsub-shifted.ll
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
-; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM64
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
 
 @var32 = global i32 0
 @var64 = global i64 0
@@ -36,8 +36,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   %shift4a = shl i32 %lhs4a, 15
   %val4a = sub i32 0, %shift4a
   store volatile i32 %val4a, i32* @var32
-; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
-; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsl #15
+; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
 
   %rhs5 = load volatile i64* @var64
   %shift5 = shl i64 %rhs5, 18
@@ -68,8 +67,7 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   %shift8a = shl i64 %lhs8a, 60
   %val8a = sub i64 0, %shift8a
   store volatile i64 %val8a, i64* @var64
-; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
-; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsl #60
+; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
 
   ret void
 ; CHECK: ret
@@ -102,8 +100,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   %shift4a = lshr i32 %lhs32, 15
   %val4a = sub i32 0, %shift4a
   store volatile i32 %val4a, i32* @var32
-; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
-; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, lsr #15
+; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
 
   %shift5 = lshr i64 %rhs64, 18
   %val5 = add i64 %lhs64, %shift5
@@ -129,8 +126,7 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   %shift8a = lshr i64 %lhs64, 45
   %val8a = sub i64 0, %shift8a
   store volatile i64 %val8a, i64* @var64
-; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
-; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, lsr #45
+; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
 
   ret void
 ; CHECK: ret
@@ -163,8 +159,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   %shift4a = ashr i32 %lhs32, 15
   %val4a = sub i32 0, %shift4a
   store volatile i32 %val4a, i32* @var32
-; CHECK-AARCH64: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
-; CHECK-ARM64: sub {{w[0-9]+}}, wzr, {{w[0-9]+}}, asr #15
+; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
 
   %shift5 = ashr i64 %rhs64, 18
   %val5 = add i64 %lhs64, %shift5
@@ -190,8 +185,7 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   %shift8a = ashr i64 %lhs64, 45
   %val8a = sub i64 0, %shift8a
   store volatile i64 %val8a, i64* @var64
-; CHECK-AARCH64: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
-; CHECK-ARM64: sub {{x[0-9]+}}, xzr, {{x[0-9]+}}, asr #45
+; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
 
   ret void
 ; CHECK: ret
@@ -252,8 +246,7 @@ define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
   br i1 %tst1, label %t2, label %end
 ; Important that this isn't lowered to a cmn instruction because if %rhs32 ==
 ; 0 then the results will differ.
-; CHECK-AARCH64: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
-; CHECK-ARM64: sub [[RHS:w[0-9]+]], wzr, {{w[0-9]+}}, lsl #13
+; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
 ; CHECK: cmp {{w[0-9]+}}, [[RHS]]
 
 t2:
@@ -276,8 +269,7 @@ t4:
   %tst4 = icmp slt i64 %lhs64, %val4
   br i1 %tst4, label %t5, label %end
 ; Again, it's important that cmn isn't used here in case %rhs64 == 0.
-; CHECK-AARCH64: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
-; CHECK-ARM64: sub [[RHS:x[0-9]+]], xzr, {{x[0-9]+}}, lsl #43
+; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
 ; CHECK: cmp {{x[0-9]+}}, [[RHS]]
 
 t5: