author    Tim Northover <tnorthover@apple.com>  2013-05-30 13:19:42 +0000
committer Tim Northover <tnorthover@apple.com>  2013-05-30 13:19:42 +0000
commit    15983b80a0ceb224b74d2ee5ef53d3eed37dc03b (patch)
tree      04df22eecc061cf808aa3a7a8a33ba02dc2b8ebf /test/CodeGen
parent    52d65ab72dcdb3b5de8b84743537355067819c31 (diff)
X86: use sub-register sequences for MOV*r0 operations
Instead of having a bunch of separate MOV8r0, MOV16r0, ... pseudo-instructions, it's better to use a single MOV32r0 (which will expand to "xorl %reg, %reg") and obtain other sizes with EXTRACT_SUBREG and SUBREG_TO_REG. The encoding is smaller and partial register updates can sometimes be avoided.

Until recently, this sequence was a barrier to rematerialization though. That should now be fixed so it's an appropriate time to make the change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@182928 91177308-0d34-0410-b5e6-96231b3b80d8
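To illustrate the intended effect on generated code, here is a minimal sketch in the style of the tests below (not part of this patch; the function name, RUN line and CHECK expectations are illustrative, and the exact register choice and output may differ). An i8 zero is now expected to come from a 32-bit xor of the full register, used via its 8-bit sub-register, rather than from an xorb:

; RUN: llc < %s -march=x86-64 | FileCheck %s
; The i8 zero should be materialised with MOV32r0 (xorl %eax, %eax) and then
; returned via the sub_8bit sub-register %al, instead of a separate xorb/MOV8r0.
; CHECK: zero8:
; CHECK: xorl %eax, %eax
; CHECK-NOT: xorb
define i8 @zero8() nounwind {
entry:
  ret i8 0
}

The xorb -> xorl updates in the existing tests below are checking for this same pattern.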
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/X86/2009-02-26-MachineLICMBug.ll  6
-rw-r--r--  test/CodeGen/X86/2011-09-14-valcoalesce.ll     37
-rw-r--r--  test/CodeGen/X86/fast-isel-divrem-x86-64.ll     2
-rw-r--r--  test/CodeGen/X86/fast-isel-divrem.ll            4
-rw-r--r--  test/CodeGen/X86/hoist-common.ll                2
-rw-r--r--  test/CodeGen/X86/licm-dominance.ll              2
-rw-r--r--  test/CodeGen/X86/licm-nested.ll                 2
-rw-r--r--  test/CodeGen/X86/lsr-interesting-step.ll        2
-rw-r--r--  test/CodeGen/X86/lsr-static-addr.ll             3
-rw-r--r--  test/CodeGen/X86/sibcall.ll                     4
-rw-r--r--  test/CodeGen/X86/tail-opts.ll                   4
-rw-r--r--  test/CodeGen/X86/zext-extract_subreg.ll         2
-rw-r--r--  test/CodeGen/X86/zext-sext.ll                   5
13 files changed, 51 insertions, 24 deletions
diff --git a/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll b/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
index 68a9fafb6d..8174fbdc9e 100644
--- a/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
+++ b/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc < %s -march=x86-64 -mattr=+sse3,+sse41 -mcpu=penryn -stats 2>&1 | grep "5 machine-licm"
+; RUN: llc < %s -march=x86-64 -mattr=+sse3,+sse41 -mcpu=penryn -stats 2>&1 | grep "4 machine-licm"
; RUN: llc < %s -march=x86-64 -mattr=+sse3,+sse41 -mcpu=penryn | FileCheck %s
; rdar://6627786
; rdar://7792037
@@ -15,11 +15,11 @@ entry:
bb4: ; preds = %bb.i, %bb26, %bb4, %entry
; CHECK: %bb4
-; CHECK: xorb
+; CHECK: xorl
; CHECK: callq
; CHECK: movq
; CHECK: xorl
-; CHECK: xorb
+; CHECK: xorl
%0 = call i32 (...)* @xxGetOffsetForCode(i32 undef) nounwind ; <i32> [#uses=0]
%ins = or i64 %p, 2097152 ; <i64> [#uses=1]
diff --git a/test/CodeGen/X86/2011-09-14-valcoalesce.ll b/test/CodeGen/X86/2011-09-14-valcoalesce.ll
index 6d91109daa..4e84e84c1a 100644
--- a/test/CodeGen/X86/2011-09-14-valcoalesce.ll
+++ b/test/CodeGen/X86/2011-09-14-valcoalesce.ll
@@ -14,22 +14,47 @@
; Prior to fixing PR10920 401.bzip miscompile, the coalescer would
; consider vreg1 and vreg27 to be copies of the same value. It would
; then remove one of the critical edge copies, which cannot safely be removed.
-;
+
+; There are two obvious ways the register-allocator could go here, either
+; reusing the pre-addition register later, or the post-addition one. Currently,
+; it does the latter, so we check:
+
; CHECK: # %while.body85.i
; CHECK-NOT: # %
; CHECK-NOT: add
; CHECK: movl %[[POSTR:e[abcdxi]+]], %[[PRER:e[abcdxi]+]]
; CHECK: addl %{{.*}}, %[[POSTR]]
; CHECK: # %while.end.i
-; CHECK: movl %[[POSTR]], %[[USER:e[abcdxi]+]]
+; CHECK-NOT: movl %[[POSTR]]
; CHECK: # %land.lhs.true.i
-; CHECK: movl %[[POSTR]], %[[USER]]
+; CHECK-NOT: movl %[[POSTR]]
; CHECK: # %land.lhs.true103.i
-; CHECK: movl %[[POSTR]], %[[USER]]
+; CHECK-NOT: movl %[[POSTR]]
; CHECK: # %if.then108.i
-; [[PRER] live out, so nothing on this path should define it.
-; CHECK-NOT: , %[[PRER]]
+; CHECK: movl %[[PRER]], %[[POSTR]]
; CHECK: # %if.end117.i
+; and use it for fprintf:
+; CHECK: movl %[[POSTR]], 12(%esp)
+
+
+; If it ever reverts to reusing the pre-addition register then we should
+; *probably* check this instead (it certainly worked last time):
+
+; CHECKALT: # %while.body85.i
+; CHECKALT-NOT: # %
+; CHECKALT-NOT: add
+; CHECKALT: movl %[[POSTR:e[abcdxi]+]], %[[PRER:e[abcdxi]+]]
+; CHECKALT: addl %{{.*}}, %[[POSTR]]
+; CHECKALT: # %while.end.i
+; CHECKALT: movl %[[POSTR]], %[[USER:e[abcdxi]+]]
+; CHECKALT: # %land.lhs.true.i
+; CHECKALT: movl %[[POSTR]], %[[USER]]
+; CHECKALT: # %land.lhs.true103.i
+; CHECKALT: movl %[[POSTR]], %[[USER]]
+; CHECKALT: # %if.then108.i
+; [[PRER]] is live out, so nothing on this path should define it.
+; CHECKALT-NOT: , %[[PRER]]
+; CHECKALT: # %if.end117.i
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32"
diff --git a/test/CodeGen/X86/fast-isel-divrem-x86-64.ll b/test/CodeGen/X86/fast-isel-divrem-x86-64.ll
index 45494f139e..f2afaa06bb 100644
--- a/test/CodeGen/X86/fast-isel-divrem-x86-64.ll
+++ b/test/CodeGen/X86/fast-isel-divrem-x86-64.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
define i64 @test_sdiv64(i64 %dividend, i64 %divisor) nounwind {
entry:
diff --git a/test/CodeGen/X86/fast-isel-divrem.ll b/test/CodeGen/X86/fast-isel-divrem.ll
index 7aba7f7b79..1a309a1ebc 100644
--- a/test/CodeGen/X86/fast-isel-divrem.ll
+++ b/test/CodeGen/X86/fast-isel-divrem.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
-; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-none-linux -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=i686-none-linux -fast-isel -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
define i8 @test_sdiv8(i8 %dividend, i8 %divisor) nounwind {
entry:
diff --git a/test/CodeGen/X86/hoist-common.ll b/test/CodeGen/X86/hoist-common.ll
index 72e17c065b..cdfdea3d98 100644
--- a/test/CodeGen/X86/hoist-common.ll
+++ b/test/CodeGen/X86/hoist-common.ll
@@ -8,7 +8,7 @@
define zeroext i1 @t(i32 %c) nounwind ssp {
entry:
; CHECK: t:
-; CHECK: xorb %al, %al
+; CHECK: xorl %eax, %eax
; CHECK: test
; CHECK: je
%tobool = icmp eq i32 %c, 0
diff --git a/test/CodeGen/X86/licm-dominance.ll b/test/CodeGen/X86/licm-dominance.ll
index 019f8a32b6..7e3c6fdf95 100644
--- a/test/CodeGen/X86/licm-dominance.ll
+++ b/test/CodeGen/X86/licm-dominance.ll
@@ -2,7 +2,7 @@
; MachineLICM should check dominance before hoisting instructions.
; CHECK: ## in Loop:
-; CHECK-NEXT: xorb %al, %al
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
diff --git a/test/CodeGen/X86/licm-nested.ll b/test/CodeGen/X86/licm-nested.ll
index 66074fb368..083ae0875e 100644
--- a/test/CodeGen/X86/licm-nested.ll
+++ b/test/CodeGen/X86/licm-nested.ll
@@ -1,5 +1,5 @@
; REQUIRES: asserts
-; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep "hoisted out of loops" | grep 3
+; RUN: llc -mtriple=x86_64-apple-darwin -march=x86-64 < %s -o /dev/null -stats -info-output-file - | grep "hoisted out of loops" | grep 4
; MachineLICM should be able to hoist the symbolic addresses out of
; the inner loops.
diff --git a/test/CodeGen/X86/lsr-interesting-step.ll b/test/CodeGen/X86/lsr-interesting-step.ll
index d1de0510a0..d4a7ac7da1 100644
--- a/test/CodeGen/X86/lsr-interesting-step.ll
+++ b/test/CodeGen/X86/lsr-interesting-step.ll
@@ -5,7 +5,7 @@
; CHECK: BB0_3:
; CHECK-NEXT: movb $0, flags(%rdx)
-; CHECK-NEXT: addq %rcx, %rdx
+; CHECK-NEXT: addq %rax, %rdx
; CHECK-NEXT: cmpq $8192, %rdx
; CHECK-NEXT: jl
diff --git a/test/CodeGen/X86/lsr-static-addr.ll b/test/CodeGen/X86/lsr-static-addr.ll
index b2aea90500..1bac790f57 100644
--- a/test/CodeGen/X86/lsr-static-addr.ll
+++ b/test/CodeGen/X86/lsr-static-addr.ll
@@ -10,8 +10,9 @@
; CHECK-NEXT: movsd
; CHECK-NEXT: incq %rax
-; ATOM: movsd .LCPI0_0(%rip), %xmm0
+
; ATOM: xorl %eax, %eax
+; ATOM: movsd .LCPI0_0(%rip), %xmm0
; ATOM: align
; ATOM-NEXT: BB0_2:
; ATOM-NEXT: movsd A(,%rax,8)
diff --git a/test/CodeGen/X86/sibcall.ll b/test/CodeGen/X86/sibcall.ll
index ceb79ea927..de98cb4bb6 100644
--- a/test/CodeGen/X86/sibcall.ll
+++ b/test/CodeGen/X86/sibcall.ll
@@ -279,7 +279,7 @@ entry:
; 32: jmp {{_?}}bar5
; 64: t17:
-; 64: xorb %al, %al
+; 64: xorl %eax, %eax
; 64: jmp {{_?}}bar5
tail call void (...)* @bar5() nounwind
ret void
@@ -295,7 +295,7 @@ entry:
; 32: fstp %st(0)
; 64: t18:
-; 64: xorb %al, %al
+; 64: xorl %eax, %eax
; 64: jmp {{_?}}bar6
%0 = tail call double (...)* @bar6() nounwind
ret void
diff --git a/test/CodeGen/X86/tail-opts.ll b/test/CodeGen/X86/tail-opts.ll
index 6e20af5866..75a728cb3d 100644
--- a/test/CodeGen/X86/tail-opts.ll
+++ b/test/CodeGen/X86/tail-opts.ll
@@ -118,7 +118,7 @@ altret:
; CHECK-NEXT: ucomiss %xmm{{[0-2]}}, %xmm{{[0-2]}}
; CHECK-NEXT: jbe .LBB2_2
; CHECK-NEXT: .LBB2_4:
-; CHECK-NEXT: xorb %al, %al
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: movb $1, %al
@@ -161,7 +161,7 @@ bb30:
; CHE: jmp .LBB3_11
; CHE-NEXT: .LBB3_9:
; CHE-NEXT: movq 8(%rax), %rax
-; CHE-NEXT: xorb %dl, %dl
+; CHE-NEXT: xorl %edx, %edx
; CHE-NEXT: movb 16(%rax), %al
; CHE-NEXT: cmpb $16, %al
; CHE-NEXT: je .LBB3_11
diff --git a/test/CodeGen/X86/zext-extract_subreg.ll b/test/CodeGen/X86/zext-extract_subreg.ll
index 168b898f12..7fa0574fc4 100644
--- a/test/CodeGen/X86/zext-extract_subreg.ll
+++ b/test/CodeGen/X86/zext-extract_subreg.ll
@@ -14,7 +14,7 @@ if.end: ; preds = %if.end.i
; CHECK: movl (%{{.*}}), [[REG:%[a-z]+]]
; CHECK-NOT: movl [[REG]], [[REG]]
; CHECK-NEXT: testl [[REG]], [[REG]]
-; CHECK-NEXT: xorb
+; CHECK-NEXT: xorl
%tmp138 = select i1 undef, i32 0, i32 %tmp7.i
%tmp867 = zext i32 %tmp138 to i64
br label %while.cond
diff --git a/test/CodeGen/X86/zext-sext.ll b/test/CodeGen/X86/zext-sext.ll
index 6432ae38ff..e4264aef4c 100644
--- a/test/CodeGen/X86/zext-sext.ll
+++ b/test/CodeGen/X86/zext-sext.ll
@@ -1,8 +1,9 @@
-; XFAIL: *
-; ...should pass. See PR12324: misched bringup
; RUN: llc < %s -march=x86-64 | FileCheck %s
; <rdar://problem/8006248>
+; This randomly started passing after an unrelated change; if it fails again it
+; might be worth looking at PR12324: misched bringup.
+
@llvm.used = appending global [1 x i8*] [i8* bitcast (void ([40 x i16]*, i32*, i16**, i64*)* @func to i8*)], section "llvm.metadata"
define void @func([40 x i16]* %a, i32* %b, i16** %c, i64* %d) nounwind {