path: root/test/CodeGen/PowerPC
author    Hal Finkel <hfinkel@anl.gov>  2013-04-19 22:08:38 +0000
committer Hal Finkel <hfinkel@anl.gov>  2013-04-19 22:08:38 +0000
commit    87c1e42be7dadaea7c3e00fb7ccbd77633cea37f (patch)
tree      dda7859621dda1b18b14d7f0e10339a9c9c4288e /test/CodeGen/PowerPC
parent    a58d67af29d38fa37c94f59af37db9df75f349be (diff)
Fix PPC optimizeCompareInstr swapped-sub argument handling
When matching a compare with a subtract where the arguments of the compare are swapped w.r.t. the arguments of the subtract, we need to negate the predicates (or CR bit indices) of the users. This, however, is not the same as inverting the predicate (negating LT -> GT, but inverting LT -> GE, for example). The ARM backend seems to do this correctly, but when I adapted the code for the PPC backend, I introduced an error in this logic.

Comparison optimization is now enabled again by default.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179899 91177308-0d34-0410-b5e6-96231b3b80d8
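For illustration only, below is a minimal standalone C++ sketch of the distinction the message describes; it is not the PPC backend code, and the enum and helper names (Pred, swapPredicate, invertPredicate) are hypothetical. Swapping the predicate is the correct transform when the compare's operands are reversed with respect to the subtract's operands; inverting it (logical negation) is not.

#include <cassert>

// Hypothetical predicate enum modelling signed integer comparisons.
enum Pred { LT, LE, GT, GE, EQ, NE };

// Correct transform for swapped operands: cmp(a, b) reused against sub(b, a).
// a < b  <=>  b > a, so LT becomes GT (EQ/NE are symmetric and unchanged).
Pred swapPredicate(Pred P) {
  switch (P) {
  case LT: return GT;
  case LE: return GE;
  case GT: return LT;
  case GE: return LE;
  case EQ: return EQ;
  case NE: return NE;
  }
  return P;
}

// Logical negation: NOT (a < b) is a >= b. This is the wrong transform for
// the swapped-operand case and corresponds to the bug fixed by this commit.
Pred invertPredicate(Pred P) {
  switch (P) {
  case LT: return GE;
  case LE: return GT;
  case GT: return LE;
  case GE: return LT;
  case EQ: return NE;
  case NE: return EQ;
  }
  return P;
}

int main() {
  // Negating (swapping) LT yields GT; inverting LT yields GE.
  assert(swapPredicate(LT) == GT);
  assert(invertPredicate(LT) == GE);
  return 0;
}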
Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--  test/CodeGen/PowerPC/optcmp.ll  |  42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/PowerPC/optcmp.ll b/test/CodeGen/PowerPC/optcmp.ll
index d348541dc5..1fce464dd3 100644
--- a/test/CodeGen/PowerPC/optcmp.ll
+++ b/test/CodeGen/PowerPC/optcmp.ll
@@ -54,6 +54,48 @@ entry:
; CHECK: std [[REG]], 0(5)
}
+define i64 @foolc(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %b, %a
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp sgt i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foolc
+; CHECK: subf. [[REG:[0-9]+]], 3, 4
+; CHECK: isel 3, 3, 4, 0
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foold(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %b, %a
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp eq i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foold
+; CHECK: subf. [[REG:[0-9]+]], 3, 4
+; CHECK: isel 3, 3, 4, 2
+; CHECK: std [[REG]], 0(5)
+}
+
+define i64 @foold2(i64 %a, i64 %b, i64* nocapture %c) #0 {
+entry:
+ %sub = sub nsw i64 %a, %b
+ store i64 %sub, i64* %c, align 8, !tbaa !3
+ %cmp = icmp eq i64 %a, %b
+ %cond = select i1 %cmp, i64 %a, i64 %b
+ ret i64 %cond
+
+; CHECK: @foold2
+; CHECK: subf. [[REG:[0-9]+]], 4, 3
+; CHECK: isel 3, 3, 4, 2
+; CHECK: std [[REG]], 0(5)
+}
+
define i64 @foo2l(i64 %a, i64 %b, i64* nocapture %c) #0 {
entry:
%shl = shl i64 %a, %b