summaryrefslogtreecommitdiff
path: root/lib/Target/README.txt
diff options
context:
space:
mode:
authorChris Lattner <sabre@nondot.org>2011-02-17 01:43:46 +0000
committerChris Lattner <sabre@nondot.org>2011-02-17 01:43:46 +0000
commitcb40195641660cf13ef8dab909aa3f76185e9900 (patch)
treec805cde079f4fe96be27060046a29472f13a4d17 /lib/Target/README.txt
parentde1d8a544c1398cc34d4c865c5afa8b91f96316c (diff)
downloadllvm-cb40195641660cf13ef8dab909aa3f76185e9900.tar.gz
llvm-cb40195641660cf13ef8dab909aa3f76185e9900.tar.bz2
llvm-cb40195641660cf13ef8dab909aa3f76185e9900.tar.xz
add some notes on compares + binops. Remove redundant entries.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@125702 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/README.txt')
-rw-r--r--lib/Target/README.txt67
1 files changed, 47 insertions, 20 deletions
diff --git a/lib/Target/README.txt b/lib/Target/README.txt
index c6f384cd6e..1a33aa8ed8 100644
--- a/lib/Target/README.txt
+++ b/lib/Target/README.txt
@@ -873,6 +873,12 @@ rshift_gt (unsigned int a)
if ((a >> 2) > 5)
bar ();
}
+
+void neg_eq_cst(unsigned int a) {
+if (-a == 123)
+bar();
+}
+
All should simplify to a single comparison. All of these are
currently not optimized with "clang -emit-llvm-bc | opt
-std-compile-opts".
@@ -2219,31 +2225,52 @@ avoids partial register stalls in some important cases.
//===---------------------------------------------------------------------===//
-Some missed instcombine xforms (from GCC PR14753):
+We should fold compares like this:
-void bar (void);
+ %1266 = add nsw i32 %.84.i.i.i, 1
+ %560 = add nsw i32 %556, 1
+ %1267 = icmp slt i32 %1266, %560
-void mask_gt (unsigned int a) {
-/* This is equivalent to a > 15. */
-if ((a & ~7) > 8)
-bar();
-}
+to a single 'icmp slt' when the adds have a single use, since they are NSW.
-void neg_eq_cst(unsigned int a) {
-if (-a == 123)
-bar();
-}
+//===---------------------------------------------------------------------===//
-void minus_cst(unsigned int a) {
-if (20 - a == 5)
-bar();
-}
+We don't fold (icmp (add) (add)) unless the two adds only have a single use.
+There are a lot of cases that we're refusing to fold in (e.g.) 256.bzip2, for
+example:
-void rotate_cst (unsigned a) {
-a = (a << 10) | (a >> 22);
-if (a == 123)
-bar ();
-}
+ %indvar.next90 = add i64 %indvar89, 1 ;; Has 2 uses
+ %tmp96 = add i64 %tmp95, 1 ;; Has 1 use
+ %exitcond97 = icmp eq i64 %indvar.next90, %tmp96
+
+We don't fold this because we don't want to introduce an overlapped live range
+of the ivar. However, we can make this more aggressive without causing
+performance issues in two ways:
+
+1. If *either* the LHS or RHS has a single use, we can definitely do the
+ transformation. In the overlapping liverange case we're trading one register
+ use for one fewer operation, which is a reasonable trade. Before doing this
+ we should verify that the llc output actually shrinks for some benchmarks.
+2. If both ops have multiple uses, we can still fold it if the operations are
+ both sinkable to *after* the icmp (e.g. in a subsequent block) which doesn't
+ increase register pressure.
+
+There are a ton of icmps we aren't simplifying because of the reg pressure
+concern. Care is warranted here though because many of these are induction
+variables and other cases that matter a lot to performance, like the above.
+Here's a blob of code that you can drop into the bottom of visitICmp to see some
+missed cases:
+
+ { Value *A, *B, *C, *D;
+ if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
+ match(Op1, m_Add(m_Value(C), m_Value(D))) &&
+ (A == C || A == D || B == C || B == D)) {
+ errs() << "OP0 = " << *Op0 << " U=" << Op0->getNumUses() << "\n";
+ errs() << "OP1 = " << *Op1 << " U=" << Op1->getNumUses() << "\n";
+ errs() << "CMP = " << I << "\n\n";
+ }
+ }
//===---------------------------------------------------------------------===//
+