author    Nick Lewycky <nicholas@mxc.ca>  2014-01-27 10:04:03 +0000
committer Nick Lewycky <nicholas@mxc.ca>  2014-01-27 10:04:03 +0000
commit    f2282cac95e7849a86437e347921bc4cc28d403f (patch)
tree      4bc44584428a00aaf94d39b9941eee0714096da8 /test/Analysis
parent    90193401204ea72f93f403ec9810a9a22aa22e22 (diff)
Teach SCEV to handle more cases of 'and X, CST', specifically where CST is a single run of contiguous 1 bits with any number of leading and trailing 0 bits.

Unfortunately, this in turn led to some lower-quality SCEVs due to some different paths through expression simplification, so add getUDivExactExpr and use it. This fixes all instances of the problems that I found, but we can make that function smarter as necessary.

Merge test "xor-and.ll" into "and-xor.ll" since I needed to update it anyway. Test 'nsw-offset.ll' now analyzes a little deeper: %n now gets a SCEV in terms of %no instead of a SCEVUnknown.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200203 91177308-0d34-0410-b5e6-96231b3b80d8
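As an illustration of the new folding (a hypothetical example, not part of this patch; the function name @mask12 and the exact CHECK line are extrapolated from the fold.ll cases below): an 'and' with a contiguous mask such as 12 (0b1100) should now divide out the two trailing zero bits, truncate to the two-bit width of the run of ones, and scale the result back up:

; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
; Expected SCEV for %a, by analogy with the %D case in fold.ll:
; CHECK: --> (4 * (zext i2 (trunc i32 (%x /u 4) to i2) to i32))
define i32 @mask12(i32 %x) {
  %a = and i32 %x, 12   ; mask 12 = 0b1100: two contiguous 1 bits, two trailing 0 bits
  ret i32 %a
}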
Diffstat (limited to 'test/Analysis')
-rw-r--r--  test/Analysis/ScalarEvolution/and-xor.ll     | 18
-rw-r--r--  test/Analysis/ScalarEvolution/fold.ll        | 17
-rw-r--r--  test/Analysis/ScalarEvolution/nsw-offset.ll  |  2
-rw-r--r--  test/Analysis/ScalarEvolution/xor-and.ll     | 13
4 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/test/Analysis/ScalarEvolution/and-xor.ll b/test/Analysis/ScalarEvolution/and-xor.ll
index 404ab91e26..ad636da4d4 100644
--- a/test/Analysis/ScalarEvolution/and-xor.ll
+++ b/test/Analysis/ScalarEvolution/and-xor.ll
@@ -1,11 +1,27 @@
; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
+; CHECK-LABEL: @test1
; CHECK: --> (zext
; CHECK: --> (zext
; CHECK-NOT: --> (zext
-define i32 @foo(i32 %x) {
+define i32 @test1(i32 %x) {
%n = and i32 %x, 255
%y = xor i32 %n, 255
ret i32 %y
}
+
+; ScalarEvolution shouldn't try to analyze %z into something like
+; --> (zext i4 (-1 + (-1 * (trunc i64 (8 * %x) to i4))) to i64)
+; or
+; --> (8 * (zext i1 (trunc i64 ((8 * %x) /u 8) to i1) to i64))
+
+; CHECK-LABEL: @test2
+; CHECK: --> (8 * (zext i1 (trunc i64 %x to i1) to i64))
+
+define i64 @test2(i64 %x) {
+ %a = shl i64 %x, 3
+ %t = and i64 %a, 8
+ %z = xor i64 %t, 8
+ ret i64 %z
+}
diff --git a/test/Analysis/ScalarEvolution/fold.ll b/test/Analysis/ScalarEvolution/fold.ll
index 57006dd9bb..84b657050c 100644
--- a/test/Analysis/ScalarEvolution/fold.ll
+++ b/test/Analysis/ScalarEvolution/fold.ll
@@ -60,3 +60,20 @@ loop:
exit:
ret void
}
+
+define void @test5(i32 %i) {
+; CHECK-LABEL: @test5
+ %A = and i32 %i, 1
+; CHECK: --> (zext i1 (trunc i32 %i to i1) to i32)
+ %B = and i32 %i, 2
+; CHECK: --> (2 * (zext i1 (trunc i32 (%i /u 2) to i1) to i32))
+ %C = and i32 %i, 63
+; CHECK: --> (zext i6 (trunc i32 %i to i6) to i32)
+ %D = and i32 %i, 126
+; CHECK: --> (2 * (zext i6 (trunc i32 (%i /u 2) to i6) to i32))
+ %E = and i32 %i, 64
+; CHECK: --> (64 * (zext i1 (trunc i32 (%i /u 64) to i1) to i32))
+ %F = and i32 %i, -2147483648
+; CHECK: --> (-2147483648 * (%i /u -2147483648))
+ ret void
+}
diff --git a/test/Analysis/ScalarEvolution/nsw-offset.ll b/test/Analysis/ScalarEvolution/nsw-offset.ll
index 8969a5ad4c..88cdcf23d9 100644
--- a/test/Analysis/ScalarEvolution/nsw-offset.ll
+++ b/test/Analysis/ScalarEvolution/nsw-offset.ll
@@ -73,5 +73,5 @@ return: ; preds = %bb1.return_crit_edg
ret void
}
-; CHECK: Loop %bb: backedge-taken count is ((-1 + %n) /u 2)
+; CHECK: Loop %bb: backedge-taken count is ((-1 + (2 * (%no /u 2))) /u 2)
; CHECK: Loop %bb: max backedge-taken count is 1073741822
diff --git a/test/Analysis/ScalarEvolution/xor-and.ll b/test/Analysis/ScalarEvolution/xor-and.ll
deleted file mode 100644
index 2616ea928a..0000000000
--- a/test/Analysis/ScalarEvolution/xor-and.ll
+++ /dev/null
@@ -1,13 +0,0 @@
-; RUN: opt < %s -scalar-evolution -analyze | FileCheck %s
-
-; ScalarEvolution shouldn't try to analyze %z into something like
-; --> (zext i4 (-1 + (-1 * (trunc i64 (8 * %x) to i4))) to i64)
-
-; CHECK: --> (zext i4 (-8 + (trunc i64 (8 * %x) to i4)) to i64)
-
-define i64 @foo(i64 %x) {
- %a = shl i64 %x, 3
- %t = and i64 %a, 8
- %z = xor i64 %t, 8
- ret i64 %z
-}