summaryrefslogtreecommitdiff
path: root/test/Transforms/SROA
diff options
context:
space:
mode:
authorChandler Carruth <chandlerc@gmail.com>2012-09-24 00:34:20 +0000
committerChandler Carruth <chandlerc@gmail.com>2012-09-24 00:34:20 +0000
commitbc4021f31eaa97ee52655828da3e3de14a39e4a6 (patch)
tree18f04e851fc7f89a90e5f775f5a6ff41ca1473cf /test/Transforms/SROA
parent371e17c03c3169459c84986d4a318f6d6d3f8730 (diff)
downloadllvm-bc4021f31eaa97ee52655828da3e3de14a39e4a6.tar.gz
llvm-bc4021f31eaa97ee52655828da3e3de14a39e4a6.tar.bz2
llvm-bc4021f31eaa97ee52655828da3e3de14a39e4a6.tar.xz
Address one of the original FIXMEs for the new SROA pass by implementing
integer promotion analogous to vector promotion. When there is an integer alloca being accessed both as its integer type and as a narrower integer type, promote the narrower access to "insert" and "extract" the smaller integer from the larger one, and make the integer alloca a candidate for promotion. In the new formulation, we don't care about target legal integer or use thresholds to control things. Instead, we only perform this promotion to an integer type which the frontend has already emitted a load or store for. This bounds the scope and prevents optimization passes from coalescing larger and larger entities into a single integer. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164479 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/Transforms/SROA')
-rw-r--r--test/Transforms/SROA/basictest.ll63
1 file changed, 43 insertions, 20 deletions
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index be3ef64dc2..a61de05f45 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -553,30 +553,53 @@ bad:
ret i32 %Z2
}
-define i32 @test12() {
-; CHECK: @test12
-; CHECK: alloca i24
-;
-; FIXME: SROA should promote accesses to this into whole i24 operations instead
-; of i8 operations.
-; CHECK: store i8 0
-; CHECK: store i8 0
-; CHECK: store i8 0
+define i8 @test12() {
+; We fully promote these to the i24 load or store size, resulting in just masks
+; and other operations that instcombine will fold, but no alloca.
;
-; CHECK: load i24*
+; CHECK: @test12
entry:
%a = alloca [3 x i8]
- %b0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
- store i8 0, i8* %b0ptr
- %b1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
- store i8 0, i8* %b1ptr
- %b2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
- store i8 0, i8* %b2ptr
- %iptr = bitcast [3 x i8]* %a to i24*
- %i = load i24* %iptr
- %ret = zext i24 %i to i32
- ret i32 %ret
+ %b = alloca [3 x i8]
+; CHECK-NOT: alloca
+
+ %a0ptr = getelementptr [3 x i8]* %a, i64 0, i32 0
+ store i8 0, i8* %a0ptr
+ %a1ptr = getelementptr [3 x i8]* %a, i64 0, i32 1
+ store i8 0, i8* %a1ptr
+ %a2ptr = getelementptr [3 x i8]* %a, i64 0, i32 2
+ store i8 0, i8* %a2ptr
+ %aiptr = bitcast [3 x i8]* %a to i24*
+ %ai = load i24* %aiptr
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[mask0:.*]] = and i24 undef, -256
+; CHECK-NEXT: %[[mask1:.*]] = and i24 %[[mask0]], -65281
+; CHECK-NEXT: %[[mask2:.*]] = and i24 %[[mask1]], 65535
+
+ %biptr = bitcast [3 x i8]* %b to i24*
+ store i24 %ai, i24* %biptr
+ %b0ptr = getelementptr [3 x i8]* %b, i64 0, i32 0
+ %b0 = load i8* %b0ptr
+ %b1ptr = getelementptr [3 x i8]* %b, i64 0, i32 1
+ %b1 = load i8* %b1ptr
+ %b2ptr = getelementptr [3 x i8]* %b, i64 0, i32 2
+ %b2 = load i8* %b2ptr
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: %[[trunc0:.*]] = trunc i24 %[[mask2]] to i8
+; CHECK-NEXT: %[[shift1:.*]] = lshr i24 %[[mask2]], 8
+; CHECK-NEXT: %[[trunc1:.*]] = trunc i24 %[[shift1]] to i8
+; CHECK-NEXT: %[[shift2:.*]] = lshr i24 %[[mask2]], 16
+; CHECK-NEXT: %[[trunc2:.*]] = trunc i24 %[[shift2]] to i8
+
+ %bsum0 = add i8 %b0, %b1
+ %bsum1 = add i8 %bsum0, %b2
+ ret i8 %bsum1
+; CHECK: %[[sum0:.*]] = add i8 %[[trunc0]], %[[trunc1]]
+; CHECK-NEXT: %[[sum1:.*]] = add i8 %[[sum0]], %[[trunc2]]
+; CHECK-NEXT: ret i8 %[[sum1]]
}
define i32 @test13() {