author     Chris Lattner <sabre@nondot.org>    2011-02-10 07:11:16 +0000
committer  Chris Lattner <sabre@nondot.org>    2011-02-10 07:11:16 +0000
commit     6cdf2ea98e9952a57768a1fcdce8850089263260 (patch)
tree       9e4aba745da2c2ade020bbd6c284111076e96177 /lib
parent     81baf14fdfa29c22a08d609144c285169e23a247 (diff)
implement the first part of PR8882: when lowering an inbounds gep to
explicit addressing, we know that none of the intermediate computations
overflow.

This could use review: it seems that the shifts certainly wouldn't
overflow, but could the intermediate adds overflow if there is a
negative index?

Previously the testcase would instcombine to:

define i1 @test(i64 %i) {
  %p1.idx.mask = and i64 %i, 4611686018427387903
  %cmp = icmp eq i64 %p1.idx.mask, 1000
  ret i1 %cmp
}

now we get:

define i1 @test(i64 %i) {
  %cmp = icmp eq i64 %i, 1000
  ret i1 %cmp
}

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@125271 91177308-0d34-0410-b5e6-96231b3b80d8
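
The mechanism behind the change is the optional no-unsigned-wrap argument on
IRBuilder's arithmetic helpers: CreateMul and CreateAdd accept a HasNUW flag,
so the scaled-index math can carry NUW exactly when the source GEP is
inbounds. A minimal sketch of that pattern follows (modern header paths;
addScaledIndex is a hypothetical helper, not code from the patch):

#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Sketch only: mirrors the Result += Index * Size step of EmitGEPOffset,
// tagging the intermediate mul and add with NUW when the GEP is inbounds.
static Value *addScaledIndex(IRBuilder<> &B, Value *Result, Value *Index,
                             uint64_t EltSize, bool IsInBounds) {
  Type *IntPtrTy = Result->getType();
  // Scale the index by the element size; NUW reflects the inbounds guarantee.
  Value *Scaled = B.CreateMul(Index, ConstantInt::get(IntPtrTy, EltSize),
                              "idx", /*HasNUW=*/IsInBounds);
  // Fold the scaled index into the running byte offset, again with NUW.
  return B.CreateAdd(Result, Scaled, "offs", /*HasNUW=*/IsInBounds);
}

With NUW in place on those operations, the and-mask seen in the "previously"
output above is no longer needed, which is exactly the difference the
testcase shows.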
Diffstat (limited to 'lib')
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp  23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 91116ca27a..b04a05b3b3 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -399,6 +399,10 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
   const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
   Value *Result = Constant::getNullValue(IntPtrTy);
 
+  // If the GEP is inbounds, we know that none of the addressing operations will
+  // overflow in an unsigned sense.
+  bool isInBounds = cast<GEPOperator>(GEP)->isInBounds();
+
   // Build a mask for high order bits.
   unsigned IntPtrWidth = TD.getPointerSizeInBits();
   uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
@@ -414,31 +418,34 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
       if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
         Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
 
-        Result = Builder->CreateAdd(Result,
-                                    ConstantInt::get(IntPtrTy, Size),
-                                    GEP->getName()+".offs");
+        if (Size)
+          Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+                                      GEP->getName()+".offs",
+                                      isInBounds /*NUW*/);
         continue;
       }
 
       Constant *Scale = ConstantInt::get(IntPtrTy, Size);
       Constant *OC =
         ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
-      Scale = ConstantExpr::getMul(OC, Scale);
+      Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
       // Emit an add instruction.
-      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+      Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs",
+                                  isInBounds /*NUW*/);
       continue;
     }
     // Convert to correct type.
     if (Op->getType() != IntPtrTy)
       Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
     if (Size != 1) {
-      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
       // We'll let instcombine(mul) convert this to a shl if possible.
-      Op = Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
+      Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+                              GEP->getName()+".idx", isInBounds /*NUW*/);
     }
 
     // Emit an add instruction.
-    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+    Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs",
+                                isInBounds /*NUW*/);
   }
   return Result;
 }
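
A closing note on the isInBounds check added at the top of EmitGEPOffset: the
function receives a User*, and GEPOperator abstracts over both getelementptr
instructions and GEP constant expressions, which is presumably why the flag is
read through a cast to GEPOperator rather than to GetElementPtrInst. A hedged
sketch of that query (modern header paths; mayUseNUWForOffset is a
hypothetical name):

#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Sketch only: GEPOperator covers both the instruction and the constant-
// expression form of a GEP, so one cast handles whatever the User* holds.
static bool mayUseNUWForOffset(User *GEP) {
  return cast<GEPOperator>(GEP)->isInBounds();
}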