path: root/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
author    Chandler Carruth <chandlerc@gmail.com>  2014-03-09 03:16:01 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2014-03-09 03:16:01 +0000
commit    36b699f2b139a30a2dfa4448223d6985b55daa8a (patch)
tree      d6844c991f0c06de4b66a2615259607d8349e5b3 /lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
parent    b033b03c23fb3ae066937b2ec09eb9d7a3f1d522 (diff)
[C++11] Add range based accessors for the Use-Def chain of a Value.
This requires a number of steps:
1) Move value_use_iterator into the Value class as an implementation detail.
2) Change it to actually be a *Use* iterator rather than a *User* iterator.
3) Add an adaptor which is a User iterator that always looks through the Use to the User.
4) Wrap these in Value::use_iterator and Value::user_iterator typedefs.
5) Add the range adaptors as Value::uses() and Value::users().
6) Update *all* of the callers to correctly distinguish between whether they wanted a use_iterator (and to explicitly dig out the User when needed), or a user_iterator which makes the Use itself totally opaque.

Because #6 requires churning essentially everything that walked the Use-Def chains, I went ahead and added all of the range adaptors and switched them to range-based loops where appropriate. Also, because the renaming requires churning at least every line of code involved, it didn't make any sense to split these up into multiple commits -- all of which would touch all of the same lines of code.

The result is still not quite optimal. The Value::use_iterator is a nice regular iterator, but Value::user_iterator is an iterator over User*s rather than over the User objects themselves. As a consequence, it fits a bit awkwardly into the range-based world and it has the weird extra-dereferencing 'operator->' that so many of our iterators have. I think this could be fixed by providing something which transforms a range of T&s into a range of T*s, but that *can* be separated into another patch, and it isn't yet 100% clear whether this is the right move. However, this change gets us most of the benefit and cleans up a substantial amount of code around Use and User. =]

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@203364 91177308-0d34-0410-b5e6-96231b3b80d8
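
For readers skimming the diff below, here is a minimal sketch of the two iteration styles this patch distinguishes. The helper function walkUseDefChain and its body are illustrative assumptions, not part of the patch; only Value::uses(), Value::users(), Use::getUser() and Use::getOperandNo() come from the change itself.

    // Illustrative sketch only -- not from this commit.  It contrasts the two
    // range adaptors the patch adds: uses() when the Use (operand slot) itself
    // matters, users() when only the using value does.
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    static void walkUseDefChain(Value *V) {  // hypothetical helper name
      // Use iteration: each element is a Use, so both the User and the operand
      // number are reachable.  (cast<Instruction> mirrors the assumption the
      // diff below makes: every user of V is an Instruction.)
      for (Use &U : V->uses()) {
        Instruction *I = cast<Instruction>(U.getUser());
        errs() << "operand #" << U.getOperandNo() << " of: " << *I << "\n";
      }

      // User iteration: the Use is opaque; each element is just the User*.
      for (User *Usr : V->users())
        if (LoadInst *LI = dyn_cast<LoadInst>(Usr))
          errs() << "loaded by: " << *LI << "\n";
    }

The diff below follows the first pattern, because isOnlyCopiedFromConstantGlobal needs the operand number of each use (e.g. to tell a memcpy source from its destination).
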
Diffstat (limited to 'lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp')
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 90cb7a96e0..1db55fea00 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -51,22 +51,22 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
// ahead and replace the value with the global, this lets the caller quickly
// eliminate the markers.
- for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- User *U = cast<Instruction>(*UI);
+ for (Use &U : V->uses()) {
+ Instruction *I = cast<Instruction>(U.getUser());
- if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
// Ignore non-volatile loads, they are always ok.
if (!LI->isSimple()) return false;
continue;
}
- if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
// If uses of the bitcast are ok, we are ok.
if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
return false;
continue;
}
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
// If the GEP has all zero indices, it doesn't offset the pointer. If it
// doesn't, it does.
if (!isOnlyCopiedFromConstantGlobal(
@@ -75,14 +75,14 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
continue;
}
- if (CallSite CS = U) {
+ if (CallSite CS = I) {
// If this is the function being called then we treat it like a load and
// ignore it.
- if (CS.isCallee(UI))
+ if (CS.isCallee(&U))
continue;
// Inalloca arguments are clobbered by the call.
- unsigned ArgNo = CS.getArgumentNo(UI);
+ unsigned ArgNo = CS.getArgumentNo(&U);
if (CS.isInAllocaArgument(ArgNo))
return false;
@@ -100,7 +100,7 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
}
// Lifetime intrinsics can be handled by the caller.
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end) {
assert(II->use_empty() && "Lifetime markers have no result to use!");
@@ -111,13 +111,13 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
// If this is isn't our memcpy/memmove, reject it as something we can't
// handle.
- MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
+ MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
if (MI == 0)
return false;
// If the transfer is using the alloca as a source of the transfer, then
// ignore it since it is a load (unless the transfer is volatile).
- if (UI.getOperandNo() == 1) {
+ if (U.getOperandNo() == 1) {
if (MI->isVolatile()) return false;
continue;
}
@@ -130,7 +130,7 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
if (IsOffset) return false;
// If the memintrinsic isn't using the alloca as the dest, reject it.
- if (UI.getOperandNo() != 0) return false;
+ if (U.getOperandNo() != 0) return false;
// If the source of the memcpy/move is not a constant global, reject it.
if (!pointsToConstantGlobal(MI->getSource()))