summaryrefslogtreecommitdiff
path: root/lib/Transforms/Scalar/SROA.cpp
diff options
context:
space:
mode:
authorChandler Carruth <chandlerc@gmail.com>2012-10-13 02:41:05 +0000
committerChandler Carruth <chandlerc@gmail.com>2012-10-13 02:41:05 +0000
commit520eeaeffd5ef724ea5c68192f77331571f38060 (patch)
tree9e874bec01bcd368b503775175e70e08e96ecf09 /lib/Transforms/Scalar/SROA.cpp
parent84125ca43c758fd21fdab2b05196e0df57c55c96 (diff)
downloadllvm-520eeaeffd5ef724ea5c68192f77331571f38060.tar.gz
llvm-520eeaeffd5ef724ea5c68192f77331571f38060.tar.bz2
llvm-520eeaeffd5ef724ea5c68192f77331571f38060.tar.xz
Clean up how we rewrite loads and stores to the whole alloca. When these
are single value types, the load and store should be directly based upon the alloca and then bitcasting can fix the type as needed afterward. This might in theory improve some of the IR coming out of SROA, but I don't expect big changes yet and don't have any test cases on hand. This is really just a cleanup/refactoring patch. The next patch will cause this code path to be hit a lot more, actually get SROA to promote more allocas and include several more test cases. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165864 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Transforms/Scalar/SROA.cpp')
-rw-r--r--lib/Transforms/Scalar/SROA.cpp54
1 file changed, 49 insertions, 5 deletions
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index ca76251492..34c238ed15 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -2149,6 +2149,7 @@ class AllocaPartitionRewriter : public InstVisitor<AllocaPartitionRewriter,
SROA &Pass;
AllocaInst &OldAI, &NewAI;
const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset;
+ Type *NewAllocaTy;
// If we are rewriting an alloca partition which can be written as pure
// vector operations, we stash extra information here. When VecTy is
@@ -2186,6 +2187,7 @@ public:
OldAI(OldAI), NewAI(NewAI),
NewAllocaBeginOffset(NewBeginOffset),
NewAllocaEndOffset(NewEndOffset),
+ NewAllocaTy(NewAI.getAllocatedType()),
VecTy(), ElementTy(), ElementSize(), IntPromotionTy(),
BeginOffset(), EndOffset() {
}
@@ -2343,7 +2345,24 @@ private:
Pass.DeadInsts.push_back(I);
}
- Value *getValueCast(IRBuilder<> &IRB, Value *V, Type *Ty) {
+ /// \brief Test whether we can convert a value from the old to the new type.
+ ///
+ /// This predicate should be used to guard calls to convertValue in order to
+ /// ensure that we only try to convert viable values. The strategy is that we
+ /// will peel off single element struct and array wrappings to get to an
+ /// underlying value, and convert that value.
+ bool canConvertValue(Type *OldTy, Type *NewTy) {
+ if (OldTy == NewTy)
+ return true;
+ if (TD.getTypeSizeInBits(NewTy) != TD.getTypeSizeInBits(OldTy))
+ return false;
+ if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType())
+ return false;
+ return true;
+ }
+
+ Value *convertValue(IRBuilder<> &IRB, Value *V, Type *Ty) {
+ assert(canConvertValue(V->getType(), Ty) && "Value not convertable to type");
if (V->getType()->isIntegerTy() && Ty->isPointerTy())
return IRB.CreateIntToPtr(V, Ty);
if (V->getType()->isPointerTy() && Ty->isIntegerTy())
@@ -2364,7 +2383,7 @@ private:
getName(".load"));
}
if (Result->getType() != LI.getType())
- Result = getValueCast(IRB, Result, LI.getType());
+ Result = convertValue(IRB, Result, LI.getType());
LI.replaceAllUsesWith(Result);
Pass.DeadInsts.push_back(&LI);
@@ -2393,6 +2412,18 @@ private:
if (IntPromotionTy)
return rewriteIntegerLoad(IRB, LI);
+ if (BeginOffset == NewAllocaBeginOffset &&
+ canConvertValue(NewAllocaTy, LI.getType())) {
+ Value *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ LI.isVolatile(), getName(".load"));
+ Value *NewV = convertValue(IRB, NewLI, LI.getType());
+ LI.replaceAllUsesWith(NewV);
+ Pass.DeadInsts.push_back(&LI);
+
+ DEBUG(dbgs() << " to: " << *NewLI << "\n");
+ return !LI.isVolatile();
+ }
+
Value *NewPtr = getAdjustedAllocaPtr(IRB,
LI.getPointerOperand()->getType());
LI.setOperand(0, NewPtr);
@@ -2409,13 +2440,13 @@ private:
if (V->getType() == ElementTy ||
BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
if (V->getType() != ElementTy)
- V = getValueCast(IRB, V, ElementTy);
+ V = convertValue(IRB, V, ElementTy);
LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
getName(".load"));
V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset),
getName(".insert"));
} else if (V->getType() != VecTy) {
- V = getValueCast(IRB, V, VecTy);
+ V = convertValue(IRB, V, VecTy);
}
StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.push_back(&SI);
@@ -2445,13 +2476,26 @@ private:
if (IntPromotionTy)
return rewriteIntegerStore(IRB, SI);
+ Type *ValueTy = SI.getValueOperand()->getType();
+
// Strip all inbounds GEPs and pointer casts to try to dig out any root
// alloca that should be re-examined after promoting this alloca.
- if (SI.getValueOperand()->getType()->isPointerTy())
+ if (ValueTy->isPointerTy())
if (AllocaInst *AI = dyn_cast<AllocaInst>(SI.getValueOperand()
->stripInBoundsOffsets()))
Pass.PostPromotionWorklist.insert(AI);
+ if (BeginOffset == NewAllocaBeginOffset &&
+ canConvertValue(ValueTy, NewAllocaTy)) {
+ Value *NewV = convertValue(IRB, SI.getValueOperand(), NewAllocaTy);
+ StoreInst *NewSI = IRB.CreateAlignedStore(NewV, &NewAI, NewAI.getAlignment(),
+ SI.isVolatile());
+ Pass.DeadInsts.push_back(&SI);
+
+ DEBUG(dbgs() << " to: " << *NewSI << "\n");
+ return !SI.isVolatile();
+ }
+
Value *NewPtr = getAdjustedAllocaPtr(IRB,
SI.getPointerOperand()->getType());
SI.setOperand(1, NewPtr);