From a94d6e87c4c49f2e81b01d66d8bfb591277f8f96 Mon Sep 17 00:00:00 2001
From: Nadav Rotem
Date: Tue, 24 Jul 2012 10:51:42 +0000
Subject: Clean whitespaces.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@160668 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Scalar/ScalarReplAggregates.cpp | 134 ++++++++++++-------
 1 file changed, 67 insertions(+), 67 deletions(-)

diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index e3e3c9eb17..ec835b15ca 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -104,7 +104,7 @@ namespace {
     /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite
     /// looping and avoid redundant work.
     SmallPtrSet<PHINode*, 8> CheckedPHIs;
-    
+
     /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
     bool isUnsafe : 1;
 
@@ -118,12 +118,12 @@ namespace {
     /// ever accessed, or false if the alloca is only accessed with mem
     /// intrinsics or load/store that only access the entire alloca at once.
     bool hasSubelementAccess : 1;
-    
+
     /// hasALoadOrStore - This is true if there are any loads or stores to it.
     /// The alloca may just be accessed with memcpy, for example, which would
     /// not set this.
     bool hasALoadOrStore : 1;
-    
+
     explicit AllocaInfo(AllocaInst *ai)
       : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
        hasSubelementAccess(false), hasALoadOrStore(false) {}
@@ -187,7 +187,7 @@ namespace {
     static MemTransferInst *isOnlyCopiedFromConstantGlobal(
         AllocaInst *AI, SmallVector<Instruction*, 4> &ToDelete);
   };
-  
+
   // SROA_DT - SROA that uses DominatorTree.
   struct SROA_DT : public SROA {
     static char ID;
@@ -196,7 +196,7 @@ namespace {
       SROA(T, true, ID, ST, AT, SLT) {
       initializeSROA_DTPass(*PassRegistry::getPassRegistry());
     }
-    
+
     // getAnalysisUsage - This pass does not require any passes, but we know it
     // will not alter the CFG, so say so.
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
@@ -204,7 +204,7 @@ namespace {
       AU.setPreservesCFG();
     }
   };
-  
+
   // SROA_SSAUp - SROA that uses SSAUpdater.
   struct SROA_SSAUp : public SROA {
     static char ID;
@@ -213,14 +213,14 @@ namespace {
       SROA(T, false, ID, ST, AT, SLT) {
       initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
     }
-    
+
     // getAnalysisUsage - This pass does not require any passes, but we know it
     // will not alter the CFG, so say so.
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
       AU.setPreservesCFG();
     }
   };
-  
+
 }
 
 char SROA_DT::ID = 0;
@@ -294,7 +294,7 @@ class ConvertToScalarInfo {
   /// isn't possible to turn into a vector type, it gets set to VoidTy.
   VectorType *VectorTy;
 
-  /// HadNonMemTransferAccess - True if there is at least one access to the 
+  /// HadNonMemTransferAccess - True if there is at least one access to the
   /// alloca that is not a MemTransferInst.  We don't want to turn structs into
   /// large integers unless there is some potential for optimization.
   bool HadNonMemTransferAccess;
@@ -1050,7 +1050,7 @@ public:
   AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
                  DIBuilder *DB)
     : LoadAndStorePromoter(Insts, S), AI(0), DIB(DB) {}
-  
+
   void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
     // Remember which alloca we're promoting (for isInstInList).
     this->AI = AI;
@@ -1065,18 +1065,18 @@ public:
 
     LoadAndStorePromoter::run(Insts);
     AI->eraseFromParent();
-    for (SmallVector<DbgDeclareInst *, 4>::iterator I = DDIs.begin(), 
+    for (SmallVector<DbgDeclareInst *, 4>::iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
       DbgDeclareInst *DDI = *I;
       DDI->eraseFromParent();
     }
-    for (SmallVector<DbgValueInst *, 4>::iterator I = DVIs.begin(), 
+    for (SmallVector<DbgValueInst *, 4>::iterator I = DVIs.begin(),
          E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      DVI->eraseFromParent();
    }
  }
-  
+
  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction*> &Insts) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
@@ -1085,7 +1085,7 @@ public:
   }
 
   virtual void updateDebugInfo(Instruction *Inst) const {
-    for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(), 
+    for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
          E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
@@ -1093,7 +1093,7 @@ public:
       else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
         ConvertDebugDeclareToDebugValue(DDI, LI, *DIB);
     }
-    for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(), 
+    for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
          E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      Value *Arg = NULL;
@@ -1136,12 +1136,12 @@ public:
 static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
   bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
   bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
-  
+
   for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
        UI != UE; ++UI) {
     LoadInst *LI = dyn_cast<LoadInst>(*UI);
     if (LI == 0 || !LI->isSimple()) return false;
-    
+
     // Both operands to the select need to be dereferencable, either absolutely
     // (e.g. allocas) or at this point because we can see other accesses to it.
     if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
@@ -1151,7 +1151,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
                                                     LI->getAlignment(), TD))
       return false;
   }
-  
+
   return true;
 }
 
@@ -1182,20 +1182,20 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
        UI != UE; ++UI) {
     LoadInst *LI = dyn_cast<LoadInst>(*UI);
     if (LI == 0 || !LI->isSimple()) return false;
-    
+
     // For now we only allow loads in the same block as the PHI.  This is a
     // common case that happens when instcombine merges two loads through a PHI.
     if (LI->getParent() != BB) return false;
-    
+
     // Ensure that there are no instructions between the PHI and the load that
     // could store.
     for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI)
       if (BBI->mayWriteToMemory())
         return false;
-    
+
     MaxAlign = std::max(MaxAlign, LI->getAlignment());
   }
-  
+
   // Okay, we know that we have one or more loads in the same block as the PHI.
   // We can transform this if it is safe to push the loads into the predecessor
   // blocks.  The only thing to watch out for is that we can't put a possibly
@@ -1223,10 +1223,10 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
     if (InVal->isDereferenceablePointer() ||
         isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
       continue;
-    
+
     return false;
   }
-  
+
   return true;
 }
 
@@ -1238,7 +1238,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
 static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
   SetVector<Instruction*, SmallVector<Instruction*, 4>,
             SmallPtrSet<Instruction*, 4> > InstsToRewrite;
-  
+
   for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
        UI != UE; ++UI) {
     User *U = *UI;
@@ -1247,7 +1247,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
         return false;
       continue;
     }
-    
+
     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
       if (SI->getOperand(0) == AI || !SI->isSimple())
         return false;   // Don't allow a store OF the AI, only INTO the AI.
@@ -1261,7 +1261,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
         Value *Result = SI->getOperand(1+CI->isZero());
         SI->replaceAllUsesWith(Result);
         SI->eraseFromParent();
-        
+
         // This is very rare and we just scrambled the use list of AI, start
         // over completely.
         return tryToMakeAllocaBePromotable(AI, TD);
@@ -1271,33 +1271,33 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
       // loads, then we can transform this by rewriting the select.
       if (!isSafeSelectToSpeculate(SI, TD))
         return false;
-      
+
       InstsToRewrite.insert(SI);
       continue;
     }
-    
+
     if (PHINode *PN = dyn_cast<PHINode>(U)) {
       if (PN->use_empty()) {  // Dead PHIs can be stripped.
         InstsToRewrite.insert(PN);
         continue;
       }
-      
+
       // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
      // in the pred blocks, then we can transform this by rewriting the PHI.
      if (!isSafePHIToSpeculate(PN, TD))
        return false;
-      
+
      InstsToRewrite.insert(PN);
      continue;
    }
-    
+
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (onlyUsedByLifetimeMarkers(BCI)) {
        InstsToRewrite.insert(BCI);
        continue;
      }
    }
-    
+
    return false;
  }
 
@@ -1305,7 +1305,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
   // we're done!
   if (InstsToRewrite.empty())
     return true;
-  
+
   // If we have instructions that need to be rewritten for this to be promotable
   // take care of it now.
   for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
@@ -1326,13 +1326,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
       // loads with a new select.
       while (!SI->use_empty()) {
         LoadInst *LI = cast<LoadInst>(SI->use_back());
-        
+
         IRBuilder<> Builder(LI);
-        LoadInst *TrueLoad = 
+        LoadInst *TrueLoad =
           Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
-        LoadInst *FalseLoad = 
+        LoadInst *FalseLoad =
           Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");
-        
+
         // Transfer alignment and TBAA info if present.
         TrueLoad->setAlignment(LI->getAlignment());
         FalseLoad->setAlignment(LI->getAlignment());
@@ -1340,18 +1340,18 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
           TrueLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
           FalseLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
         }
-        
+
         Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad);
         V->takeName(LI);
         LI->replaceAllUsesWith(V);
         LI->eraseFromParent();
       }
-      
+
       // Now that all the loads are gone, the select is gone too.
       SI->eraseFromParent();
       continue;
     }
-    
+
     // Otherwise, we have a PHI node which allows us to push the loads into the
     // predecessors.
     PHINode *PN = cast<PHINode>(InstsToRewrite[i]);
@@ -1359,7 +1359,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
       PN->eraseFromParent();
       continue;
     }
-    
+
     Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
     PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(),
                                      PN->getName()+".ld", PN);
@@ -1369,18 +1369,18 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
     LoadInst *SomeLoad = cast<LoadInst>(PN->use_back());
     MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
     unsigned Align = SomeLoad->getAlignment();
-    
+
     // Rewrite all loads of the PN to use the new PHI.
     while (!PN->use_empty()) {
       LoadInst *LI = cast<LoadInst>(PN->use_back());
       LI->replaceAllUsesWith(NewPN);
       LI->eraseFromParent();
     }
-    
+
     // Inject loads into all of the pred blocks.  Keep track of which blocks we
     // insert them into in case we have multiple edges from the same block.
     DenseMap<BasicBlock*, LoadInst*> InsertedLoads;
-    
+
     for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
       BasicBlock *Pred = PN->getIncomingBlock(i);
       LoadInst *&Load = InsertedLoads[Pred];
@@ -1391,13 +1391,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
         Load->setAlignment(Align);
         if (TBAATag) Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
       }
-      
+
       NewPN->addIncoming(Load, Pred);
     }
-    
+
     PN->eraseFromParent();
   }
-  
+
   ++NumAdjusted;
   return true;
 }
@@ -1430,7 +1430,7 @@ bool SROA::performPromotion(Function &F) {
       SSAUpdater SSA;
       for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
         AllocaInst *AI = Allocas[i];
-        
+
         // Build list of instructions to promote.
         for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
              UI != E; ++UI)
@@ -1667,12 +1667,12 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
       isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType), LIType,
                       false, Info, LI, true /*AllowWholeAccess*/);
       Info.hasALoadOrStore = true;
-      
+
     } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
       // Store is ok if storing INTO the pointer, not storing the pointer
       if (!SI->isSimple() || SI->getOperand(0) == I)
         return MarkUnsafe(Info, User);
-      
+
       Type *SIType = SI->getOperand(0)->getType();
       isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType), SIType,
                       true, Info, SI, true /*AllowWholeAccess*/);
@@ -1689,7 +1689,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
     if (Info.isUnsafe) return;
   }
 }
-
+
 /// isSafePHIUseForScalarRepl - If we see a PHI node or select using a pointer
 /// derived from the alloca, we can often still split the alloca into elements.
@@ -1706,10 +1706,10 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
   if (PHINode *PN = dyn_cast<PHINode>(I))
     if (!Info.CheckedPHIs.insert(PN))
       return;
-  
+
   for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
     Instruction *User = cast<Instruction>(*UI);
-    
+
     if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
       isSafePHISelectUseForScalarRepl(BC, Offset, Info);
     } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
@@ -1726,12 +1726,12 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
       isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType), LIType,
                       false, Info, LI, false /*AllowWholeAccess*/);
       Info.hasALoadOrStore = true;
-      
+
     } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
       // Store is ok if storing INTO the pointer, not storing the pointer
       if (!SI->isSimple() || SI->getOperand(0) == I)
         return MarkUnsafe(Info, User);
-      
+
       Type *SIType = SI->getOperand(0)->getType();
       isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType), SIType,
                       true, Info, SI, false /*AllowWholeAccess*/);
@@ -1925,12 +1925,12 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
       RewriteBitCast(BC, AI, Offset, NewElts);
       continue;
     }
-    
+
     if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
       RewriteGEP(GEPI, AI, Offset, NewElts);
       continue;
     }
-    
+
     if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
       uint64_t MemSize = Length->getZExtValue();
@@ -1949,10 +1949,10 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
       }
       continue;
     }
-    
+
     if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
       Type *LIType = LI->getType();
-      
+
       if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
         // Replace:
         //   %res = load { i32, i32 }* %alloc
@@ -1978,7 +1978,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
       }
       continue;
     }
-    
+
     if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
       Value *Val = SI->getOperand(0);
       Type *SIType = Val->getType();
@@ -2005,16 +2005,16 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
       }
       continue;
     }
-    
+
     if (isa<SelectInst>(User) || isa<PHINode>(User)) {
-      // If we have a PHI user of the alloca itself (as opposed to a GEP or 
+      // If we have a PHI user of the alloca itself (as opposed to a GEP or
       // bitcast) we have to rewrite it.  GEP and bitcast uses will be RAUW'd to
       // the new pointer.
       if (!isa<AllocaInst>(I)) continue;
-      
+
       assert(Offset == 0 && NewElts[0] &&
              "Direct alloca use should have a zero offset");
-      
+
       // If we have a use of the alloca, we know the derived uses will be
       // utilizing just the first element of the scalarized result.  Insert a
       // bitcast of the first alloca before the user as required.
@@ -2386,7 +2386,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
   uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
 
   IRBuilder<> Builder(SI);
-  
+
   // Handle tail padding by extending the operand
   if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
     SrcVal = Builder.CreateZExt(SrcVal,
@@ -2648,7 +2648,7 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
       return false;
     }
   }
-  
+
   return true;
 }
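
A minimal sketch of the select speculation quoted in the hunks around lines
1326-1340, for readers skimming the context lines: every load of the select
becomes a select of two loads, which unblocks promotion of the underlying
alloca. The helper name below is hypothetical, and the alignment/TBAA
transfer shown in the patch is omitted here.

static void speculateLoadsOverSelect(SelectInst *SI) {
  // Every remaining user of SI is known to be a simple load
  // (isSafeSelectToSpeculate checked this before we got here).
  while (!SI->use_empty()) {
    LoadInst *LI = cast<LoadInst>(SI->use_back());
    IRBuilder<> Builder(LI);
    // Load both arms of the select...
    LoadInst *TrueLoad =
      Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
    LoadInst *FalseLoad =
      Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");
    // ...and select between the loaded values instead of the pointers.
    Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad, FalseLoad);
    V->takeName(LI);
    LI->replaceAllUsesWith(V);
    LI->eraseFromParent();
  }
  // Now that all the loads are gone, the select is gone too.
  SI->eraseFromParent();
}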
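
The PHI case from the hunks around lines 1359-1391 follows the same pattern,
sketched below with the alignment/TBAA bookkeeping stripped. The helper name
is again hypothetical, and the LoadInst construction falls outside the quoted
hunk context, so it is reconstructed from the surrounding lines rather than
quoted.

static void speculateLoadsOverPHI(PHINode *PN) {
  // Build a PHI of loaded values to replace the loads of the PHI'd pointer.
  Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
  PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(),
                                   PN->getName()+".ld", PN);
  // Rewrite all loads of PN to use the new PHI.
  while (!PN->use_empty()) {
    LoadInst *LI = cast<LoadInst>(PN->use_back());
    LI->replaceAllUsesWith(NewPN);
    LI->eraseFromParent();
  }
  // Inject one load at the end of each predecessor, reusing it if the same
  // block feeds the PHI through multiple edges.
  DenseMap<BasicBlock*, LoadInst*> InsertedLoads;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = PN->getIncomingBlock(i);
    LoadInst *&Load = InsertedLoads[Pred];
    if (Load == 0)
      Load = new LoadInst(PN->getIncomingValue(i),
                          PN->getName() + "." + Pred->getName(),
                          Pred->getTerminator());
    NewPN->addIncoming(Load, Pred);
  }
  PN->eraseFromParent();
}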