author     Rafael Espindola <rafael.espindola@gmail.com>   2014-02-21 00:06:31 +0000
committer  Rafael Espindola <rafael.espindola@gmail.com>   2014-02-21 00:06:31 +0000
commit     f116e5308d2eed4b531da795e8eed91211c01241 (patch)
tree       d458cab6fb15c8319e97119600478aa358d4e954 /lib/Transforms/Scalar
parent     23ffb3ea10530e36f8b779570f8e1cc686708051 (diff)
Rename many DataLayout variables from TD to DL.
I am really sorry for the noise, but the current state where some parts of the
code use TD (from the old name: TargetData) and other parts use DL makes it
hard to write a patch that changes where those variables come from and how
they are passed along.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201827 91177308-0d34-0410-b5e6-96231b3b80d8
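The rename is mechanical: every pass-local TD member, parameter, and local holding a DataLayout becomes DL, with no behavioral change. A minimal sketch of the pattern, using a hypothetical pass (not an excerpt from the patch) against the 3.x-era API in which DataLayout was itself an immutable pass:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    // Hypothetical pass illustrating the rename; "ExamplePass" is not part
    // of this patch.
    struct ExamplePass : public FunctionPass {
      static char ID;
      // Before this patch:  const DataLayout *TD;  // name left over from TargetData
      const DataLayout *DL;                         // after: one name everywhere

      ExamplePass() : FunctionPass(ID), DL(0) {}

      virtual bool runOnFunction(Function &F) {
        // DataLayout may be absent in this era, so every consumer guards
        // against a null pointer, exactly as the hunks below do.
        DL = getAnalysisIfAvailable<DataLayout>();
        return false;
      }
    };
    }
    char ExamplePass::ID = 0;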
Diffstat (limited to 'lib/Transforms/Scalar')
-rw-r--r--  lib/Transforms/Scalar/EarlyCSE.cpp              |   6
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                   | 150
-rw-r--r--  lib/Transforms/Scalar/GlobalMerge.cpp           |  20
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp        |  26
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp         |  12
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                  |   6
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp    |  20
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp       |  48
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                  |  16
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp  | 182
10 files changed, 243 insertions(+), 243 deletions(-)
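Every file below follows the same consumer-side convention: fetch the layout if available, then thread the possibly-null pointer into utilities that tolerate it. A sketch of that calling pattern (the wrapper function is hypothetical; SimplifyInstruction's null-tolerant DataLayout parameter matches the hunks below):

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // Hypothetical wrapper mirroring how EarlyCSE, GVN, and JumpThreading
    // forward DL: a null DataLayout* simply disables layout-dependent folds.
    static Value *foldIfPossible(Instruction *I, const DataLayout *DL) {
      return SimplifyInstruction(I, DL); // DL may be 0; the utility checks
    }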
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 24baee72dc..ecf0335ebe 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -262,7 +262,7 @@ namespace {
 /// cases.
 class EarlyCSE : public FunctionPass {
 public:
-  const DataLayout *TD;
+  const DataLayout *DL;
   const TargetLibraryInfo *TLI;
   DominatorTree *DT;
   typedef RecyclingAllocator<BumpPtrAllocator,
@@ -432,7 +432,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
     // If the instruction can be simplified (e.g. X+0 = X) then replace it with
     // its simpler value.
-    if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
+    if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {
       DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
       Inst->replaceAllUsesWith(V);
       Inst->eraseFromParent();
@@ -557,7 +557,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
   std::vector<StackNode *> nodesToProcess;
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 3acf92b8e6..fbbc1317b5 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -586,7 +586,7 @@ namespace {
   bool NoLoads;
   MemoryDependenceAnalysis *MD;
   DominatorTree *DT;
-  const DataLayout *TD;
+  const DataLayout *DL;
   const TargetLibraryInfo *TLI;
   SetVector<BasicBlock *> DeadBlocks;
@@ -624,7 +624,7 @@ namespace {
     InstrsToErase.push_back(I);
   }
-  const DataLayout *getDataLayout() const { return TD; }
+  const DataLayout *getDataLayout() const { return DL; }
   DominatorTree &getDominatorTree() const { return *DT; }
   AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
   MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -828,7 +828,7 @@ SpeculationFailure:
 /// CoerceAvailableValueToLoadType will succeed.
 static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                             Type *LoadTy,
-                                            const DataLayout &TD) {
+                                            const DataLayout &DL) {
   // If the loaded or stored value is an first class array or struct, don't try
   // to transform them. We need to be able to bitcast to integer.
   if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
     return false;
   // The store has to be at least as big as the load.
-  if (TD.getTypeSizeInBits(StoredVal->getType()) <
-      TD.getTypeSizeInBits(LoadTy))
+  if (DL.getTypeSizeInBits(StoredVal->getType()) <
+      DL.getTypeSizeInBits(LoadTy))
     return false;
   return true;
@@ -853,15 +853,15 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
 static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                              Type *LoadedTy,
                                              Instruction *InsertPt,
-                                             const DataLayout &TD) {
-  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
+                                             const DataLayout &DL) {
+  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
     return 0;
   // If this is already the right type, just return it.
   Type *StoredValTy = StoredVal->getType();
-  uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
-  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
+  uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy);
+  uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy);
   // If the store and reload are the same size, we can always reuse it.
   if (StoreSize == LoadSize) {
@@ -872,13 +872,13 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
   // Convert source pointers to integers, which can be bitcast.
   if (StoredValTy->getScalarType()->isPointerTy()) {
-    StoredValTy = TD.getIntPtrType(StoredValTy);
+    StoredValTy = DL.getIntPtrType(StoredValTy);
     StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
   }
   Type *TypeToCastTo = LoadedTy;
   if (TypeToCastTo->getScalarType()->isPointerTy())
-    TypeToCastTo = TD.getIntPtrType(TypeToCastTo);
+    TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
   if (StoredValTy != TypeToCastTo)
     StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
@@ -897,7 +897,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
   // Convert source pointers to integers, which can be manipulated.
   if (StoredValTy->getScalarType()->isPointerTy()) {
-    StoredValTy = TD.getIntPtrType(StoredValTy);
+    StoredValTy = DL.getIntPtrType(StoredValTy);
     StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
   }
@@ -909,7 +909,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
   // If this is a big-endian system, we need to shift the value down to the low
   // bits so that a truncate will work.
-  if (TD.isBigEndian()) {
+  if (DL.isBigEndian()) {
     Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
     StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
   }
@@ -940,15 +940,15 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
 static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                           Value *WritePtr,
                                           uint64_t WriteSizeInBits,
-                                          const DataLayout &TD) {
+                                          const DataLayout &DL) {
   // If the loaded or stored value is a first class array or struct, don't try
   // to transform them. We need to be able to bitcast to integer.
   if (LoadTy->isStructTy() || LoadTy->isArrayTy())
     return -1;
   int64_t StoreOffset = 0, LoadOffset = 0;
-  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);
-  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
+  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
+  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
   if (StoreBase != LoadBase)
     return -1;
@@ -970,7 +970,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
   // If the load and store don't overlap at all, the store doesn't provide
   // anything to the load. In this case, they really don't alias at all, AA
   // must have gotten confused.
-  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
+  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);
   if ((WriteSizeInBits & 7) | (LoadSize & 7))
     return -1;
@@ -1013,51 +1013,51 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
 /// memdep query of a load that ends up being a clobbering store.
 static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                           StoreInst *DepSI,
-                                          const DataLayout &TD) {
+                                          const DataLayout &DL) {
   // Cannot handle reading from store of first-class aggregate yet.
   if (DepSI->getValueOperand()->getType()->isStructTy() ||
       DepSI->getValueOperand()->getType()->isArrayTy())
     return -1;
   Value *StorePtr = DepSI->getPointerOperand();
-  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
+  uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
   return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
-                                        StorePtr, StoreSize, TD);
+                                        StorePtr, StoreSize, DL);
 }
 /// AnalyzeLoadFromClobberingLoad - This function is called when we have a
 /// memdep query of a load that ends up being clobbered by another load. See if
 /// the other load can feed into the second load.
 static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
-                                         LoadInst *DepLI, const DataLayout &TD){
+                                         LoadInst *DepLI, const DataLayout &DL){
   // Cannot handle reading from store of first-class aggregate yet.
   if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
     return -1;
   Value *DepPtr = DepLI->getPointerOperand();
-  uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
-  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
+  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
+  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
   if (R != -1)
     return R;
   // If we have a load/load clobber an DepLI can be widened to cover this load,
   // then we should widen it!
   int64_t LoadOffs = 0;
   const Value *LoadBase =
-    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
-  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
+    GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
+  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
   unsigned Size = MemoryDependenceAnalysis::
-    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
+    getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
   if (Size == 0) return -1;
-  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
+  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
 }
 static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                             MemIntrinsic *MI,
-                                            const DataLayout &TD) {
+                                            const DataLayout &DL) {
   // If the mem operation is a non-constant size, we can't handle it.
   ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
   if (SizeCst == 0) return -1;
@@ -1067,7 +1067,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
   // of the memset..
   if (MI->getIntrinsicID() == Intrinsic::memset)
     return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
-                                          MemSizeInBits, TD);
+                                          MemSizeInBits, DL);
   // If we have a memcpy/memmove, the only case we can handle is if this is a
   // copy from constant memory. In that case, we can read directly from the
@@ -1077,12 +1077,12 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
   Constant *Src = dyn_cast<Constant>(MTI->getSource());
   if (Src == 0) return -1;
-  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
+  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
   if (GV == 0 || !GV->isConstant()) return -1;
   // See if the access is within the bounds of the transfer.
   int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
-                                              MI->getDest(), MemSizeInBits, TD);
+                                              MI->getDest(), MemSizeInBits, DL);
   if (Offset == -1)
     return Offset;
@@ -1095,7 +1095,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
     ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
   Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
   Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
-  if (ConstantFoldLoadFromConstPtr(Src, &TD))
+  if (ConstantFoldLoadFromConstPtr(Src, &DL))
     return Offset;
   return -1;
 }
@@ -1108,11 +1108,11 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
 /// before we give up.
 static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                    Type *LoadTy,
-                                   Instruction *InsertPt, const DataLayout &TD){
+                                   Instruction *InsertPt, const DataLayout &DL){
   LLVMContext &Ctx = SrcVal->getType()->getContext();
-  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
-  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
+  uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
+  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
   IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1120,13 +1120,13 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
   // to an integer type to start with.
   if (SrcVal->getType()->getScalarType()->isPointerTy())
     SrcVal = Builder.CreatePtrToInt(SrcVal,
-                                    TD.getIntPtrType(SrcVal->getType()));
+                                    DL.getIntPtrType(SrcVal->getType()));
   if (!SrcVal->getType()->isIntegerTy())
     SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
   // Shift the bits to the least significant depending on endianness.
   unsigned ShiftAmt;
-  if (TD.isLittleEndian())
+  if (DL.isLittleEndian())
     ShiftAmt = Offset*8;
   else
     ShiftAmt = (StoreSize-LoadSize-Offset)*8;
@@ -1137,7 +1137,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
   if (LoadSize != StoreSize)
     SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
-  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
+  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
 }
 /// GetLoadValueForLoad - This function is called when we have a
@@ -1148,11 +1148,11 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
 static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                   Type *LoadTy, Instruction *InsertPt,
                                   GVN &gvn) {
-  const DataLayout &TD = *gvn.getDataLayout();
+  const DataLayout &DL = *gvn.getDataLayout();
   // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
   // widen SrcVal out to a larger load.
-  unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
-  unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
+  unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
+  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
   if (Offset+LoadSize > SrcValSize) {
     assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
     assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
@@ -1184,7 +1184,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
   // Replace uses of the original load with the wider load. On a big endian
   // system, we need to shift down to get the relevant bits.
   Value *RV = NewLoad;
-  if (TD.isBigEndian())
+  if (DL.isBigEndian())
     RV = Builder.CreateLShr(RV,
                     NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
   RV = Builder.CreateTrunc(RV, SrcVal->getType());
@@ -1199,7 +1199,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
     SrcVal = NewLoad;
   }
-  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
+  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
 }
@@ -1207,9 +1207,9 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
 /// memdep query of a load that ends up being a clobbering mem intrinsic.
 static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                      Type *LoadTy, Instruction *InsertPt,
-                                     const DataLayout &TD){
+                                     const DataLayout &DL){
   LLVMContext &Ctx = LoadTy->getContext();
-  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
+  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
   IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1240,7 +1240,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
     ++NumBytesSet;
   }
-  return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
+  return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
   }
   // Otherwise, this is a memcpy/memmove from a constant global.
@@ -1256,7 +1256,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
     ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
   Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
   Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
-  return ConstantFoldLoadFromConstPtr(Src, &TD);
+  return ConstantFoldLoadFromConstPtr(Src, &DL);
 }
@@ -1322,10 +1322,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
   if (isSimpleValue()) {
     Res = getSimpleValue();
     if (Res->getType() != LoadTy) {
-      const DataLayout *TD = gvn.getDataLayout();
-      assert(TD && "Need target data to handle type mismatch case");
+      const DataLayout *DL = gvn.getDataLayout();
+      assert(DL && "Need target data to handle type mismatch case");
       Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
-                                 *TD);
+                                 *DL);
       DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                    << *getSimpleValue() << '\n'
@@ -1344,10 +1344,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
                    << *Res << '\n' << "\n\n\n");
     }
   } else if (isMemIntrinValue()) {
-    const DataLayout *TD = gvn.getDataLayout();
-    assert(TD && "Need target data to handle type mismatch case");
+    const DataLayout *DL = gvn.getDataLayout();
+    assert(DL && "Need target data to handle type mismatch case");
     Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
-                                 LoadTy, BB->getTerminator(), *TD);
+                                 LoadTy, BB->getTerminator(), *DL);
     DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                  << " " << *getMemIntrinValue() << '\n'
                  << *Res << '\n' << "\n\n\n");
@@ -1400,9 +1400,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
     // read by the load, we can extract the bits we need for the load from the
     // stored value.
     if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
-      if (TD && Address) {
+      if (DL && Address) {
         int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
-                                                    DepSI, *TD);
+                                                    DepSI, *DL);
         if (Offset != -1) {
           ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                      DepSI->getValueOperand(),
@@ -1419,10 +1419,10 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
     if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
       // If this is a clobber and L is the first instruction in its block, then
       // we have the first instruction in the entry block.
-      if (DepLI != LI && Address && TD) {
+      if (DepLI != LI && Address && DL) {
         int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
                                                    LI->getPointerOperand(),
-                                                   DepLI, *TD);
+                                                   DepLI, *DL);
         if (Offset != -1) {
           ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
@@ -1435,9 +1435,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
     // If the clobbering value is a memset/memcpy/memmove, see if we can
     // forward a value on from it.
     if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
-      if (TD && Address) {
+      if (DL && Address) {
         int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
-                                                      DepMI, *TD);
+                                                      DepMI, *DL);
         if (Offset != -1) {
           ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                 Offset));
@@ -1469,8 +1469,8 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
       if (S->getValueOperand()->getType() != LI->getType()) {
         // If the stored value is larger or equal to the loaded value, we can
         // reuse it.
-        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
-                                                        LI->getType(), *TD)) {
+        if (DL == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
+                                                        LI->getType(), *DL)) {
           UnavailableBlocks.push_back(DepBB);
           continue;
         }
@@ -1486,7 +1486,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
       if (LD->getType() != LI->getType()) {
         // If the stored value is larger or equal to the loaded value, we can
         // reuse it.
-        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
+        if (DL == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)){
           UnavailableBlocks.push_back(DepBB);
           continue;
         }
@@ -1609,7 +1609,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
   // If all preds have a single successor, then we know it is safe to insert
   // the load on the pred (?!?), so we can insert code to materialize the
   // pointer if it is not available.
-  PHITransAddr Address(LI->getPointerOperand(), TD);
+  PHITransAddr Address(LI->getPointerOperand(), DL);
   Value *LoadPtr = 0;
   LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                               *DT, NewInsts);
@@ -1821,7 +1821,7 @@ bool GVN::processLoad(LoadInst *L) {
   // If we have a clobber and target data is around, see if this is a clobber
   // that we can fix up through code synthesis.
-  if (Dep.isClobber() && TD) {
+  if (Dep.isClobber() && DL) {
     // Check to see if we have something like this:
     //   store i32 123, i32* %P
     //   %A = bitcast i32* %P to i8*
@@ -1836,10 +1836,10 @@ bool GVN::processLoad(LoadInst *L) {
     if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
       int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                   L->getPointerOperand(),
-                                                  DepSI, *TD);
+                                                  DepSI, *DL);
       if (Offset != -1)
         AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
-                                        L->getType(), L, *TD);
+                                        L->getType(), L, *DL);
     }
     // Check to see if we have something like this:
@@ -1854,7 +1854,7 @@ bool GVN::processLoad(LoadInst *L) {
       int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
                                                  L->getPointerOperand(),
-                                                 DepLI, *TD);
+                                                 DepLI, *DL);
       if (Offset != -1)
         AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
     }
@@ -1864,9 +1864,9 @@ bool GVN::processLoad(LoadInst *L) {
     if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
       int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                     L->getPointerOperand(),
-                                                    DepMI, *TD);
+                                                    DepMI, *DL);
       if (Offset != -1)
-        AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
+        AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);
     }
     if (AvailVal) {
@@ -1917,9 +1917,9 @@ bool GVN::processLoad(LoadInst *L) {
     // actually have the same type. See if we know how to reuse the stored
     // value (depending on its type).
     if (StoredVal->getType() != L->getType()) {
-      if (TD) {
+      if (DL) {
         StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
-                                                   L, *TD);
+                                                   L, *DL);
         if (StoredVal == 0)
           return false;
@@ -1946,9 +1946,9 @@ bool GVN::processLoad(LoadInst *L) {
     // the same type. See if we know how to reuse the previously loaded value
     // (depending on its type).
     if (DepLI->getType() != L->getType()) {
-      if (TD) {
+      if (DL) {
         AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
-                                                      L, *TD);
+                                                      L, *DL);
         if (AvailableVal == 0)
           return false;
@@ -2200,7 +2200,7 @@ bool GVN::processInstruction(Instruction *I) {
   // to value numbering it. Value numbering often exposes redundancies, for
   // example if it determines that %y is equal to %x then the instruction
   // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
-  if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
+  if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {
     I->replaceAllUsesWith(V);
     if (MD && V->getType()->getScalarType()->isPointerTy())
       MD->invalidateCachedPointerInfo(V);
@@ -2318,7 +2318,7 @@ bool GVN::runOnFunction(Function& F) {
   if (!NoLoads)
     MD = &getAnalysis<MemoryDependenceAnalysis>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
   VN.setMemDep(MD);
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp
index a15cc84cfd..161e35a648 100644
--- a/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -126,15 +126,15 @@ namespace {
   }
   struct GlobalCmp {
-    const DataLayout *TD;
+    const DataLayout *DL;
-    GlobalCmp(const DataLayout *td) : TD(td) { }
+    GlobalCmp(const DataLayout *DL) : DL(DL) { }
     bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
       Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
       Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
-      return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
+      return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
     }
   };
 };
@@ -148,7 +148,7 @@ INITIALIZE_PASS(GlobalMerge, "global-merge",
 bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                           Module &M, bool isConst, unsigned AddrSpace) const {
   const TargetLowering *TLI = TM->getTargetLowering();
-  const DataLayout *TD = TLI->getDataLayout();
+  const DataLayout *DL = TLI->getDataLayout();
   // FIXME: Infer the maximum possible offset depending on the actual users
   // (these max offsets are different for the users inside Thumb or ARM
@@ -156,7 +156,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
   unsigned MaxOffset = TLI->getMaximalGlobalOffset();
   // FIXME: Find better heuristics
-  std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));
+  std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(DL));
   Type *Int32Ty = Type::getInt32Ty(M.getContext());
@@ -167,7 +167,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
     std::vector<Constant*> Inits;
     for (j = i; j != e; ++j) {
       Type *Ty = Globals[j]->getType()->getElementType();
-      MergedSize += TD->getTypeAllocSize(Ty);
+      MergedSize += DL->getTypeAllocSize(Ty);
       if (MergedSize > MaxOffset) {
         break;
       }
@@ -242,7 +242,7 @@ bool GlobalMerge::doInitialization(Module &M) {
   DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
                                                         BSSGlobals;
   const TargetLowering *TLI = TM->getTargetLowering();
-  const DataLayout *TD = TLI->getDataLayout();
+  const DataLayout *DL = TLI->getDataLayout();
   unsigned MaxOffset = TLI->getMaximalGlobalOffset();
   bool Changed = false;
   setMustKeepGlobalVariables(M);
@@ -260,9 +260,9 @@ bool GlobalMerge::doInitialization(Module &M) {
     unsigned AddressSpace = PT->getAddressSpace();
     // Ignore fancy-aligned globals for now.
-    unsigned Alignment = TD->getPreferredAlignment(I);
+    unsigned Alignment = DL->getPreferredAlignment(I);
     Type *Ty = I->getType()->getElementType();
-    if (Alignment > TD->getABITypeAlignment(Ty))
+    if (Alignment > DL->getABITypeAlignment(Ty))
       continue;
     // Ignore all 'special' globals.
@@ -274,7 +274,7 @@ bool GlobalMerge::doInitialization(Module &M) {
     if (isMustKeepGlobalVariable(I))
       continue;
-    if (TD->getTypeAllocSize(Ty) < MaxOffset) {
+    if (DL->getTypeAllocSize(Ty) < MaxOffset) {
       if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())
           .isBSSLocal())
         BSSGlobals[AddressSpace].push_back(I);
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 723af0f02c..1543e5f1eb 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -71,7 +71,7 @@ namespace {
     LoopInfo *LI;
     ScalarEvolution *SE;
     DominatorTree *DT;
-    DataLayout *TD;
+    DataLayout *DL;
     TargetLibraryInfo *TLI;
     SmallVector<WeakVH, 16> DeadInsts;
@@ -79,7 +79,7 @@ namespace {
   public:
     static char ID; // Pass identification, replacement for typeid
-    IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),
+    IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), DL(0),
                        Changed(false) {
       initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
     }
@@ -659,14 +659,14 @@ namespace {
 /// extended by this sign or zero extend operation. This is used to determine
 /// the final width of the IV before actually widening it.
 static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
-                        const DataLayout *TD) {
+                        const DataLayout *DL) {
   bool IsSigned = Cast->getOpcode() == Instruction::SExt;
   if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
     return;
   Type *Ty = Cast->getType();
   uint64_t Width = SE->getTypeSizeInBits(Ty);
-  if (TD && !TD->isLegalInteger(Width))
+  if (DL && !DL->isLegalInteger(Width))
     return;
   if (!WI.WidestNativeType) {
@@ -1122,15 +1122,15 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
 namespace {
   class IndVarSimplifyVisitor : public IVVisitor {
     ScalarEvolution *SE;
-    const DataLayout *TD;
+    const DataLayout *DL;
     PHINode *IVPhi;
   public:
     WideIVInfo WI;
     IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
-                          const DataLayout *TData, const DominatorTree *DTree):
-      SE(SCEV), TD(TData), IVPhi(IV) {
+                          const DataLayout *DL, const DominatorTree *DTree):
+      SE(SCEV), DL(DL), IVPhi(IV) {
       DT = DTree;
       WI.NarrowIV = IVPhi;
       if (ReduceLiveIVs)
@@ -1138,7 +1138,7 @@ namespace {
     }
     // Implement the interface used by simplifyUsersOfIV.
-    virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, TD); }
+    virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, DL); }
   };
 }
@@ -1172,7 +1172,7 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
     PHINode *CurrIV = LoopPhis.pop_back_val();
     // Information about sign/zero extensions of CurrIV.
-    IndVarSimplifyVisitor Visitor(CurrIV, SE, TD, DT);
+    IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, DT);
     Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);
@@ -1444,7 +1444,7 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
 /// could at least handle constant BECounts.
 static PHINode *
 FindLoopCounter(Loop *L, const SCEV *BECount,
-                ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
+                ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) {
   uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
   Value *Cond =
@@ -1473,7 +1473,7 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
     // AR may be wider than BECount. With eq/ne tests overflow is immaterial.
     // AR may not be a narrower type, or we may never exit.
     uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
-    if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))
+    if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))
       continue;
     const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
@@ -1818,7 +1818,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   LI = &getAnalysis<LoopInfo>();
   SE = &getAnalysis<ScalarEvolution>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
   DeadInsts.clear();
@@ -1860,7 +1860,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
   // If we have a trip count expression, rewrite the loop's exit condition
   // using it. We can currently only handle loops with a single exit.
   if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
-    PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
+    PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);
     if (IndVar) {
       // Check preconditions for proper SCEVExpander operation. SCEV does not
       // express SCEVExpander's dependencies, such as LoopSimplify. Instead any
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 087944a2be..e4b088d82b 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -76,7 +76,7 @@ namespace {
   /// revectored to the false side of the second if.
   ///
   class JumpThreading : public FunctionPass {
-    DataLayout *TD;
+    DataLayout *DL;
     TargetLibraryInfo *TLI;
     LazyValueInfo *LVI;
 #ifdef NDEBUG
@@ -152,7 +152,7 @@ bool JumpThreading::runOnFunction(Function &F) {
     return false;
   DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   LVI = &getAnalysis<LazyValueInfo>();
@@ -493,7 +493,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
       Value *LHS = PN->getIncomingValue(i);
       Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
-      Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);
+      Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
       if (Res == 0) {
         if (!isa<Constant>(RHS))
           continue;
@@ -695,7 +695,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
   // Run constant folding to see if we can reduce the condition to a simple
   // constant.
   if (Instruction *I = dyn_cast<Instruction>(Condition)) {
-    Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);
+    Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);
     if (SimpleVal) {
       I->replaceAllUsesWith(SimpleVal);
       I->eraseFromParent();
@@ -1478,7 +1478,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
   // At this point, the IR is fully up to date and consistent. Do a quick scan
   // over the new instructions and zap any that are constants or dead. This
   // frequently happens because of phi translation.
-  SimplifyInstructionsInBlock(NewBB, TD, TLI);
+  SimplifyInstructionsInBlock(NewBB, DL, TLI);
   // Threaded an edge!
   ++NumThreads;
@@ -1560,7 +1560,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
     // If this instruction can be simplified after the operands are updated,
     // just use the simplified value instead. This frequently happens due to
     // phi translation.
-    if (Value *IV = SimplifyInstruction(New, TD)) {
+    if (Value *IV = SimplifyInstruction(New, DL)) {
       delete New;
       ValueMapping[BI] = IV;
     } else {
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 43247ea9b7..2ba6210b47 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -108,7 +108,7 @@ namespace {
     LoopInfo *LI; // Current LoopInfo
     DominatorTree *DT; // Dominator Tree for the current Loop.
-    DataLayout *TD; // DataLayout for constant folding.
+    DataLayout *DL; // DataLayout for constant folding.
     TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
     // State that is updated as we process loops.
@@ -221,7 +221,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
   AA = &getAnalysis<AliasAnalysis>();
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
@@ -394,7 +394,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
       // Try constant folding this instruction. If all the operands are
       // constants, it is technically hoistable, but it would be better to just
       // fold it.
-      if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {
+      if (Constant *C = ConstantFoldInstruction(&I, DL, TLI)) {
         DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
         CurAST->copyValue(&I, C);
         CurAST->deleteValue(&I);
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b9c4046c0c..acff3734e1 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -132,7 +132,7 @@ namespace {
   class LoopIdiomRecognize : public LoopPass {
     Loop *CurLoop;
-    const DataLayout *TD;
+    const DataLayout *DL;
     DominatorTree *DT;
     ScalarEvolution *SE;
     TargetLibraryInfo *TLI;
@@ -141,7 +141,7 @@ namespace {
     static char ID;
     explicit LoopIdiomRecognize() : LoopPass(ID) {
       initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
-      TD = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
+      DL = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
     }
     bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -182,7 +182,7 @@ namespace {
     }
     const DataLayout *getDataLayout() {
-      return TD ? TD : TD=getAnalysisIfAvailable<DataLayout>();
+      return DL ? DL : DL=getAnalysisIfAvailable<DataLayout>();
     }
     DominatorTree *getDominatorTree() {
@@ -782,7 +782,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
   Value *StorePtr = SI->getPointerOperand();
   // Reject stores that are so large that they overflow an unsigned.
-  uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
+  uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
   if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
     return false;
@@ -910,7 +910,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
 ///
 /// Note that we don't ever attempt to use memset_pattern8 or 4, because these
 /// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
   // If the value isn't a constant, we can't promote it to being in a constant
   // array. We could theoretically do a store to an alloca or something, but
   // that doesn't seem worthwhile.
@@ -918,12 +918,12 @@ static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
   if (C == 0) return 0;
   // Only handle simple values that are a power of two bytes in size.
-  uint64_t Size = TD.getTypeSizeInBits(V->getType());
+  uint64_t Size = DL.getTypeSizeInBits(V->getType());
   if (Size == 0 || (Size & 7) || (Size & (Size-1)))
     return 0;
   // Don't care enough about darwin/ppc to implement this.
-  if (TD.isBigEndian())
+  if (DL.isBigEndian())
     return 0;
   // Convert to size in bytes.
@@ -970,7 +970,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
     PatternValue = 0;
   } else if (DestAS == 0 &&
              TLI->has(LibFunc::memset_pattern16) &&
-             (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
+             (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
     // Don't create memset_pattern16s with address spaces.
     // It looks like we can use PatternValue!
     SplatValue = 0;
@@ -1011,7 +1011,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
   // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
   // pointer size if it isn't already.
-  Type *IntPtr = Builder.getIntPtrTy(TD, DestAS);
+  Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
   const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
@@ -1125,7 +1125,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
   // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
   // pointer size if it isn't already.
-  Type *IntPtrTy = Builder.getIntPtrTy(TD, SI->getPointerAddressSpace());
+  Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
   BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
   const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1),
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 6619d54288..8c5620fe1d 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -195,9 +195,9 @@ class MemsetRanges {
   /// because each element is relatively large and expensive to copy.
   std::list<MemsetRange> Ranges;
   typedef std::list<MemsetRange>::iterator range_iterator;
-  const DataLayout &TD;
+  const DataLayout &DL;
 public:
-  MemsetRanges(const DataLayout &td) : TD(td) {}
+  MemsetRanges(const DataLayout &DL) : DL(DL) {}
   typedef std::list<MemsetRange>::const_iterator const_iterator;
   const_iterator begin() const { return Ranges.begin(); }
@@ -212,7 +212,7 @@ public:
   }
   void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
-    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
     addRange(OffsetFromFirst, StoreSize,
              SI->getPointerOperand(), SI->getAlignment(), SI);
@@ -305,14 +305,14 @@ namespace {
   class MemCpyOpt : public FunctionPass {
     MemoryDependenceAnalysis *MD;
     TargetLibraryInfo *TLI;
-    const DataLayout *TD;
+    const DataLayout *DL;
   public:
     static char ID; // Pass identification, replacement for typeid
     MemCpyOpt() : FunctionPass(ID) {
       initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
       MD = 0;
       TLI = 0;
-      TD = 0;
+      DL = 0;
     }
     bool runOnFunction(Function &F);
@@ -366,13 +366,13 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
 /// attempts to merge them together into a memcpy/memset.
 Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                              Value *StartPtr, Value *ByteVal) {
-  if (TD == 0) return 0;
+  if (DL == 0) return 0;
   // Okay, so we now have a single store that can be splatable. Scan to find
   // all subsequent stores of the same value to offset from the same pointer.
   // Join these together into ranges, so we can decide whether contiguous blocks
   // are stored.
-  MemsetRanges Ranges(*TD);
+  MemsetRanges Ranges(*DL);
   BasicBlock::iterator BI = StartInst;
   for (++BI; !isa<TerminatorInst>(BI); ++BI) {
@@ -396,7 +396,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
       // Check to see if this store is to a constant offset from the start ptr.
       int64_t Offset;
       if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
-                           Offset, *TD))
+                           Offset, *DL))
         break;
       Ranges.addStore(Offset, NextStore);
@@ -409,7 +409,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
       // Check to see if this store is to a constant offset from the start ptr.
       int64_t Offset;
-      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
+      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *DL))
         break;
       Ranges.addMemSet(Offset, MSI);
@@ -441,7 +441,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
     if (Range.TheStores.size() == 1) continue;
     // If it is profitable to lower this range to memset, do so now.
-    if (!Range.isProfitableToUseMemset(*TD))
+    if (!Range.isProfitableToUseMemset(*DL))
       continue;
     // Otherwise, we do want to transform this! Create a new memset.
@@ -453,7 +453,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
     if (Alignment == 0) {
       Type *EltType =
         cast<PointerType>(StartPtr->getType())->getElementType();
-      Alignment = TD->getABITypeAlignment(EltType);
+      Alignment = DL->getABITypeAlignment(EltType);
     }
     AMemSet =
@@ -484,7 +484,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
 bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
   if (!SI->isSimple()) return false;
-  if (TD == 0) return false;
+  if (DL == 0) return false;
   // Detect cases where we're performing call slot forwarding, but
   // happen to be using a load-store pair to implement it, rather than
@@ -514,15 +514,15 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
     if (C) {
       unsigned storeAlign = SI->getAlignment();
       if (!storeAlign)
-        storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
+        storeAlign = DL->getABITypeAlignment(SI->getOperand(0)->getType());
       unsigned loadAlign = LI->getAlignment();
       if (!loadAlign)
-        loadAlign = TD->getABITypeAlignment(LI->getType());
+        loadAlign = DL->getABITypeAlignment(LI->getType());
       bool changed = performCallSlotOptzn(LI,
                         SI->getPointerOperand()->stripPointerCasts(),
                         LI->getPointerOperand()->stripPointerCasts(),
-                        TD->getTypeStoreSize(SI->getOperand(0)->getType()),
+                        DL->getTypeStoreSize(SI->getOperand(0)->getType()),
                         std::min(storeAlign, loadAlign), C);
       if (changed) {
         MD->removeInstruction(SI);
@@ -596,13 +596,13 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
     return false;
   // Check that all of src is copied to dest.
-  if (TD == 0) return false;
+  if (DL == 0) return false;
   ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
   if (!srcArraySize)
     return false;
-  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
+  uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *
     srcArraySize->getZExtValue();
   if (cpyLen < srcSize)
@@ -617,7 +617,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
     if (!destArraySize)
       return false;
-    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
+    uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *
       destArraySize->getZExtValue();
     if (destSize < srcSize)
@@ -636,7 +636,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
       return false;
     }
-    uint64_t destSize = TD->getTypeAllocSize(StructTy);
+    uint64_t destSize = DL->getTypeAllocSize(StructTy);
     if (destSize < srcSize)
       return false;
   } else {
@@ -646,7 +646,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
   // Check that dest points to memory that is at least as aligned as src.
   unsigned srcAlign = srcAlloca->getAlignment();
   if (!srcAlign)
-    srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
+    srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());
   bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
   // If dest is not aligned enough and we can't increase its alignment then
   // bail out.
@@ -912,12 +912,12 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
 /// processByValArgument - This is called on every byval argument in call sites.
 bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
-  if (TD == 0) return false;
+  if (DL == 0) return false;
   // Find out what feeds this byval argument.
   Value *ByValArg = CS.getArgument(ArgNo);
   Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
-  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
+  uint64_t ByValSize = DL->getTypeAllocSize(ByValTy);
   MemDepResult DepInfo =
     MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                  true, CS.getInstruction(),
@@ -946,7 +946,7 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
   // If it is greater than the memcpy, then we check to see if we can force the
   // source of the memcpy to the alignment we need. If we fail, we bail out.
   if (MDep->getAlignment() < ByValAlign &&
-      getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
+      getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, DL) < ByValAlign)
     return false;
   // Verify that the copied-from memory doesn't change in between the memcpy and
@@ -1025,7 +1025,7 @@ bool MemCpyOpt::runOnFunction(Function &F) {
   bool MadeChange = false;
   MD = &getAnalysis<MemoryDependenceAnalysis>();
-  TD = getAnalysisIfAvailable<DataLayout>();
+  DL = getAnalysisIfAvailable<DataLayout>();
   TLI = &getAnalysis<TargetLibraryInfo>();
   // If we don't have at least memset and memcpy, there is little point of doing
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 02c64d9ea8..335ef3147d 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -153,7 +153,7 @@ namespace {
 /// Constant Propagation.
 ///
 class SCCPSolver : public InstVisitor<SCCPSolver> {
-  const DataLayout *TD;
+  const DataLayout *DL;
   const TargetLibraryInfo *TLI;
   SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
   DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -205,8 +205,8 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
   typedef std::pair<BasicBlock*, BasicBlock*> Edge;
   DenseSet<Edge> KnownFeasibleEdges;
 public:
-  SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
-    : TD(td), TLI(tli) {}
+  SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
+    : DL(DL), TLI(tli) {}
   /// MarkBlockExecutable - This method can be used by clients to mark all of
   /// the blocks that are known to be intrinsically live in the processed unit.
@@ -1067,7 +1067,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
   }
   // Transform load from a constant into a constant if possible.
-  if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, TD))
+  if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))
     return markConstant(IV, &I, C);
   // Otherwise we cannot say for certain what value this load will produce.
@@ -1557,9 +1557,9 @@ bool SCCP::runOnFunction(Function &F) {
     return false;
   DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
-  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+  const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
   const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
-  SCCPSolver Solver(TD, TLI);
+  SCCPSolver Solver(DL, TLI);
   // Mark the first block of the function as being executable.
   Solver.MarkBlockExecutable(F.begin());
@@ -1686,9 +1686,9 @@ static bool AddressIsTaken(const GlobalValue *GV) {
 }
 bool IPSCCP::runOnModule(Module &M) {
-  const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+  const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
   const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
-  SCCPSolver Solver(TD, TLI);
+  SCCPSolver Solver(DL, TLI);
   // AddressTakenFunctions - This set keeps track of the address-taken functions
   // that are in the input. As IPSCCP runs through and simplifies code,
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index a8186ae342..ac7e7773aa 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -87,7 +87,7 @@ namespace {
 private:
   bool HasDomTree;
-  DataLayout *TD;
+  DataLayout *DL;
   /// DeadInsts - Keep track of instructions we have made dead, so that
   /// we can remove them after we are done working.
@@ -258,7 +258,7 @@ namespace {
 class ConvertToScalarInfo {
   /// AllocaSize - The size of the alloca being considered in bytes.
   unsigned AllocaSize;
-  const DataLayout &TD;
+  const DataLayout &DL;
   unsigned ScalarLoadThreshold;
   /// IsNotTrivial - This is set to true if there is some access to the object
@@ -301,9 +301,9 @@ class ConvertToScalarInfo {
   bool HadDynamicAccess;
 public:
-  explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
+  explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
                                unsigned SLT)
-    : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
+    : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),
       ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
       HadDynamicAccess(false) { }
@@ -364,7 +364,7 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
     return 0;
   if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
-      !HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))
+      !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
     return 0;
   // Dynamic accesses on integers aren't yet supported. They need us to shift
   // by a dynamic amount which could be difficult to work out as we might not
@@ -520,7 +520,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
       HadDynamicAccess = true;
     } else
       GEPNonConstantIdx = NonConstantIdx;
-    uint64_t GEPOffset = TD.getIndexedOffset(PtrTy,
+    uint64_t GEPOffset = DL.getIndexedOffset(PtrTy,
                                              Indices);
     // See if all uses can be converted.
     if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
@@ -615,7 +615,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
       GEPNonConstantIdx = Indices.pop_back_val();
     } else
       GEPNonConstantIdx = NonConstantIdx;
-    uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
+    uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(),
                                              Indices);
     ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
     GEP->eraseFromParent();
@@ -692,9 +692,9 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
     // If the source and destination are both to the same alloca, then this is
     // a noop copy-to-self, just delete it. Otherwise, emit a load and store
     // as appropriate.
-    AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD, 0));
+    AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL, 0));
-    if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {
+    if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {
       // Dest must be OrigAI, change this to be a load from the original
       // pointer (bitcasted), then a store to our new alloca.
       assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
@@ -710,7 +710,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
       LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
       SrcVal->setAlignment(MTI->getAlignment());
       Builder.CreateStore(SrcVal, NewAI);
-    } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {
+    } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {
       // Src must be OrigAI, change this to be a load from NewAI then a store
       // through the original dest pointer (bitcasted).
       assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
@@ -770,15 +770,15 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
   // If the result alloca is a vector type, this is either an element
   // access or a bitcast to another vector type of the same size.
   if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
-    unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
-    unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
+    unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
+    unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
     if (FromTypeSize == ToTypeSize)
       return Builder.CreateBitCast(FromVal, ToType);
     // Otherwise it must be an element access.
unsigned Elt = 0; if (Offset) { - unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType()); + unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType()); Elt = Offset/EltSize; assert(EltSize*Elt == Offset && "Invalid modulus in validity checking"); } @@ -804,7 +804,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType, if (StructType *ST = dyn_cast<StructType>(ToType)) { assert(!NonConstantIdx && "Dynamic indexing into struct types not supported"); - const StructLayout &Layout = *TD.getStructLayout(ST); + const StructLayout &Layout = *DL.getStructLayout(ST); Value *Res = UndefValue::get(ST); for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i), @@ -818,7 +818,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType, if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) { assert(!NonConstantIdx && "Dynamic indexing into array types not supported"); - uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType()); + uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType()); Value *Res = UndefValue::get(AT); for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(), @@ -834,12 +834,12 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType, // If this is a big-endian system and the load is narrower than the // full alloca type, we need to do a shift to get the right bits. int ShAmt = 0; - if (TD.isBigEndian()) { + if (DL.isBigEndian()) { // On big-endian machines, the lowest bit is stored at the bit offset // from the pointer given by getTypeStoreSizeInBits. This matters for // integers with a bitwidth that is not a multiple of 8. - ShAmt = TD.getTypeStoreSizeInBits(NTy) - - TD.getTypeStoreSizeInBits(ToType) - Offset; + ShAmt = DL.getTypeStoreSizeInBits(NTy) - + DL.getTypeStoreSizeInBits(ToType) - Offset; } else { ShAmt = Offset; } @@ -855,7 +855,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType, ConstantInt::get(FromVal->getType(), -ShAmt)); // Finally, unconditionally truncate the integer to the right width. - unsigned LIBitWidth = TD.getTypeSizeInBits(ToType); + unsigned LIBitWidth = DL.getTypeSizeInBits(ToType); if (LIBitWidth < NTy->getBitWidth()) FromVal = Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(), @@ -902,8 +902,8 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, LLVMContext &Context = Old->getContext(); if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) { - uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy); - uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType()); + uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy); + uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType()); // Changing the whole vector with memset or with an access of a different // vector type? 
@@ -914,7 +914,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, Type *EltTy = VTy->getElementType(); if (SV->getType() != EltTy) SV = Builder.CreateBitCast(SV, EltTy); - uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy); + uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy); unsigned Elt = Offset/EltSize; Value *Idx; if (NonConstantIdx) { @@ -933,7 +933,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, if (StructType *ST = dyn_cast<StructType>(SV->getType())) { assert(!NonConstantIdx && "Dynamic indexing into struct types not supported"); - const StructLayout &Layout = *TD.getStructLayout(ST); + const StructLayout &Layout = *DL.getStructLayout(ST); for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) { Value *Elt = Builder.CreateExtractValue(SV, i); Old = ConvertScalar_InsertValue(Elt, Old, @@ -946,7 +946,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) { assert(!NonConstantIdx && "Dynamic indexing into array types not supported"); - uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType()); + uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType()); for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) { Value *Elt = Builder.CreateExtractValue(SV, i); Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0, Builder); @@ -956,14 +956,14 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, // If SV is a float, convert it to the appropriate integer type. // If it is a pointer, do the same. - unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType()); - unsigned DestWidth = TD.getTypeSizeInBits(AllocaType); - unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType()); - unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType); + unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType()); + unsigned DestWidth = DL.getTypeSizeInBits(AllocaType); + unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType()); + unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType); if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy()) SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth)); else if (SV->getType()->isPointerTy()) - SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType())); + SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType())); // Zero extend or truncate the value if needed. if (SV->getType() != AllocaType) { @@ -982,7 +982,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, // If this is a big-endian system and the store is narrower than the // full alloca type, we need to do a shift to get the right bits. int ShAmt = 0; - if (TD.isBigEndian()) { + if (DL.isBigEndian()) { // On big-endian machines, the lowest bit is stored at the bit offset // from the pointer given by getTypeStoreSizeInBits. This matters for // integers with a bitwidth that is not a multiple of 8. @@ -1023,7 +1023,7 @@ bool SROA::runOnFunction(Function &F) { if (skipOptnoneFunction(F)) return false; - TD = getAnalysisIfAvailable<DataLayout>(); + DL = getAnalysisIfAvailable<DataLayout>(); bool Changed = performPromotion(F); @@ -1031,7 +1031,7 @@ bool SROA::runOnFunction(Function &F) { // theoretically needs to. It should be refactored in order to support // target-independent IR. Until this is done, just skip the actual // scalar-replacement portion of this pass. 
-  if (!TD) return Changed;
+  if (!DL) return Changed;
   while (1) {
     bool LocalChange = performScalarRepl(F);
@@ -1137,7 +1137,7 @@ public:
 ///
 /// We can do this to a select if its only uses are loads and if the operand to
 /// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
   bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
   bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
@@ -1149,10 +1149,10 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
     // Both operands to the select need to be dereferencable, either absolutely
    // (e.g. allocas) or at this point because we can see other accesses to it.
     if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
-                                                    LI->getAlignment(), TD))
+                                                    LI->getAlignment(), DL))
       return false;
     if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
-                                                    LI->getAlignment(), TD))
+                                                    LI->getAlignment(), DL))
       return false;
   }
@@ -1175,7 +1175,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
 ///
 /// We can do this to a select if its only uses are loads and if the operand to
 /// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
   // For now, we can only do this promotion if the load is in the same block as
   // the PHI, and if there are no stores between the phi and load.
   // TODO: Allow recursive phi users.
@@ -1225,7 +1225,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
     // If this pointer is always safe to load, or if we can prove that there is
     // already a load in the block, then we can move the load to the pred block.
     if (InVal->isDereferenceablePointer() ||
-        isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
+        isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))
       continue;
     return false;
@@ -1239,7 +1239,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
 /// direct (non-volatile) loads and stores to it.  If the alloca is close but
 /// not quite there, this will transform the code to allow promotion.  As such,
 /// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
   SetVector<Instruction*, SmallVector<Instruction*, 4>,
             SmallPtrSet<Instruction*, 4> > InstsToRewrite;
@@ -1268,12 +1268,12 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
       // This is very rare and we just scrambled the use list of AI, start
       // over completely.
-      return tryToMakeAllocaBePromotable(AI, TD);
+      return tryToMakeAllocaBePromotable(AI, DL);
     }
     // If it is safe to turn "load (select c, AI, ptr)" into a select of two
     // loads, then we can transform this by rewriting the select.
-    if (!isSafeSelectToSpeculate(SI, TD))
+    if (!isSafeSelectToSpeculate(SI, DL))
       return false;
     InstsToRewrite.insert(SI);
@@ -1288,7 +1288,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
     // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
     // in the pred blocks, then we can transform this by rewriting the PHI.
-    if (!isSafePHIToSpeculate(PN, TD))
+    if (!isSafePHIToSpeculate(PN, DL))
       return false;
     InstsToRewrite.insert(PN);
@@ -1423,7 +1423,7 @@ bool SROA::performPromotion(Function &F) {
     // the entry node
     for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
       if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
-        if (tryToMakeAllocaBePromotable(AI, TD))
+        if (tryToMakeAllocaBePromotable(AI, DL))
           Allocas.push_back(AI);
     if (Allocas.empty()) break;
@@ -1499,7 +1499,7 @@ bool SROA::performScalarRepl(Function &F) {
     // transform the allocation instruction if it is an array allocation
     // (allocations OF arrays are ok though), and an allocation of a scalar
     // value cannot be decomposed at all.
-    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
+    uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());
     // Do not promote [0 x %struct].
     if (AllocaSize == 0) continue;
@@ -1523,7 +1523,7 @@ bool SROA::performScalarRepl(Function &F) {
     // that we can't just check based on the type: the alloca may be of an i32
     // but that has pointer arithmetic to set byte 3 of it or something.
     if (AllocaInst *NewAI = ConvertToScalarInfo(
-          (unsigned)AllocaSize, *TD, ScalarLoadThreshold).TryConvert(AI)) {
+          (unsigned)AllocaSize, *DL, ScalarLoadThreshold).TryConvert(AI)) {
       NewAI->takeName(AI);
       AI->eraseFromParent();
       ++NumConverted;
@@ -1625,7 +1625,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
     if (!LI->isSimple())
       return MarkUnsafe(Info, User);
     Type *LIType = LI->getType();
-    isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+    isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
                     LIType, false, Info, LI, true /*AllowWholeAccess*/);
     Info.hasALoadOrStore = true;
@@ -1635,7 +1635,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
       return MarkUnsafe(Info, User);
     Type *SIType = SI->getOperand(0)->getType();
-    isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+    isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
                     SIType, true, Info, SI, true /*AllowWholeAccess*/);
     Info.hasALoadOrStore = true;
   } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
@@ -1684,7 +1684,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
     if (!LI->isSimple())
       return MarkUnsafe(Info, User);
     Type *LIType = LI->getType();
-    isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+    isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
                     LIType, false, Info, LI, false /*AllowWholeAccess*/);
     Info.hasALoadOrStore = true;
@@ -1694,7 +1694,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
       return MarkUnsafe(Info, User);
     Type *SIType = SI->getOperand(0)->getType();
-    isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+    isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
                     SIType, true, Info, SI, false /*AllowWholeAccess*/);
     Info.hasALoadOrStore = true;
   } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
@@ -1739,7 +1739,7 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
   // constant part of the offset.
   if (NonConstant)
     Indices.pop_back();
-  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+  Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
   if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
                         NonConstantIdxSize))
     MarkUnsafe(Info, GEPI);
@@ -1798,7 +1798,7 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
                            bool AllowWholeAccess) {
   // Check if this is a load/store of the entire alloca.
   if (Offset == 0 && AllowWholeAccess &&
-      MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {
+      MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {
     // This can be safe for MemIntrinsics (where MemOpType is 0) and integer
     // loads/stores (which are essentially the same as the MemIntrinsics with
     // regard to copying padding between elements).  But, if an alloca is
@@ -1835,20 +1835,20 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
   Type *EltTy;
   uint64_t EltSize;
   if (StructType *ST = dyn_cast<StructType>(T)) {
-    const StructLayout *Layout = TD->getStructLayout(ST);
+    const StructLayout *Layout = DL->getStructLayout(ST);
     unsigned EltIdx = Layout->getElementContainingOffset(Offset);
     EltTy = ST->getContainedType(EltIdx);
-    EltSize = TD->getTypeAllocSize(EltTy);
+    EltSize = DL->getTypeAllocSize(EltTy);
     Offset -= Layout->getElementOffset(EltIdx);
   } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
     EltTy = AT->getElementType();
-    EltSize = TD->getTypeAllocSize(EltTy);
+    EltSize = DL->getTypeAllocSize(EltTy);
     if (Offset >= AT->getNumElements() * EltSize)
       return false;
     Offset %= EltSize;
   } else if (VectorType *VT = dyn_cast<VectorType>(T)) {
     EltTy = VT->getElementType();
-    EltSize = TD->getTypeAllocSize(EltTy);
+    EltSize = DL->getTypeAllocSize(EltTy);
     if (Offset >= VT->getNumElements() * EltSize)
       return false;
     Offset %= EltSize;
@@ -1887,7 +1887,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
       uint64_t MemSize = Length->getZExtValue();
       if (Offset == 0 &&
-          MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
+          MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))
         RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
       // Otherwise the intrinsic can only touch a single element and the
       // address operand will be updated, so nothing else needs to be done.
@@ -1923,8 +1923,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
         LI->replaceAllUsesWith(Insert);
         DeadInsts.push_back(LI);
       } else if (LIType->isIntegerTy() &&
-                 TD->getTypeAllocSize(LIType) ==
-                 TD->getTypeAllocSize(AI->getAllocatedType())) {
+                 DL->getTypeAllocSize(LIType) ==
+                 DL->getTypeAllocSize(AI->getAllocatedType())) {
         // If this is a load of the entire alloca to an integer, rewrite it.
         RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
       }
@@ -1950,8 +1950,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
         }
         DeadInsts.push_back(SI);
       } else if (SIType->isIntegerTy() &&
-                 TD->getTypeAllocSize(SIType) ==
-                 TD->getTypeAllocSize(AI->getAllocatedType())) {
+                 DL->getTypeAllocSize(SIType) ==
+                 DL->getTypeAllocSize(AI->getAllocatedType())) {
         // If this is a store of the entire alloca from an integer, rewrite it.
         RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
       }
@@ -2013,7 +2013,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
                                     Type *&IdxTy) {
   uint64_t Idx = 0;
   if (StructType *ST = dyn_cast<StructType>(T)) {
-    const StructLayout *Layout = TD->getStructLayout(ST);
+    const StructLayout *Layout = DL->getStructLayout(ST);
     Idx = Layout->getElementContainingOffset(Offset);
     T = ST->getContainedType(Idx);
     Offset -= Layout->getElementOffset(Idx);
@@ -2021,7 +2021,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
     return Idx;
   } else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
     T = AT->getElementType();
-    uint64_t EltSize = TD->getTypeAllocSize(T);
+    uint64_t EltSize = DL->getTypeAllocSize(T);
     Idx = Offset / EltSize;
     Offset -= Idx * EltSize;
     IdxTy = Type::getInt64Ty(T->getContext());
@@ -2029,7 +2029,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
   }
   VectorType *VT = cast<VectorType>(T);
   T = VT->getElementType();
-  uint64_t EltSize = TD->getTypeAllocSize(T);
+  uint64_t EltSize = DL->getTypeAllocSize(T);
   Idx = Offset / EltSize;
   Offset -= Idx * EltSize;
   IdxTy = Type::getInt64Ty(T->getContext());
@@ -2050,7 +2050,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
   Value* NonConstantIdx = 0;
   if (!GEPI->hasAllConstantIndices())
     NonConstantIdx = Indices.pop_back_val();
-  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+  Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
   RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
@@ -2121,7 +2121,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
       V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
     IdxTy = NewElts[Idx]->getAllocatedType();
-    uint64_t EltSize = TD->getTypeAllocSize(IdxTy) - NewOffset;
+    uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;
     if (EltSize > Size) {
       EltSize = Size;
       Size = 0;
@@ -2137,7 +2137,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
   for (; Idx != NewElts.size() && Size; ++Idx) {
     IdxTy = NewElts[Idx]->getAllocatedType();
-    uint64_t EltSize = TD->getTypeAllocSize(IdxTy);
+    uint64_t EltSize = DL->getTypeAllocSize(IdxTy);
     if (EltSize > Size) {
       EltSize = Size;
       Size = 0;
@@ -2229,10 +2229,10 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
       PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
       Type *OtherTy = OtherPtrTy->getElementType();
       if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
-        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
+        EltOffset = DL->getStructLayout(ST)->getElementOffset(i);
       } else {
         Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
-        EltOffset = TD->getTypeAllocSize(EltTy)*i;
+        EltOffset = DL->getTypeAllocSize(EltTy)*i;
       }
       // The alignment of the other pointer is the guaranteed alignment of the
@@ -2273,7 +2273,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
         Type *ValTy = EltTy->getScalarType();
         // Construct an integer with the right value.
-        unsigned EltSize = TD->getTypeSizeInBits(ValTy);
+        unsigned EltSize = DL->getTypeSizeInBits(ValTy);
         APInt OneVal(EltSize, CI->getZExtValue());
         APInt TotalVal(OneVal);
         // Set each byte.
@@ -2303,7 +2303,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
       // this element.
       }
-      unsigned EltSize = TD->getTypeAllocSize(EltTy);
+      unsigned EltSize = DL->getTypeAllocSize(EltTy);
       if (!EltSize)
         continue;
@@ -2337,12 +2337,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
   // and store the element value to the individual alloca.
   Value *SrcVal = SI->getOperand(0);
   Type *AllocaEltTy = AI->getAllocatedType();
-  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+  uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
   IRBuilder<> Builder(SI);
   // Handle tail padding by extending the operand
-  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+  if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
     SrcVal = Builder.CreateZExt(SrcVal,
                             IntegerType::get(SI->getContext(), AllocaSizeBits));
@@ -2352,15 +2352,15 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
   // There are two forms here: AI could be an array or struct.  Both cases
   // have different ways to compute the element offset.
   if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
-    const StructLayout *Layout = TD->getStructLayout(EltSTy);
+    const StructLayout *Layout = DL->getStructLayout(EltSTy);
     for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
       // Get the number of bits to shift SrcVal to get the value.
       Type *FieldTy = EltSTy->getElementType(i);
       uint64_t Shift = Layout->getElementOffsetInBits(i);
-      if (TD->isBigEndian())
-        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
+      if (DL->isBigEndian())
+        Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);
       Value *EltVal = SrcVal;
       if (Shift) {
@@ -2369,7 +2369,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
       }
       // Truncate down to an integer of the right size.
-      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+      uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
       // Ignore zero sized fields like {}, they obviously contain no data.
       if (FieldSizeBits == 0) continue;
@@ -2394,12 +2394,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
   } else {
     ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
     Type *ArrayEltTy = ATy->getElementType();
-    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
-    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
+    uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
+    uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);
     uint64_t Shift;
-    if (TD->isBigEndian())
+    if (DL->isBigEndian())
       Shift = AllocaSizeBits-ElementOffset;
     else
       Shift = 0;
@@ -2433,7 +2433,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
       }
       new StoreInst(EltVal, DestField, SI);
-      if (TD->isBigEndian())
+      if (DL->isBigEndian())
         Shift -= ElementOffset;
       else
         Shift += ElementOffset;
@@ -2451,7 +2451,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
   // Extract each element out of the NewElts according to its structure offset
   // and form the result value.
   Type *AllocaEltTy = AI->getAllocatedType();
-  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+  uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
   DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
                << '\n');
@@ -2461,10 +2461,10 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
   const StructLayout *Layout = 0;
   uint64_t ArrayEltBitOffset = 0;
   if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
-    Layout = TD->getStructLayout(EltSTy);
+    Layout = DL->getStructLayout(EltSTy);
   } else {
     Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
-    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
+    ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
   }
   Value *ResultVal =
@@ -2476,7 +2476,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
     Value *SrcField = NewElts[i];
     Type *FieldTy =
       cast<PointerType>(SrcField->getType())->getElementType();
-    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+    uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
     // Ignore zero sized fields like {}, they obviously contain no data.
     if (FieldSizeBits == 0) continue;
@@ -2507,7 +2507,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
     else                  // Array case.
       Shift = i*ArrayEltBitOffset;
-    if (TD->isBigEndian())
+    if (DL->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
     if (Shift) {
@@ -2524,7 +2524,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
   }
   // Handle tail padding by truncating the result
-  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
+  if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
     ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
   LI->replaceAllUsesWith(ResultVal);
@@ -2534,15 +2534,15 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
 /// HasPadding - Return true if the specified type has any structure or
 /// alignment padding in between the elements that would be split apart
 /// by SROA; return false otherwise.
-static bool HasPadding(Type *Ty, const DataLayout &TD) {
+static bool HasPadding(Type *Ty, const DataLayout &DL) {
   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
     Ty = ATy->getElementType();
-    return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
+    return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
   }
   // SROA currently handles only Arrays and Structs.
   StructType *STy = cast<StructType>(Ty);
-  const StructLayout *SL = TD.getStructLayout(STy);
+  const StructLayout *SL = DL.getStructLayout(STy);
   unsigned PrevFieldBitOffset = 0;
   for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
     unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
@@ -2551,7 +2551,7 @@ static bool HasPadding(Type *Ty, const DataLayout &TD) {
     // previous one.
     if (i) {
       unsigned PrevFieldEnd =
-        PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
+        PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1));
       if (PrevFieldEnd < FieldBitOffset)
         return true;
     }
@@ -2560,7 +2560,7 @@ static bool HasPadding(Type *Ty, const DataLayout &TD) {
   // Check for tail padding.
   if (unsigned EltCount = STy->getNumElements()) {
     unsigned PrevFieldEnd = PrevFieldBitOffset +
-      TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
+      DL.getTypeSizeInBits(STy->getElementType(EltCount-1));
     if (PrevFieldEnd < SL->getSizeInBits())
       return true;
   }
@@ -2587,7 +2587,7 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
   // types, but may actually be used. In these cases, we refuse to promote the
   // struct.
   if (Info.isMemCpySrc && Info.isMemCpyDst &&
-      HasPadding(AI->getAllocatedType(), *TD))
+      HasPadding(AI->getAllocatedType(), *DL))
     return false;
   // If the alloca never has an access to just *part* of it, but is accessed
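A note on the DataLayout queries that recur throughout these hunks: the rename changes no behavior, but the three size queries it touches are easy to confuse. getTypeSizeInBits is the exact bit width (36 for an i36), getTypeStoreSizeInBits rounds that up to whole bytes (40), and getTypeAllocSizeInBits further rounds up to the ABI alignment (64 on a typical target); the HasPadding check above keys off the gap between the first and the third. The big-endian shift in ConvertScalar_ExtractValue follows from the same distinction. The helper below is an illustrative sketch only, not code from this patch: it restates that shift computation as a standalone function, reusing the patch's names (NTy for the alloca's integer type, ToType for the narrower loaded type, Offset for the bit offset).

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

// Illustrative helper, not part of the patch: how many bits to shift the
// wide integer right so its low bits hold the ToType value at bit Offset.
static int getExtractShiftAmount(const llvm::DataLayout &DL, llvm::Type *NTy,
                                 llvm::Type *ToType, uint64_t Offset) {
  if (DL.isBigEndian())
    // Big-endian: the lowest-addressed bits are the most significant, so
    // count down from the store size rather than up from bit zero.
    return int(DL.getTypeStoreSizeInBits(NTy) -
               DL.getTypeStoreSizeInBits(ToType) - Offset);
  // Little-endian: the bit offset is itself the shift amount.
  return int(Offset);
}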