author     Rafael Espindola <rafael.espindola@gmail.com>  2014-02-21 00:06:31 +0000
committer  Rafael Espindola <rafael.espindola@gmail.com>  2014-02-21 00:06:31 +0000
commit     f116e5308d2eed4b531da795e8eed91211c01241 (patch)
tree       d458cab6fb15c8319e97119600478aa358d4e954 /lib/Transforms
parent     23ffb3ea10530e36f8b779570f8e1cc686708051 (diff)
download   llvm-f116e5308d2eed4b531da795e8eed91211c01241.tar.gz
           llvm-f116e5308d2eed4b531da795e8eed91211c01241.tar.bz2
           llvm-f116e5308d2eed4b531da795e8eed91211c01241.tar.xz
Rename many DataLayout variables from TD to DL.
I am really sorry for the noise, but the current state, in which some parts of the code use TD (from the old name, TargetData) and other parts use DL, makes it hard to write a patch that changes where those variables come from and how they are passed along.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201827 91177308-0d34-0410-b5e6-96231b3b80d8
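To make the new convention concrete, the sketch below models the ConstantMerge changes in this patch: a pass holds a DataLayout pointer named DL, fetched with getAnalysisIfAvailable, and guards every use against null because DataLayout is an optional analysis in this era. ExamplePass is a hypothetical name for illustration, not code from the patch.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
using namespace llvm;

namespace {
struct ExamplePass : public ModulePass {   // hypothetical pass, for illustration only
  static char ID;
  const DataLayout *DL;                    // was: const DataLayout *TD;
  ExamplePass() : ModulePass(ID), DL(0) {}

  virtual bool runOnModule(Module &M) {
    // DataLayout is an optional analysis here, so this may return null.
    DL = getAnalysisIfAvailable<DataLayout>();
    return false;
  }

  // Mirrors ConstantMerge::getAlignment below: fall back to the target's
  // preferred alignment only when DL is available.
  unsigned getAlignment(GlobalVariable *GV) const {
    if (unsigned Align = GV->getAlignment())
      return Align;
    return DL ? DL->getPreferredAlignment(GV) : 0;
  }
};
char ExamplePass::ID = 0;
}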
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/ConstantMerge.cpp                      |  10
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                          | 136
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp                     |  46
-rw-r--r--  lib/Transforms/InstCombine/InstCombine.h                  |  14
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp          |  12
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp        |   6
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp           |  34
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp           |  54
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp        |  46
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp |  48
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp       |  16
-rw-r--r--  lib/Transforms/InstCombine/InstCombinePHI.cpp             |   6
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSelect.cpp          |  18
-rw-r--r--  lib/Transforms/InstCombine/InstCombineShifts.cpp          |   6
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp |  4
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp       | 110
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp       |  24
-rw-r--r--  lib/Transforms/Instrumentation/BoundsChecking.cpp         |  12
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp        |  32
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp        |  14
-rw-r--r--  lib/Transforms/Scalar/EarlyCSE.cpp                        |   6
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                             | 150
-rw-r--r--  lib/Transforms/Scalar/GlobalMerge.cpp                     |  20
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp                  |  26
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp                   |  12
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp                            |   6
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp              |  20
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp                 |  48
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp                            |  16
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp            | 182
-rw-r--r--  lib/Transforms/Utils/CloneFunction.cpp                    |  14
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp                      | 140
-rw-r--r--  lib/Transforms/Utils/SimplifyIndVar.cpp                   |   4
-rw-r--r--  lib/Transforms/Utils/SimplifyLibCalls.cpp                 | 170
-rw-r--r--  lib/Transforms/Vectorize/BBVectorize.cpp                  |  18
35 files changed, 740 insertions(+), 740 deletions(-)
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index aefcff9565..0f49c7e1ec 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -51,7 +51,7 @@ namespace {
// alignment to a concrete value.
unsigned getAlignment(GlobalVariable *GV) const;
- const DataLayout *TD;
+ const DataLayout *DL;
};
}
@@ -89,20 +89,20 @@ static bool IsBetterCanonical(const GlobalVariable &A,
}
bool ConstantMerge::hasKnownAlignment(GlobalVariable *GV) const {
- return TD || GV->getAlignment() != 0;
+ return DL || GV->getAlignment() != 0;
}
unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
unsigned Align = GV->getAlignment();
if (Align)
return Align;
- if (TD)
- return TD->getPreferredAlignment(GV);
+ if (DL)
+ return DL->getPreferredAlignment(GV);
return 0;
}
bool ConstantMerge::runOnModule(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 0381f8c180..8b20fcd2a4 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -84,7 +84,7 @@ namespace {
const GlobalStatus &GS);
bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
- DataLayout *TD;
+ DataLayout *DL;
TargetLibraryInfo *TLI;
};
}
@@ -266,7 +266,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ DataLayout *DL, TargetLibraryInfo *TLI) {
bool Changed = false;
// Note that we need to use a weak value handle for the worklist items. When
// we delete a constant array, we may also be holding pointer to one of its
@@ -296,12 +296,12 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Constant *SubInit = 0;
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
- Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
} else if ((CE->getOpcode() == Instruction::BitCast &&
CE->getType()->isPointerTy()) ||
CE->getOpcode() == Instruction::AddrSpaceCast) {
// Pointer cast, delete any stores and memsets to the global.
- Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);
}
if (CE->use_empty()) {
@@ -315,7 +315,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Constant *SubInit = 0;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE =
- dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
+ dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
@@ -325,7 +325,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
SubInit = Constant::getNullValue(GEP->getType()->getElementType());
}
- Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);
+ Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);
if (GEP->use_empty()) {
GEP->eraseFromParent();
@@ -342,7 +342,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// us, and if they are all dead, nuke them without remorse.
if (isSafeToDestroyConstant(C)) {
C->destroyConstant();
- CleanupConstantGlobalUsers(V, Init, TD, TLI);
+ CleanupConstantGlobalUsers(V, Init, DL, TLI);
return true;
}
}
@@ -467,7 +467,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
/// behavior of the program in a more fine-grained way. We have determined that
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
-static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
+static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
return 0;
@@ -482,11 +482,11 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// Get the alignment of the global, either explicit or target-specific.
unsigned StartAlignment = GV->getAlignment();
if (StartAlignment == 0)
- StartAlignment = TD.getABITypeAlignment(GV->getType());
+ StartAlignment = DL.getABITypeAlignment(GV->getType());
if (StructType *STy = dyn_cast<StructType>(Ty)) {
NewGlobals.reserve(STy->getNumElements());
- const StructLayout &Layout = *TD.getStructLayout(STy);
+ const StructLayout &Layout = *DL.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
@@ -503,7 +503,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
// propagate info to each field.
uint64_t FieldOffset = Layout.getElementOffset(i);
unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
- if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
+ if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
NGV->setAlignment(NewAlign);
}
} else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
@@ -517,8 +517,8 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
return 0; // It's not worth it.
NewGlobals.reserve(NumElements);
- uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
- unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
+ unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Constant *In = Init->getAggregateElement(i);
assert(In && "Couldn't get element of initializer?");
@@ -743,7 +743,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
- DataLayout *TD,
+ DataLayout *DL,
TargetLibraryInfo *TLI) {
bool Changed = false;
@@ -792,7 +792,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
Changed |= CleanupPointerRootUsers(GV, TLI);
} else {
Changed = true;
- CleanupConstantGlobalUsers(GV, 0, TD, TLI);
+ CleanupConstantGlobalUsers(GV, 0, DL, TLI);
}
if (GV->use_empty()) {
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
@@ -807,10 +807,10 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ DataLayout *DL, TargetLibraryInfo *TLI) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
- if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
+ if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
I->replaceAllUsesWith(NewC);
// Advance UI to the next non-I use to avoid invalidating it!
@@ -830,7 +830,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
CallInst *CI,
Type *AllocTy,
ConstantInt *NElements,
- DataLayout *TD,
+ DataLayout *DL,
TargetLibraryInfo *TLI) {
DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');
@@ -949,9 +949,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// To further other optimizations, loop over all users of NewGV and try to
// constant prop them. This will promote GEP instructions with constant
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
- ConstantPropUsersOf(NewGV, TD, TLI);
+ ConstantPropUsersOf(NewGV, DL, TLI);
if (RepValue != NewGV)
- ConstantPropUsersOf(RepValue, TD, TLI);
+ ConstantPropUsersOf(RepValue, DL, TLI);
return NewGV;
}
@@ -1278,7 +1278,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
- Value *NElems, DataLayout *TD,
+ Value *NElems, DataLayout *DL,
const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
Type *MAT = getMallocAllocatedType(CI, TLI);
@@ -1307,10 +1307,10 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
GV->getThreadLocalMode());
FieldGlobals.push_back(NGV);
- unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
+ unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
if (StructType *ST = dyn_cast<StructType>(FieldTy))
- TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());
+ TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
NElems, 0,
@@ -1470,9 +1470,9 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Type *AllocTy,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- DataLayout *TD,
+ DataLayout *DL,
TargetLibraryInfo *TLI) {
- if (!TD)
+ if (!DL)
return false;
// If this is a malloc of an abstract type, don't touch it.
@@ -1502,7 +1502,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size.
- Value *NElems = getMallocArraySize(CI, TD, TLI, true);
+ Value *NElems = getMallocArraySize(CI, DL, TLI, true);
if (!NElems)
return false;
@@ -1510,8 +1510,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// Restrict this transformation to only working on small allocations
// (2048 bytes currently), as we don't want to introduce a 16M global or
// something.
- if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
- GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI);
+ if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
+ GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
return true;
}
@@ -1540,8 +1540,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
- Type *IntPtrTy = TD->getIntPtrType(CI->getType());
- unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
+ Type *IntPtrTy = DL->getIntPtrType(CI->getType());
+ unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
@@ -1556,8 +1556,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = cast<CallInst>(Malloc);
}
- GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),
- TD, TLI);
+ GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
+ DL, TLI);
return true;
}
@@ -1569,7 +1569,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
AtomicOrdering Ordering,
Module::global_iterator &GVI,
- DataLayout *TD, TargetLibraryInfo *TLI) {
+ DataLayout *DL, TargetLibraryInfo *TLI) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1584,13 +1584,13 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
// Optimize away any trapping uses of the loaded value.
- if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
+ if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
Type *MallocType = getMallocAllocatedType(CI, TLI);
if (MallocType &&
TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
- TD, TLI))
+ DL, TLI))
return true;
}
}
@@ -1784,7 +1784,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
} else {
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
- Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
}
// If the global is dead now, delete it.
@@ -1800,7 +1800,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@@ -1813,8 +1813,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
++NumMarked;
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
- if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>())
- if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
+ if (DataLayout *DL = getAnalysisIfAvailable<DataLayout>())
+ if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *DL)) {
GVI = FirstNewGV; // Don't skip the newly produced globals!
return true;
}
@@ -1829,7 +1829,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);
// Clean up any obviously simplifiable users now.
- CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI);
+ CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
if (GV->use_empty()) {
DEBUG(dbgs() << " *** Substituting initializer allowed us to "
@@ -1846,7 +1846,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
- TD, TLI))
+ DL, TLI))
return true;
// Otherwise, if the global was not a boolean, we can shrink it to be a
@@ -1947,7 +1947,7 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
// Simplify the initializer.
if (GV->hasInitializer())
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
- Constant *New = ConstantFoldConstantExpression(CE, TD, TLI);
+ Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
if (New && New != CE)
GV->setInitializer(New);
}
@@ -2070,7 +2070,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD);
+ const DataLayout *DL);
/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
@@ -2083,7 +2083,7 @@ isSimpleEnoughValueToCommit(Constant *C,
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// Simple integer, undef, constant aggregate zero, global addresses, etc are
// all supported.
if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
@@ -2095,7 +2095,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
isa<ConstantVector>(C)) {
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
Constant *Op = cast<Constant>(C->getOperand(i));
- if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD))
+ if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))
return false;
}
return true;
@@ -2108,29 +2108,29 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
switch (CE->getOpcode()) {
case Instruction::BitCast:
// Bitcast is fine if the casted value is fine.
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
case Instruction::IntToPtr:
case Instruction::PtrToInt:
// int <=> ptr is fine if the int type is the same size as the
// pointer type.
- if (!TD || TD->getTypeSizeInBits(CE->getType()) !=
- TD->getTypeSizeInBits(CE->getOperand(0)->getType()))
+ if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
+ DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
// GEP is fine if it is simple + constant offset.
case Instruction::GetElementPtr:
for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
if (!isa<ConstantInt>(CE->getOperand(i)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
case Instruction::Add:
// We allow simple+cst.
if (!isa<ConstantInt>(CE->getOperand(1)))
return false;
- return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD);
+ return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
}
return false;
}
@@ -2138,11 +2138,11 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,
static inline bool
isSimpleEnoughValueToCommit(Constant *C,
SmallPtrSet<Constant*, 8> &SimpleConstants,
- const DataLayout *TD) {
+ const DataLayout *DL) {
// If we already checked this constant, we win.
if (!SimpleConstants.insert(C)) return true;
// Check the constant.
- return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD);
+ return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
@@ -2269,8 +2269,8 @@ namespace {
/// Once an evaluation call fails, the evaluation object should not be reused.
class Evaluator {
public:
- Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI)
- : TD(TD), TLI(TLI) {
+ Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
+ : DL(DL), TLI(TLI) {
ValueStack.push_back(new DenseMap<Value*, Constant*>);
}
@@ -2350,7 +2350,7 @@ private:
/// simple enough to live in a static initializer of a global.
SmallPtrSet<Constant*, 8> SimpleConstants;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
};
@@ -2403,7 +2403,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(SI->getOperand(1));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "; To: " << *Ptr << "\n");
}
if (!isSimpleEnoughPointerToCommit(Ptr)) {
@@ -2416,7 +2416,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
// If this might be too difficult for the backend to handle (e.g. the addr
// of one global variable divided by another) then we can't commit it.
- if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD)) {
+ if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
<< "\n");
return false;
@@ -2448,7 +2448,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
// If we can't improve the situation by introspecting NewTy,
// we have to give up.
@@ -2512,7 +2512,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Constant *Ptr = getVal(LI->getOperand(0));
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
- Ptr = ConstantFoldConstantExpression(CE, TD, TLI);
+ Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
DEBUG(dbgs() << "Found a constant pointer expression, constant "
"folding: " << *Ptr << "\n");
}
@@ -2589,9 +2589,9 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
Value *Ptr = PtrArg->stripPointerCasts();
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
- if (TD && !Size->isAllOnesValue() &&
+ if (DL && !Size->isAllOnesValue() &&
Size->getValue().getLimitedValue() >=
- TD->getTypeStoreSize(ElemTy)) {
+ DL->getTypeStoreSize(ElemTy)) {
Invariants.insert(GV);
DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
<< "\n");
@@ -2697,7 +2697,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
if (!CurInst->use_empty()) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
- InstResult = ConstantFoldConstantExpression(CE, TD, TLI);
+ InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
setVal(CurInst, InstResult);
}
@@ -2780,10 +2780,10 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can. Return true if we can, false otherwise.
-static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
+static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
const TargetLibraryInfo *TLI) {
// Call the function.
- Evaluator Eval(TD, TLI);
+ Evaluator Eval(DL, TLI);
Constant *RetValDummy;
bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
SmallVector<Constant*, 0>());
@@ -2831,7 +2831,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
if (F->empty()) continue;
// If we can evaluate the ctor at compile time, do.
- if (EvaluateStaticConstructor(F, TD, TLI)) {
+ if (EvaluateStaticConstructor(F, DL, TLI)) {
Ctors.erase(Ctors.begin()+i);
MadeChange = true;
--i;
@@ -3159,7 +3159,7 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Try to find the llvm.globalctors list.
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 33f0707f29..d7d4cc914d 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -108,12 +108,12 @@ public:
static const ComparableFunction TombstoneKey;
static DataLayout * const LookupOnly;
- ComparableFunction(Function *Func, DataLayout *TD)
- : Func(Func), Hash(profileFunction(Func)), TD(TD) {}
+ ComparableFunction(Function *Func, DataLayout *DL)
+ : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
Function *getFunc() const { return Func; }
unsigned getHash() const { return Hash; }
- DataLayout *getTD() const { return TD; }
+ DataLayout *getDataLayout() const { return DL; }
// Drops AssertingVH reference to the function. Outside of debug mode, this
// does nothing.
@@ -125,11 +125,11 @@ public:
private:
explicit ComparableFunction(unsigned Hash)
- : Func(NULL), Hash(Hash), TD(NULL) {}
+ : Func(NULL), Hash(Hash), DL(NULL) {}
AssertingVH<Function> Func;
unsigned Hash;
- DataLayout *TD;
+ DataLayout *DL;
};
const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
@@ -164,9 +164,9 @@ namespace {
/// side of claiming that two functions are different).
class FunctionComparator {
public:
- FunctionComparator(const DataLayout *TD, const Function *F1,
+ FunctionComparator(const DataLayout *DL, const Function *F1,
const Function *F2)
- : F1(F1), F2(F2), TD(TD) {}
+ : F1(F1), F2(F2), DL(DL) {}
/// Test whether the two functions have equivalent behaviour.
bool compare();
@@ -199,7 +199,7 @@ private:
// The two functions undergoing comparison.
const Function *F1, *F2;
- const DataLayout *TD;
+ const DataLayout *DL;
DenseMap<const Value *, const Value *> id_map;
DenseSet<const Value *> seen_values;
@@ -214,9 +214,9 @@ bool FunctionComparator::isEquivalentType(Type *Ty1, Type *Ty2) const {
PointerType *PTy1 = dyn_cast<PointerType>(Ty1);
PointerType *PTy2 = dyn_cast<PointerType>(Ty2);
- if (TD) {
- if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 = TD->getIntPtrType(Ty1);
- if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 = TD->getIntPtrType(Ty2);
+ if (DL) {
+ if (PTy1 && PTy1->getAddressSpace() == 0) Ty1 = DL->getIntPtrType(Ty1);
+ if (PTy2 && PTy2->getAddressSpace() == 0) Ty2 = DL->getIntPtrType(Ty2);
}
if (Ty1 == Ty2)
@@ -359,13 +359,13 @@ bool FunctionComparator::isEquivalentGEP(const GEPOperator *GEP1,
if (AS != GEP2->getPointerAddressSpace())
return false;
- if (TD) {
+ if (DL) {
// When we have target data, we can reduce the GEP down to the value in bytes
// added to the address.
- unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 1;
+ unsigned BitWidth = DL ? DL->getPointerSizeInBits(AS) : 1;
APInt Offset1(BitWidth, 0), Offset2(BitWidth, 0);
- if (GEP1->accumulateConstantOffset(*TD, Offset1) &&
- GEP2->accumulateConstantOffset(*TD, Offset2)) {
+ if (GEP1->accumulateConstantOffset(*DL, Offset1) &&
+ GEP2->accumulateConstantOffset(*DL, Offset2)) {
return Offset1 == Offset2;
}
}
@@ -606,7 +606,7 @@ private:
FnSetType FnSet;
/// DataLayout for more accurate GEP comparisons. May be NULL.
- DataLayout *TD;
+ DataLayout *DL;
/// Whether or not the target supports global aliases.
bool HasGlobalAliases;
@@ -623,7 +623,7 @@ ModulePass *llvm::createMergeFunctionsPass() {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
@@ -646,7 +646,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
!F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, TD);
+ ComparableFunction CF = ComparableFunction(F, DL);
Changed |= insert(CF);
}
}
@@ -661,7 +661,7 @@ bool MergeFunctions::runOnModule(Module &M) {
Function *F = cast<Function>(*I);
if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
F->mayBeOverridden()) {
- ComparableFunction CF = ComparableFunction(F, TD);
+ ComparableFunction CF = ComparableFunction(F, DL);
Changed |= insert(CF);
}
}
@@ -682,14 +682,14 @@ bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
return false;
// One of these is a special "underlying pointer comparison only" object.
- if (LHS.getTD() == ComparableFunction::LookupOnly ||
- RHS.getTD() == ComparableFunction::LookupOnly)
+ if (LHS.getDataLayout() == ComparableFunction::LookupOnly ||
+ RHS.getDataLayout() == ComparableFunction::LookupOnly)
return false;
- assert(LHS.getTD() == RHS.getTD() &&
+ assert(LHS.getDataLayout() == RHS.getDataLayout() &&
"Comparing functions for different targets");
- return FunctionComparator(LHS.getTD(), LHS.getFunc(),
+ return FunctionComparator(LHS.getDataLayout(), LHS.getFunc(),
RHS.getFunc()).compare();
}
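The MergeFunctions hunks above rename both the member and the getTD() accessor, and show the usual guard: FunctionComparator::isEquivalentType only normalizes default address-space pointer types through DataLayout::getIntPtrType when DL is non-null. A minimal sketch of that idiom, with normalizePointerType as an illustrative helper rather than LLVM API:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// Illustrative helper (not LLVM API): with a DataLayout, compare default
// address-space pointers as the target's intptr_t; without one, leave the
// type alone, erring on the side of treating functions as different.
static Type *normalizePointerType(Type *Ty, const DataLayout *DL) {
  if (DL)
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      if (PTy->getAddressSpace() == 0)
        return DL->getIntPtrType(Ty);
  return Ty;
}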
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index 4021f292d9..241db7a6a5 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -81,7 +81,7 @@ public:
class LLVM_LIBRARY_VISIBILITY InstCombiner
: public FunctionPass,
public InstVisitor<InstCombiner, Instruction*> {
- DataLayout *TD;
+ DataLayout *DL;
TargetLibraryInfo *TLI;
bool MadeIRChange;
LibCallSimplifier *Simplifier;
@@ -96,7 +96,7 @@ public:
BuilderTy *Builder;
static char ID; // Pass identification, replacement for typeid
- InstCombiner() : FunctionPass(ID), TD(0), Builder(0) {
+ InstCombiner() : FunctionPass(ID), DL(0), Builder(0) {
MinimizeSize = false;
initializeInstCombinerPass(*PassRegistry::getPassRegistry());
}
@@ -108,7 +108,7 @@ public:
virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- DataLayout *getDataLayout() const { return TD; }
+ DataLayout *getDataLayout() const { return DL; }
TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
@@ -234,7 +234,7 @@ private:
Type *Ty);
Instruction *visitCallSite(CallSite CS);
- Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD);
+ Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *DL);
bool transformConstExprCastCall(CallSite CS);
Instruction *transformCallThroughTrampoline(CallSite CS,
IntrinsicInst *Tramp);
@@ -311,15 +311,15 @@ public:
void ComputeMaskedBits(Value *V, APInt &KnownZero,
APInt &KnownOne, unsigned Depth = 0) const {
- return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
+ return llvm::ComputeMaskedBits(V, KnownZero, KnownOne, DL, Depth);
}
bool MaskedValueIsZero(Value *V, const APInt &Mask,
unsigned Depth = 0) const {
- return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
+ return llvm::MaskedValueIsZero(V, Mask, DL, Depth);
}
unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
- return llvm::ComputeNumSignBits(Op, TD, Depth);
+ return llvm::ComputeNumSignBits(Op, DL, Depth);
}
private:
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c56a31ce35..e0c7b8e14f 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -919,7 +919,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), TD))
+ I.hasNoUnsignedWrap(), DL))
return ReplaceInstUsesWith(I, V);
// (A*B)+(A*C) -> A*(B+C) etc
@@ -1193,7 +1193,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
- if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), TD))
+ if (Value *V = SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL))
return ReplaceInstUsesWith(I, V);
if (isa<Constant>(RHS)) {
@@ -1300,7 +1300,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
Type *Ty) {
- assert(TD && "Must have target data info for this");
+ assert(DL && "Must have target data info for this");
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
// this.
@@ -1369,7 +1369,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), TD))
+ I.hasNoUnsignedWrap(), DL))
return ReplaceInstUsesWith(I, V);
// (A*B)-(A*C) -> A*(B-C) etc
@@ -1518,7 +1518,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// Optimize pointer differences into the same array into a size. Consider:
// &A[10] - &A[0]: we should compile this to "10".
- if (TD) {
+ if (DL) {
Value *LHSOp, *RHSOp;
if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
match(Op1, m_PtrToInt(m_Value(RHSOp))))
@@ -1538,7 +1538,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), TD))
+ if (Value *V = SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL))
return ReplaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index d768903f5f..424308651d 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1104,7 +1104,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyAndInst(Op0, Op1, TD))
+ if (Value *V = SimplifyAndInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// (A|B)&(A|C) -> A|(B&C) etc
@@ -1905,7 +1905,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyOrInst(Op0, Op1, TD))
+ if (Value *V = SimplifyOrInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// (A&B)|(A&C) -> A&(B|C) etc
@@ -2237,7 +2237,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyXorInst(Op0, Op1, TD))
+ if (Value *V = SimplifyXorInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// (A&B)^(A&C) -> A&(B^C) etc
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index fe8c1b0baf..f56e9f8e69 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -56,8 +56,8 @@ static Type *reduceToSingleValueType(Type *T) {
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
- unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
+ unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL);
+ unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
@@ -103,7 +103,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
if (StrippedDest != MI->getArgOperand(0)) {
Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
- if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
+ if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
SrcETy = reduceToSingleValueType(SrcETy);
@@ -152,7 +152,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
- unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
+ unsigned Alignment = getKnownAlignment(MI->getDest(), DL);
if (MI->getAlignment() < Alignment) {
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
Alignment, false));
@@ -274,7 +274,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
default: break;
case Intrinsic::objectsize: {
uint64_t Size;
- if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))
+ if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
return 0;
}
@@ -504,7 +504,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
// Turn PPC lvx -> load if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
@@ -513,7 +513,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
@@ -524,7 +524,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
@@ -641,7 +641,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
- unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
+ unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL);
unsigned AlignArg = II->getNumArgOperands() - 1;
ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
@@ -747,7 +747,7 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
const CastInst * const CI,
- const DataLayout * const TD,
+ const DataLayout * const DL,
const int ix) {
if (!CI->isLosslessCast())
return false;
@@ -763,7 +763,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
- if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
+ if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
return false;
return true;
}
@@ -772,7 +772,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) {
+Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
if (CI->getCalledFunction() == 0) return 0;
if (Value *With = Simplifier->optimizeCall(CI)) {
@@ -934,7 +934,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
E = CS.arg_end(); I != E; ++I, ++ix) {
CastInst *CI = dyn_cast<CastInst>(*I);
- if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
+ if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
*I = CI->getOperand(0);
Changed = true;
}
@@ -951,7 +951,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
- Instruction *I = tryOptimizeCall(CI, TD);
+ Instruction *I = tryOptimizeCall(CI, DL);
// If we changed something return the result, etc. Otherwise let
// the fallthrough check.
if (I) return EraseInstFromFunction(*I);
@@ -1043,12 +1043,12 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
- if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
+ if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || DL == 0)
return false;
Type *CurElTy = ActTy->getPointerElementType();
- if (TD->getTypeAllocSize(CurElTy) !=
- TD->getTypeAllocSize(ParamPTy->getElementType()))
+ if (DL->getTypeAllocSize(CurElTy) !=
+ DL->getTypeAllocSize(ParamPTy->getElementType()))
return false;
}
}
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index cccfd4d49e..5c1d1b136b 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -79,7 +79,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
// This requires DataLayout to get the alloca alignment and size information.
- if (!TD) return 0;
+ if (!DL) return 0;
PointerType *PTy = cast<PointerType>(CI.getType());
@@ -91,8 +91,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
Type *CastElTy = PTy->getElementType();
if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
- unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
- unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
+ unsigned AllocElTyAlign = DL->getABITypeAlignment(AllocElTy);
+ unsigned CastElTyAlign = DL->getABITypeAlignment(CastElTy);
if (CastElTyAlign < AllocElTyAlign) return 0;
// If the allocation has multiple uses, only promote it if we are strictly
@@ -100,14 +100,14 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// same, we open the door to infinite loops of various kinds.
if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
- uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
- uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
+ uint64_t AllocElTySize = DL->getTypeAllocSize(AllocElTy);
+ uint64_t CastElTySize = DL->getTypeAllocSize(CastElTy);
if (CastElTySize == 0 || AllocElTySize == 0) return 0;
// If the allocation has multiple uses, only promote it if we're not
// shrinking the amount of memory being allocated.
- uint64_t AllocElTyStoreSize = TD->getTypeStoreSize(AllocElTy);
- uint64_t CastElTyStoreSize = TD->getTypeStoreSize(CastElTy);
+ uint64_t AllocElTyStoreSize = DL->getTypeStoreSize(AllocElTy);
+ uint64_t CastElTyStoreSize = DL->getTypeStoreSize(CastElTy);
if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return 0;
// See if we can satisfy the modulus by pulling a scale out of the array
@@ -161,9 +161,9 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
bool isSigned) {
if (Constant *C = dyn_cast<Constant>(V)) {
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
- // If we got a constantexpr back, try to simplify it with TD info.
+ // If we got a constantexpr back, try to simplify it with DL info.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
- C = ConstantFoldConstantExpression(CE, TD, TLI);
+ C = ConstantFoldConstantExpression(CE, DL, TLI);
return C;
}
@@ -235,7 +235,7 @@ isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
Type *DstTy, ///< The target type for the second cast instruction
- DataLayout *TD ///< The target data for pointer size
+ DataLayout *DL ///< The target data for pointer size
) {
Type *SrcTy = CI->getOperand(0)->getType(); // A from above
@@ -244,12 +244,12 @@ isEliminableCastPair(
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opcode);
- Type *SrcIntPtrTy = TD && SrcTy->isPtrOrPtrVectorTy() ?
- TD->getIntPtrType(SrcTy) : 0;
- Type *MidIntPtrTy = TD && MidTy->isPtrOrPtrVectorTy() ?
- TD->getIntPtrType(MidTy) : 0;
- Type *DstIntPtrTy = TD && DstTy->isPtrOrPtrVectorTy() ?
- TD->getIntPtrType(DstTy) : 0;
+ Type *SrcIntPtrTy = DL && SrcTy->isPtrOrPtrVectorTy() ?
+ DL->getIntPtrType(SrcTy) : 0;
+ Type *MidIntPtrTy = DL && MidTy->isPtrOrPtrVectorTy() ?
+ DL->getIntPtrType(MidTy) : 0;
+ Type *DstIntPtrTy = DL && DstTy->isPtrOrPtrVectorTy() ?
+ DL->getIntPtrType(DstTy) : 0;
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
DstTy, SrcIntPtrTy, MidIntPtrTy,
DstIntPtrTy);
@@ -275,7 +275,7 @@ bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
// If this is another cast that can be eliminated, we prefer to have it
// eliminated.
if (const CastInst *CI = dyn_cast<CastInst>(V))
- if (isEliminableCastPair(CI, opc, Ty, TD))
+ if (isEliminableCastPair(CI, opc, Ty, DL))
return false;
// If this is a vector sext from a compare, then we don't want to break the
@@ -295,7 +295,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
// eliminate it now.
if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
if (Instruction::CastOps opc =
- isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
+ isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), DL)) {
// The first cast (CSrc) is eliminable so we need to fix up or replace
// the second cast (CI). CSrc will then have a good chance of being dead.
return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
@@ -1405,11 +1405,11 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// trunc or zext to the intptr_t type, then inttoptr of it. This allows the
// cast to be exposed to other transforms.
- if (TD) {
+ if (DL) {
unsigned AS = CI.getAddressSpace();
if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
- TD->getPointerSizeInBits(AS)) {
- Type *Ty = TD->getIntPtrType(CI.getContext(), AS);
+ DL->getPointerSizeInBits(AS)) {
+ Type *Ty = DL->getIntPtrType(CI.getContext(), AS);
if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
@@ -1440,7 +1440,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
return &CI;
}
- if (!TD)
+ if (!DL)
return commonCastTransforms(CI);
// If the GEP has a single use, and the base pointer is a bitcast, and the
@@ -1448,12 +1448,12 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
// instructions into fewer. This typically happens with unions and other
// non-type-safe code.
unsigned AS = GEP->getPointerAddressSpace();
- unsigned OffsetBits = TD->getPointerSizeInBits(AS);
+ unsigned OffsetBits = DL->getPointerSizeInBits(AS);
APInt Offset(OffsetBits, 0);
BitCastInst *BCI = dyn_cast<BitCastInst>(GEP->getOperand(0));
if (GEP->hasOneUse() &&
BCI &&
- GEP->accumulateConstantOffset(*TD, Offset)) {
+ GEP->accumulateConstantOffset(*DL, Offset)) {
// Get the base pointer input of the bitcast, and the type it points to.
Value *OrigBase = BCI->getOperand(0);
SmallVector<Value*, 8> NewIndices;
@@ -1484,16 +1484,16 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
// do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
// to be exposed to other transforms.
- if (!TD)
+ if (!DL)
return commonPointerCastTransforms(CI);
Type *Ty = CI.getType();
unsigned AS = CI.getPointerAddressSpace();
- if (Ty->getScalarSizeInBits() == TD->getPointerSizeInBits(AS))
+ if (Ty->getScalarSizeInBits() == DL->getPointerSizeInBits(AS))
return commonPointerCastTransforms(CI);
- Type *PtrTy = TD->getIntPtrType(CI.getContext(), AS);
+ Type *PtrTy = DL->getIntPtrType(CI.getContext(), AS);
if (Ty->isVectorTy()) // Handle vectors of pointers.
PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
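A recurring shape in the InstCombine hunks above is an early bail-out when DL is null, because alignments and allocation sizes are unknowable without target data (PromoteCastOfAllocation and commonPointerCastTransforms both start this way). A small self-contained sketch of that guard, assuming a hypothetical helper name:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper, not LLVM API: returns the alloca's allocated size
// in bytes, or 0 when it cannot be computed. Mirrors the "if (!DL) return 0;"
// guard used by PromoteCastOfAllocation above.
static uint64_t allocSizeInBytes(const AllocaInst &AI, const DataLayout *DL) {
  if (!DL)
    return 0;                        // no target data: size is unknowable
  Type *ElTy = AI.getAllocatedType();
  if (!ElTy->isSized())
    return 0;                        // unsized type: also give up
  return DL->getTypeAllocSize(ElTy); // safe: DL checked above
}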
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 281ff4050c..4ad58c40e4 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -218,7 +218,7 @@ Instruction *InstCombiner::
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
CmpInst &ICI, ConstantInt *AndCst) {
// We need TD information to know the pointer size unless this is inbounds.
- if (!GEP->isInBounds() && TD == 0)
+ if (!GEP->isInBounds() && DL == 0)
return 0;
Constant *Init = GV->getInitializer();
@@ -307,7 +307,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// Find out if the comparison would be true or false for the i'th element.
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
- CompareRHS, TD, TLI);
+ CompareRHS, DL, TLI);
// If the result is undef for this element, ignore it.
if (isa<UndefValue>(C)) {
// Extend range state machines to cover this element in case there is an
@@ -386,7 +386,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// index down like the GEP would do implicitly. We don't have to do this for
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds()) {
- Type *IntPtrTy = TD->getIntPtrType(GEP->getType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
Idx = Builder->CreateTrunc(Idx, IntPtrTy);
@@ -475,8 +475,8 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// - Default to i32
if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
Ty = Idx->getType();
- else if (TD)
- Ty = TD->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
+ else if (DL)
+ Ty = DL->getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
else if (ArrayElementCount <= 32)
Ty = Type::getInt32Ty(Init->getContext());
@@ -503,7 +503,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
- DataLayout &TD = *IC.getDataLayout();
+ DataLayout &DL = *IC.getDataLayout();
gep_type_iterator GTI = gep_type_begin(GEP);
// Check to see if this gep only has a single variable index. If so, and if
@@ -520,9 +520,9 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
} else {
@@ -538,7 +538,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
Value *VariableIdx = GEP->getOperand(i);
// Determine the scale factor of the variable element. For example, this is
// 4 if the variable index is into an array of i32.
- uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
// Verify that there are no other variable indices. If so, emit the hard way.
for (++i, ++GTI; i != e; ++i, ++GTI) {
@@ -550,9 +550,9 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
+ Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
- uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
Offset += Size*CI->getSExtValue();
}
}
@@ -562,7 +562,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Okay, we know we have a single variable index, which must be a
// pointer/array/vector index. If there is no offset, life is simple, return
// the index.
- Type *IntPtrTy = TD.getIntPtrType(GEP->getOperand(0)->getType());
+ Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
if (Offset == 0) {
// Cast to intptrty in case a truncation occurs. If an extension is needed,
@@ -615,7 +615,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
RHS = BCI->getOperand(0);
Value *PtrBase = GEPLHS->getOperand(0);
- if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
+ if (DL && PtrBase == RHS && GEPLHS->isInBounds()) {
// ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
// This transformation (ignoring the base and scales) is valid because we
// know pointers can't overflow since the gep is inbounds. See if we can
@@ -648,7 +648,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// If we're comparing GEPs with two base pointers that only differ in type
// and both GEPs have only constant indices or just one use, then fold
// the compare with the adjusted indices.
- if (TD && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
+ if (DL && GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
(GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
(GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
PtrBase->stripPointerCasts() ==
@@ -719,7 +719,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// Only lower this if the icmp is the only user of the GEP or if we expect
// the result to fold to a constant!
- if (TD &&
+ if (DL &&
GEPsInBounds &&
(isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
(isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
@@ -1792,8 +1792,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
- if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
+ if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&
+ DL->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
Value *RHSOp = 0;
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
@@ -2104,7 +2104,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Changed = true;
}
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
+ if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// comparing -val or val with non-zero is the same as just comparing val
@@ -2172,8 +2172,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
unsigned BitWidth = 0;
if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
- else if (TD) // Pointers require TD info to get their size.
- BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
+ else if (DL) // Pointers require DL info to get their size.
+ BitWidth = DL->getTypeSizeInBits(Ty->getScalarType());
bool isSignBit = false;
@@ -2532,8 +2532,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
}
case Instruction::IntToPtr:
// icmp pred inttoptr(X), null -> icmp pred X, 0
- if (RHSC->isNullValue() && TD &&
- TD->getIntPtrType(RHSC->getType()) ==
+ if (RHSC->isNullValue() && DL &&
+ DL->getIntPtrType(RHSC->getType()) ==
LHSI->getOperand(0)->getType())
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
Constant::getNullValue(LHSI->getOperand(0)->getType()));
@@ -3229,7 +3229,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
+ if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Simplify 'fcmp pred X, X'
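
All of the pointer-width compare folds above fire only behind a null check, since DataLayout is an optional analysis at this point in the tree. A minimal sketch of that guard as a standalone predicate (helper name and signature are illustrative, not from this patch):

    // Without a DataLayout the pointer width is unknown, so an
    // icmp of ptrtoint results cannot be folded to a pointer compare.
    static bool canFoldPtrToIntCmp(const DataLayout *DL,
                                   Type *SrcTy, Type *DestTy) {
      if (!DL)
        return false;
      return DL->getPointerTypeSizeInBits(SrcTy) ==
             DestTy->getIntegerBitWidth();
    }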
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 1f69176817..90cb7a96e0 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -157,8 +157,8 @@ isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
- if (TD) {
- Type *IntPtrTy = TD->getIntPtrType(AI.getType());
+ if (DL) {
+ Type *IntPtrTy = DL->getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
@@ -184,8 +184,8 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Now that I is pointing to the first non-allocation-inst in the block,
// insert our getelementptr instruction...
//
- Type *IdxTy = TD
- ? TD->getIntPtrType(AI.getType())
+ Type *IdxTy = DL
+ ? DL->getIntPtrType(AI.getType())
: Type::getInt64Ty(AI.getContext());
Value *NullIdx = Constant::getNullValue(IdxTy);
Value *Idx[2] = { NullIdx, NullIdx };
@@ -201,15 +201,15 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
}
}
- if (TD && AI.getAllocatedType()->isSized()) {
+ if (DL && AI.getAllocatedType()->isSized()) {
// If the alignment is 0 (unspecified), assign it the preferred alignment.
if (AI.getAlignment() == 0)
- AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
+ AI.setAlignment(DL->getPrefTypeAlignment(AI.getAllocatedType()));
// Move all alloca's of zero byte objects to the entry block and merge them
// together. Note that we only do this for alloca's, because malloc should
// allocate and return a unique pointer, even for a zero byte allocation.
- if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) {
+ if (DL->getTypeAllocSize(AI.getAllocatedType()) == 0) {
// For a zero sized alloca there is no point in doing an array allocation.
// This is helpful if the array size is a complicated expression not used
// elsewhere.
@@ -227,7 +227,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// dominance as the array size was forced to a constant earlier already.
AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
- TD->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
+ DL->getTypeAllocSize(EntryAI->getAllocatedType()) != 0) {
AI.moveBefore(FirstInst);
return &AI;
}
@@ -236,7 +236,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// assign it the preferred alignment.
if (EntryAI->getAlignment() == 0)
EntryAI->setAlignment(
- TD->getPrefTypeAlignment(EntryAI->getAllocatedType()));
+ DL->getPrefTypeAlignment(EntryAI->getAllocatedType()));
// Replace this zero-sized alloca with the one at the start of the entry
// block after ensuring that the address will be aligned enough for both
// types.
@@ -260,7 +260,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
unsigned SourceAlign = getOrEnforceKnownAlignment(Copy->getSource(),
- AI.getAlignment(), TD);
+ AI.getAlignment(), DL);
if (AI.getAlignment() <= SourceAlign) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
@@ -285,7 +285,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
User *CI = cast<User>(LI.getOperand(0));
Value *CastOp = CI->getOperand(0);
@@ -307,8 +307,8 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
- Type *IdxTy = TD
- ? TD->getIntPtrType(SrcTy)
+ Type *IdxTy = DL
+ ? DL->getIntPtrType(SrcTy)
: Type::getInt64Ty(SrcTy->getContext());
Value *Idx = Constant::getNullValue(IdxTy);
Value *Idxs[2] = { Idx, Idx };
@@ -346,12 +346,12 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Value *Op = LI.getOperand(0);
// Attempt to improve the alignment.
- if (TD) {
+ if (DL) {
unsigned KnownAlign =
- getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()),TD);
+ getOrEnforceKnownAlignment(Op, DL->getPrefTypeAlignment(LI.getType()),DL);
unsigned LoadAlign = LI.getAlignment();
unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
- TD->getABITypeAlignment(LI.getType());
+ DL->getABITypeAlignment(LI.getType());
if (KnownAlign > EffectiveLoadAlign)
LI.setAlignment(KnownAlign);
@@ -361,7 +361,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// load (cast X) --> cast (load X) iff safe.
if (isa<CastInst>(Op))
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
+ if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
return Res;
// None of the following transforms are legal for volatile/atomic loads.
@@ -405,7 +405,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// Instcombine load (constantexpr_cast global) -> cast (load global)
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
if (CE->isCast())
- if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
+ if (Instruction *Res = InstCombineLoadCast(*this, LI, DL))
return Res;
if (Op->hasOneUse()) {
@@ -422,8 +422,8 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
// load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
unsigned Align = LI.getAlignment();
- if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
- isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
+ if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, DL) &&
+ isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, DL)) {
LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
SI->getOperand(1)->getName()+".val");
LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
@@ -572,13 +572,13 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
Value *Ptr = SI.getOperand(1);
// Attempt to improve the alignment.
- if (TD) {
+ if (DL) {
unsigned KnownAlign =
- getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
- TD);
+ getOrEnforceKnownAlignment(Ptr, DL->getPrefTypeAlignment(Val->getType()),
+ DL);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
- TD->getABITypeAlignment(Val->getType());
+ DL->getABITypeAlignment(Val->getType());
if (KnownAlign > EffectiveStoreAlign)
SI.setAlignment(KnownAlign);
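
The alloca and load/store hunks above repeat one idiom: prefer the target's intptr type when layout information exists, and fall back to i64 otherwise. Condensed into a hypothetical helper (the per-address-space behavior comes from passing the pointer type itself):

    static Type *getIndexType(const DataLayout *DL, Type *PtrTy,
                              LLVMContext &Ctx) {
      // intptr width can differ per address space, hence PtrTy.
      return DL ? DL->getIntPtrType(PtrTy) : Type::getInt64Ty(Ctx);
    }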
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index dd2089f3b7..bd4b6c3270 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -118,7 +118,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
bool Changed = SimplifyAssociativeOrCommutative(I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyMulInst(Op0, Op1, TD))
+ if (Value *V = SimplifyMulInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
if (Value *V = SimplifyUsingDistributiveLaws(I))
@@ -429,7 +429,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
if (isa<Constant>(Op0))
std::swap(Op0, Op1);
- if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), TD))
+ if (Value *V = SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL))
return ReplaceInstUsesWith(I, V);
bool AllowReassociate = I.hasUnsafeAlgebra();
@@ -875,7 +875,7 @@ static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyUDivInst(Op0, Op1, TD))
+ if (Value *V = SimplifyUDivInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle the integer div common cases
@@ -934,7 +934,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifySDivInst(Op0, Op1, TD))
+ if (Value *V = SimplifySDivInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle the integer div common cases
@@ -1020,7 +1020,7 @@ static Instruction *CvtFDivConstToReciprocal(Value *Dividend,
Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFDivInst(Op0, Op1, TD))
+ if (Value *V = SimplifyFDivInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
@@ -1182,7 +1182,7 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
Instruction *InstCombiner::visitURem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyURemInst(Op0, Op1, TD))
+ if (Value *V = SimplifyURemInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *common = commonIRemTransforms(I))
@@ -1214,7 +1214,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifySRemInst(Op0, Op1, TD))
+ if (Value *V = SimplifySRemInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle the integer rem common cases
@@ -1285,7 +1285,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
- if (Value *V = SimplifyFRemInst(Op0, Op1, TD))
+ if (Value *V = SimplifyFRemInst(Op0, Op1, DL))
return ReplaceInstUsesWith(I, V);
// Handle cases involving: rem X, (select Cond, Y, Z)
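
Every visit* above forwards the renamed member straight into InstructionSimplify. Those entry points take the layout as a nullable pointer, so no guard is needed at the call sites; a null DL merely disables the layout-dependent folds:

    // DL may be null here; SimplifyMulInst tolerates that.
    if (Value *V = SimplifyMulInst(Op0, Op1, DL))
      return ReplaceInstUsesWith(I, V);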
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 4c6d0c43cd..5da3abcde1 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -790,7 +790,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
- if (Value *V = SimplifyInstruction(&PN, TD, TLI))
+ if (Value *V = SimplifyInstruction(&PN, DL, TLI))
return ReplaceInstUsesWith(PN, V);
// If all PHI operands are the same operation, pull them through the PHI,
@@ -893,8 +893,8 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
// it is only used by trunc or trunc(lshr) operations. If so, we split the
// PHI into the various pieces being extracted. This sort of thing is
// introduced when SROA promotes an aggregate to a single large integer type.
- if (PN.getType()->isIntegerTy() && TD &&
- !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
+ if (PN.getType()->isIntegerTy() && DL &&
+ !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
return Res;
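
The slicing gate above is the only target-dependent piece of the PHI transform. As a standalone predicate (illustrative name, not from this patch):

    static bool isIllegallyWideIntPHI(const DataLayout *DL, PHINode &PN) {
      // With no layout we cannot tell legal from illegal widths, so the
      // transform conservatively stays off.
      return PN.getType()->isIntegerTy() && DL &&
             !DL->isLegalInteger(PN.getType()->getPrimitiveSizeInBits());
    }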
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 555ffc7752..e0609bb264 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -554,18 +554,18 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// arms of the select. See if substituting this value into the arm and
// simplifying the result yields the same value as the other arm.
if (Pred == ICmpInst::ICMP_EQ) {
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)
return ReplaceInstUsesWith(SI, FalseVal);
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)
return ReplaceInstUsesWith(SI, FalseVal);
} else if (Pred == ICmpInst::ICMP_NE) {
- if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, TD, TLI) == FalseVal ||
- SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, TD, TLI) == FalseVal)
+ if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, DL, TLI) == FalseVal ||
+ SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, DL, TLI) == FalseVal)
return ReplaceInstUsesWith(SI, TrueVal);
- if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, TD, TLI) == TrueVal ||
- SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, TD, TLI) == TrueVal)
+ if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, DL, TLI) == TrueVal ||
+ SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, DL, TLI) == TrueVal)
return ReplaceInstUsesWith(SI, TrueVal);
}
@@ -734,7 +734,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *TrueVal = SI.getTrueValue();
Value *FalseVal = SI.getFalseValue();
- if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, TD))
+ if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal, DL))
return ReplaceInstUsesWith(SI, V);
if (SI.getType()->isIntegerTy(1)) {
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 8cf76e5e8a..f4d6222587 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -677,7 +677,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
Instruction *InstCombiner::visitShl(BinaryOperator &I) {
if (Value *V = SimplifyShlInst(I.getOperand(0), I.getOperand(1),
I.hasNoSignedWrap(), I.hasNoUnsignedWrap(),
- TD))
+ DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *V = commonShiftTransforms(I))
@@ -714,7 +714,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1),
- I.isExact(), TD))
+ I.isExact(), DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
@@ -754,7 +754,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1),
- I.isExact(), TD))
+ I.isExact(), DL))
return ReplaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index c831ddd3da..880fe54c56 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -105,9 +105,9 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert(Depth <= 6 && "Limit Search Depth");
uint32_t BitWidth = DemandedMask.getBitWidth();
Type *VTy = V->getType();
- assert((TD || !VTy->isPointerTy()) &&
+ assert((DL || !VTy->isPointerTy()) &&
"SimplifyDemandedBits needs to know bit widths!");
- assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
+ assert((!DL || DL->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
(!VTy->isIntOrIntVectorTy() ||
VTy->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index e4365d691f..8ebc0956c8 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -103,13 +103,13 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
assert(From->isIntegerTy() && To->isIntegerTy());
- // If we don't have TD, we don't know if the source/dest are legal.
- if (!TD) return false;
+ // If we don't have DL, we don't know if the source/dest are legal.
+ if (!DL) return false;
unsigned FromWidth = From->getPrimitiveSizeInBits();
unsigned ToWidth = To->getPrimitiveSizeInBits();
- bool FromLegal = TD->isLegalInteger(FromWidth);
- bool ToLegal = TD->isLegalInteger(ToWidth);
+ bool FromLegal = DL->isLegalInteger(FromWidth);
+ bool ToLegal = DL->isLegalInteger(ToWidth);
// If this is a legal integer from type, and the result would be an illegal
// type, don't do the transformation.
@@ -221,7 +221,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = I.getOperand(1);
// Does "B op C" simplify?
- if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, B, C, DL)) {
// It simplifies to V. Form "A op V".
I.setOperand(0, A);
I.setOperand(1, V);
@@ -250,7 +250,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = Op1->getOperand(1);
// Does "A op B" simplify?
- if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, A, B, DL)) {
// It simplifies to V. Form "V op C".
I.setOperand(0, V);
I.setOperand(1, C);
@@ -272,7 +272,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = I.getOperand(1);
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
// It simplifies to V. Form "V op B".
I.setOperand(0, V);
I.setOperand(1, B);
@@ -292,7 +292,7 @@ bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
Value *C = Op1->getOperand(1);
// Does "C op A" simplify?
- if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
+ if (Value *V = SimplifyBinOp(Opcode, C, A, DL)) {
// It simplifies to V. Form "B op V".
I.setOperand(0, B);
I.setOperand(1, V);
@@ -425,7 +425,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
std::swap(C, D);
// Consider forming "A op' (B op D)".
// If "B op D" simplifies then it can be formed with no cost.
- Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
+ Value *V = SimplifyBinOp(TopLevelOpcode, B, D, DL);
// If "B op D" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && Op0->hasOneUse() && Op1->hasOneUse())
@@ -447,7 +447,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
std::swap(C, D);
// Consider forming "(A op C) op' B".
// If "A op C" simplifies then it can be formed with no cost.
- Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
+ Value *V = SimplifyBinOp(TopLevelOpcode, A, C, DL);
// If "A op C" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && Op0->hasOneUse() && Op1->hasOneUse())
@@ -469,8 +469,8 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'
// Do "A op C" and "B op C" both simplify?
- if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
- if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, DL))
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, DL)) {
// They do! Return "L op' R".
++NumExpand;
// If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
@@ -478,7 +478,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
(Instruction::isCommutative(InnerOpcode) && L == B && R == A))
return Op0;
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
return V;
// Otherwise, create a new instruction.
C = Builder->CreateBinOp(InnerOpcode, L, R);
@@ -494,8 +494,8 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'
// Do "A op B" and "A op C" both simplify?
- if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
- if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
+ if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, DL))
+ if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, DL)) {
// They do! Return "L op' R".
++NumExpand;
// If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
@@ -503,7 +503,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
(Instruction::isCommutative(InnerOpcode) && L == C && R == B))
return Op1;
// Otherwise return "L op' R" if it simplifies.
- if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
+ if (Value *V = SimplifyBinOp(InnerOpcode, L, R, DL))
return V;
// Otherwise, create a new instruction.
A = Builder->CreateBinOp(InnerOpcode, L, R);
@@ -777,7 +777,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices) {
assert(PtrTy->isPtrOrPtrVectorTy());
- if (!TD)
+ if (!DL)
return 0;
Type *Ty = PtrTy->getPointerElementType();
@@ -787,9 +787,9 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- Type *IntPtrTy = TD->getIntPtrType(PtrTy);
+ Type *IntPtrTy = DL->getIntPtrType(PtrTy);
int64_t FirstIdx = 0;
- if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
+ if (int64_t TySize = DL->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
Offset -= FirstIdx*TySize;
@@ -807,11 +807,11 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
// Index into the types. If we fail, set OrigBase to null.
while (Offset) {
// Indexing into tail padding between struct/array elements.
- if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
+ if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
return 0;
if (StructType *STy = dyn_cast<StructType>(Ty)) {
- const StructLayout *SL = TD->getStructLayout(STy);
+ const StructLayout *SL = DL->getStructLayout(STy);
assert(Offset < (int64_t)SL->getSizeInBytes() &&
"Offset must stay within the indexed type");
@@ -822,7 +822,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
} else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
- uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
+ uint64_t EltSize = DL->getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
Offset %= EltSize;
@@ -1087,16 +1087,16 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
- if (Value *V = SimplifyGEPInst(Ops, TD))
+ if (Value *V = SimplifyGEPInst(Ops, DL))
return ReplaceInstUsesWith(GEP, V);
Value *PtrOp = GEP.getOperand(0);
// Eliminate unneeded casts for indices, and replace indices which displace
// by multiples of a zero size type with zero.
- if (TD) {
+ if (DL) {
bool MadeChange = false;
- Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType());
+ Type *IntPtrTy = DL->getIntPtrType(GEP.getPointerOperandType());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
@@ -1108,7 +1108,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the element type has zero size then any index over it is equivalent
// to an index of zero, so replace it with zero if it is not zero already.
if (SeqTy->getElementType()->isSized() &&
- TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
+ DL->getTypeAllocSize(SeqTy->getElementType()) == 0)
if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
*I = Constant::getNullValue(IntPtrTy);
MadeChange = true;
@@ -1199,12 +1199,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Canonicalize (gep i8* X, -(ptrtoint Y)) to (sub (ptrtoint X), (ptrtoint Y))
// The GEP pattern is emitted by the SCEV expander for certain kinds of
// pointer arithmetic.
- if (TD && GEP.getNumIndices() == 1 &&
+ if (DL && GEP.getNumIndices() == 1 &&
match(GEP.getOperand(1), m_Neg(m_PtrToInt(m_Value())))) {
unsigned AS = GEP.getPointerAddressSpace();
if (GEP.getType() == Builder->getInt8PtrTy(AS) &&
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
- TD->getPointerSizeInBits(AS)) {
+ DL->getPointerSizeInBits(AS)) {
Operator *Index = cast<Operator>(GEP.getOperand(1));
Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType());
Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1));
@@ -1266,10 +1266,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
Type *SrcElTy = StrippedPtrTy->getElementType();
Type *ResElTy = PtrOp->getType()->getPointerElementType();
- if (TD && SrcElTy->isArrayTy() &&
- TD->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
- TD->getTypeAllocSize(ResElTy)) {
- Type *IdxType = TD->getIntPtrType(GEP.getType());
+ if (DL && SrcElTy->isArrayTy() &&
+ DL->getTypeAllocSize(SrcElTy->getArrayElementType()) ==
+ DL->getTypeAllocSize(ResElTy)) {
+ Type *IdxType = DL->getIntPtrType(GEP.getType());
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
@@ -1285,11 +1285,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// %V = mul i64 %N, 4
// %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V
// into: %t1 = getelementptr i32* %arr, i32 %N; bitcast
- if (TD && ResElTy->isSized() && SrcElTy->isSized()) {
+ if (DL && ResElTy->isSized() && SrcElTy->isSized()) {
// Check that changing the type amounts to dividing the index by a scale
// factor.
- uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
- uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy);
+ uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
+ uint64_t SrcSize = DL->getTypeAllocSize(SrcElTy);
if (ResSize && SrcSize % ResSize == 0) {
Value *Idx = GEP.getOperand(1);
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
@@ -1297,7 +1297,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
+ assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@@ -1321,13 +1321,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (TD && ResElTy->isSized() && SrcElTy->isSized() &&
+ if (DL && ResElTy->isSized() && SrcElTy->isSized() &&
SrcElTy->isArrayTy()) {
// Check that changing to the array element type amounts to dividing the
// index by a scale factor.
- uint64_t ResSize = TD->getTypeAllocSize(ResElTy);
+ uint64_t ResSize = DL->getTypeAllocSize(ResElTy);
uint64_t ArrayEltSize
- = TD->getTypeAllocSize(SrcElTy->getArrayElementType());
+ = DL->getTypeAllocSize(SrcElTy->getArrayElementType());
if (ResSize && ArrayEltSize % ResSize == 0) {
Value *Idx = GEP.getOperand(1);
unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits();
@@ -1335,7 +1335,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
+ assert(Idx->getType() == DL->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@@ -1344,7 +1344,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the multiplication NewIdx * Scale may overflow then the new
// GEP may not be "inbounds".
Value *Off[2] = {
- Constant::getNullValue(TD->getIntPtrType(GEP.getType())),
+ Constant::getNullValue(DL->getIntPtrType(GEP.getType())),
NewIdx
};
@@ -1361,7 +1361,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
}
- if (!TD)
+ if (!DL)
return 0;
/// See if we can simplify:
@@ -1372,10 +1372,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
Value *Operand = BCI->getOperand(0);
PointerType *OpType = cast<PointerType>(Operand->getType());
- unsigned OffsetBits = TD->getPointerTypeSizeInBits(OpType);
+ unsigned OffsetBits = DL->getPointerTypeSizeInBits(OpType);
APInt Offset(OffsetBits, 0);
if (!isa<BitCastInst>(Operand) &&
- GEP.accumulateConstantOffset(*TD, Offset) &&
+ GEP.accumulateConstantOffset(*DL, Offset) &&
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
// If this GEP instruction doesn't move the pointer, just replace the GEP
@@ -2231,7 +2231,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
static bool AddReachableCodeToWorklist(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 64> &Visited,
InstCombiner &IC,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI) {
bool MadeIRChange = false;
SmallVector<BasicBlock*, 256> Worklist;
@@ -2259,7 +2259,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
// ConstantProp instruction if trivially constant.
if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(Inst, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(Inst, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: "
<< *Inst << '\n');
Inst->replaceAllUsesWith(C);
@@ -2268,7 +2268,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
continue;
}
- if (TD) {
+ if (DL) {
// See if we can constant fold its operands.
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
i != e; ++i) {
@@ -2277,7 +2277,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Constant*& FoldRes = FoldedConstants[CE];
if (!FoldRes)
- FoldRes = ConstantFoldConstantExpression(CE, TD, TLI);
+ FoldRes = ConstantFoldConstantExpression(CE, DL, TLI);
if (!FoldRes)
FoldRes = CE;
@@ -2344,7 +2344,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// the reachable instructions. Ignore blocks that are not reachable. Keep
// track of which blocks we visit.
SmallPtrSet<BasicBlock*, 64> Visited;
- MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD,
+ MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, DL,
TLI);
// Do a quick scan over the function. If we find any blocks that are
@@ -2390,7 +2390,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
- if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
// Add operands to the worklist.
@@ -2499,10 +2499,10 @@ namespace {
class InstCombinerLibCallSimplifier : public LibCallSimplifier {
InstCombiner *IC;
public:
- InstCombinerLibCallSimplifier(const DataLayout *TD,
+ InstCombinerLibCallSimplifier(const DataLayout *DL,
const TargetLibraryInfo *TLI,
InstCombiner *IC)
- : LibCallSimplifier(TD, TLI, UnsafeFPShrink) {
+ : LibCallSimplifier(DL, TLI, UnsafeFPShrink) {
this->IC = IC;
}
@@ -2518,7 +2518,7 @@ bool InstCombiner::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// Minimizing size?
MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
@@ -2527,11 +2527,11 @@ bool InstCombiner::runOnFunction(Function &F) {
/// Builder - This is an IRBuilder that automatically inserts new
/// instructions into the worklist when they are created.
IRBuilder<true, TargetFolder, InstCombineIRInserter>
- TheBuilder(F.getContext(), TargetFolder(TD),
+ TheBuilder(F.getContext(), TargetFolder(DL),
InstCombineIRInserter(Worklist));
Builder = &TheBuilder;
- InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this);
+ InstCombinerLibCallSimplifier TheSimplifier(DL, TLI, this);
Simplifier = &TheSimplifier;
bool EverMadeChange = false;
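
The runOnFunction hunk is where the renamed member is actually set, and it explains every null check above: getAnalysisIfAvailable returns null when the module carries no target description. Stripped-down sketch of the acquisition:

    bool InstCombiner::runOnFunction(Function &F) {
      DL = getAnalysisIfAvailable<DataLayout>(); // may legitimately be null
      TLI = &getAnalysis<TargetLibraryInfo>();   // required, never null
      bool EverMadeChange = false;
      // ... run the combine iterations ...
      return EverMadeChange;
    }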
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index ee2692dced..a980fb1dc3 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -336,7 +336,7 @@ struct AddressSanitizer : public FunctionPass {
SmallString<64> BlacklistFile;
LLVMContext *C;
- DataLayout *TD;
+ DataLayout *DL;
int LongSize;
Type *IntptrTy;
ShadowMapping Mapping;
@@ -385,7 +385,7 @@ class AddressSanitizerModule : public ModulePass {
SetOfDynamicallyInitializedGlobals DynamicallyInitializedGlobals;
Type *IntptrTy;
LLVMContext *C;
- DataLayout *TD;
+ DataLayout *DL;
ShadowMapping Mapping;
Function *AsanPoisonGlobals;
Function *AsanUnpoisonGlobals;
@@ -516,7 +516,7 @@ struct FunctionStackPoisoner : public InstVisitor<FunctionStackPoisoner> {
uint64_t getAllocaSizeInBytes(AllocaInst *AI) const {
Type *Ty = AI->getAllocatedType();
- uint64_t SizeInBytes = ASan.TD->getTypeAllocSize(Ty);
+ uint64_t SizeInBytes = ASan.DL->getTypeAllocSize(Ty);
return SizeInBytes;
}
/// Finds alloca where the value comes from.
@@ -691,7 +691,7 @@ void AddressSanitizer::instrumentMop(Instruction *I) {
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+ uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
assert((TypeSize % 8) == 0);
@@ -912,13 +912,13 @@ void AddressSanitizerModule::initializeCallbacks(Module &M) {
// redzones and inserts this function into llvm.global_ctors.
bool AddressSanitizerModule::runOnModule(Module &M) {
if (!ClGlobals) return false;
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
+ DL = getAnalysisIfAvailable<DataLayout>();
+ if (!DL)
return false;
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
if (BL->isIn(M)) return false;
C = &(M.getContext());
- int LongSize = TD->getPointerSizeInBits();
+ int LongSize = DL->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
Mapping = getShadowMapping(M, LongSize);
initializeCallbacks(M);
@@ -964,7 +964,7 @@ bool AddressSanitizerModule::runOnModule(Module &M) {
GlobalVariable *G = GlobalsToChange[i];
PointerType *PtrTy = cast<PointerType>(G->getType());
Type *Ty = PtrTy->getElementType();
- uint64_t SizeInBytes = TD->getTypeAllocSize(Ty);
+ uint64_t SizeInBytes = DL->getTypeAllocSize(Ty);
uint64_t MinRZ = MinRedzoneSizeForGlobal();
// MinRZ <= RZ <= kMaxGlobalRedzone
// and trying to make RZ to be ~ 1/4 of SizeInBytes.
@@ -1105,15 +1105,15 @@ void AddressSanitizer::emitShadowMapping(Module &M, IRBuilder<> &IRB) const {
// virtual
bool AddressSanitizer::doInitialization(Module &M) {
// Initialize the private fields. No one has accessed them before.
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
+ if (!DL)
return false;
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
DynamicallyInitializedGlobals.Init(M);
C = &(M.getContext());
- LongSize = TD->getPointerSizeInBits();
+ LongSize = DL->getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
AsanCtorFunction = Function::Create(
@@ -1378,7 +1378,7 @@ FunctionStackPoisoner::poisonRedZones(const ArrayRef<uint8_t> ShadowBytes,
for (; i + LargeStoreSizeInBytes - 1 < n; i += LargeStoreSizeInBytes) {
uint64_t Val = 0;
for (size_t j = 0; j < LargeStoreSizeInBytes; j++) {
- if (ASan.TD->isLittleEndian())
+ if (ASan.DL->isLittleEndian())
Val |= (uint64_t)ShadowBytes[i + j] << (8 * j);
else
Val = (Val << 8) | ShadowBytes[i + j];
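
The redzone-poisoning hunk is the one byte-order-sensitive spot in ASan: shadow bytes get packed into a single wide store in target endianness. The loop's effect, isolated into a simplified helper (N is the store width in bytes, at most eight):

    static uint64_t packShadowBytes(const DataLayout &DL,
                                    const uint8_t *Bytes, size_t N) {
      uint64_t Val = 0;
      for (size_t j = 0; j < N; ++j)
        Val = DL.isLittleEndian()
                  ? Val | ((uint64_t)Bytes[j] << (8 * j))
                  : (Val << 8) | Bytes[j];
      return Val;
    }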
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 7a9f0f69fb..3c14b276ec 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -53,7 +53,7 @@ namespace {
}
private:
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder;
@@ -127,7 +127,7 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
/// size of memory block that is touched.
/// Returns true if any change was made to the IR, false otherwise.
bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
- uint64_t NeededSize = TD->getTypeStoreSize(InstVal->getType());
+ uint64_t NeededSize = DL->getTypeStoreSize(InstVal->getType());
DEBUG(dbgs() << "Instrument " << *Ptr << " for " << Twine(NeededSize)
<< " bytes\n");
@@ -142,7 +142,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- Type *IntTy = TD->getIntPtrType(Ptr->getType());
+ Type *IntTy = DL->getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
@@ -166,13 +166,13 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
}
bool BoundsChecking::runOnFunction(Function &F) {
- TD = &getAnalysis<DataLayout>();
+ DL = &getAnalysis<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
TrapBB = 0;
- BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));
+ BuilderTy TheBuilder(F.getContext(), TargetFolder(DL));
Builder = &TheBuilder;
- ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext(),
+ ObjectSizeOffsetEvaluator TheObjSizeEval(DL, TLI, F.getContext(),
/*RoundToAlign=*/true);
ObjSizeEval = &TheObjSizeEval;
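
Note the asymmetry with the other passes: BoundsChecking fetches the layout with getAnalysis<> rather than getAnalysisIfAvailable<>, so its DL is never null. That is only safe because the pass declares the dependency, presumably along these lines:

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DataLayout>();   // makes getAnalysis<DataLayout>() safe
      AU.addRequired<TargetLibraryInfo>();
    }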
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 93e71cc116..7eea4bff59 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -207,7 +207,7 @@ class MemorySanitizer : public FunctionPass {
StringRef BlacklistFile = StringRef())
: FunctionPass(ID),
TrackOrigins(TrackOrigins || ClTrackOrigins),
- TD(0),
+ DL(0),
WarningFn(0),
BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
@@ -222,7 +222,7 @@ class MemorySanitizer : public FunctionPass {
/// \brief Track origins (allocation points) of uninitialized values.
bool TrackOrigins;
- DataLayout *TD;
+ DataLayout *DL;
LLVMContext *C;
Type *IntptrTy;
Type *OriginTy;
@@ -399,12 +399,12 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
///
/// inserts a call to __msan_init to the module's constructor list.
bool MemorySanitizer::doInitialization(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
+ DL = getAnalysisIfAvailable<DataLayout>();
+ if (!DL)
return false;
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
C = &(M.getContext());
- unsigned PtrSize = TD->getPointerSizeInBits(/* AddressSpace */0);
+ unsigned PtrSize = DL->getPointerSizeInBits(/* AddressSpace */0);
switch (PtrSize) {
case 64:
ShadowMask = kShadowMask64;
@@ -420,7 +420,7 @@ bool MemorySanitizer::doInitialization(Module &M) {
}
IRBuilder<> IRB(*C);
- IntptrTy = IRB.getIntPtrTy(TD);
+ IntptrTy = IRB.getIntPtrTy(DL);
OriginTy = IRB.getInt32Ty();
ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
@@ -650,7 +650,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add MemorySanitizer instrumentation to a function.
bool runOnFunction() {
MS.initializeCallbacks(*F.getParent());
- if (!MS.TD) return false;
+ if (!MS.DL) return false;
// In the presence of unreachable blocks, we may see Phi nodes with
// incoming nodes from such blocks. Since InstVisitor skips unreachable
@@ -710,7 +710,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
return IT;
if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
- uint32_t EltSize = MS.TD->getTypeSizeInBits(VT->getElementType());
+ uint32_t EltSize = MS.DL->getTypeSizeInBits(VT->getElementType());
return VectorType::get(IntegerType::get(*MS.C, EltSize),
VT->getNumElements());
}
@@ -722,7 +722,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
return Res;
}
- uint32_t TypeSize = MS.TD->getTypeSizeInBits(OrigTy);
+ uint32_t TypeSize = MS.DL->getTypeSizeInBits(OrigTy);
return IntegerType::get(*MS.C, TypeSize);
}
@@ -889,8 +889,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
continue;
}
unsigned Size = AI->hasByValAttr()
- ? MS.TD->getTypeAllocSize(AI->getType()->getPointerElementType())
- : MS.TD->getTypeAllocSize(AI->getType());
+ ? MS.DL->getTypeAllocSize(AI->getType()->getPointerElementType())
+ : MS.DL->getTypeAllocSize(AI->getType());
if (A == AI) {
Value *Base = getShadowPtrForArgument(AI, EntryIRB, ArgOffset);
if (AI->hasByValAttr()) {
@@ -900,7 +900,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned ArgAlign = AI->getParamAlignment();
if (ArgAlign == 0) {
Type *EltType = A->getType()->getPointerElementType();
- ArgAlign = MS.TD->getABITypeAlignment(EltType);
+ ArgAlign = MS.DL->getABITypeAlignment(EltType);
}
unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(
@@ -1935,13 +1935,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (CS.paramHasAttr(i + 1, Attribute::ByVal)) {
assert(A->getType()->isPointerTy() &&
"ByVal argument is not a pointer!");
- Size = MS.TD->getTypeAllocSize(A->getType()->getPointerElementType());
+ Size = MS.DL->getTypeAllocSize(A->getType()->getPointerElementType());
unsigned Alignment = CS.getParamAlignment(i + 1);
Store = IRB.CreateMemCpy(ArgShadowBase,
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
Size, Alignment);
} else {
- Size = MS.TD->getTypeAllocSize(A->getType());
+ Size = MS.DL->getTypeAllocSize(A->getType());
Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
kShadowTLSAlignment);
}
@@ -2024,7 +2024,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void visitAllocaInst(AllocaInst &I) {
setShadow(&I, getCleanShadow(&I));
IRBuilder<> IRB(I.getNextNode());
- uint64_t Size = MS.TD->getTypeAllocSize(I.getAllocatedType());
+ uint64_t Size = MS.DL->getTypeAllocSize(I.getAllocatedType());
if (PoisonStack && ClPoisonStackWithCall) {
IRB.CreateCall2(MS.MsanPoisonStackFn,
IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()),
@@ -2223,7 +2223,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
FpOffset += 16;
break;
case AK_Memory:
- uint64_t ArgSize = MS.TD->getTypeAllocSize(A->getType());
+ uint64_t ArgSize = MS.DL->getTypeAllocSize(A->getType());
Base = getShadowPtrForVAArgument(A, IRB, OverflowOffset);
OverflowOffset += DataLayout::RoundUpAlignment(ArgSize, 8);
}
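
A recurring MSan detail in the hunks above: for a byval argument the shadow must cover the pointee, not the pointer. As a hypothetical helper:

    static uint64_t argShadowSize(const DataLayout *DL, Argument *A) {
      Type *T = A->hasByValAttr() ? A->getType()->getPointerElementType()
                                  : A->getType();
      return DL->getTypeAllocSize(T);
    }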
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 4570311340..f2d43f2877 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -78,7 +78,7 @@ namespace {
struct ThreadSanitizer : public FunctionPass {
ThreadSanitizer(StringRef BlacklistFile = StringRef())
: FunctionPass(ID),
- TD(0),
+ DL(0),
BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
: BlacklistFile) { }
const char *getPassName() const;
@@ -96,7 +96,7 @@ struct ThreadSanitizer : public FunctionPass {
bool addrPointsToConstantData(Value *Addr);
int getMemoryAccessFuncIndex(Value *Addr);
- DataLayout *TD;
+ DataLayout *DL;
Type *IntptrTy;
SmallString<64> BlacklistFile;
OwningPtr<SpecialCaseList> BL;
@@ -224,14 +224,14 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
}
bool ThreadSanitizer::doInitialization(Module &M) {
- TD = getAnalysisIfAvailable<DataLayout>();
- if (!TD)
+ DL = getAnalysisIfAvailable<DataLayout>();
+ if (!DL)
return false;
BL.reset(SpecialCaseList::createOrDie(BlacklistFile));
// Always insert a call to __tsan_init into the module's CTORs.
IRBuilder<> IRB(M.getContext());
- IntptrTy = IRB.getIntPtrTy(TD);
+ IntptrTy = IRB.getIntPtrTy(DL);
Value *TsanInit = M.getOrInsertFunction("__tsan_init",
IRB.getVoidTy(), NULL);
appendToGlobalCtors(M, cast<Function>(TsanInit), 0);
@@ -320,7 +320,7 @@ static bool isAtomic(Instruction *I) {
}
bool ThreadSanitizer::runOnFunction(Function &F) {
- if (!TD) return false;
+ if (!DL) return false;
if (BL->isIn(F)) return false;
initializeCallbacks(*F.getParent());
SmallVector<Instruction*, 8> RetVec;
@@ -573,7 +573,7 @@ int ThreadSanitizer::getMemoryAccessFuncIndex(Value *Addr) {
Type *OrigPtrTy = Addr->getType();
Type *OrigTy = cast<PointerType>(OrigPtrTy)->getElementType();
assert(OrigTy->isSized());
- uint32_t TypeSize = TD->getTypeStoreSizeInBits(OrigTy);
+ uint32_t TypeSize = DL->getTypeStoreSizeInBits(OrigTy);
if (TypeSize != 8 && TypeSize != 16 &&
TypeSize != 32 && TypeSize != 64 && TypeSize != 128) {
NumAccessesWithBadSize++;
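
ThreadSanitizer shows the whole cached-layout contract in one screen: cache once per module in doInitialization, then refuse to instrument in runOnFunction when nothing was cached. Condensed sketch:

    bool ThreadSanitizer::doInitialization(Module &M) {
      DL = getAnalysisIfAvailable<DataLayout>();
      if (!DL)
        return false;              // nothing cached; runOnFunction bails too
      IRBuilder<> IRB(M.getContext());
      IntptrTy = IRB.getIntPtrTy(DL);
      // ... append __tsan_init to the module ctors ...
      return true;
    }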
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index 24baee72dc..ecf0335ebe 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -262,7 +262,7 @@ namespace {
/// cases.
class EarlyCSE : public FunctionPass {
public:
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
DominatorTree *DT;
typedef RecyclingAllocator<BumpPtrAllocator,
@@ -432,7 +432,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// If the instruction can be simplified (e.g. X+0 = X) then replace it with
// its simpler value.
- if (Value *V = SimplifyInstruction(Inst, TD, TLI, DT)) {
+ if (Value *V = SimplifyInstruction(Inst, DL, TLI, DT)) {
DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
Inst->replaceAllUsesWith(V);
Inst->eraseFromParent();
@@ -557,7 +557,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
std::vector<StackNode *> nodesToProcess;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 3acf92b8e6..fbbc1317b5 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -586,7 +586,7 @@ namespace {
bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
SetVector<BasicBlock *> DeadBlocks;
@@ -624,7 +624,7 @@ namespace {
InstrsToErase.push_back(I);
}
- const DataLayout *getDataLayout() const { return TD; }
+ const DataLayout *getDataLayout() const { return DL; }
DominatorTree &getDominatorTree() const { return *DT; }
AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
MemoryDependenceAnalysis &getMemDep() const { return *MD; }
@@ -828,7 +828,7 @@ SpeculationFailure:
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
Type *LoadTy,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
@@ -837,8 +837,8 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
return false;
// The store has to be at least as big as the load.
- if (TD.getTypeSizeInBits(StoredVal->getType()) <
- TD.getTypeSizeInBits(LoadTy))
+ if (DL.getTypeSizeInBits(StoredVal->getType()) <
+ DL.getTypeSizeInBits(LoadTy))
return false;
return true;
@@ -853,15 +853,15 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Type *LoadedTy,
Instruction *InsertPt,
- const DataLayout &TD) {
- if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
+ const DataLayout &DL) {
+ if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
return 0;
// If this is already the right type, just return it.
Type *StoredValTy = StoredVal->getType();
- uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
+ uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy);
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy);
// If the store and reload are the same size, we can always reuse it.
if (StoreSize == LoadSize) {
@@ -872,13 +872,13 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->getScalarType()->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy);
+ StoredValTy = DL.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
Type *TypeToCastTo = LoadedTy;
if (TypeToCastTo->getScalarType()->isPointerTy())
- TypeToCastTo = TD.getIntPtrType(TypeToCastTo);
+ TypeToCastTo = DL.getIntPtrType(TypeToCastTo);
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
@@ -897,7 +897,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->getScalarType()->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy);
+ StoredValTy = DL.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
@@ -909,7 +909,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// If this is a big-endian system, we need to shift the value down to the low
// bits so that a truncate will work.
- if (TD.isBigEndian()) {
+ if (DL.isBigEndian()) {
Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
}
@@ -940,15 +940,15 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
Value *WritePtr,
uint64_t WriteSizeInBits,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
if (LoadTy->isStructTy() || LoadTy->isArrayTy())
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&TD);
- Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &TD);
+ Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr,StoreOffset,&DL);
+ Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, &DL);
if (StoreBase != LoadBase)
return -1;
@@ -970,7 +970,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
// If the load and store don't overlap at all, the store doesn't provide
// anything to the load. In this case, they really don't alias at all, AA
// must have gotten confused.
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);
if ((WriteSizeInBits & 7) | (LoadSize & 7))
return -1;
@@ -1013,51 +1013,51 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
StoreInst *DepSI,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// Cannot handle reading from store of first-class aggregate yet.
if (DepSI->getValueOperand()->getType()->isStructTy() ||
DepSI->getValueOperand()->getType()->isArrayTy())
return -1;
Value *StorePtr = DepSI->getPointerOperand();
- uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
+ uint64_t StoreSize =DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
- StorePtr, StoreSize, TD);
+ StorePtr, StoreSize, DL);
}
/// AnalyzeLoadFromClobberingLoad - This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
- LoadInst *DepLI, const DataLayout &TD){
+ LoadInst *DepLI, const DataLayout &DL){
// Cannot handle reading from store of first-class aggregate yet.
if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
return -1;
Value *DepPtr = DepLI->getPointerOperand();
- uint64_t DepSize = TD.getTypeSizeInBits(DepLI->getType());
- int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, TD);
+ uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
+ int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
if (R != -1) return R;
// If we have a load/load clobber and DepLI can be widened to cover this load,
// then we should widen it!
int64_t LoadOffs = 0;
const Value *LoadBase =
- GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &TD);
- unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
+ GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, &DL);
+ unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
unsigned Size = MemoryDependenceAnalysis::
- getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, TD);
+ getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI, DL);
if (Size == 0) return -1;
- return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, TD);
+ return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
}
static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemIntrinsic *MI,
- const DataLayout &TD) {
+ const DataLayout &DL) {
// If the mem operation is a non-constant size, we can't handle it.
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
if (SizeCst == 0) return -1;
@@ -1067,7 +1067,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
// of the memset..
if (MI->getIntrinsicID() == Intrinsic::memset)
return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
- MemSizeInBits, TD);
+ MemSizeInBits, DL);
// If we have a memcpy/memmove, the only case we can handle is if this is a
// copy from constant memory. In that case, we can read directly from the
@@ -1077,12 +1077,12 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
Constant *Src = dyn_cast<Constant>(MTI->getSource());
if (Src == 0) return -1;
- GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &TD));
+ GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
if (GV == 0 || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
- MI->getDest(), MemSizeInBits, TD);
+ MI->getDest(), MemSizeInBits, DL);
if (Offset == -1)
return Offset;
@@ -1095,7 +1095,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- if (ConstantFoldLoadFromConstPtr(Src, &TD))
+ if (ConstantFoldLoadFromConstPtr(Src, &DL))
return Offset;
return -1;
}
@@ -1108,11 +1108,11 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
Type *LoadTy,
- Instruction *InsertPt, const DataLayout &TD){
+ Instruction *InsertPt, const DataLayout &DL){
LLVMContext &Ctx = SrcVal->getType()->getContext();
- uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
- uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;
+ uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
+ uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1120,13 +1120,13 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
// to an integer type to start with.
if (SrcVal->getType()->getScalarType()->isPointerTy())
SrcVal = Builder.CreatePtrToInt(SrcVal,
- TD.getIntPtrType(SrcVal->getType()));
+ DL.getIntPtrType(SrcVal->getType()));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
- if (TD.isLittleEndian())
+ if (DL.isLittleEndian())
ShiftAmt = Offset*8;
else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
@@ -1137,7 +1137,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
if (LoadSize != StoreSize)
SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));
- return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
+ return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, DL);
}
/// GetLoadValueForLoad - This function is called when we have a
@@ -1148,11 +1148,11 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
GVN &gvn) {
- const DataLayout &TD = *gvn.getDataLayout();
+ const DataLayout &DL = *gvn.getDataLayout();
// If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
// widen SrcVal out to a larger load.
- unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType());
- unsigned LoadSize = TD.getTypeStoreSize(LoadTy);
+ unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
+ unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
if (Offset+LoadSize > SrcValSize) {
assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
@@ -1184,7 +1184,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
// Replace uses of the original load with the wider load. On a big endian
// system, we need to shift down to get the relevant bits.
Value *RV = NewLoad;
- if (TD.isBigEndian())
+ if (DL.isBigEndian())
RV = Builder.CreateLShr(RV,
NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
RV = Builder.CreateTrunc(RV, SrcVal->getType());
@@ -1199,7 +1199,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
SrcVal = NewLoad;
}
- return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, TD);
+ return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}
@@ -1207,9 +1207,9 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
Type *LoadTy, Instruction *InsertPt,
- const DataLayout &TD){
+ const DataLayout &DL){
LLVMContext &Ctx = LoadTy->getContext();
- uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
+ uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;
IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
@@ -1240,7 +1240,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
++NumBytesSet;
}
- return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
+ return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, DL);
}
// Otherwise, this is a memcpy/memmove from a constant global.
@@ -1256,7 +1256,7 @@ static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
Src = ConstantExpr::getGetElementPtr(Src, OffsetCst);
Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
- return ConstantFoldLoadFromConstPtr(Src, &TD);
+ return ConstantFoldLoadFromConstPtr(Src, &DL);
}
@@ -1322,10 +1322,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
if (isSimpleValue()) {
Res = getSimpleValue();
if (Res->getType() != LoadTy) {
- const DataLayout *TD = gvn.getDataLayout();
- assert(TD && "Need target data to handle type mismatch case");
+ const DataLayout *DL = gvn.getDataLayout();
+ assert(DL && "Need target data to handle type mismatch case");
Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
- *TD);
+ *DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
<< *getSimpleValue() << '\n'
@@ -1344,10 +1344,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
<< *Res << '\n' << "\n\n\n");
}
} else if (isMemIntrinValue()) {
- const DataLayout *TD = gvn.getDataLayout();
- assert(TD && "Need target data to handle type mismatch case");
+ const DataLayout *DL = gvn.getDataLayout();
+ assert(DL && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
- LoadTy, BB->getTerminator(), *TD);
+ LoadTy, BB->getTerminator(), *DL);
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
@@ -1400,9 +1400,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// read by the load, we can extract the bits we need for the load from the
// stored value.
if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
- if (TD && Address) {
+ if (DL && Address) {
int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
- DepSI, *TD);
+ DepSI, *DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
DepSI->getValueOperand(),
@@ -1419,10 +1419,10 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
// If this is a clobber and L is the first instruction in its block, then
// we have the first instruction in the entry block.
- if (DepLI != LI && Address && TD) {
+ if (DepLI != LI && Address && DL) {
int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
LI->getPointerOperand(),
- DepLI, *TD);
+ DepLI, *DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB,DepLI,
@@ -1435,9 +1435,9 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
// If the clobbering value is a memset/memcpy/memmove, see if we can
// forward a value on from it.
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
- if (TD && Address) {
+ if (DL && Address) {
int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
- DepMI, *TD);
+ DepMI, *DL);
if (Offset != -1) {
ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
Offset));
@@ -1469,8 +1469,8 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger than or equal to the loaded value, we can
// reuse it.
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
- LI->getType(), *TD)) {
+ if (DL == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
+ LI->getType(), *DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1486,7 +1486,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LD->getType() != LI->getType()) {
// If the stored value is larger than or equal to the loaded value, we can
// reuse it.
- if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
+ if (DL == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(), *DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1609,7 +1609,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// If all preds have a single successor, then we know it is safe to insert
// the load on the pred (?!?), so we can insert code to materialize the
// pointer if it is not available.
- PHITransAddr Address(LI->getPointerOperand(), TD);
+ PHITransAddr Address(LI->getPointerOperand(), DL);
Value *LoadPtr = 0;
LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
*DT, NewInsts);
@@ -1821,7 +1821,7 @@ bool GVN::processLoad(LoadInst *L) {
// If we have a clobber and target data is around, see if this is a clobber
// that we can fix up through code synthesis.
- if (Dep.isClobber() && TD) {
+ if (Dep.isClobber() && DL) {
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
@@ -1836,10 +1836,10 @@ bool GVN::processLoad(LoadInst *L) {
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
L->getPointerOperand(),
- DepSI, *TD);
+ DepSI, *DL);
if (Offset != -1)
AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
- L->getType(), L, *TD);
+ L->getType(), L, *DL);
}
// Check to see if we have something like this:
@@ -1854,7 +1854,7 @@ bool GVN::processLoad(LoadInst *L) {
int Offset = AnalyzeLoadFromClobberingLoad(L->getType(),
L->getPointerOperand(),
- DepLI, *TD);
+ DepLI, *DL);
if (Offset != -1)
AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this);
}
@@ -1864,9 +1864,9 @@ bool GVN::processLoad(LoadInst *L) {
if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
L->getPointerOperand(),
- DepMI, *TD);
+ DepMI, *DL);
if (Offset != -1)
- AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *TD);
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, *DL);
}
if (AvailVal) {
@@ -1917,9 +1917,9 @@ bool GVN::processLoad(LoadInst *L) {
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
if (StoredVal->getType() != L->getType()) {
- if (TD) {
+ if (DL) {
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
- L, *TD);
+ L, *DL);
if (StoredVal == 0)
return false;
@@ -1946,9 +1946,9 @@ bool GVN::processLoad(LoadInst *L) {
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
if (DepLI->getType() != L->getType()) {
- if (TD) {
+ if (DL) {
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
- L, *TD);
+ L, *DL);
if (AvailableVal == 0)
return false;
@@ -2200,7 +2200,7 @@ bool GVN::processInstruction(Instruction *I) {
// to value numbering it. Value numbering often exposes redundancies, for
// example if it determines that %y is equal to %x then the instruction
// "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
- if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) {
+ if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {
I->replaceAllUsesWith(V);
if (MD && V->getType()->getScalarType()->isPointerTy())
MD->invalidateCachedPointerInfo(V);
@@ -2318,7 +2318,7 @@ bool GVN::runOnFunction(Function& F) {
if (!NoLoads)
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
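
A side note on the endianness logic renamed above: the shift that GetStoreValueForLoad computes is easy to get backwards, so here is a standalone sketch of just that calculation (the function name and byte-size parameters are ours, not part of this patch).

#include "llvm/IR/DataLayout.h"
#include <cstdint>

// Sketch: which shift exposes the loaded bytes inside a wider stored
// integer. StoreSize/LoadSize/Offset are in bytes; the result is in bits.
static uint64_t coercionShift(const llvm::DataLayout &DL, uint64_t StoreSize,
                              uint64_t LoadSize, uint64_t Offset) {
  // Little-endian: the bytes at offset N sit N*8 bits up from bit 0.
  // Big-endian: they sit below the high end, past the trailing bytes.
  return DL.isLittleEndian() ? Offset * 8
                             : (StoreSize - LoadSize - Offset) * 8;
}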
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp
index a15cc84cfd..161e35a648 100644
--- a/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -126,15 +126,15 @@ namespace {
}
struct GlobalCmp {
- const DataLayout *TD;
+ const DataLayout *DL;
- GlobalCmp(const DataLayout *td) : TD(td) { }
+ GlobalCmp(const DataLayout *DL) : DL(DL) { }
bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) {
Type *Ty1 = cast<PointerType>(GV1->getType())->getElementType();
Type *Ty2 = cast<PointerType>(GV2->getType())->getElementType();
- return (TD->getTypeAllocSize(Ty1) < TD->getTypeAllocSize(Ty2));
+ return (DL->getTypeAllocSize(Ty1) < DL->getTypeAllocSize(Ty2));
}
};
};
@@ -148,7 +148,7 @@ INITIALIZE_PASS(GlobalMerge, "global-merge",
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const {
const TargetLowering *TLI = TM->getTargetLowering();
- const DataLayout *TD = TLI->getDataLayout();
+ const DataLayout *DL = TLI->getDataLayout();
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
@@ -156,7 +156,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
// FIXME: Find better heuristics
- std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(TD));
+ std::stable_sort(Globals.begin(), Globals.end(), GlobalCmp(DL));
Type *Int32Ty = Type::getInt32Ty(M.getContext());
@@ -167,7 +167,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
std::vector<Constant*> Inits;
for (j = i; j != e; ++j) {
Type *Ty = Globals[j]->getType()->getElementType();
- MergedSize += TD->getTypeAllocSize(Ty);
+ MergedSize += DL->getTypeAllocSize(Ty);
if (MergedSize > MaxOffset) {
break;
}
@@ -242,7 +242,7 @@ bool GlobalMerge::doInitialization(Module &M) {
DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
BSSGlobals;
const TargetLowering *TLI = TM->getTargetLowering();
- const DataLayout *TD = TLI->getDataLayout();
+ const DataLayout *DL = TLI->getDataLayout();
unsigned MaxOffset = TLI->getMaximalGlobalOffset();
bool Changed = false;
setMustKeepGlobalVariables(M);
@@ -260,9 +260,9 @@ bool GlobalMerge::doInitialization(Module &M) {
unsigned AddressSpace = PT->getAddressSpace();
// Ignore fancy-aligned globals for now.
- unsigned Alignment = TD->getPreferredAlignment(I);
+ unsigned Alignment = DL->getPreferredAlignment(I);
Type *Ty = I->getType()->getElementType();
- if (Alignment > TD->getABITypeAlignment(Ty))
+ if (Alignment > DL->getABITypeAlignment(Ty))
continue;
// Ignore all 'special' globals.
@@ -274,7 +274,7 @@ bool GlobalMerge::doInitialization(Module &M) {
if (isMustKeepGlobalVariable(I))
continue;
- if (TD->getTypeAllocSize(Ty) < MaxOffset) {
+ if (DL->getTypeAllocSize(Ty) < MaxOffset) {
if (TargetLoweringObjectFile::getKindForGlobal(I, TLI->getTargetMachine())
.isBSSLocal())
BSSGlobals[AddressSpace].push_back(I);
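
For readers skimming the GlobalCmp change above: GlobalMerge orders candidates by their DataLayout-reported allocation size before packing them into a merged global. A minimal standalone equivalent might look like the following (names are illustrative; this is a sketch, not the pass's code).

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include <algorithm>
#include <vector>

// Sketch of the size ordering GlobalCmp implements: smaller globals first,
// measured by the alloc size (including padding) of the pointee type.
static void sortGlobalsBySize(std::vector<llvm::GlobalVariable *> &Globals,
                              const llvm::DataLayout &DL) {
  std::stable_sort(Globals.begin(), Globals.end(),
                   [&DL](const llvm::GlobalVariable *A,
                         const llvm::GlobalVariable *B) {
    llvm::Type *TyA = A->getType()->getElementType();
    llvm::Type *TyB = B->getType()->getElementType();
    return DL.getTypeAllocSize(TyA) < DL.getTypeAllocSize(TyB);
  });
}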
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 723af0f02c..1543e5f1eb 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -71,7 +71,7 @@ namespace {
LoopInfo *LI;
ScalarEvolution *SE;
DominatorTree *DT;
- DataLayout *TD;
+ DataLayout *DL;
TargetLibraryInfo *TLI;
SmallVector<WeakVH, 16> DeadInsts;
@@ -79,7 +79,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), TD(0),
+ IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), DL(0),
Changed(false) {
initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
}
@@ -659,14 +659,14 @@ namespace {
/// extended by this sign or zero extend operation. This is used to determine
/// the final width of the IV before actually widening it.
static void visitIVCast(CastInst *Cast, WideIVInfo &WI, ScalarEvolution *SE,
- const DataLayout *TD) {
+ const DataLayout *DL) {
bool IsSigned = Cast->getOpcode() == Instruction::SExt;
if (!IsSigned && Cast->getOpcode() != Instruction::ZExt)
return;
Type *Ty = Cast->getType();
uint64_t Width = SE->getTypeSizeInBits(Ty);
- if (TD && !TD->isLegalInteger(Width))
+ if (DL && !DL->isLegalInteger(Width))
return;
if (!WI.WidestNativeType) {
@@ -1122,15 +1122,15 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
namespace {
class IndVarSimplifyVisitor : public IVVisitor {
ScalarEvolution *SE;
- const DataLayout *TD;
+ const DataLayout *DL;
PHINode *IVPhi;
public:
WideIVInfo WI;
IndVarSimplifyVisitor(PHINode *IV, ScalarEvolution *SCEV,
- const DataLayout *TData, const DominatorTree *DTree):
- SE(SCEV), TD(TData), IVPhi(IV) {
+ const DataLayout *DL, const DominatorTree *DTree):
+ SE(SCEV), DL(DL), IVPhi(IV) {
DT = DTree;
WI.NarrowIV = IVPhi;
if (ReduceLiveIVs)
@@ -1138,7 +1138,7 @@ namespace {
}
// Implement the interface used by simplifyUsersOfIV.
- virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, TD); }
+ virtual void visitCast(CastInst *Cast) { visitIVCast(Cast, WI, SE, DL); }
};
}
@@ -1172,7 +1172,7 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
PHINode *CurrIV = LoopPhis.pop_back_val();
// Information about sign/zero extensions of CurrIV.
- IndVarSimplifyVisitor Visitor(CurrIV, SE, TD, DT);
+ IndVarSimplifyVisitor Visitor(CurrIV, SE, DL, DT);
Changed |= simplifyUsersOfIV(CurrIV, SE, &LPM, DeadInsts, &Visitor);
@@ -1444,7 +1444,7 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
/// could at least handle constant BECounts.
static PHINode *
FindLoopCounter(Loop *L, const SCEV *BECount,
- ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) {
+ ScalarEvolution *SE, DominatorTree *DT, const DataLayout *DL) {
uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType());
Value *Cond =
@@ -1473,7 +1473,7 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
// AR may be wider than BECount. With eq/ne tests overflow is immaterial.
// AR may not be a narrower type, or we may never exit.
uint64_t PhiWidth = SE->getTypeSizeInBits(AR->getType());
- if (PhiWidth < BCWidth || (TD && !TD->isLegalInteger(PhiWidth)))
+ if (PhiWidth < BCWidth || (DL && !DL->isLegalInteger(PhiWidth)))
continue;
const SCEV *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE));
@@ -1818,7 +1818,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
DeadInsts.clear();
@@ -1860,7 +1860,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
// If we have a trip count expression, rewrite the loop's exit condition
// using it. We can currently only handle loops with a single exit.
if (canExpandBackedgeTakenCount(L, SE) && needsLFTR(L, DT)) {
- PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, TD);
+ PHINode *IndVar = FindLoopCounter(L, BackedgeTakenCount, SE, DT, DL);
if (IndVar) {
// Check preconditions for proper SCEVExpander operation. SCEV does not
// express SCEVExpander's dependencies, such as LoopSimplify. Instead any
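
A note on the isLegalInteger checks renamed twice in this file: the "n" component of a target's DataLayout string declares its native integer widths, and IndVarSimplify refuses to widen an induction variable past them. A small hedged example (the layout string is illustrative, not any particular target's):

#include "llvm/IR/DataLayout.h"
#include <cassert>

int main() {
  // "e" = little-endian; "n8:16:32:64" = native integer widths.
  llvm::DataLayout DL("e-n8:16:32:64");
  assert(DL.isLegalInteger(64));   // fine to widen an IV to i64 here
  assert(!DL.isLegalInteger(128)); // but not to i128
  return 0;
}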
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 087944a2be..e4b088d82b 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -76,7 +76,7 @@ namespace {
/// revectored to the false side of the second if.
///
class JumpThreading : public FunctionPass {
- DataLayout *TD;
+ DataLayout *DL;
TargetLibraryInfo *TLI;
LazyValueInfo *LVI;
#ifdef NDEBUG
@@ -152,7 +152,7 @@ bool JumpThreading::runOnFunction(Function &F) {
return false;
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
@@ -493,7 +493,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
Value *LHS = PN->getIncomingValue(i);
Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
- Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, TD);
+ Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
if (Res == 0) {
if (!isa<Constant>(RHS))
continue;
@@ -695,7 +695,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
// Run constant folding to see if we can reduce the condition to a simple
// constant.
if (Instruction *I = dyn_cast<Instruction>(Condition)) {
- Value *SimpleVal = ConstantFoldInstruction(I, TD, TLI);
+ Value *SimpleVal = ConstantFoldInstruction(I, DL, TLI);
if (SimpleVal) {
I->replaceAllUsesWith(SimpleVal);
I->eraseFromParent();
@@ -1478,7 +1478,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
// At this point, the IR is fully up to date and consistent. Do a quick scan
// over the new instructions and zap any that are constants or dead. This
// frequently happens because of phi translation.
- SimplifyInstructionsInBlock(NewBB, TD, TLI);
+ SimplifyInstructionsInBlock(NewBB, DL, TLI);
// Threaded an edge!
++NumThreads;
@@ -1560,7 +1560,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// If this instruction can be simplified after the operands are updated,
// just use the simplified value instead. This frequently happens due to
// phi translation.
- if (Value *IV = SimplifyInstruction(New, TD)) {
+ if (Value *IV = SimplifyInstruction(New, DL)) {
delete New;
ValueMapping[BI] = IV;
} else {
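
The hunk above ends mid-branch, but the pattern it touches is worth spelling out: when JumpThreading duplicates an instruction into a predecessor, it first asks SimplifyInstruction whether the clone is redundant, and only inserts it otherwise. A standalone sketch, assuming the clone has not yet been linked into any block (helper name ours):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instruction.h"

// Sketch: simplify a freshly cloned, not-yet-inserted instruction, or
// insert it into BB if no simpler value exists. DL may be null.
static llvm::Value *simplifyOrInsert(llvm::Instruction *New,
                                     llvm::BasicBlock *BB,
                                     const llvm::DataLayout *DL) {
  if (llvm::Value *V = llvm::SimplifyInstruction(New, DL)) {
    delete New; // safe: the clone was never linked into a block
    return V;
  }
  BB->getInstList().push_back(New);
  return New;
}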
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 43247ea9b7..2ba6210b47 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -108,7 +108,7 @@ namespace {
LoopInfo *LI; // Current LoopInfo
DominatorTree *DT; // Dominator Tree for the current Loop.
- DataLayout *TD; // DataLayout for constant folding.
+ DataLayout *DL; // DataLayout for constant folding.
TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding.
// State that is updated as we process loops.
@@ -221,7 +221,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
@@ -394,7 +394,7 @@ void LICM::HoistRegion(DomTreeNode *N) {
// Try constant folding this instruction. If all the operands are
// constants, it is technically hoistable, but it would be better to just
// fold it.
- if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(&I, DL, TLI)) {
DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
CurAST->copyValue(&I, C);
CurAST->deleteValue(&I);
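
Both LICM and JumpThreading fetch DataLayout with getAnalysisIfAvailable, so the folding helpers must tolerate a null pointer. A minimal sketch of the fold-then-erase step, under the assumption that all of I's uses can be replaced (names ours):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLibraryInfo.h"

// Sketch: fold an instruction whose operands are constants; a null DL is
// fine and simply means layout-dependent folds are skipped.
static bool foldAndErase(llvm::Instruction *I, const llvm::DataLayout *DL,
                         const llvm::TargetLibraryInfo *TLI) {
  if (llvm::Constant *C = llvm::ConstantFoldInstruction(I, DL, TLI)) {
    I->replaceAllUsesWith(C);
    I->eraseFromParent();
    return true;
  }
  return false;
}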
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b9c4046c0c..acff3734e1 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -132,7 +132,7 @@ namespace {
class LoopIdiomRecognize : public LoopPass {
Loop *CurLoop;
- const DataLayout *TD;
+ const DataLayout *DL;
DominatorTree *DT;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
@@ -141,7 +141,7 @@ namespace {
static char ID;
explicit LoopIdiomRecognize() : LoopPass(ID) {
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
- TD = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
+ DL = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
}
bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -182,7 +182,7 @@ namespace {
}
const DataLayout *getDataLayout() {
- return TD ? TD : TD=getAnalysisIfAvailable<DataLayout>();
+ return DL ? DL : DL=getAnalysisIfAvailable<DataLayout>();
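+ // Note: the assignment inside ?: caches the lazily fetched DataLayout.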
}
DominatorTree *getDominatorTree() {
@@ -782,7 +782,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
- uint64_t SizeInBits = TD->getTypeSizeInBits(StoredVal->getType());
+ uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return false;
@@ -910,7 +910,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access,
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
-static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
// If the value isn't a constant, we can't promote it to being in a constant
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
@@ -918,12 +918,12 @@ static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) {
if (C == 0) return 0;
// Only handle simple values that are a power of two bytes in size.
- uint64_t Size = TD.getTypeSizeInBits(V->getType());
+ uint64_t Size = DL.getTypeSizeInBits(V->getType());
if (Size == 0 || (Size & 7) || (Size & (Size-1)))
return 0;
// Don't care enough about darwin/ppc to implement this.
- if (TD.isBigEndian())
+ if (DL.isBigEndian())
return 0;
// Convert to size in bytes.
@@ -970,7 +970,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
PatternValue = 0;
} else if (DestAS == 0 &&
TLI->has(LibFunc::memset_pattern16) &&
- (PatternValue = getMemSetPatternValue(StoredVal, *TD))) {
+ (PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
// Don't create memset_pattern16s with address spaces.
// It looks like we can use PatternValue!
SplatValue = 0;
@@ -1011,7 +1011,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtr = Builder.getIntPtrTy(TD, DestAS);
+ Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
@@ -1125,7 +1125,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtrTy = Builder.getIntPtrTy(TD, SI->getPointerAddressSpace());
+ Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtrTy, 1),
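
The size screen in getMemSetPatternValue above packs three conditions into two bit tests. Unrolled, with Size in bits as returned by getTypeSizeInBits, it reduces to the following standalone sketch (helper name ours):

#include <cstdint>

// Sketch: memset_pattern16 wants a nonzero, whole-byte, power-of-two-sized
// unit it can replicate; (x & (x-1)) == 0 is the power-of-two test.
static bool isPatternableSize(uint64_t SizeInBits) {
  return SizeInBits != 0 &&                       // no zero-sized values
         (SizeInBits & 7) == 0 &&                 // a whole number of bytes
         (SizeInBits & (SizeInBits - 1)) == 0;    // a power of two
}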
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 6619d54288..8c5620fe1d 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -195,9 +195,9 @@ class MemsetRanges {
/// because each element is relatively large and expensive to copy.
std::list<MemsetRange> Ranges;
typedef std::list<MemsetRange>::iterator range_iterator;
- const DataLayout &TD;
+ const DataLayout &DL;
public:
- MemsetRanges(const DataLayout &td) : TD(td) {}
+ MemsetRanges(const DataLayout &DL) : DL(DL) {}
typedef std::list<MemsetRange>::const_iterator const_iterator;
const_iterator begin() const { return Ranges.begin(); }
@@ -212,7 +212,7 @@ public:
}
void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
- int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());
+ int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
addRange(OffsetFromFirst, StoreSize,
SI->getPointerOperand(), SI->getAlignment(), SI);
@@ -305,14 +305,14 @@ namespace {
class MemCpyOpt : public FunctionPass {
MemoryDependenceAnalysis *MD;
TargetLibraryInfo *TLI;
- const DataLayout *TD;
+ const DataLayout *DL;
public:
static char ID; // Pass identification, replacement for typeid
MemCpyOpt() : FunctionPass(ID) {
initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
MD = 0;
TLI = 0;
- TD = 0;
+ DL = 0;
}
bool runOnFunction(Function &F);
@@ -366,13 +366,13 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr, Value *ByteVal) {
- if (TD == 0) return 0;
+ if (DL == 0) return 0;
// Okay, so we now have a single store that can be splatted. Scan to find
// all subsequent stores of the same value to offset from the same pointer.
// Join these together into ranges, so we can decide whether contiguous blocks
// are stored.
- MemsetRanges Ranges(*TD);
+ MemsetRanges Ranges(*DL);
BasicBlock::iterator BI = StartInst;
for (++BI; !isa<TerminatorInst>(BI); ++BI) {
@@ -396,7 +396,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
- Offset, *TD))
+ Offset, *DL))
break;
Ranges.addStore(Offset, NextStore);
@@ -409,7 +409,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Check to see if this store is to a constant offset from the start ptr.
int64_t Offset;
- if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
+ if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *DL))
break;
Ranges.addMemSet(Offset, MSI);
@@ -441,7 +441,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (Range.TheStores.size() == 1) continue;
// If it is profitable to lower this range to memset, do so now.
- if (!Range.isProfitableToUseMemset(*TD))
+ if (!Range.isProfitableToUseMemset(*DL))
continue;
// Otherwise, we do want to transform this! Create a new memset.
@@ -453,7 +453,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
if (Alignment == 0) {
Type *EltType =
cast<PointerType>(StartPtr->getType())->getElementType();
- Alignment = TD->getABITypeAlignment(EltType);
+ Alignment = DL->getABITypeAlignment(EltType);
}
AMemSet =
@@ -484,7 +484,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (!SI->isSimple()) return false;
- if (TD == 0) return false;
+ if (DL == 0) return false;
// Detect cases where we're performing call slot forwarding, but
// happen to be using a load-store pair to implement it, rather than
@@ -514,15 +514,15 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (C) {
unsigned storeAlign = SI->getAlignment();
if (!storeAlign)
- storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType());
+ storeAlign = DL->getABITypeAlignment(SI->getOperand(0)->getType());
unsigned loadAlign = LI->getAlignment();
if (!loadAlign)
- loadAlign = TD->getABITypeAlignment(LI->getType());
+ loadAlign = DL->getABITypeAlignment(LI->getType());
bool changed = performCallSlotOptzn(LI,
SI->getPointerOperand()->stripPointerCasts(),
LI->getPointerOperand()->stripPointerCasts(),
- TD->getTypeStoreSize(SI->getOperand(0)->getType()),
+ DL->getTypeStoreSize(SI->getOperand(0)->getType()),
std::min(storeAlign, loadAlign), C);
if (changed) {
MD->removeInstruction(SI);
@@ -596,13 +596,13 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
// Check that all of src is copied to dest.
- if (TD == 0) return false;
+ if (DL == 0) return false;
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
return false;
- uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
+ uint64_t srcSize = DL->getTypeAllocSize(srcAlloca->getAllocatedType()) *
srcArraySize->getZExtValue();
if (cpyLen < srcSize)
@@ -617,7 +617,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
if (!destArraySize)
return false;
- uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
+ uint64_t destSize = DL->getTypeAllocSize(A->getAllocatedType()) *
destArraySize->getZExtValue();
if (destSize < srcSize)
@@ -636,7 +636,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
}
- uint64_t destSize = TD->getTypeAllocSize(StructTy);
+ uint64_t destSize = DL->getTypeAllocSize(StructTy);
if (destSize < srcSize)
return false;
} else {
@@ -646,7 +646,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// Check that dest points to memory that is at least as aligned as src.
unsigned srcAlign = srcAlloca->getAlignment();
if (!srcAlign)
- srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType());
+ srcAlign = DL->getABITypeAlignment(srcAlloca->getAllocatedType());
bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
// If dest is not aligned enough and we can't increase its alignment then
// bail out.
@@ -912,12 +912,12 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
- if (TD == 0) return false;
+ if (DL == 0) return false;
// Find out what feeds this byval argument.
Value *ByValArg = CS.getArgument(ArgNo);
Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
- uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
+ uint64_t ByValSize = DL->getTypeAllocSize(ByValTy);
MemDepResult DepInfo =
MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
true, CS.getInstruction(),
@@ -946,7 +946,7 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
// If it is greater than the memcpy, then we check to see if we can force the
// source of the memcpy to the alignment we need. If we fail, we bail out.
if (MDep->getAlignment() < ByValAlign &&
- getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
+ getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL) < ByValAlign)
return false;
// Verify that the copied-from memory doesn't change in between the memcpy and
@@ -1025,7 +1025,7 @@ bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
// If we don't have at least memset and memcpy, there is little point in doing
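
A recurring detail in this file: a load or store whose alignment field is zero means "use the ABI default for its type", which is why the call-slot code above falls back to getABITypeAlignment. A sketch of that resolution (helper name ours, assuming this era's unsigned alignments):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Sketch: the effective alignment of a store, resolving the 0-means-ABI
// convention against DataLayout.
static unsigned effectiveStoreAlign(const llvm::DataLayout &DL,
                                    const llvm::StoreInst *SI) {
  if (unsigned Align = SI->getAlignment())
    return Align;
  return DL.getABITypeAlignment(SI->getValueOperand()->getType());
}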
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 02c64d9ea8..335ef3147d 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -153,7 +153,7 @@ namespace {
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
SmallPtrSet<BasicBlock*, 8> BBExecutable; // The BBs that are executable.
DenseMap<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -205,8 +205,8 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
- SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli)
- : TD(td), TLI(tli) {}
+ SCCPSolver(const DataLayout *DL, const TargetLibraryInfo *tli)
+ : DL(DL), TLI(tli) {}
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.
@@ -1067,7 +1067,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
}
// Transform load from a constant into a constant if possible.
- if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, TD))
+ if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, DL))
return markConstant(IV, &I, C);
// Otherwise we cannot say for certain what value this load will produce.
@@ -1557,9 +1557,9 @@ bool SCCP::runOnFunction(Function &F) {
return false;
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
- SCCPSolver Solver(TD, TLI);
+ SCCPSolver Solver(DL, TLI);
// Mark the first block of the function as being executable.
Solver.MarkBlockExecutable(F.begin());
@@ -1686,9 +1686,9 @@ static bool AddressIsTaken(const GlobalValue *GV) {
}
bool IPSCCP::runOnModule(Module &M) {
- const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
+ const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
- SCCPSolver Solver(TD, TLI);
+ SCCPSolver Solver(DL, TLI);
// AddressTakenFunctions - This set keeps track of the address-taken functions
// that are in the input. As IPSCCP runs through and simplifies code,
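
The load-folding call renamed in this file is the core of SCCP's handling of constant globals: when the pointer operand is itself a Constant, the load may fold to a Constant. A hedged sketch (wrapper name ours; a null result means the value is unknown):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constant.h"

// Sketch: fold a load through a constant pointer; returns null when the
// pointee value cannot be decided at compile time.
static llvm::Constant *tryFoldConstLoad(llvm::Constant *Ptr,
                                        const llvm::DataLayout *DL) {
  return llvm::ConstantFoldLoadFromConstPtr(Ptr, DL);
}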
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index a8186ae342..ac7e7773aa 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -87,7 +87,7 @@ namespace {
private:
bool HasDomTree;
- DataLayout *TD;
+ DataLayout *DL;
/// DeadInsts - Keep track of instructions we have made dead, so that
/// we can remove them after we are done working.
@@ -258,7 +258,7 @@ namespace {
class ConvertToScalarInfo {
/// AllocaSize - The size of the alloca being considered in bytes.
unsigned AllocaSize;
- const DataLayout &TD;
+ const DataLayout &DL;
unsigned ScalarLoadThreshold;
/// IsNotTrivial - This is set to true if there is some access to the object
@@ -301,9 +301,9 @@ class ConvertToScalarInfo {
bool HadDynamicAccess;
public:
- explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td,
+ explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
unsigned SLT)
- : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false),
+ : AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),
ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
HadDynamicAccess(false) { }
@@ -364,7 +364,7 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
return 0;
if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
- !HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))
+ !HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
return 0;
// Dynamic accesses on integers aren't yet supported. They need us to shift
// by a dynamic amount which could be difficult to work out as we might not
@@ -520,7 +520,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
HadDynamicAccess = true;
} else
GEPNonConstantIdx = NonConstantIdx;
- uint64_t GEPOffset = TD.getIndexedOffset(PtrTy,
+ uint64_t GEPOffset = DL.getIndexedOffset(PtrTy,
Indices);
// See if all uses can be converted.
if (!CanConvertToScalar(GEP, Offset+GEPOffset, GEPNonConstantIdx))
@@ -615,7 +615,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
GEPNonConstantIdx = Indices.pop_back_val();
} else
GEPNonConstantIdx = NonConstantIdx;
- uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
+ uint64_t GEPOffset = DL.getIndexedOffset(GEP->getPointerOperandType(),
Indices);
ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8, GEPNonConstantIdx);
GEP->eraseFromParent();
@@ -692,9 +692,9 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
// If the source and destination are both to the same alloca, then this is
// a noop copy-to-self, just delete it. Otherwise, emit a load and store
// as appropriate.
- AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD, 0));
+ AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &DL, 0));
- if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {
+ if (GetUnderlyingObject(MTI->getSource(), &DL, 0) != OrigAI) {
// Dest must be OrigAI, change this to be a load from the original
// pointer (bitcasted), then a store to our new alloca.
assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
@@ -710,7 +710,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
SrcVal->setAlignment(MTI->getAlignment());
Builder.CreateStore(SrcVal, NewAI);
- } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {
+ } else if (GetUnderlyingObject(MTI->getDest(), &DL, 0) != OrigAI) {
// Src must be OrigAI, change this to be a load from NewAI then a store
// through the original dest pointer (bitcasted).
assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
@@ -770,15 +770,15 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
// If the result alloca is a vector type, this is either an element
// access or a bitcast to another vector type of the same size.
if (VectorType *VTy = dyn_cast<VectorType>(FromType)) {
- unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
- unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
+ unsigned FromTypeSize = DL.getTypeAllocSize(FromType);
+ unsigned ToTypeSize = DL.getTypeAllocSize(ToType);
if (FromTypeSize == ToTypeSize)
return Builder.CreateBitCast(FromVal, ToType);
// Otherwise it must be an element access.
unsigned Elt = 0;
if (Offset) {
- unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
+ unsigned EltSize = DL.getTypeAllocSizeInBits(VTy->getElementType());
Elt = Offset/EltSize;
assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
}
@@ -804,7 +804,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
if (StructType *ST = dyn_cast<StructType>(ToType)) {
assert(!NonConstantIdx &&
"Dynamic indexing into struct types not supported");
- const StructLayout &Layout = *TD.getStructLayout(ST);
+ const StructLayout &Layout = *DL.getStructLayout(ST);
Value *Res = UndefValue::get(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
@@ -818,7 +818,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
if (ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
assert(!NonConstantIdx &&
"Dynamic indexing into array types not supported");
- uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
Value *Res = UndefValue::get(AT);
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
@@ -834,12 +834,12 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
// If this is a big-endian system and the load is narrower than the
// full alloca type, we need to do a shift to get the right bits.
int ShAmt = 0;
- if (TD.isBigEndian()) {
+ if (DL.isBigEndian()) {
// On big-endian machines, the lowest bit is stored at the bit offset
// from the pointer given by getTypeStoreSizeInBits. This matters for
// integers with a bitwidth that is not a multiple of 8.
- ShAmt = TD.getTypeStoreSizeInBits(NTy) -
- TD.getTypeStoreSizeInBits(ToType) - Offset;
+ ShAmt = DL.getTypeStoreSizeInBits(NTy) -
+ DL.getTypeStoreSizeInBits(ToType) - Offset;
} else {
ShAmt = Offset;
}
@@ -855,7 +855,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
ConstantInt::get(FromVal->getType(), -ShAmt));
// Finally, unconditionally truncate the integer to the right width.
- unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
+ unsigned LIBitWidth = DL.getTypeSizeInBits(ToType);
if (LIBitWidth < NTy->getBitWidth())
FromVal =
Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
@@ -902,8 +902,8 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
LLVMContext &Context = Old->getContext();
if (VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
- uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
- uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());
+ uint64_t VecSize = DL.getTypeAllocSizeInBits(VTy);
+ uint64_t ValSize = DL.getTypeAllocSizeInBits(SV->getType());
// Changing the whole vector with memset or with an access of a different
// vector type?
@@ -914,7 +914,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
Type *EltTy = VTy->getElementType();
if (SV->getType() != EltTy)
SV = Builder.CreateBitCast(SV, EltTy);
- uint64_t EltSize = TD.getTypeAllocSizeInBits(EltTy);
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(EltTy);
unsigned Elt = Offset/EltSize;
Value *Idx;
if (NonConstantIdx) {
@@ -933,7 +933,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
if (StructType *ST = dyn_cast<StructType>(SV->getType())) {
assert(!NonConstantIdx &&
"Dynamic indexing into struct types not supported");
- const StructLayout &Layout = *TD.getStructLayout(ST);
+ const StructLayout &Layout = *DL.getStructLayout(ST);
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
Old = ConvertScalar_InsertValue(Elt, Old,
@@ -946,7 +946,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
if (ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
assert(!NonConstantIdx &&
"Dynamic indexing into array types not supported");
- uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
+ uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0, Builder);
@@ -956,14 +956,14 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
// If SV is a float, convert it to the appropriate integer type.
// If it is a pointer, do the same.
- unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
- unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
- unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
- unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
+ unsigned SrcWidth = DL.getTypeSizeInBits(SV->getType());
+ unsigned DestWidth = DL.getTypeSizeInBits(AllocaType);
+ unsigned SrcStoreWidth = DL.getTypeStoreSizeInBits(SV->getType());
+ unsigned DestStoreWidth = DL.getTypeStoreSizeInBits(AllocaType);
if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
else if (SV->getType()->isPointerTy())
- SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
+ SV = Builder.CreatePtrToInt(SV, DL.getIntPtrType(SV->getType()));
// Zero extend or truncate the value if needed.
if (SV->getType() != AllocaType) {
@@ -982,7 +982,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
// If this is a big-endian system and the store is narrower than the
// full alloca type, we need to do a shift to get the right bits.
int ShAmt = 0;
- if (TD.isBigEndian()) {
+ if (DL.isBigEndian()) {
// On big-endian machines, the lowest bit is stored at the bit offset
// from the pointer given by getTypeStoreSizeInBits. This matters for
// integers with a bitwidth that is not a multiple of 8.
@@ -1023,7 +1023,7 @@ bool SROA::runOnFunction(Function &F) {
if (skipOptnoneFunction(F))
return false;
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
bool Changed = performPromotion(F);
@@ -1031,7 +1031,7 @@ bool SROA::runOnFunction(Function &F) {
// theoretically needs to. It should be refactored in order to support
// target-independent IR. Until this is done, just skip the actual
// scalar-replacement portion of this pass.
- if (!TD) return Changed;
+ if (!DL) return Changed;
while (1) {
bool LocalChange = performScalarRepl(F);
@@ -1137,7 +1137,7 @@ public:
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
+static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
@@ -1149,10 +1149,10 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
// Both operands to the select need to be dereferenceable, either absolutely
// (e.g. allocas) or at this point because we can see other accesses to it.
if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
- LI->getAlignment(), TD))
+ LI->getAlignment(), DL))
return false;
if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
- LI->getAlignment(), TD))
+ LI->getAlignment(), DL))
return false;
}
@@ -1175,7 +1175,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) {
///
/// We can do this to a select if its only uses are loads and if the operand to
/// the select can be loaded unconditionally.
-static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
+static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
// For now, we can only do this promotion if the load is in the same block as
// the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1225,7 +1225,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
// If this pointer is always safe to load, or if we can prove that there is
// already a load in the block, then we can move the load to the pred block.
if (InVal->isDereferenceablePointer() ||
- isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
+ isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, DL))
continue;
return false;
@@ -1239,7 +1239,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) {
/// direct (non-volatile) loads and stores to it. If the alloca is close but
/// not quite there, this will transform the code to allow promotion. As such,
/// it is a non-pure predicate.
-static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
+static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
@@ -1268,12 +1268,12 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
// This is very rare and we just scrambled the use list of AI, start
// over completely.
- return tryToMakeAllocaBePromotable(AI, TD);
+ return tryToMakeAllocaBePromotable(AI, DL);
}
// If it is safe to turn "load (select c, AI, ptr)" into a select of two
// loads, then we can transform this by rewriting the select.
- if (!isSafeSelectToSpeculate(SI, TD))
+ if (!isSafeSelectToSpeculate(SI, DL))
return false;
InstsToRewrite.insert(SI);
@@ -1288,7 +1288,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) {
// If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
// in the pred blocks, then we can transform this by rewriting the PHI.
- if (!isSafePHIToSpeculate(PN, TD))
+ if (!isSafePHIToSpeculate(PN, DL))
return false;
InstsToRewrite.insert(PN);
@@ -1423,7 +1423,7 @@ bool SROA::performPromotion(Function &F) {
// the entry node
for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?
- if (tryToMakeAllocaBePromotable(AI, TD))
+ if (tryToMakeAllocaBePromotable(AI, DL))
Allocas.push_back(AI);
if (Allocas.empty()) break;
@@ -1499,7 +1499,7 @@ bool SROA::performScalarRepl(Function &F) {
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
// value cannot be decomposed at all.
- uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
+ uint64_t AllocaSize = DL->getTypeAllocSize(AI->getAllocatedType());
// Do not promote [0 x %struct].
if (AllocaSize == 0) continue;
@@ -1523,7 +1523,7 @@ bool SROA::performScalarRepl(Function &F) {
// that we can't just check based on the type: the alloca may be of an i32
// but that has pointer arithmetic to set byte 3 of it or something.
if (AllocaInst *NewAI = ConvertToScalarInfo(
- (unsigned)AllocaSize, *TD, ScalarLoadThreshold).TryConvert(AI)) {
+ (unsigned)AllocaSize, *DL, ScalarLoadThreshold).TryConvert(AI)) {
NewAI->takeName(AI);
AI->eraseFromParent();
++NumConverted;
@@ -1625,7 +1625,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
if (!LI->isSimple())
return MarkUnsafe(Info, User);
Type *LIType = LI->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
LIType, false, Info, LI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1635,7 +1635,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
return MarkUnsafe(Info, User);
Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
SIType, true, Info, SI, true /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
@@ -1684,7 +1684,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
if (!LI->isSimple())
return MarkUnsafe(Info, User);
Type *LIType = LI->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
LIType, false, Info, LI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
@@ -1694,7 +1694,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
return MarkUnsafe(Info, User);
Type *SIType = SI->getOperand(0)->getType();
- isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
+ isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
SIType, true, Info, SI, false /*AllowWholeAccess*/);
Info.hasALoadOrStore = true;
} else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
@@ -1739,7 +1739,7 @@ void SROA::isSafeGEP(GetElementPtrInst *GEPI,
// constant part of the offset.
if (NonConstant)
Indices.pop_back();
- Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+ Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset,
NonConstantIdxSize))
MarkUnsafe(Info, GEPI);
@@ -1798,7 +1798,7 @@ void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
bool AllowWholeAccess) {
// Check if this is a load/store of the entire alloca.
if (Offset == 0 && AllowWholeAccess &&
- MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {
+ MemSize == DL->getTypeAllocSize(Info.AI->getAllocatedType())) {
// This can be safe for MemIntrinsics (where MemOpType is 0) and integer
// loads/stores (which are essentially the same as the MemIntrinsics with
// regard to copying padding between elements). But, if an alloca is
@@ -1835,20 +1835,20 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
Type *EltTy;
uint64_t EltSize;
if (StructType *ST = dyn_cast<StructType>(T)) {
- const StructLayout *Layout = TD->getStructLayout(ST);
+ const StructLayout *Layout = DL->getStructLayout(ST);
unsigned EltIdx = Layout->getElementContainingOffset(Offset);
EltTy = ST->getContainedType(EltIdx);
- EltSize = TD->getTypeAllocSize(EltTy);
+ EltSize = DL->getTypeAllocSize(EltTy);
Offset -= Layout->getElementOffset(EltIdx);
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
EltTy = AT->getElementType();
- EltSize = TD->getTypeAllocSize(EltTy);
+ EltSize = DL->getTypeAllocSize(EltTy);
if (Offset >= AT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
} else if (VectorType *VT = dyn_cast<VectorType>(T)) {
EltTy = VT->getElementType();
- EltSize = TD->getTypeAllocSize(EltTy);
+ EltSize = DL->getTypeAllocSize(EltTy);
if (Offset >= VT->getNumElements() * EltSize)
return false;
Offset %= EltSize;
@@ -1887,7 +1887,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
uint64_t MemSize = Length->getZExtValue();
if (Offset == 0 &&
- MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
+ MemSize == DL->getTypeAllocSize(AI->getAllocatedType()))
RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
// Otherwise the intrinsic can only touch a single element and the
// address operand will be updated, so nothing else needs to be done.
@@ -1923,8 +1923,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
LI->replaceAllUsesWith(Insert);
DeadInsts.push_back(LI);
} else if (LIType->isIntegerTy() &&
- TD->getTypeAllocSize(LIType) ==
- TD->getTypeAllocSize(AI->getAllocatedType())) {
+ DL->getTypeAllocSize(LIType) ==
+ DL->getTypeAllocSize(AI->getAllocatedType())) {
// If this is a load of the entire alloca to an integer, rewrite it.
RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
}
@@ -1950,8 +1950,8 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
}
DeadInsts.push_back(SI);
} else if (SIType->isIntegerTy() &&
- TD->getTypeAllocSize(SIType) ==
- TD->getTypeAllocSize(AI->getAllocatedType())) {
+ DL->getTypeAllocSize(SIType) ==
+ DL->getTypeAllocSize(AI->getAllocatedType())) {
// If this is a store of the entire alloca from an integer, rewrite it.
RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
}
@@ -2013,7 +2013,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
Type *&IdxTy) {
uint64_t Idx = 0;
if (StructType *ST = dyn_cast<StructType>(T)) {
- const StructLayout *Layout = TD->getStructLayout(ST);
+ const StructLayout *Layout = DL->getStructLayout(ST);
Idx = Layout->getElementContainingOffset(Offset);
T = ST->getContainedType(Idx);
Offset -= Layout->getElementOffset(Idx);
@@ -2021,7 +2021,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
return Idx;
} else if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
T = AT->getElementType();
- uint64_t EltSize = TD->getTypeAllocSize(T);
+ uint64_t EltSize = DL->getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
IdxTy = Type::getInt64Ty(T->getContext());
@@ -2029,7 +2029,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
}
VectorType *VT = cast<VectorType>(T);
T = VT->getElementType();
- uint64_t EltSize = TD->getTypeAllocSize(T);
+ uint64_t EltSize = DL->getTypeAllocSize(T);
Idx = Offset / EltSize;
Offset -= Idx * EltSize;
IdxTy = Type::getInt64Ty(T->getContext());
@@ -2050,7 +2050,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
Value* NonConstantIdx = 0;
if (!GEPI->hasAllConstantIndices())
NonConstantIdx = Indices.pop_back_val();
- Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
+ Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
@@ -2121,7 +2121,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
V = Builder.CreateGEP(V, Builder.getInt64(NewOffset));
IdxTy = NewElts[Idx]->getAllocatedType();
- uint64_t EltSize = TD->getTypeAllocSize(IdxTy) - NewOffset;
+ uint64_t EltSize = DL->getTypeAllocSize(IdxTy) - NewOffset;
if (EltSize > Size) {
EltSize = Size;
Size = 0;
@@ -2137,7 +2137,7 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
for (; Idx != NewElts.size() && Size; ++Idx) {
IdxTy = NewElts[Idx]->getAllocatedType();
- uint64_t EltSize = TD->getTypeAllocSize(IdxTy);
+ uint64_t EltSize = DL->getTypeAllocSize(IdxTy);
if (EltSize > Size) {
EltSize = Size;
Size = 0;
@@ -2229,10 +2229,10 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
Type *OtherTy = OtherPtrTy->getElementType();
if (StructType *ST = dyn_cast<StructType>(OtherTy)) {
- EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
+ EltOffset = DL->getStructLayout(ST)->getElementOffset(i);
} else {
Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
- EltOffset = TD->getTypeAllocSize(EltTy)*i;
+ EltOffset = DL->getTypeAllocSize(EltTy)*i;
}
// The alignment of the other pointer is the guaranteed alignment of the
@@ -2273,7 +2273,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Type *ValTy = EltTy->getScalarType();
// Construct an integer with the right value.
- unsigned EltSize = TD->getTypeSizeInBits(ValTy);
+ unsigned EltSize = DL->getTypeSizeInBits(ValTy);
APInt OneVal(EltSize, CI->getZExtValue());
APInt TotalVal(OneVal);
// Set each byte.
@@ -2303,7 +2303,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// this element.
}
- unsigned EltSize = TD->getTypeAllocSize(EltTy);
+ unsigned EltSize = DL->getTypeAllocSize(EltTy);
if (!EltSize)
continue;
@@ -2337,12 +2337,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);
Type *AllocaEltTy = AI->getAllocatedType();
- uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+ uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
IRBuilder<> Builder(SI);
// Handle tail padding by extending the operand
- if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+ if (DL->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
SrcVal = Builder.CreateZExt(SrcVal,
IntegerType::get(SI->getContext(), AllocaSizeBits));
@@ -2352,15 +2352,15 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
- const StructLayout *Layout = TD->getStructLayout(EltSTy);
+ const StructLayout *Layout = DL->getStructLayout(EltSTy);
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Get the number of bits to shift SrcVal to get the value.
Type *FieldTy = EltSTy->getElementType(i);
uint64_t Shift = Layout->getElementOffsetInBits(i);
- if (TD->isBigEndian())
- Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
+ if (DL->isBigEndian())
+ Shift = AllocaSizeBits-Shift-DL->getTypeAllocSizeInBits(FieldTy);
Value *EltVal = SrcVal;
if (Shift) {
@@ -2369,7 +2369,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
// Truncate down to an integer of the right size.
- uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+ uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
@@ -2394,12 +2394,12 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
} else {
ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
Type *ArrayEltTy = ATy->getElementType();
- uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
- uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
+ uint64_t ElementOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
+ uint64_t ElementSizeBits = DL->getTypeSizeInBits(ArrayEltTy);
uint64_t Shift;
- if (TD->isBigEndian())
+ if (DL->isBigEndian())
Shift = AllocaSizeBits-ElementOffset;
else
Shift = 0;
@@ -2433,7 +2433,7 @@ SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
}
new StoreInst(EltVal, DestField, SI);
- if (TD->isBigEndian())
+ if (DL->isBigEndian())
Shift -= ElementOffset;
else
Shift += ElementOffset;
@@ -2451,7 +2451,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
Type *AllocaEltTy = AI->getAllocatedType();
- uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+ uint64_t AllocaSizeBits = DL->getTypeAllocSizeInBits(AllocaEltTy);
DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
<< '\n');
@@ -2461,10 +2461,10 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
const StructLayout *Layout = 0;
uint64_t ArrayEltBitOffset = 0;
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
- Layout = TD->getStructLayout(EltSTy);
+ Layout = DL->getStructLayout(EltSTy);
} else {
Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
- ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
+ ArrayEltBitOffset = DL->getTypeAllocSizeInBits(ArrayEltTy);
}
Value *ResultVal =
@@ -2476,7 +2476,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
Value *SrcField = NewElts[i];
Type *FieldTy =
cast<PointerType>(SrcField->getType())->getElementType();
- uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+ uint64_t FieldSizeBits = DL->getTypeSizeInBits(FieldTy);
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;
@@ -2507,7 +2507,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
else // Array case.
Shift = i*ArrayEltBitOffset;
- if (TD->isBigEndian())
+ if (DL->isBigEndian())
Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
if (Shift) {
@@ -2524,7 +2524,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
}
// Handle tail padding by truncating the result
- if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
+ if (DL->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
LI->replaceAllUsesWith(ResultVal);
@@ -2534,15 +2534,15 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding in between the elements that would be split apart
/// by SROA; return false otherwise.
-static bool HasPadding(Type *Ty, const DataLayout &TD) {
+static bool HasPadding(Type *Ty, const DataLayout &DL) {
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Ty = ATy->getElementType();
- return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
+ return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
}
// SROA currently handles only Arrays and Structs.
StructType *STy = cast<StructType>(Ty);
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned PrevFieldBitOffset = 0;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
@@ -2551,7 +2551,7 @@ static bool HasPadding(Type *Ty, const DataLayout &TD) {
// previous one.
if (i) {
unsigned PrevFieldEnd =
- PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
+ PrevFieldBitOffset+DL.getTypeSizeInBits(STy->getElementType(i-1));
if (PrevFieldEnd < FieldBitOffset)
return true;
}
@@ -2560,7 +2560,7 @@ static bool HasPadding(Type *Ty, const DataLayout &TD) {
// Check for tail padding.
if (unsigned EltCount = STy->getNumElements()) {
unsigned PrevFieldEnd = PrevFieldBitOffset +
- TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
+ DL.getTypeSizeInBits(STy->getElementType(EltCount-1));
if (PrevFieldEnd < SL->getSizeInBits())
return true;
}
@@ -2587,7 +2587,7 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
// types, but may actually be used. In these cases, we refuse to promote the
// struct.
if (Info.isMemCpySrc && Info.isMemCpyDst &&
- HasPadding(AI->getAllocatedType(), *TD))
+ HasPadding(AI->getAllocatedType(), *DL))
return false;
// If the alloca never has an access to just *part* of it, but is accessed
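For reference, the padding checks in this file all reduce to comparing two DataLayout size queries. A minimal sketch of the distinction HasPadding relies on (elementHasTailPadding is an illustrative name, not a function in this patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    // getTypeSizeInBits() is the logical size of a type, while
    // getTypeAllocSizeInBits() rounds up to the type's ABI alignment.
    // When the two differ, copying a whole element also copies padding
    // bytes, which is why SROA refuses to split an alloca that is both
    // a memcpy source and a memcpy destination.
    static bool elementHasTailPadding(const DataLayout &DL, Type *Ty) {
      return DL.getTypeSizeInBits(Ty) != DL.getTypeAllocSizeInBits(Ty);
    }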
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index d105f5e24a..32da80be65 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -205,17 +205,17 @@ namespace {
bool ModuleLevelChanges;
const char *NameSuffix;
ClonedCodeInfo *CodeInfo;
- const DataLayout *TD;
+ const DataLayout *DL;
public:
PruningFunctionCloner(Function *newFunc, const Function *oldFunc,
ValueToValueMapTy &valueMap,
bool moduleLevelChanges,
const char *nameSuffix,
ClonedCodeInfo *codeInfo,
- const DataLayout *td)
+ const DataLayout *DL)
: NewFunc(newFunc), OldFunc(oldFunc),
VMap(valueMap), ModuleLevelChanges(moduleLevelChanges),
- NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) {
+ NameSuffix(nameSuffix), CodeInfo(codeInfo), DL(DL) {
}
/// CloneBlock - The specified block is found to be reachable, clone it and
@@ -272,7 +272,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If we can simplify this instruction to some other value, simply add
// a mapping to that value rather than inserting a new instruction into
// the basic block.
- if (Value *V = SimplifyInstruction(NewInst, TD)) {
+ if (Value *V = SimplifyInstruction(NewInst, DL)) {
// On the off-chance that this simplifies to an instruction in the old
// function, map it back into the new function.
if (Value *MappedV = VMap.lookup(V))
@@ -368,7 +368,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix,
ClonedCodeInfo *CodeInfo,
- const DataLayout *TD,
+ const DataLayout *DL,
Instruction *TheCall) {
assert(NameSuffix && "NameSuffix cannot be null!");
@@ -379,7 +379,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
#endif
PruningFunctionCloner PFC(NewFunc, OldFunc, VMap, ModuleLevelChanges,
- NameSuffix, CodeInfo, TD);
+ NameSuffix, CodeInfo, DL);
// Clone the entry block, and anything recursively reachable from it.
std::vector<const BasicBlock*> CloneWorklist;
@@ -509,7 +509,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// node).
for (unsigned Idx = 0, Size = PHIToResolve.size(); Idx != Size; ++Idx)
if (PHINode *PN = dyn_cast<PHINode>(VMap[PHIToResolve[Idx]]))
- recursivelySimplifyInstruction(PN, TD);
+ recursivelySimplifyInstruction(PN, DL);
// Now that the inlined function body has been fully constructed, go through
// and zap unconditional fall-through branches. This happens all the time when
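A minimal sketch of the SimplifyInstruction idiom the cloner uses above (foldIfTrivial is a hypothetical helper, not part of this patch); note that a null DataLayout is legal here and simply disables the folds that need target size information:

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    // SimplifyInstruction returns an existing simpler value or null;
    // it never mutates or erases the instruction itself, so the caller
    // decides what to do with the redundant instruction.
    static void foldIfTrivial(Instruction *I, const DataLayout *DL) {
      if (Value *V = SimplifyInstruction(I, DL)) {
        I->replaceAllUsesWith(V);
        I->eraseFromParent();
      }
    }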
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index effc08edaa..e24acfffae 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -90,7 +90,7 @@ namespace {
class SimplifyCFGOpt {
const TargetTransformInfo &TTI;
- const DataLayout *const TD;
+ const DataLayout *const DL;
Value *isValueEqualityComparison(TerminatorInst *TI);
BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI,
std::vector<ValueEqualityComparisonCase> &Cases);
@@ -109,8 +109,8 @@ class SimplifyCFGOpt {
bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder);
public:
- SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *TD)
- : TTI(TTI), TD(TD) {}
+ SimplifyCFGOpt(const TargetTransformInfo &TTI, const DataLayout *DL)
+ : TTI(TTI), DL(DL) {}
bool run(BasicBlock *BB);
};
}
@@ -306,15 +306,15 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
/// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr
/// and PointerNullValue. Return NULL if value is not a constant int.
-static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
+static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) {
// Normal constant int.
ConstantInt *CI = dyn_cast<ConstantInt>(V);
- if (CI || !TD || !isa<Constant>(V) || !V->getType()->isPointerTy())
+ if (CI || !DL || !isa<Constant>(V) || !V->getType()->isPointerTy())
return CI;
// This is some kind of pointer constant. Turn it into a pointer-sized
// ConstantInt if possible.
- IntegerType *PtrTy = cast<IntegerType>(TD->getIntPtrType(V->getType()));
+ IntegerType *PtrTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));
// Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
if (isa<ConstantPointerNull>(V))
@@ -340,13 +340,13 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
/// Values vector.
static Value *
GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
- const DataLayout *TD, bool isEQ, unsigned &UsedICmps) {
+ const DataLayout *DL, bool isEQ, unsigned &UsedICmps) {
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return 0;
// If this is an icmp against a constant, handle this as one of the cases.
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
- if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) {
+ if (ConstantInt *C = GetConstantInt(I->getOperand(1), DL)) {
Value *RHSVal;
ConstantInt *RHSC;
@@ -405,11 +405,11 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
unsigned NumValsBeforeLHS = Vals.size();
unsigned UsedICmpsBeforeLHS = UsedICmps;
- if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD,
+ if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, DL,
isEQ, UsedICmps)) {
unsigned NumVals = Vals.size();
unsigned UsedICmpsBeforeRHS = UsedICmps;
- if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
isEQ, UsedICmps)) {
if (LHS == RHS)
return LHS;
@@ -434,7 +434,7 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
if (Extra == 0 || Extra == I->getOperand(0)) {
Value *OldExtra = Extra;
Extra = I->getOperand(0);
- if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, TD,
+ if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
isEQ, UsedICmps))
return RHS;
assert(Vals.size() == NumValsBeforeLHS);
@@ -472,14 +472,14 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI))
if (BI->isConditional() && BI->getCondition()->hasOneUse())
if (ICmpInst *ICI = dyn_cast<ICmpInst>(BI->getCondition()))
- if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), TD))
+ if (ICI->isEquality() && GetConstantInt(ICI->getOperand(1), DL))
CV = ICI->getOperand(0);
// Unwrap any lossless ptrtoint cast.
- if (TD && CV) {
+ if (DL && CV) {
if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV)) {
Value *Ptr = PTII->getPointerOperand();
- if (PTII->getType() == TD->getIntPtrType(Ptr->getType()))
+ if (PTII->getType() == DL->getIntPtrType(Ptr->getType()))
CV = Ptr;
}
}
@@ -504,7 +504,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI,
ICmpInst *ICI = cast<ICmpInst>(BI->getCondition());
BasicBlock *Succ = BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_NE);
Cases.push_back(ValueEqualityComparisonCase(GetConstantInt(ICI->getOperand(1),
- TD),
+ DL),
Succ));
return BI->getSuccessor(ICI->getPredicate() == ICmpInst::ICMP_EQ);
}
@@ -930,8 +930,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
Builder.SetInsertPoint(PTI);
// Convert pointer to int before we switch.
if (CV->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without DataLayout");
- CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
+ assert(DL && "Cannot switch on pointer without DataLayout");
+ CV = Builder.CreatePtrToInt(CV, DL->getIntPtrType(CV->getType()),
"magicptr");
}
@@ -1606,7 +1606,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
/// that is defined in the same block as the branch and if any PHI entries are
/// constants, thread edges corresponding to that entry to be branches to their
/// ultimate destination.
-static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
+static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) {
BasicBlock *BB = BI->getParent();
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
// NOTE: we currently cannot transform this case if the PHI node is used
@@ -1675,7 +1675,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
}
// Check for trivial simplification.
- if (Value *V = SimplifyInstruction(N, TD)) {
+ if (Value *V = SimplifyInstruction(N, DL)) {
TranslateMap[BBI] = V;
delete N; // Instruction folded away, don't need actual inst
} else {
@@ -1696,7 +1696,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
}
// Recurse, simplifying any other constants.
- return FoldCondBranchOnPHI(BI, TD) | true;
+ return FoldCondBranchOnPHI(BI, DL) | true;
}
return false;
@@ -1704,7 +1704,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) {
/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
/// PHI node, see if we can eliminate it.
-static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
+static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
// Ok, this is a two entry PHI node. Check to see if this is a simple "if
// statement", which has a very simple dominance structure. Basically, we
// are trying to find the condition that is being branched on, which
@@ -1738,7 +1738,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) {
for (BasicBlock::iterator II = BB->begin(); isa<PHINode>(II);) {
PHINode *PN = cast<PHINode>(II++);
- if (Value *V = SimplifyInstruction(PN, TD)) {
+ if (Value *V = SimplifyInstruction(PN, DL)) {
PN->replaceAllUsesWith(V);
PN->eraseFromParent();
continue;
@@ -2634,7 +2634,7 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) {
/// the PHI, merging the third icmp into the switch.
static bool TryToSimplifyUncondBranchWithICmpInIt(
ICmpInst *ICI, IRBuilder<> &Builder, const TargetTransformInfo &TTI,
- const DataLayout *TD) {
+ const DataLayout *DL) {
BasicBlock *BB = ICI->getParent();
// If the block has any PHIs in it or the icmp has multiple uses, it is too
@@ -2662,12 +2662,12 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
assert(VVal && "Should have a unique destination value");
ICI->setOperand(0, VVal);
- if (Value *V = SimplifyInstruction(ICI, TD)) {
+ if (Value *V = SimplifyInstruction(ICI, DL)) {
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
}
// BB is now empty, so it is likely to simplify away.
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// Ok, the block is reachable from the default dest. If the constant we're
@@ -2683,7 +2683,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
ICI->replaceAllUsesWith(V);
ICI->eraseFromParent();
// BB is now empty, so it is likely to simplify away.
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// The use of the icmp has to be in the 'end' block, by the only PHI node in
@@ -2739,7 +2739,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
/// SimplifyBranchOnICmpChain - The specified branch is a conditional branch.
/// Check to see if it is branching on an or/and chain of icmp instructions, and
/// fold it into a switch instruction if so.
-static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
+static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL,
IRBuilder<> &Builder) {
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
if (Cond == 0) return false;
@@ -2755,10 +2755,10 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
unsigned UsedICmps = 0;
if (Cond->getOpcode() == Instruction::Or) {
- CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true,
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, true,
UsedICmps);
} else if (Cond->getOpcode() == Instruction::And) {
- CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, false,
+ CompVal = GatherConstantCompares(Cond, Values, ExtraCase, DL, false,
UsedICmps);
TrueWhenEqual = false;
}
@@ -2820,9 +2820,9 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
Builder.SetInsertPoint(BI);
// Convert pointer to int before we switch.
if (CompVal->getType()->isPointerTy()) {
- assert(TD && "Cannot switch on pointer without DataLayout");
+ assert(DL && "Cannot switch on pointer without DataLayout");
CompVal = Builder.CreatePtrToInt(CompVal,
- TD->getIntPtrType(CompVal->getType()),
+ DL->getIntPtrType(CompVal->getType()),
"magicptr");
}
@@ -3453,7 +3453,7 @@ namespace {
ConstantInt *Offset,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
- const DataLayout *TD);
+ const DataLayout *DL);
/// BuildLookup - Build instructions with Builder to retrieve the value at
/// the position given by Index in the lookup table.
@@ -3461,7 +3461,7 @@ namespace {
/// WouldFitInRegister - Return true if a table with TableSize elements of
/// type ElementType would fit in a target-legal register.
- static bool WouldFitInRegister(const DataLayout *TD,
+ static bool WouldFitInRegister(const DataLayout *DL,
uint64_t TableSize,
const Type *ElementType);
@@ -3500,7 +3500,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
ConstantInt *Offset,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
- const DataLayout *TD)
+ const DataLayout *DL)
: SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {
assert(Values.size() && "Can't build lookup table without values!");
assert(TableSize >= Values.size() && "Can't fit values in table!");
@@ -3546,7 +3546,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
}
// If the type is integer and the table fits in a register, build a bitmap.
- if (WouldFitInRegister(TD, TableSize, ValueType)) {
+ if (WouldFitInRegister(DL, TableSize, ValueType)) {
IntegerType *IT = cast<IntegerType>(ValueType);
APInt TableInt(TableSize * IT->getBitWidth(), 0);
for (uint64_t I = TableSize; I > 0; --I) {
@@ -3611,10 +3611,10 @@ Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) {
llvm_unreachable("Unknown lookup table kind!");
}
-bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
+bool SwitchLookupTable::WouldFitInRegister(const DataLayout *DL,
uint64_t TableSize,
const Type *ElementType) {
- if (!TD)
+ if (!DL)
return false;
const IntegerType *IT = dyn_cast<IntegerType>(ElementType);
if (!IT)
@@ -3625,7 +3625,7 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
    // Avoid overflow; fitsInLegalInteger uses unsigned int for the width.
if (TableSize >= UINT_MAX/IT->getBitWidth())
return false;
- return TD->fitsInLegalInteger(TableSize * IT->getBitWidth());
+ return DL->fitsInLegalInteger(TableSize * IT->getBitWidth());
}
/// ShouldBuildLookupTable - Determine whether a lookup table should be built
@@ -3634,7 +3634,7 @@ bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD,
static bool ShouldBuildLookupTable(SwitchInst *SI,
uint64_t TableSize,
const TargetTransformInfo &TTI,
- const DataLayout *TD,
+ const DataLayout *DL,
const SmallDenseMap<PHINode*, Type*>& ResultTypes) {
if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10)
return false; // TableSize overflowed, or mul below might overflow.
@@ -3650,7 +3650,7 @@ static bool ShouldBuildLookupTable(SwitchInst *SI,
// Saturate this flag to false.
AllTablesFitInRegister = AllTablesFitInRegister &&
- SwitchLookupTable::WouldFitInRegister(TD, TableSize, Ty);
+ SwitchLookupTable::WouldFitInRegister(DL, TableSize, Ty);
// If both flags saturate, we're done. NOTE: This *only* works with
// saturating flags, and all flags have to saturate first due to the
@@ -3679,7 +3679,7 @@ static bool ShouldBuildLookupTable(SwitchInst *SI,
static bool SwitchToLookupTable(SwitchInst *SI,
IRBuilder<> &Builder,
const TargetTransformInfo &TTI,
- const DataLayout* TD) {
+ const DataLayout* DL) {
assert(SI->getNumCases() > 1 && "Degenerate switch?");
// Only build lookup table when we have a target that supports it.
@@ -3723,7 +3723,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
typedef SmallVector<std::pair<PHINode*, Constant*>, 4> ResultsTy;
ResultsTy Results;
if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest,
- Results, TD))
+ Results, DL))
return false;
// Append the result from this case to the list for each phi.
@@ -3748,7 +3748,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
// If the table has holes, we need a constant result for the default case.
SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;
if (TableHasHoles && !GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest,
- DefaultResultsList, TD))
+ DefaultResultsList, DL))
return false;
for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) {
@@ -3757,7 +3757,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
DefaultResults[PHI] = Result;
}
- if (!ShouldBuildLookupTable(SI, TableSize, TTI, TD, ResultTypes))
+ if (!ShouldBuildLookupTable(SI, TableSize, TTI, DL, ResultTypes))
return false;
// Create the BB that does the lookups.
@@ -3801,7 +3801,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
PHINode *PHI = PHIs[I];
SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI],
- DefaultResults[PHI], TD);
+ DefaultResults[PHI], DL);
Value *Result = Table.BuildLookup(TableIndex, Builder);
@@ -3842,12 +3842,12 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
// see if that predecessor totally determines the outcome of this switch.
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
Value *Cond = SI->getCondition();
if (SelectInst *Select = dyn_cast<SelectInst>(Cond))
if (SimplifySwitchOnSelect(SI, Select))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// If the block only contains the switch, see if we can fold the block
// away into any preds.
@@ -3857,22 +3857,22 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) {
++BBI;
if (SI == &*BBI)
if (FoldValueComparisonIntoPredecessors(SI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// Try to transform the switch into an icmp and a branch.
if (TurnSwitchRangeIntoICmp(SI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// Remove unreachable cases.
if (EliminateDeadSwitchCases(SI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
if (ForwardSwitchConditionToPHI(SI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
- if (SwitchToLookupTable(SI, Builder, TTI, TD))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (SwitchToLookupTable(SI, Builder, TTI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -3909,7 +3909,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) {
if (SelectInst *SI = dyn_cast<SelectInst>(IBI->getAddress())) {
if (SimplifyIndirectBrOnSelect(IBI, SI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
return Changed;
}
@@ -3933,7 +3933,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
for (++I; isa<DbgInfoIntrinsic>(I); ++I)
;
if (I->isTerminator() &&
- TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, TD))
+ TryToSimplifyUncondBranchWithICmpInIt(ICI, Builder, TTI, DL))
return true;
}
@@ -3942,7 +3942,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){
// predecessor and use logical operations to update the incoming value
// for PHI nodes in common successor.
if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -3957,7 +3957,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// switch.
if (BasicBlock *OnlyPred = BB->getSinglePredecessor())
if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// This block must be empty, except for the setcond inst, if it exists.
// Ignore dbg intrinsics.
@@ -3967,26 +3967,26 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
++I;
if (&*I == BI) {
if (FoldValueComparisonIntoPredecessors(BI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
} else if (&*I == cast<Instruction>(BI->getCondition())){
++I;
// Ignore dbg intrinsics.
while (isa<DbgInfoIntrinsic>(I))
++I;
if (&*I == BI && FoldValueComparisonIntoPredecessors(BI, Builder))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
}
// Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction.
- if (SimplifyBranchOnICmpChain(BI, TD, Builder))
+ if (SimplifyBranchOnICmpChain(BI, DL, Builder))
return true;
// If this basic block is ONLY a compare and a branch, and if a predecessor
// branches to us and one of our successors, fold the comparison into the
// predecessor and use logical operations to pick the right destination.
if (FoldBranchToCommonDest(BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
// We have a conditional branch to two blocks that are only reachable
// from BI. We know that the condbr dominates the two blocks, so see if
@@ -3995,7 +3995,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (BI->getSuccessor(0)->getSinglePredecessor() != 0) {
if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
if (HoistThenElseCodeToIf(BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
} else {
// If Successor #1 has multiple preds, we may be able to conditionally
// execute Successor #0 if it branches to successor #1.
@@ -4003,7 +4003,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (Succ0TI->getNumSuccessors() == 1 &&
Succ0TI->getSuccessor(0) == BI->getSuccessor(1))
if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
} else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
// If Successor #0 has multiple preds, we may be able to conditionally
@@ -4012,22 +4012,22 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (Succ1TI->getNumSuccessors() == 1 &&
Succ1TI->getSuccessor(0) == BI->getSuccessor(0))
if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1)))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
}
// If this is a branch on a phi node in the current block, thread control
// through this block if any PHI node entries are constants.
if (PHINode *PN = dyn_cast<PHINode>(BI->getCondition()))
if (PN->getParent() == BI->getParent())
- if (FoldCondBranchOnPHI(BI, TD))
- return SimplifyCFG(BB, TTI, TD) | true;
+ if (FoldCondBranchOnPHI(BI, DL))
+ return SimplifyCFG(BB, TTI, DL) | true;
// Scan predecessor blocks for conditional branches.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
if (BranchInst *PBI = dyn_cast<BranchInst>((*PI)->getTerminator()))
if (PBI != BI && PBI->isConditional())
if (SimplifyCondBranchToCondBranch(PBI, BI))
- return SimplifyCFG(BB, TTI, TD) | true;
+ return SimplifyCFG(BB, TTI, DL) | true;
return false;
}
@@ -4139,7 +4139,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
// eliminate it, do so now.
if (PHINode *PN = dyn_cast<PHINode>(BB->begin()))
if (PN->getNumIncomingValues() == 2)
- Changed |= FoldTwoEntryPHINode(PN, TD);
+ Changed |= FoldTwoEntryPHINode(PN, DL);
Builder.SetInsertPoint(BB->getTerminator());
if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
@@ -4171,6 +4171,6 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) {
/// of the CFG. It returns true if a modification was made.
///
bool llvm::SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
- const DataLayout *TD) {
- return SimplifyCFGOpt(TTI, TD).run(BB);
+ const DataLayout *DL) {
+ return SimplifyCFGOpt(TTI, DL).run(BB);
}
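The "magicptr" conversion used twice above follows one pattern; a condensed sketch of what DataLayout contributes (switchableValue is an assumed helper name, not code from this patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // A switch needs an integer condition, so a pointer condition is
    // first converted with ptrtoint. DataLayout supplies an integer
    // type exactly as wide as a pointer in the value's address space;
    // without DataLayout the transform must be skipped, hence the
    // asserts in the code above.
    static Value *switchableValue(Value *Ptr, const DataLayout *DL,
                                  IRBuilder<> &B) {
      return B.CreatePtrToInt(Ptr, DL->getIntPtrType(Ptr->getType()),
                              "magicptr");
    }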
diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp
index 0b21d52b94..a75bb94ff2 100644
--- a/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -48,7 +48,7 @@ namespace {
Loop *L;
LoopInfo *LI;
ScalarEvolution *SE;
- const DataLayout *TD; // May be NULL
+ const DataLayout *DL; // May be NULL
SmallVectorImpl<WeakVH> &DeadInsts;
@@ -60,7 +60,7 @@ namespace {
L(Loop),
LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
SE(SE),
- TD(LPM->getAnalysisIfAvailable<DataLayout>()),
+ DL(LPM->getAnalysisIfAvailable<DataLayout>()),
DeadInsts(Dead),
Changed(false) {
assert(LI && "IV simplification requires LoopInfo");
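Since DataLayout is fetched with getAnalysisIfAvailable<> above, it may be null; a sketch of the consuming-pass shape in this era (ExampleConsumer is hypothetical, not part of this patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    // DataLayout was still registered as an immutable pass at this
    // point, so consumers request it optionally and must tolerate its
    // absence on targets that did not provide it.
    struct ExampleConsumer : public FunctionPass {
      static char ID;
      ExampleConsumer() : FunctionPass(ID) {}
      virtual bool runOnFunction(Function &F) {
        const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
        if (!DL)
          return false; // conservatively skip size-dependent rewrites
        return false;   // no rewrites in this sketch
      }
    };
    }
    char ExampleConsumer::ID = 0;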
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 2ffd03668c..126160dbbc 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -43,7 +43,7 @@ namespace {
class LibCallOptimization {
protected:
Function *Caller;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const LibCallSimplifier *LCS;
LLVMContext* Context;
@@ -63,11 +63,11 @@ public:
/// change the calling convention.
virtual bool ignoreCallingConv() { return false; }
- Value *optimizeCall(CallInst *CI, const DataLayout *TD,
+ Value *optimizeCall(CallInst *CI, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const LibCallSimplifier *LCS, IRBuilder<> &B) {
Caller = CI->getParent()->getParent();
- this->TD = TD;
+ this->DL = DL;
this->TLI = TLI;
this->LCS = LCS;
if (CI->getCalledFunction())
@@ -184,8 +184,8 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||
+ FT->getParamType(3) != DL->getIntPtrType(Context))
return 0;
if (isFoldable(3, 2, false)) {
@@ -207,8 +207,8 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||
+ FT->getParamType(3) != DL->getIntPtrType(Context))
return 0;
if (isFoldable(3, 2, false)) {
@@ -230,8 +230,8 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != DL->getIntPtrType(Context) ||
+ FT->getParamType(3) != DL->getIntPtrType(Context))
return 0;
if (isFoldable(3, 2, false)) {
@@ -256,7 +256,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
- FT->getParamType(2) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != DL->getIntPtrType(Context))
return 0;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
@@ -269,7 +269,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying.
if (isFoldable(2, 1, true)) {
- Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
+ Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));
return Ret;
} else {
      // Maybe we can still fold __strcpy_chk to __memcpy_chk.
@@ -277,12 +277,12 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
if (Len == 0) return 0;
      // This optimization requires DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
Value *Ret =
EmitMemCpyChk(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(Context), Len),
- CI->getArgOperand(2), B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(Context), Len),
+ CI->getArgOperand(2), B, DL, TLI);
return Ret;
}
return 0;
@@ -301,12 +301,12 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
- FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
+ FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
return 0;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);
+ Value *StrLen = EmitStrLen(Src, B, DL, TLI);
return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
}
@@ -316,7 +316,7 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying.
if (isFoldable(2, 1, true)) {
- Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6));
+ Value *Ret = EmitStrCpy(Dst, Src, B, DL, TLI, Name.substr(2, 6));
return Ret;
} else {
      // Maybe we can still fold __stpcpy_chk to __memcpy_chk.
@@ -324,14 +324,14 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
if (Len == 0) return 0;
      // This optimization requires DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
Type *PT = FT->getParamType(0);
- Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
+ Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
Value *DstEnd = B.CreateGEP(Dst,
- ConstantInt::get(TD->getIntPtrType(PT),
+ ConstantInt::get(DL->getIntPtrType(PT),
Len - 1));
- if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI))
+ if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, DL, TLI))
return 0;
return DstEnd;
}
@@ -351,12 +351,12 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
!FT->getParamType(2)->isIntegerTy() ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(3) != DL->getIntPtrType(Context))
return 0;
if (isFoldable(3, 2, false)) {
Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), B, TD, TLI,
+ CI->getArgOperand(2), B, DL, TLI,
Name.substr(2, 7));
return Ret;
}
@@ -392,7 +392,7 @@ struct StrCatOpt : public LibCallOptimization {
return Dst;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
return emitStrLenMemCpy(Src, Dst, Len, B);
}
@@ -401,7 +401,7 @@ struct StrCatOpt : public LibCallOptimization {
IRBuilder<> &B) {
// We need to find the end of the destination string. That's where the
// memory is to be moved to. We just generate a call to strlen.
- Value *DstLen = EmitStrLen(Dst, B, TD, TLI);
+ Value *DstLen = EmitStrLen(Dst, B, DL, TLI);
if (!DstLen)
return 0;
@@ -413,7 +413,7 @@ struct StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len + 1), 1);
return Dst;
}
};
@@ -451,7 +451,7 @@ struct StrNCatOpt : public StrCatOpt {
if (SrcLen == 0 || Len == 0) return Dst;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
// We don't optimize this case
if (Len < SrcLen) return 0;
@@ -479,23 +479,23 @@ struct StrChrOpt : public LibCallOptimization {
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
if (CharC == 0) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
uint64_t Len = GetStringLength(SrcStr);
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
- B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len),
+ B, DL, TLI);
}
// Otherwise, the character is a constant, see if the first argument is
// a string literal. If so, we can constant fold.
StringRef Str;
if (!getConstantStringInfo(SrcStr, Str)) {
- if (TD && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
- return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, TD, TLI), "strchr");
+ if (DL && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
+ return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr");
return 0;
}
@@ -531,8 +531,8 @@ struct StrRChrOpt : public LibCallOptimization {
StringRef Str;
if (!getConstantStringInfo(SrcStr, Str)) {
// strrchr(s, 0) -> strchr(s, 0)
- if (TD && CharC->isZero())
- return EmitStrChr(SrcStr, '\0', B, TD, TLI);
+ if (DL && CharC->isZero())
+ return EmitStrChr(SrcStr, '\0', B, DL, TLI);
return 0;
}
@@ -581,11 +581,11 @@ struct StrCmpOpt : public LibCallOptimization {
uint64_t Len2 = GetStringLength(Str2P);
if (Len1 && Len2) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
return EmitMemCmp(Str1P, Str2P,
- ConstantInt::get(TD->getIntPtrType(*Context),
- std::min(Len1, Len2)), B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(*Context),
+ std::min(Len1, Len2)), B, DL, TLI);
}
return 0;
@@ -617,8 +617,8 @@ struct StrNCmpOpt : public LibCallOptimization {
if (Length == 0) // strncmp(x,y,0) -> 0
return ConstantInt::get(CI->getType(), 0);
- if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
- return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI);
+ if (DL && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1)
+ return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, DL, TLI);
StringRef Str1, Str2;
bool HasStr1 = getConstantStringInfo(Str1P, Str1);
@@ -657,7 +657,7 @@ struct StrCpyOpt : public LibCallOptimization {
return Src;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
@@ -666,7 +666,7 @@ struct StrCpyOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len), 1);
return Dst;
}
};
@@ -682,11 +682,11 @@ struct StpCpyOpt: public LibCallOptimization {
return 0;
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
- Value *StrLen = EmitStrLen(Src, B, TD, TLI);
+ Value *StrLen = EmitStrLen(Src, B, DL, TLI);
return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
}
@@ -695,9 +695,9 @@ struct StpCpyOpt: public LibCallOptimization {
if (Len == 0) return 0;
Type *PT = FT->getParamType(0);
- Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
+ Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
Value *DstEnd = B.CreateGEP(Dst,
- ConstantInt::get(TD->getIntPtrType(PT),
+ ConstantInt::get(DL->getIntPtrType(PT),
Len - 1));
// We have enough information to now generate the memcpy call to do the
@@ -740,7 +740,7 @@ struct StrNCpyOpt : public LibCallOptimization {
if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
// Let strncpy handle the zero padding
if (Len > SrcLen+1) return 0;
@@ -748,7 +748,7 @@ struct StrNCpyOpt : public LibCallOptimization {
Type *PT = FT->getParamType(0);
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(PT), Len), 1);
+ ConstantInt::get(DL->getIntPtrType(PT), Len), 1);
return Dst;
}
@@ -805,8 +805,8 @@ struct StrPBrkOpt : public LibCallOptimization {
}
// strpbrk(s, "a") -> strchr(s, 'a')
- if (TD && HasS2 && S2.size() == 1)
- return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI);
+ if (DL && HasS2 && S2.size() == 1)
+ return EmitStrChr(CI->getArgOperand(0), S2[0], B, DL, TLI);
return 0;
}
@@ -885,8 +885,8 @@ struct StrCSpnOpt : public LibCallOptimization {
}
// strcspn(s, "") -> strlen(s)
- if (TD && HasS2 && S2.empty())
- return EmitStrLen(CI->getArgOperand(0), B, TD, TLI);
+ if (DL && HasS2 && S2.empty())
+ return EmitStrLen(CI->getArgOperand(0), B, DL, TLI);
return 0;
}
@@ -906,12 +906,12 @@ struct StrStrOpt : public LibCallOptimization {
return B.CreateBitCast(CI->getArgOperand(0), CI->getType());
// fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0
- if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
- Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI);
+ if (DL && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
+ Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, DL, TLI);
if (!StrLen)
return 0;
Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
- StrLen, B, TD, TLI);
+ StrLen, B, DL, TLI);
if (!StrNCmp)
return 0;
for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
@@ -949,7 +949,7 @@ struct StrStrOpt : public LibCallOptimization {
// fold strstr(x, "y") -> strchr(x, 'y').
if (HasStr2 && ToFindStr.size() == 1) {
- Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI);
+ Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, DL, TLI);
return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
}
return 0;
@@ -1011,13 +1011,13 @@ struct MemCmpOpt : public LibCallOptimization {
struct MemCpyOpt : public LibCallOptimization {
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
+ FT->getParamType(2) != DL->getIntPtrType(*Context))
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
@@ -1030,13 +1030,13 @@ struct MemCpyOpt : public LibCallOptimization {
struct MemMoveOpt : public LibCallOptimization {
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
+ FT->getParamType(2) != DL->getIntPtrType(*Context))
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
@@ -1049,13 +1049,13 @@ struct MemMoveOpt : public LibCallOptimization {
struct MemSetOpt : public LibCallOptimization {
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
+ FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
@@ -1632,7 +1632,7 @@ struct PrintFOpt : public LibCallOptimization {
// printf("x") -> putchar('x'), even for '%'.
if (FormatStr.size() == 1) {
- Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, TD, TLI);
+ Value *Res = EmitPutChar(B.getInt32(FormatStr[0]), B, DL, TLI);
if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
@@ -1644,7 +1644,7 @@ struct PrintFOpt : public LibCallOptimization {
// pass to be run after this pass, to merge duplicate strings.
FormatStr = FormatStr.drop_back();
Value *GV = B.CreateGlobalString(FormatStr, "str");
- Value *NewCI = EmitPutS(GV, B, TD, TLI);
+ Value *NewCI = EmitPutS(GV, B, DL, TLI);
return (CI->use_empty() || !NewCI) ?
NewCI :
ConstantInt::get(CI->getType(), FormatStr.size()+1);
@@ -1654,7 +1654,7 @@ struct PrintFOpt : public LibCallOptimization {
// printf("%c", chr) --> putchar(chr)
if (FormatStr == "%c" && CI->getNumArgOperands() > 1 &&
CI->getArgOperand(1)->getType()->isIntegerTy()) {
- Value *Res = EmitPutChar(CI->getArgOperand(1), B, TD, TLI);
+ Value *Res = EmitPutChar(CI->getArgOperand(1), B, DL, TLI);
if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
@@ -1663,7 +1663,7 @@ struct PrintFOpt : public LibCallOptimization {
// printf("%s\n", str) --> puts(str)
if (FormatStr == "%s\n" && CI->getNumArgOperands() > 1 &&
CI->getArgOperand(1)->getType()->isPointerTy()) {
- return EmitPutS(CI->getArgOperand(1), B, TD, TLI);
+ return EmitPutS(CI->getArgOperand(1), B, DL, TLI);
}
return 0;
}
@@ -1712,11 +1712,11 @@ struct SPrintFOpt : public LibCallOptimization {
return 0; // we found a format specifier, bail out.
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
+ ConstantInt::get(DL->getIntPtrType(*Context), // Copy the
FormatStr.size() + 1), 1); // nul byte.
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1742,12 +1742,12 @@ struct SPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 's') {
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
- Value *Len = EmitStrLen(CI->getArgOperand(2), B, TD, TLI);
+ Value *Len = EmitStrLen(CI->getArgOperand(2), B, DL, TLI);
if (!Len)
return 0;
Value *IncLen = B.CreateAdd(Len,
@@ -1812,12 +1812,12 @@ struct FPrintFOpt : public LibCallOptimization {
return 0; // We found a format specifier.
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
return EmitFWrite(CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context),
+ ConstantInt::get(DL->getIntPtrType(*Context),
FormatStr.size()),
- CI->getArgOperand(0), B, TD, TLI);
+ CI->getArgOperand(0), B, DL, TLI);
}
// The remaining optimizations require the format string to be "%s" or "%c"
@@ -1830,14 +1830,14 @@ struct FPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 'c') {
// fprintf(F, "%c", chr) --> fputc(chr, F)
if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
- return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
+ return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
}
if (FormatStr[1] == 's') {
// fprintf(F, "%s", str) --> fputs(str, F)
if (!CI->getArgOperand(2)->getType()->isPointerTy())
return 0;
- return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, TD, TLI);
+ return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
}
return 0;
}
@@ -1897,7 +1897,7 @@ struct FWriteOpt : public LibCallOptimization {
    // This optimization is only valid if the return value is unused.
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
- Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, TD, TLI);
+ Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, DL, TLI);
return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;
}
@@ -1911,7 +1911,7 @@ struct FPutsOpt : public LibCallOptimization {
(void) ER.callOptimizer(Callee, CI, B);
// These optimizations require DataLayout.
- if (!TD) return 0;
+ if (!DL) return 0;
    // Require two pointers. Also, we can't optimize if the return value is used.
FunctionType *FT = Callee->getFunctionType();
@@ -1925,8 +1925,8 @@ struct FPutsOpt : public LibCallOptimization {
if (!Len) return 0;
// Known to have no uses (see above).
return EmitFWrite(CI->getArgOperand(0),
- ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
- CI->getArgOperand(1), B, TD, TLI);
+ ConstantInt::get(DL->getIntPtrType(*Context), Len-1),
+ CI->getArgOperand(1), B, DL, TLI);
}
};
@@ -1946,7 +1946,7 @@ struct PutsOpt : public LibCallOptimization {
if (Str.empty() && CI->use_empty()) {
// puts("") -> putchar('\n')
- Value *Res = EmitPutChar(B.getInt32('\n'), B, TD, TLI);
+ Value *Res = EmitPutChar(B.getInt32('\n'), B, DL, TLI);
if (CI->use_empty() || !Res) return Res;
return B.CreateIntCast(Res, CI->getType(), true);
}
@@ -1960,7 +1960,7 @@ struct PutsOpt : public LibCallOptimization {
namespace llvm {
class LibCallSimplifierImpl {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const LibCallSimplifier *LCS;
bool UnsafeFPShrink;
@@ -1970,11 +1970,11 @@ class LibCallSimplifierImpl {
PowOpt Pow;
Exp2Opt Exp2;
public:
- LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI,
+ LibCallSimplifierImpl(const DataLayout *DL, const TargetLibraryInfo *TLI,
const LibCallSimplifier *LCS,
bool UnsafeFPShrink = false)
: Cos(UnsafeFPShrink), Pow(UnsafeFPShrink), Exp2(UnsafeFPShrink) {
- this->TD = TD;
+ this->DL = DL;
this->TLI = TLI;
this->LCS = LCS;
this->UnsafeFPShrink = UnsafeFPShrink;
@@ -2233,15 +2233,15 @@ Value *LibCallSimplifierImpl::optimizeCall(CallInst *CI) {
LibCallOptimization *LCO = lookupOptimization(CI);
if (LCO) {
IRBuilder<> Builder(CI);
- return LCO->optimizeCall(CI, TD, TLI, LCS, Builder);
+ return LCO->optimizeCall(CI, DL, TLI, LCS, Builder);
}
return 0;
}
-LibCallSimplifier::LibCallSimplifier(const DataLayout *TD,
+LibCallSimplifier::LibCallSimplifier(const DataLayout *DL,
const TargetLibraryInfo *TLI,
bool UnsafeFPShrink) {
- Impl = new LibCallSimplifierImpl(TD, TLI, this, UnsafeFPShrink);
+ Impl = new LibCallSimplifierImpl(DL, TLI, this, UnsafeFPShrink);
}
LibCallSimplifier::~LibCallSimplifier() {
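Most of the folds above share one shape: prove a constant length, then emit a fixed-size intrinsic whose length constant is sized by DataLayout. A condensed restatement of the strcpy fold (foldStrCpy is an illustrative free function; the real logic lives in StrCpyOpt above):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // strcpy(dst, src) -> llvm.memcpy(dst, src, Len, 1), where Len is
    // the proven source length including the nul byte. Without
    // DataLayout there is no intptr type to size the length constant,
    // so the fold is skipped.
    static Value *foldStrCpy(Value *Dst, Value *Src, uint64_t Len,
                             const DataLayout *DL, IRBuilder<> &B) {
      if (!DL)
        return 0;
      B.CreateMemCpy(Dst, Src,
                     ConstantInt::get(DL->getIntPtrType(B.getContext()), Len),
                     1);
      return Dst;
    }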
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index 6ef2020130..865694611f 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -201,7 +201,7 @@ namespace {
AA = &P->getAnalysis<AliasAnalysis>();
DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &P->getAnalysis<ScalarEvolution>();
- TD = P->getAnalysisIfAvailable<DataLayout>();
+ DL = P->getAnalysisIfAvailable<DataLayout>();
TTI = IgnoreTargetInfo ? 0 : &P->getAnalysis<TargetTransformInfo>();
}
@@ -214,7 +214,7 @@ namespace {
AliasAnalysis *AA;
DominatorTree *DT;
ScalarEvolution *SE;
- DataLayout *TD;
+ DataLayout *DL;
const TargetTransformInfo *TTI;
// FIXME: const correct?
@@ -436,7 +436,7 @@ namespace {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
- TD = getAnalysisIfAvailable<DataLayout>();
+ DL = getAnalysisIfAvailable<DataLayout>();
TTI = IgnoreTargetInfo ? 0 : &getAnalysis<TargetTransformInfo>();
return vectorizeBB(BB);
@@ -634,11 +634,11 @@ namespace {
int64_t Offset = IntOff->getSExtValue();
Type *VTy = IPtr->getType()->getPointerElementType();
- int64_t VTyTSS = (int64_t) TD->getTypeStoreSize(VTy);
+ int64_t VTyTSS = (int64_t) DL->getTypeStoreSize(VTy);
Type *VTy2 = JPtr->getType()->getPointerElementType();
if (VTy != VTy2 && Offset < 0) {
- int64_t VTy2TSS = (int64_t) TD->getTypeStoreSize(VTy2);
+ int64_t VTy2TSS = (int64_t) DL->getTypeStoreSize(VTy2);
OffsetInElmts = Offset/VTy2TSS;
return (abs64(Offset) % VTy2TSS) == 0;
}
@@ -821,7 +821,7 @@ namespace {
      // It is important to clean up here so that future iterations of this
// function have less work to do.
- (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());
+ (void) SimplifyInstructionsInBlock(&BB, DL, AA->getTargetLibraryInfo());
return true;
}
@@ -876,7 +876,7 @@ namespace {
}
// We can't vectorize memory operations without target data
- if (TD == 0 && IsSimpleLoadStore)
+ if (DL == 0 && IsSimpleLoadStore)
return false;
Type *T1, *T2;
@@ -913,7 +913,7 @@ namespace {
if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())
return false;
- if ((!Config.VectorizePointers || TD == 0) &&
+ if ((!Config.VectorizePointers || DL == 0) &&
(T1->getScalarType()->isPointerTy() ||
T2->getScalarType()->isPointerTy()))
return false;
@@ -977,7 +977,7 @@ namespace {
// with the lower offset has an alignment suitable for the
// vector type.
- unsigned VecAlignment = TD->getPrefTypeAlignment(VType);
+ unsigned VecAlignment = DL->getPrefTypeAlignment(VType);
if (BottomAlignment < VecAlignment)
return false;
}
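Finally, the alignment guard in the last BBVectorize hunk reduces to one comparison against DataLayout's preferred alignment; a sketch (alignmentAllowsVector is an assumed name, not code from this patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"
    using namespace llvm;

    // Fusing two scalar memory ops into one vector op is only safe when
    // the lower-addressed access guarantees at least the vector type's
    // preferred alignment; without target data the pass skips memory
    // operations entirely, as the earlier checks show.
    static bool alignmentAllowsVector(const DataLayout *DL, Type *VType,
                                      unsigned BottomAlignment) {
      if (!DL)
        return false;
      return BottomAlignment >= DL->getPrefTypeAlignment(VType);
    }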