author     Craig Topper <craig.topper@gmail.com>  2014-04-25 05:29:35 +0000
committer  Craig Topper <craig.topper@gmail.com>  2014-04-25 05:29:35 +0000
commit     8d7221ccf5012e7ece93aa976bf2603789b31441 (patch)
tree       7f678906b5759ba6b3775b14f085ecc6c90e7ec6
parent     39087bfbf0b33995b337b676e3c715b3e31a6c1a (diff)
[C++] Use 'nullptr'. Transforms edition.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207196 91177308-0d34-0410-b5e6-96231b3b80d8
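
Every hunk below applies the same mechanical rewrite: the integer literal 0 (or the NULL macro) becomes C++11's nullptr wherever a pointer value is meant, and pointer-against-zero comparisons become direct boolean tests. A minimal sketch of the pattern, using a hypothetical findValue helper that is not part of the LLVM tree:

// Hypothetical helper, for illustration only -- not from this commit.
static int *findValue(int *Arr, int N, int Key) {
  for (int i = 0; i < N; ++i)
    if (Arr[i] == Key)
      return &Arr[i];
  return nullptr;   // was: return 0;
}

int main() {
  int Data[] = {1, 2, 3};
  int *P = findValue(Data, 3, 42);
  if (!P)           // was: if (P == 0) or if (P == NULL)
    return 1;       // not found
  return 0;
}

Unlike 0, nullptr has its own type (std::nullptr_t) that converts only to pointer types, so it can never be silently picked up by an integer overload; that is also why default arguments such as DominatorTree *DT = nullptr in the headers below are safer than the old = 0 spelling.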
-rw-r--r--  include/llvm/Transforms/Instrumentation.h | 4
-rw-r--r--  include/llvm/Transforms/Utils/CodeExtractor.h | 2
-rw-r--r--  include/llvm/Transforms/Utils/LoopUtils.h | 7
-rw-r--r--  include/llvm/Transforms/Utils/PromoteMemToReg.h | 2
-rw-r--r--  include/llvm/Transforms/Utils/SimplifyIndVar.h | 4
-rw-r--r--  include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h | 2
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp | 14
-rw-r--r--  lib/Transforms/IPO/ConstantMerge.cpp | 6
-rw-r--r--  lib/Transforms/IPO/DeadArgumentElimination.cpp | 4
-rw-r--r--  lib/Transforms/IPO/ExtractGV.cpp | 4
-rw-r--r--  lib/Transforms/IPO/FunctionAttrs.cpp | 12
-rw-r--r--  lib/Transforms/IPO/GlobalDCE.cpp | 4
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp | 71
-rw-r--r--  lib/Transforms/IPO/IPConstantPropagation.cpp | 8
-rw-r--r--  lib/Transforms/IPO/InlineAlways.cpp | 5
-rw-r--r--  lib/Transforms/IPO/InlineSimple.cpp | 4
-rw-r--r--  lib/Transforms/IPO/Inliner.cpp | 8
-rw-r--r--  lib/Transforms/IPO/Internalize.cpp | 4
-rw-r--r--  lib/Transforms/IPO/LoopExtractor.cpp | 4
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp | 6
-rw-r--r--  lib/Transforms/IPO/PartialInlining.cpp | 8
-rw-r--r--  lib/Transforms/IPO/PassManagerBuilder.cpp | 8
-rw-r--r--  lib/Transforms/IPO/PruneEH.cpp | 2
-rw-r--r--  lib/Transforms/IPO/StripSymbols.cpp | 2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp | 82
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 138
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp | 65
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp | 78
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp | 158
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 40
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp | 81
-rw-r--r--  lib/Transforms/InstCombine/InstCombinePHI.cpp | 78
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSelect.cpp | 62
-rw-r--r--  lib/Transforms/InstCombine/InstCombineShifts.cpp | 20
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 54
-rw-r--r--  lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 43
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp | 118
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp | 30
-rw-r--r--  lib/Transforms/Instrumentation/BoundsChecking.cpp | 6
-rw-r--r--  lib/Transforms/Instrumentation/DataFlowSanitizer.cpp | 23
-rw-r--r--  lib/Transforms/Instrumentation/DebugIR.cpp | 26
-rw-r--r--  lib/Transforms/Instrumentation/DebugIR.h | 2
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp | 58
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 8
-rw-r--r--  lib/Transforms/ObjCARC/DependencyAnalysis.cpp | 2
-rw-r--r--  lib/Transforms/ObjCARC/ObjCARCAPElim.cpp | 6
-rw-r--r--  lib/Transforms/ObjCARC/ObjCARCContract.cpp | 6
-rw-r--r--  lib/Transforms/ObjCARC/ObjCARCOpts.cpp | 34
-rw-r--r--  lib/Transforms/Scalar/ConstantHoisting.cpp | 3
-rw-r--r--  lib/Transforms/Scalar/ConstantProp.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/CorrelatedValuePropagation.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/DeadStoreElimination.cpp | 30
-rw-r--r--  lib/Transforms/Scalar/EarlyCSE.cpp | 20
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp | 66
-rw-r--r--  lib/Transforms/Scalar/GlobalMerge.cpp | 5
-rw-r--r--  lib/Transforms/Scalar/IndVarSimplify.cpp | 77
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp | 36
-rw-r--r--  lib/Transforms/Scalar/LICM.cpp | 8
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 56
-rw-r--r--  lib/Transforms/Scalar/LoopInstSimplify.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/LoopRerollPass.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopRotation.cpp | 6
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 109
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp | 38
-rw-r--r--  lib/Transforms/Scalar/LowerAtomic.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 38
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp | 40
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp | 28
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp | 103
-rw-r--r--  lib/Transforms/Scalar/SampleProfile.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 73
-rw-r--r--  lib/Transforms/Scalar/Scalarizer.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/SimplifyCFGPass.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/Sink.cpp | 10
-rw-r--r--  lib/Transforms/Scalar/StructurizeCFG.cpp | 17
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp | 36
-rw-r--r--  lib/Transforms/Utils/AddDiscriminators.cpp | 2
-rw-r--r--  lib/Transforms/Utils/BasicBlockUtils.cpp | 52
-rw-r--r--  lib/Transforms/Utils/BreakCriticalEdges.cpp | 12
-rw-r--r--  lib/Transforms/Utils/BuildLibCalls.cpp | 28
-rw-r--r--  lib/Transforms/Utils/BypassSlowDivision.cpp | 4
-rw-r--r--  lib/Transforms/Utils/CloneFunction.cpp | 8
-rw-r--r--  lib/Transforms/Utils/CloneModule.cpp | 6
-rw-r--r--  lib/Transforms/Utils/CmpInstAnalysis.cpp | 2
-rw-r--r--  lib/Transforms/Utils/CodeExtractor.cpp | 18
-rw-r--r--  lib/Transforms/Utils/DemoteRegToStack.cpp | 14
-rw-r--r--  lib/Transforms/Utils/FlattenCFG.cpp | 11
-rw-r--r--  lib/Transforms/Utils/GlobalStatus.cpp | 4
-rw-r--r--  lib/Transforms/Utils/InlineFunction.cpp | 26
-rw-r--r--  lib/Transforms/Utils/Local.cpp | 14
-rw-r--r--  lib/Transforms/Utils/LoopSimplify.cpp | 31
-rw-r--r--  lib/Transforms/Utils/LoopUnroll.cpp | 10
-rw-r--r--  lib/Transforms/Utils/LoopUnrollRuntime.cpp | 6
-rw-r--r--  lib/Transforms/Utils/LowerSwitch.cpp | 5
-rw-r--r--  lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 21
-rw-r--r--  lib/Transforms/Utils/SSAUpdater.cpp | 16
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp | 147
-rw-r--r--  lib/Transforms/Utils/SimplifyIndVar.cpp | 26
-rw-r--r--  lib/Transforms/Utils/SimplifyInstructions.cpp | 4
-rw-r--r--  lib/Transforms/Utils/SimplifyLibCalls.cpp | 300
-rw-r--r--  lib/Transforms/Utils/SpecialCaseList.cpp | 6
-rw-r--r--  lib/Transforms/Utils/UnifyFunctionExitNodes.cpp | 8
-rw-r--r--  lib/Transforms/Utils/ValueMapper.cpp | 20
-rw-r--r--  lib/Transforms/Vectorize/BBVectorize.cpp | 54
-rw-r--r--  lib/Transforms/Vectorize/LoopVectorize.cpp | 103
-rw-r--r--  lib/Transforms/Vectorize/SLPVectorizer.cpp | 43
106 files changed, 1639 insertions(+), 1610 deletions(-)
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h
index b527546e05..61d5c26c50 100644
--- a/include/llvm/Transforms/Instrumentation.h
+++ b/include/llvm/Transforms/Instrumentation.h
@@ -79,8 +79,8 @@ FunctionPass *createThreadSanitizerPass(StringRef BlacklistFile = StringRef());
// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
ModulePass *createDataFlowSanitizerPass(StringRef ABIListFile = StringRef(),
- void *(*getArgTLS)() = 0,
- void *(*getRetValTLS)() = 0);
+ void *(*getArgTLS)() = nullptr,
+ void *(*getRetValTLS)() = nullptr);
#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
inline ModulePass *createDataFlowSanitizerPassForJIT(StringRef ABIListFile =
diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h
index 1122678035..f273e59934 100644
--- a/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -66,7 +66,7 @@ namespace llvm {
/// dominates the rest, prepare a code extractor object for pulling this
/// sequence out into its new function. When a DominatorTree is also given,
/// extra checking and transformations are enabled.
- CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = 0,
+ CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
bool AggregateArgs = false);
/// \brief Create a code extractor for a loop body.
diff --git a/include/llvm/Transforms/Utils/LoopUtils.h b/include/llvm/Transforms/Utils/LoopUtils.h
index 64e18ca1b6..92eab97a7e 100644
--- a/include/llvm/Transforms/Utils/LoopUtils.h
+++ b/include/llvm/Transforms/Utils/LoopUtils.h
@@ -32,7 +32,7 @@ BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
/// will optionally update \c AliasAnalysis and \c ScalarEvolution analyses if
/// passed into it.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
- AliasAnalysis *AA = 0, ScalarEvolution *SE = 0);
+ AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr);
/// \brief Put loop into LCSSA form.
///
@@ -45,7 +45,7 @@ bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = 0);
+bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = nullptr);
/// \brief Put a loop nest into LCSSA form.
///
@@ -56,7 +56,8 @@ bool formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE = 0);
/// If ScalarEvolution is passed in, it will be preserved.
///
/// Returns true if any modifications are made to the loop.
-bool formLCSSARecursively(Loop &L, DominatorTree &DT, ScalarEvolution *SE = 0);
+bool formLCSSARecursively(Loop &L, DominatorTree &DT,
+ ScalarEvolution *SE = nullptr);
}
diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 22f46e5fc9..c83fedb0e2 100644
--- a/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -41,7 +41,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
/// If AST is specified, the specified tracker is updated to reflect changes
/// made to the IR.
void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
- AliasSetTracker *AST = 0);
+ AliasSetTracker *AST = nullptr);
} // End llvm namespace
diff --git a/include/llvm/Transforms/Utils/SimplifyIndVar.h b/include/llvm/Transforms/Utils/SimplifyIndVar.h
index dedeca3b30..d6dcd6dd9f 100644
--- a/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -38,7 +38,7 @@ protected:
virtual void anchor();
public:
- IVVisitor(): DT(NULL), ShouldSplitOverflowIntrinsics(false) {}
+ IVVisitor(): DT(nullptr), ShouldSplitOverflowIntrinsics(false) {}
virtual ~IVVisitor() {}
const DominatorTree *getDomTree() const { return DT; }
@@ -57,7 +57,7 @@ public:
/// simplifyUsersOfIV - Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, LPPassManager *LPM,
- SmallVectorImpl<WeakVH> &Dead, IVVisitor *V = NULL);
+ SmallVectorImpl<WeakVH> &Dead, IVVisitor *V = nullptr);
/// SimplifyLoopIVs - Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
diff --git a/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
index 79a70cf7a6..0458028e1f 100644
--- a/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
+++ b/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -27,7 +27,7 @@ struct UnifyFunctionExitNodes : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
UnifyFunctionExitNodes() : FunctionPass(ID),
- ReturnBlock(0), UnwindBlock(0) {
+ ReturnBlock(nullptr), UnwindBlock(nullptr) {
initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
}
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 94b44654d9..377fa153a2 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -124,14 +124,14 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
Function *F = CGN->getFunction();
// Make sure that it is local to this module.
- if (!F || !F->hasLocalLinkage()) return 0;
+ if (!F || !F->hasLocalLinkage()) return nullptr;
// First check: see if there are any pointer arguments! If not, quick exit.
SmallVector<Argument*, 16> PointerArgs;
for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
if (I->getType()->isPointerTy())
PointerArgs.push_back(I);
- if (PointerArgs.empty()) return 0;
+ if (PointerArgs.empty()) return nullptr;
// Second check: make sure that all callers are direct callers. We can't
// transform functions that have indirect callers. Also see if the function
@@ -140,7 +140,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
for (Use &U : F->uses()) {
CallSite CS(U.getUser());
// Must be a direct call.
- if (CS.getInstruction() == 0 || !CS.isCallee(&U)) return 0;
+ if (CS.getInstruction() == nullptr || !CS.isCallee(&U)) return nullptr;
if (CS.getInstruction()->getParent()->getParent() == F)
isSelfRecursive = true;
@@ -208,7 +208,7 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
// No promotable pointer arguments.
if (ArgsToPromote.empty() && ByValArgsToTransform.empty())
- return 0;
+ return nullptr;
return DoPromotion(F, ArgsToPromote, ByValArgsToTransform);
}
@@ -661,7 +661,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = {
- ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
+ ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
Value *Idx = GetElementPtrInst::Create(*AI, Idxs,
@@ -789,10 +789,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
// Just add all the struct element types.
Type *AgTy = cast<PointerType>(I->getType())->getElementType();
- Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
+ Value *TheAlloca = new AllocaInst(AgTy, nullptr, "", InsertPt);
StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = {
- ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), 0 };
+ ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr };
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Idxs[1] = ConstantInt::get(Type::getInt32Ty(F->getContext()), i);
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index badac5b2d8..23be0819e6 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -67,7 +67,7 @@ ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); }
/// Find values that are marked as llvm.used.
static void FindUsedValues(GlobalVariable *LLVMUsed,
SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
- if (LLVMUsed == 0) return;
+ if (!LLVMUsed) return;
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
for (unsigned i = 0, e = Inits->getNumOperands(); i != e; ++i) {
@@ -104,7 +104,7 @@ unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const {
bool ConstantMerge::runOnModule(Module &M) {
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
// Find all the globals that are marked "used". These cannot be merged.
SmallPtrSet<const GlobalValue*, 8> UsedGlobals;
@@ -162,7 +162,7 @@ bool ConstantMerge::runOnModule(Module &M) {
// If this is the first constant we find or if the old one is local,
// replace with the current one. If the current is externally visible
// it cannot be replace, but can be the canonical constant we merge with.
- if (Slot == 0 || IsBetterCanonical(*GV, *Slot))
+ if (!Slot || IsBetterCanonical(*GV, *Slot))
Slot = GV;
}
diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp
index 4a3a0cd729..d0a42664a6 100644
--- a/lib/Transforms/IPO/DeadArgumentElimination.cpp
+++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp
@@ -765,7 +765,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// Find out the new return value.
Type *RetTy = FTy->getReturnType();
- Type *NRetTy = NULL;
+ Type *NRetTy = nullptr;
unsigned RetCount = NumRetVals(F);
// -1 means unused, other numbers are the new index
@@ -1051,7 +1051,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
Value *RetVal;
if (NFTy->getReturnType()->isVoidTy()) {
- RetVal = 0;
+ RetVal = nullptr;
} else {
assert (RetTy->isStructTy());
// The original return value was a struct, insert
diff --git a/lib/Transforms/IPO/ExtractGV.cpp b/lib/Transforms/IPO/ExtractGV.cpp
index 4211f12238..408713a2c2 100644
--- a/lib/Transforms/IPO/ExtractGV.cpp
+++ b/lib/Transforms/IPO/ExtractGV.cpp
@@ -95,7 +95,7 @@ namespace {
makeVisible(*I, Delete);
if (Delete)
- I->setInitializer(0);
+ I->setInitializer(nullptr);
}
// Visit the Functions.
@@ -134,7 +134,7 @@ namespace {
} else {
Declaration =
new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage,
- 0, CurI->getName());
+ nullptr, CurI->getName());
}
CurI->replaceAllUsesWith(Declaration);
diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp
index ba6182daf3..1819fe1f82 100644
--- a/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -47,7 +47,7 @@ STATISTIC(NumAnnotated, "Number of attributes added to library functions");
namespace {
struct FunctionAttrs : public CallGraphSCCPass {
static char ID; // Pass identification, replacement for typeid
- FunctionAttrs() : CallGraphSCCPass(ID), AA(0) {
+ FunctionAttrs() : CallGraphSCCPass(ID), AA(nullptr) {
initializeFunctionAttrsPass(*PassRegistry::getPassRegistry());
}
@@ -161,7 +161,7 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0)
+ if (!F)
// External node - may write memory. Just give up.
return false;
@@ -320,7 +320,7 @@ namespace {
ArgumentGraphNode SyntheticRoot;
public:
- ArgumentGraph() { SyntheticRoot.Definition = 0; }
+ ArgumentGraph() { SyntheticRoot.Definition = nullptr; }
typedef SmallVectorImpl<ArgumentGraphNode*>::iterator iterator;
@@ -522,7 +522,7 @@ bool FunctionAttrs::AddArgumentAttrs(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0)
+ if (!F)
// External node - only a problem for arguments that we pass to it.
continue;
@@ -776,7 +776,7 @@ bool FunctionAttrs::AddNoAliasAttrs(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0)
+ if (!F)
// External node - skip it;
return false;
@@ -1669,7 +1669,7 @@ bool FunctionAttrs::annotateLibraryCalls(const CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F != 0 && F->isDeclaration())
+ if (F && F->isDeclaration())
MadeChange |= inferPrototypeAttributes(*F);
}
diff --git a/lib/Transforms/IPO/GlobalDCE.cpp b/lib/Transforms/IPO/GlobalDCE.cpp
index 37910d179a..0cf03a54c2 100644
--- a/lib/Transforms/IPO/GlobalDCE.cpp
+++ b/lib/Transforms/IPO/GlobalDCE.cpp
@@ -100,7 +100,7 @@ bool GlobalDCE::runOnModule(Module &M) {
I != E; ++I)
if (!AliveGlobals.count(I)) {
DeadGlobalVars.push_back(I); // Keep track of dead globals
- I->setInitializer(0);
+ I->setInitializer(nullptr);
}
// The second pass drops the bodies of functions which are dead...
@@ -118,7 +118,7 @@ bool GlobalDCE::runOnModule(Module &M) {
++I)
if (!AliveGlobals.count(I)) {
DeadAliases.push_back(I);
- I->setAliasee(0);
+ I->setAliasee(nullptr);
}
if (!DeadFunctions.empty()) {
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 3846339b18..cc7e2abd80 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -295,7 +295,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
Changed = true;
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
if (CE->getOpcode() == Instruction::GetElementPtr) {
- Constant *SubInit = 0;
+ Constant *SubInit = nullptr;
if (Init)
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
@@ -303,7 +303,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
CE->getType()->isPointerTy()) ||
CE->getOpcode() == Instruction::AddrSpaceCast) {
// Pointer cast, delete any stores and memsets to the global.
- Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);
+ Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
}
if (CE->use_empty()) {
@@ -314,7 +314,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
// Do not transform "gepinst (gep constexpr (GV))" here, because forming
// "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
// and will invalidate our notion of what Init is.
- Constant *SubInit = 0;
+ Constant *SubInit = nullptr;
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
ConstantExpr *CE =
dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
@@ -371,7 +371,7 @@ static bool isSafeSROAElementUse(Value *V) {
// Otherwise, it must be a GEP.
GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
- if (GEPI == 0) return false;
+ if (!GEPI) return false;
if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
!cast<Constant>(GEPI->getOperand(1))->isNullValue())
@@ -471,7 +471,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
- return 0;
+ return nullptr;
assert(GV->hasLocalLinkage() && !GV->isConstant());
Constant *Init = GV->getInitializer();
@@ -515,7 +515,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
NumElements = cast<VectorType>(STy)->getNumElements();
if (NumElements > 16 && GV->hasNUsesOrMore(16))
- return 0; // It's not worth it.
+ return nullptr; // It's not worth it.
NewGlobals.reserve(NumElements);
uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
@@ -542,7 +542,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
}
if (NewGlobals.empty())
- return 0;
+ return nullptr;
DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);
@@ -604,7 +604,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
if (FirstGlobal == i) ++FirstGlobal;
}
- return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
+ return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
@@ -786,7 +786,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
Changed |= CleanupPointerRootUsers(GV, TLI);
} else {
Changed = true;
- CleanupConstantGlobalUsers(GV, 0, DL, TLI);
+ CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
}
if (GV->use_empty()) {
DEBUG(dbgs() << " *** GLOBAL NOW DEAD!\n");
@@ -848,7 +848,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// If there are bitcast users of the malloc (which is typical, usually we have
// a malloc + bitcast) then replace them with uses of the new global. Update
// other users to use the global as well.
- BitCastInst *TheBC = 0;
+ BitCastInst *TheBC = nullptr;
while (!CI->use_empty()) {
Instruction *User = cast<Instruction>(CI->user_back());
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
@@ -859,7 +859,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
BCI->setOperand(0, NewGV);
}
} else {
- if (TheBC == 0)
+ if (!TheBC)
TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
User->replaceUsesOfWith(CI, TheBC);
}
@@ -1307,7 +1307,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
Type *IntPtrTy = DL->getIntPtrType(CI->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
- NElems, 0,
+ NElems, nullptr,
CI->getName() + ".f" + Twine(FieldNo));
FieldMallocs.push_back(NMI);
new StoreInst(NMI, NGV, CI);
@@ -1540,7 +1540,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
AllocSize, NumElements,
- 0, CI->getName());
+ nullptr, CI->getName());
Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
CI->replaceAllUsesWith(Cast);
CI->eraseFromParent();
@@ -1755,7 +1755,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
->getEntryBlock().begin());
Type *ElemTy = GV->getType()->getElementType();
// FIXME: Pass Global's alignment when globals have alignment
- AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
+ AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr,
+ GV->getName(), &FirstI);
if (!isa<UndefValue>(GV->getInitializer()))
new StoreInst(GV->getInitializer(), Alloca, &FirstI);
@@ -1966,11 +1967,11 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {
/// initializers have an init priority of 65535.
GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
- if (GV == 0) return 0;
+ if (!GV) return nullptr;
// Verify that the initializer is simple enough for us to handle. We are
// only allowed to optimize the initializer if it is unique.
- if (!GV->hasUniqueInitializer()) return 0;
+ if (!GV->hasUniqueInitializer()) return nullptr;
if (isa<ConstantAggregateZero>(GV->getInitializer()))
return GV;
@@ -1985,12 +1986,12 @@ GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
// Must have a function or null ptr.
if (!isa<Function>(CS->getOperand(1)))
- return 0;
+ return nullptr;
// Init priority must be standard.
ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
if (CI->getZExtValue() != 65535)
- return 0;
+ return nullptr;
}
return GV;
@@ -2018,7 +2019,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
// If we made a change, reassemble the initializer list.
Constant *CSVals[2];
CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
- CSVals[1] = 0;
+ CSVals[1] = nullptr;
StructType *StructTy =
cast<StructType>(GCL->getType()->getElementType()->getArrayElementType());
@@ -2068,7 +2069,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
if (Ctors.size())
return NGV;
else
- return 0;
+ return nullptr;
}
@@ -2368,7 +2369,7 @@ Constant *Evaluator::ComputeLoadResult(Constant *P) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
if (GV->hasDefinitiveInitializer())
return GV->getInitializer();
- return 0;
+ return nullptr;
}
// Handle a constantexpr getelementptr.
@@ -2380,7 +2381,7 @@ Constant *Evaluator::ComputeLoadResult(Constant *P) {
return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
}
- return 0; // don't know how to evaluate.
+ return nullptr; // don't know how to evaluate.
}
/// EvaluateBlock - Evaluate all instructions in block BB, returning true if
@@ -2390,7 +2391,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
BasicBlock *&NextBB) {
// This is the main evaluation loop.
while (1) {
- Constant *InstResult = 0;
+ Constant *InstResult = nullptr;
DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
@@ -2516,7 +2517,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
"folding: " << *Ptr << "\n");
}
InstResult = ComputeLoadResult(Ptr);
- if (InstResult == 0) {
+ if (!InstResult) {
DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load."
"\n");
return false; // Could not evaluate load.
@@ -2634,7 +2635,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
return false;
}
- Constant *RetVal = 0;
+ Constant *RetVal = nullptr;
// Execute the call, if successful, use the return value.
ValueStack.push_back(make_unique<DenseMap<Value *, Constant *>>());
if (!EvaluateFunction(Callee, RetVal, Formals)) {
@@ -2644,7 +2645,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
ValueStack.pop_back();
InstResult = RetVal;
- if (InstResult != NULL) {
+ if (InstResult) {
DEBUG(dbgs() << "Successfully evaluated function. Result: " <<
InstResult << "\n\n");
} else {
@@ -2676,7 +2677,7 @@ bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
else
return false; // Cannot determine.
} else if (isa<ReturnInst>(CurInst)) {
- NextBB = 0;
+ NextBB = nullptr;
} else {
// invoke, unwind, resume, unreachable.
DEBUG(dbgs() << "Can not handle terminator.");
@@ -2741,13 +2742,13 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
BasicBlock::iterator CurInst = CurBB->begin();
while (1) {
- BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings.
+ BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
if (!EvaluateBlock(CurInst, NextBB))
return false;
- if (NextBB == 0) {
+ if (!NextBB) {
// Successfully running until there's no next block means that we found
// the return. Fill it the return value and pop the call stack.
ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
@@ -2766,7 +2767,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
// Okay, we have never been in this block before. Check to see if there
// are any PHI nodes. If so, evaluate them with information about where
// we came from.
- PHINode *PN = 0;
+ PHINode *PN = nullptr;
for (CurInst = NextBB->begin();
(PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
@@ -2816,7 +2817,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
Function *F = Ctors[i];
// Found a null terminator in the middle of the list, prune off the rest of
// the list.
- if (F == 0) {
+ if (!F) {
if (i != Ctors.size()-1) {
Ctors.resize(i+1);
MadeChange = true;
@@ -3040,12 +3041,12 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::cxa_atexit))
- return 0;
+ return nullptr;
Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
if (!Fn)
- return 0;
+ return nullptr;
FunctionType *FTy = Fn->getFunctionType();
@@ -3056,7 +3057,7 @@ static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
!FTy->getParamType(0)->isPointerTy() ||
!FTy->getParamType(1)->isPointerTy() ||
!FTy->getParamType(2)->isPointerTy())
- return 0;
+ return nullptr;
return Fn;
}
@@ -3158,7 +3159,7 @@ bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// Try to find the llvm.globalctors list.
diff --git a/lib/Transforms/IPO/IPConstantPropagation.cpp b/lib/Transforms/IPO/IPConstantPropagation.cpp
index aaaab5e45c..af541d1552 100644
--- a/lib/Transforms/IPO/IPConstantPropagation.cpp
+++ b/lib/Transforms/IPO/IPConstantPropagation.cpp
@@ -113,7 +113,7 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
continue;
Constant *C = dyn_cast<Constant>(*AI);
- if (C && ArgumentConstants[i].first == 0) {
+ if (C && ArgumentConstants[i].first == nullptr) {
ArgumentConstants[i].first = C; // First constant seen.
} else if (C && ArgumentConstants[i].first == C) {
// Still the constant value we think it is.
@@ -140,7 +140,7 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
continue;
Value *V = ArgumentConstants[i].first;
- if (V == 0) V = UndefValue::get(AI->getType());
+ if (!V) V = UndefValue::get(AI->getType());
AI->replaceAllUsesWith(V);
++NumArgumentsProped;
MadeChange = true;
@@ -210,7 +210,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
}
// Different or no known return value? Don't propagate this return
// value.
- RetVals[i] = 0;
+ RetVals[i] = nullptr;
// All values non-constant? Stop looking.
if (++NumNonConstant == RetVals.size())
return false;
@@ -236,7 +236,7 @@ bool IPCP::PropagateConstantReturn(Function &F) {
MadeChange = true;
- if (STy == 0) {
+ if (!STy) {
Value* New = RetVals[0];
if (Argument *A = dyn_cast<Argument>(New))
// Was an argument returned? Then find the corresponding argument in
diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp
index 73416722be..c5d9546cce 100644
--- a/lib/Transforms/IPO/InlineAlways.cpp
+++ b/lib/Transforms/IPO/InlineAlways.cpp
@@ -37,12 +37,13 @@ class AlwaysInliner : public Inliner {
public:
// Use extremely low threshold.
- AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/ true), ICA(0) {
+ AlwaysInliner() : Inliner(ID, -2000000000, /*InsertLifetime*/ true),
+ ICA(nullptr) {
initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
AlwaysInliner(bool InsertLifetime)
- : Inliner(ID, -2000000000, InsertLifetime), ICA(0) {
+ : Inliner(ID, -2000000000, InsertLifetime), ICA(nullptr) {
initializeAlwaysInlinerPass(*PassRegistry::getPassRegistry());
}
diff --git a/lib/Transforms/IPO/InlineSimple.cpp b/lib/Transforms/IPO/InlineSimple.cpp
index 27c9f52f9e..d189756032 100644
--- a/lib/Transforms/IPO/InlineSimple.cpp
+++ b/lib/Transforms/IPO/InlineSimple.cpp
@@ -38,12 +38,12 @@ class SimpleInliner : public Inliner {
InlineCostAnalysis *ICA;
public:
- SimpleInliner() : Inliner(ID), ICA(0) {
+ SimpleInliner() : Inliner(ID), ICA(nullptr) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
SimpleInliner(int Threshold)
- : Inliner(ID, Threshold, /*InsertLifetime*/ true), ICA(0) {
+ : Inliner(ID, Threshold, /*InsertLifetime*/ true), ICA(nullptr) {
initializeSimpleInlinerPass(*PassRegistry::getPassRegistry());
}
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index 03b5e4e5e8..e6748c4698 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -185,7 +185,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
// canonicalized to be an allocation *of* an array), or allocations whose
// type is not itself an array (because we're afraid of pessimizing SRoA).
ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
- if (ATy == 0 || AI->isArrayAllocation())
+ if (!ATy || AI->isArrayAllocation())
continue;
// Get the list of all available allocas for this array type.
@@ -241,7 +241,7 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
AI->eraseFromParent();
MergedAwayAlloca = true;
++NumMergedAllocas;
- IFI.StaticAllocas[AllocaNo] = 0;
+ IFI.StaticAllocas[AllocaNo] = nullptr;
break;
}
@@ -412,7 +412,7 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
SmallPtrSet<Function*, 8> SCCFunctions;
@@ -501,7 +501,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
++NumCallsDeleted;
} else {
// We can only inline direct calls to non-declarations.
- if (Callee == 0 || Callee->isDeclaration()) continue;
+ if (!Callee || Callee->isDeclaration()) continue;
// If this call site was obtained by inlining another function, verify
// that the include path for the function did not include the callee
diff --git a/lib/Transforms/IPO/Internalize.cpp b/lib/Transforms/IPO/Internalize.cpp
index 2ca0bdbdb8..cdb85fd525 100644
--- a/lib/Transforms/IPO/Internalize.cpp
+++ b/lib/Transforms/IPO/Internalize.cpp
@@ -132,8 +132,8 @@ static bool shouldInternalize(const GlobalValue &GV,
bool InternalizePass::runOnModule(Module &M) {
CallGraphWrapperPass *CGPass = getAnalysisIfAvailable<CallGraphWrapperPass>();
- CallGraph *CG = CGPass ? &CGPass->getCallGraph() : 0;
- CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;
+ CallGraph *CG = CGPass ? &CGPass->getCallGraph() : nullptr;
+ CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : nullptr;
bool Changed = false;
SmallPtrSet<GlobalValue *, 8> Used;
diff --git a/lib/Transforms/IPO/LoopExtractor.cpp b/lib/Transforms/IPO/LoopExtractor.cpp
index c55fabceac..20414aa05b 100644
--- a/lib/Transforms/IPO/LoopExtractor.cpp
+++ b/lib/Transforms/IPO/LoopExtractor.cpp
@@ -137,7 +137,7 @@ bool LoopExtractor::runOnLoop(Loop *L, LPPassManager &LPM) {
if (NumLoops == 0) return Changed;
--NumLoops;
CodeExtractor Extractor(DT, *L);
- if (Extractor.extractCodeRegion() != 0) {
+ if (Extractor.extractCodeRegion() != nullptr) {
Changed = true;
// After extraction, the loop is replaced by a function call, so
// we shouldn't try to run any more loop passes on it.
@@ -242,7 +242,7 @@ void BlockExtractorPass::SplitLandingPadPreds(Function *F) {
if (!Split) continue;
SmallVector<BasicBlock*, 2> NewBBs;
- SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", 0, NewBBs);
+ SplitLandingPadPredecessors(LPad, Parent, ".1", ".2", nullptr, NewBBs);
}
}
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 3471d8f1f3..a1764ca33c 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -121,12 +121,12 @@ public:
void release() {
assert(Func &&
"Attempted to release function twice, or release empty/tombstone!");
- Func = NULL;
+ Func = nullptr;
}
private:
explicit ComparableFunction(unsigned Hash)
- : Func(NULL), Hash(Hash), DL(NULL) {}
+ : Func(nullptr), Hash(Hash), DL(nullptr) {}
AssertingVH<Function> Func;
unsigned Hash;
@@ -684,7 +684,7 @@ ModulePass *llvm::createMergeFunctionsPass() {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
diff --git a/lib/Transforms/IPO/PartialInlining.cpp b/lib/Transforms/IPO/PartialInlining.cpp
index f35f491b7f..76d6dfa8e8 100644
--- a/lib/Transforms/IPO/PartialInlining.cpp
+++ b/lib/Transforms/IPO/PartialInlining.cpp
@@ -53,10 +53,10 @@ Function* PartialInliner::unswitchFunction(Function* F) {
BasicBlock* entryBlock = F->begin();
BranchInst *BR = dyn_cast<BranchInst>(entryBlock->getTerminator());
if (!BR || BR->isUnconditional())
- return 0;
+ return nullptr;
- BasicBlock* returnBlock = 0;
- BasicBlock* nonReturnBlock = 0;
+ BasicBlock* returnBlock = nullptr;
+ BasicBlock* nonReturnBlock = nullptr;
unsigned returnCount = 0;
for (succ_iterator SI = succ_begin(entryBlock), SE = succ_end(entryBlock);
SI != SE; ++SI)
@@ -67,7 +67,7 @@ Function* PartialInliner::unswitchFunction(Function* F) {
nonReturnBlock = *SI;
if (returnCount != 1)
- return 0;
+ return nullptr;
// Clone the function, so that we can hack away on it.
ValueToValueMapTy VMap;
diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp
index 052d24f58a..01ab31deca 100644
--- a/lib/Transforms/IPO/PassManagerBuilder.cpp
+++ b/lib/Transforms/IPO/PassManagerBuilder.cpp
@@ -56,8 +56,8 @@ RunLoopRerolling("reroll-loops", cl::Hidden,
PassManagerBuilder::PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
- LibraryInfo = 0;
- Inliner = 0;
+ LibraryInfo = nullptr;
+ Inliner = nullptr;
DisableTailCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
@@ -129,7 +129,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
if (OptLevel == 0) {
if (Inliner) {
MPM.add(Inliner);
- Inliner = 0;
+ Inliner = nullptr;
}
// FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC
@@ -165,7 +165,7 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) {
MPM.add(createPruneEHPass()); // Remove dead EH info
if (Inliner) {
MPM.add(Inliner);
- Inliner = 0;
+ Inliner = nullptr;
}
if (!DisableUnitAtATime)
MPM.add(createFunctionAttrsPass()); // Set readonly/readnone attrs
diff --git a/lib/Transforms/IPO/PruneEH.cpp b/lib/Transforms/IPO/PruneEH.cpp
index 5f89065ea7..b2c4a099b0 100644
--- a/lib/Transforms/IPO/PruneEH.cpp
+++ b/lib/Transforms/IPO/PruneEH.cpp
@@ -86,7 +86,7 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) {
for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end();
(!SCCMightUnwind || !SCCMightReturn) && I != E; ++I) {
Function *F = (*I)->getFunction();
- if (F == 0) {
+ if (!F) {
SCCMightUnwind = true;
SCCMightReturn = true;
} else if (F->isDeclaration() || F->mayBeOverridden()) {
diff --git a/lib/Transforms/IPO/StripSymbols.cpp b/lib/Transforms/IPO/StripSymbols.cpp
index 6d0be8fac2..1abbccc0fc 100644
--- a/lib/Transforms/IPO/StripSymbols.cpp
+++ b/lib/Transforms/IPO/StripSymbols.cpp
@@ -192,7 +192,7 @@ static void StripTypeNames(Module &M, bool PreserveDbgInfo) {
/// Find values that are marked as llvm.used.
static void findUsedValues(GlobalVariable *LLVMUsed,
SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
- if (LLVMUsed == 0) return;
+ if (!LLVMUsed) return;
UsedValues.insert(LLVMUsed);
ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 7bd75d81f2..e461145d6b 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -114,12 +114,12 @@ namespace {
///
class FAddend {
public:
- FAddend() { Val = 0; }
+ FAddend() { Val = nullptr; }
Value *getSymVal (void) const { return Val; }
const FAddendCoef &getCoef(void) const { return Coeff; }
- bool isConstant() const { return Val == 0; }
+ bool isConstant() const { return Val == nullptr; }
bool isZero() const { return Coeff.isZero(); }
void set(short Coefficient, Value *V) { Coeff.set(Coefficient), Val = V; }
@@ -156,7 +156,7 @@ namespace {
///
class FAddCombine {
public:
- FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(0) {}
+ FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {}
Value *simplify(Instruction *FAdd);
private:
@@ -350,8 +350,8 @@ Value *FAddendCoef::getValue(Type *Ty) const {
//
unsigned FAddend::drillValueDownOneStep
(Value *Val, FAddend &Addend0, FAddend &Addend1) {
- Instruction *I = 0;
- if (Val == 0 || !(I = dyn_cast<Instruction>(Val)))
+ Instruction *I = nullptr;
+ if (!Val || !(I = dyn_cast<Instruction>(Val)))
return 0;
unsigned Opcode = I->getOpcode();
@@ -361,16 +361,16 @@ unsigned FAddend::drillValueDownOneStep
Value *Opnd0 = I->getOperand(0);
Value *Opnd1 = I->getOperand(1);
if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
- Opnd0 = 0;
+ Opnd0 = nullptr;
if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
- Opnd1 = 0;
+ Opnd1 = nullptr;
if (Opnd0) {
if (!C0)
Addend0.set(1, Opnd0);
else
- Addend0.set(C0, 0);
+ Addend0.set(C0, nullptr);
}
if (Opnd1) {
@@ -378,7 +378,7 @@ unsigned FAddend::drillValueDownOneStep
if (!C1)
Addend.set(1, Opnd1);
else
- Addend.set(C1, 0);
+ Addend.set(C1, nullptr);
if (Opcode == Instruction::FSub)
Addend.negate();
}
@@ -387,7 +387,7 @@ unsigned FAddend::drillValueDownOneStep
return Opnd0 && Opnd1 ? 2 : 1;
// Both operands are zero. Weird!
- Addend0.set(APFloat(C0->getValueAPF().getSemantics()), 0);
+ Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
return 1;
}
@@ -445,13 +445,13 @@ Value *FAddCombine::performFactorization(Instruction *I) {
Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));
if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
- return 0;
+ return nullptr;
bool isMpy = false;
if (I0->getOpcode() == Instruction::FMul)
isMpy = true;
else if (I0->getOpcode() != Instruction::FDiv)
- return 0;
+ return nullptr;
Value *Opnd0_0 = I0->getOperand(0);
Value *Opnd0_1 = I0->getOperand(1);
@@ -463,8 +463,8 @@ Value *FAddCombine::performFactorization(Instruction *I) {
// (x*y) +/- (x*z) x y z
// (y/x) +/- (z/x) x y z
//
- Value *Factor = 0;
- Value *AddSub0 = 0, *AddSub1 = 0;
+ Value *Factor = nullptr;
+ Value *AddSub0 = nullptr, *AddSub1 = nullptr;
if (isMpy) {
if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
@@ -483,7 +483,7 @@ Value *FAddCombine::performFactorization(Instruction *I) {
}
if (!Factor)
- return 0;
+ return nullptr;
FastMathFlags Flags;
Flags.setUnsafeAlgebra();
@@ -497,7 +497,7 @@ Value *FAddCombine::performFactorization(Instruction *I) {
if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
const APFloat &F = CFP->getValueAPF();
if (!F.isNormal())
- return 0;
+ return nullptr;
} else if (Instruction *II = dyn_cast<Instruction>(NewAddSub))
II->setFastMathFlags(Flags);
@@ -519,7 +519,7 @@ Value *FAddCombine::simplify(Instruction *I) {
// Currently we are not able to handle vector type.
if (I->getType()->isVectorTy())
- return 0;
+ return nullptr;
assert((I->getOpcode() == Instruction::FAdd ||
I->getOpcode() == Instruction::FSub) && "Expect add/sub");
@@ -570,7 +570,7 @@ Value *FAddCombine::simplify(Instruction *I) {
// been optimized into "I = Y - X" in the previous steps.
//
const FAddendCoef &CE = Opnd0.getCoef();
- return CE.isOne() ? Opnd0.getSymVal() : 0;
+ return CE.isOne() ? Opnd0.getSymVal() : nullptr;
}
// step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
@@ -616,7 +616,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
// constant close to supper-expr(s) will potentially reveal some optimization
// opportunities in super-expr(s).
//
- const FAddend *ConstAdd = 0;
+ const FAddend *ConstAdd = nullptr;
// Simplified addends are placed <SimpVect>.
AddendVect SimpVect;
@@ -649,7 +649,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
if (T && T->getSymVal() == Val) {
// Set null such that next iteration of the outer loop will not process
// this addend again.
- Addends[SameSymIdx] = 0;
+ Addends[SameSymIdx] = nullptr;
SimpVect.push_back(T);
}
}
@@ -663,7 +663,7 @@ Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
// Pop all addends being folded and push the resulting folded addend.
SimpVect.resize(StartIdx);
- if (Val != 0) {
+ if (Val) {
if (!R.isZero()) {
SimpVect.push_back(&R);
}
@@ -700,7 +700,7 @@ Value *FAddCombine::createNaryFAdd
//
unsigned InstrNeeded = calcInstrNumber(Opnds);
if (InstrNeeded > InstrQuota)
- return 0;
+ return nullptr;
initCreateInstNum();
@@ -712,7 +712,7 @@ Value *FAddCombine::createNaryFAdd
// N-ary addition has at most two instructions, and we don't need to worry
// about tree-height when constructing the N-ary addition.
- Value *LastVal = 0;
+ Value *LastVal = nullptr;
bool LastValNeedNeg = false;
// Iterate the addends, creating fadd/fsub using adjacent two addends.
@@ -872,10 +872,10 @@ Value *FAddCombine::createAddendVal
//
static inline Value *dyn_castFoldableMul(Value *V, Constant *&CST) {
if (!V->hasOneUse() || !V->getType()->isIntOrIntVectorTy())
- return 0;
+ return nullptr;
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return 0;
+ if (!I) return nullptr;
if (I->getOpcode() == Instruction::Mul)
if ((CST = dyn_cast<Constant>(I->getOperand(1))))
@@ -886,7 +886,7 @@ static inline Value *dyn_castFoldableMul(Value *V, Constant *&CST) {
CST = ConstantExpr::getShl(ConstantInt::get(V->getType(), 1), CST);
return I->getOperand(0);
}
- return 0;
+ return nullptr;
}
@@ -944,7 +944,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (ZI->getSrcTy()->isIntegerTy(1))
return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
- Value *XorLHS = 0; ConstantInt *XorRHS = 0;
+ Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
const APInt &RHSVal = CI->getValue();
@@ -1176,7 +1176,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// Check for (x & y) + (x ^ y)
{
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
(match(LHS, m_And(m_Specific(A), m_Specific(B))) ||
match(LHS, m_And(m_Specific(B), m_Specific(A)))))
@@ -1188,7 +1188,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
return BinaryOperator::CreateOr(A, B);
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
@@ -1268,7 +1268,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
if (match(LHS, m_Select(m_Value(C1), m_Value(A1), m_Value(B1))) &&
match(RHS, m_Select(m_Value(C2), m_Value(A2), m_Value(B2)))) {
if (C1 == C2) {
- Constant *Z1=0, *Z2=0;
+ Constant *Z1=nullptr, *Z2=nullptr;
Value *A, *B, *C=C1;
if (match(A1, m_AnyZero()) && match(B2, m_AnyZero())) {
Z1 = dyn_cast<Constant>(A1); A = A2;
@@ -1292,7 +1292,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
return ReplaceInstUsesWith(I, V);
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
@@ -1307,7 +1307,7 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
// this.
bool Swapped = false;
- GEPOperator *GEP1 = 0, *GEP2 = 0;
+ GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;
// For now we require one side to be the base pointer "A" or a constant
// GEP derived from it.
@@ -1345,9 +1345,9 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
// Avoid duplicating the arithmetic if GEP2 has non-constant indices and
// multiple users.
- if (GEP1 == 0 ||
- (GEP2 != 0 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
- return 0;
+ if (!GEP1 ||
+ (GEP2 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
+ return nullptr;
// Emit the offset of the GEP and an intptr_t.
Value *Result = EmitGEPOffset(GEP1);
@@ -1395,7 +1395,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
if (Constant *C = dyn_cast<Constant>(Op0)) {
// C - ~X == X + (1+C)
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op1, m_Not(m_Value(X))))
return BinaryOperator::CreateAdd(X, AddOne(C));
@@ -1453,9 +1453,9 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
}
if (Op1->hasOneUse()) {
- Value *X = 0, *Y = 0, *Z = 0;
- Constant *C = 0;
- Constant *CI = 0;
+ Value *X = nullptr, *Y = nullptr, *Z = nullptr;
+ Constant *C = nullptr;
+ Constant *CI = nullptr;
// (X - (Y - Z)) --> (X + (Z - Y)).
if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
@@ -1534,7 +1534,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Res);
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
@@ -1576,5 +1576,5 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
return ReplaceInstUsesWith(I, V);
}
- return 0;
+ return nullptr;
}
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index a8e2aaaa81..8c5da90ffe 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -52,7 +52,7 @@ static inline Value *dyn_castNotVal(Value *V) {
// Constants can be considered to be not'ed values...
if (ConstantInt *C = dyn_cast<ConstantInt>(V))
return ConstantInt::get(C->getType(), ~C->getValue());
- return 0;
+ return nullptr;
}
/// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
@@ -125,7 +125,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
ConstantInt *AndRHS,
BinaryOperator &TheAnd) {
Value *X = Op->getOperand(0);
- Constant *Together = 0;
+ Constant *Together = nullptr;
if (!Op->isShift())
Together = ConstantExpr::getAnd(AndRHS, OpRHS);
@@ -252,7 +252,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op,
}
break;
}
- return 0;
+ return nullptr;
}
/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
@@ -334,12 +334,12 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
Instruction &I) {
Instruction *LHSI = dyn_cast<Instruction>(LHS);
if (!LHSI || LHSI->getNumOperands() != 2 ||
- !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
+ !isa<ConstantInt>(LHSI->getOperand(1))) return nullptr;
ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
switch (LHSI->getOpcode()) {
- default: return 0;
+ default: return nullptr;
case Instruction::And:
if (ConstantExpr::getAnd(N, Mask) == Mask) {
// If the AndRHS is a power of two minus one (0+1+), this is simple.
@@ -359,7 +359,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
break;
}
}
- return 0;
+ return nullptr;
case Instruction::Or:
case Instruction::Xor:
// If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
@@ -367,7 +367,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
&& ConstantExpr::getAnd(N, Mask)->isNullValue())
break;
- return 0;
+ return nullptr;
}
if (isSub)
@@ -420,12 +420,12 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
ConstantInt *BCst = dyn_cast<ConstantInt>(B);
ConstantInt *CCst = dyn_cast<ConstantInt>(C);
bool icmp_eq = (SCC == ICmpInst::ICMP_EQ);
- bool icmp_abit = (ACst != 0 && !ACst->isZero() &&
+ bool icmp_abit = (ACst && !ACst->isZero() &&
ACst->getValue().isPowerOf2());
- bool icmp_bbit = (BCst != 0 && !BCst->isZero() &&
+ bool icmp_bbit = (BCst && !BCst->isZero() &&
BCst->getValue().isPowerOf2());
unsigned result = 0;
- if (CCst != 0 && CCst->isZero()) {
+ if (CCst && CCst->isZero()) {
// if C is zero, then both A and B qualify as mask
result |= (icmp_eq ? (FoldMskICmp_Mask_AllZeroes |
FoldMskICmp_Mask_AllZeroes |
@@ -457,7 +457,7 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
FoldMskICmp_AMask_NotMixed)
: (FoldMskICmp_Mask_AllZeroes |
FoldMskICmp_AMask_Mixed));
- } else if (ACst != 0 && CCst != 0 &&
+ } else if (ACst && CCst &&
ConstantExpr::getAnd(ACst, CCst) == CCst) {
result |= (icmp_eq ? FoldMskICmp_AMask_Mixed
: FoldMskICmp_AMask_NotMixed);
@@ -472,7 +472,7 @@ static unsigned getTypeOfMaskedICmp(Value* A, Value* B, Value* C,
FoldMskICmp_BMask_NotMixed)
: (FoldMskICmp_Mask_AllZeroes |
FoldMskICmp_BMask_Mixed));
- } else if (BCst != 0 && CCst != 0 &&
+ } else if (BCst && CCst &&
ConstantExpr::getAnd(BCst, CCst) == CCst) {
result |= (icmp_eq ? FoldMskICmp_BMask_Mixed
: FoldMskICmp_BMask_NotMixed);
@@ -572,12 +572,12 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
Value *L11,*L12,*L21,*L22;
// Check whether the icmp can be decomposed into a bit test.
if (decomposeBitTestICmp(LHS, LHSCC, L11, L12, L2)) {
- L21 = L22 = L1 = 0;
+ L21 = L22 = L1 = nullptr;
} else {
// Look for ANDs in the LHS icmp.
if (!L1->getType()->isIntegerTy()) {
// You can icmp pointers, for example. They really aren't masks.
- L11 = L12 = 0;
+ L11 = L12 = nullptr;
} else if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
// Any icmp can be viewed as being trivially masked; if it allows us to
// remove one, it's worth it.
@@ -587,7 +587,7 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
if (!L2->getType()->isIntegerTy()) {
// You can icmp pointers, for example. They really aren't masks.
- L21 = L22 = 0;
+ L21 = L22 = nullptr;
} else if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
L21 = L2;
L22 = Constant::getAllOnesValue(L2->getType());
@@ -610,7 +610,7 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
} else {
return 0;
}
- E = R2; R1 = 0; ok = true;
+ E = R2; R1 = nullptr; ok = true;
} else if (R1->getType()->isIntegerTy()) {
if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
// As before, model no mask as a trivial mask if it'll let us do an
@@ -667,11 +667,11 @@ static unsigned foldLogOpOfMaskedICmpsHelper(Value*& A,
/// into a single (icmp(A & X) ==/!= Y)
static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
llvm::InstCombiner::BuilderTy* Builder) {
- Value *A = 0, *B = 0, *C = 0, *D = 0, *E = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
unsigned mask = foldLogOpOfMaskedICmpsHelper(A, B, C, D, E, LHS, RHS,
LHSCC, RHSCC);
- if (mask == 0) return 0;
+ if (mask == 0) return nullptr;
assert(ICmpInst::isEquality(LHSCC) && ICmpInst::isEquality(RHSCC) &&
"foldLogOpOfMaskedICmpsHelper must return an equality predicate.");
@@ -724,9 +724,9 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
// their actual values. This isn't strictly, necessary, just a "handle the
// easy cases for now" decision.
ConstantInt *BCst = dyn_cast<ConstantInt>(B);
- if (BCst == 0) return 0;
+ if (!BCst) return nullptr;
ConstantInt *DCst = dyn_cast<ConstantInt>(D);
- if (DCst == 0) return 0;
+ if (!DCst) return nullptr;
if (mask & (FoldMskICmp_Mask_NotAllZeroes | FoldMskICmp_BMask_NotAllOnes)) {
// (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
@@ -765,11 +765,11 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
// (icmp ne (A & B), B) & (icmp eq (A & D), D)
// with B and D, having a single bit set
ConstantInt *CCst = dyn_cast<ConstantInt>(C);
- if (CCst == 0) return 0;
+ if (!CCst) return nullptr;
if (LHSCC != NEWCC)
CCst = dyn_cast<ConstantInt>( ConstantExpr::getXor(BCst, CCst) );
ConstantInt *ECst = dyn_cast<ConstantInt>(E);
- if (ECst == 0) return 0;
+ if (!ECst) return nullptr;
if (RHSCC != NEWCC)
ECst = dyn_cast<ConstantInt>( ConstantExpr::getXor(DCst, ECst) );
ConstantInt* MCst = dyn_cast<ConstantInt>(
@@ -778,13 +778,13 @@ static Value* foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
// If there is a conflict, we should actually return false for the
// whole construct.
if (!MCst->isZero())
- return 0;
+ return nullptr;
Value *newOr1 = Builder->CreateOr(B, D);
Value *newOr2 = ConstantExpr::getOr(CCst, ECst);
Value *newAnd = Builder->CreateAnd(A, newOr1);
return Builder->CreateICmp(NEWCC, newAnd, newOr2);
}
- return 0;
+ return nullptr;
}
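
Aside: the helper above leans on a small bitwise identity, namely that two
masked equality tests against zero conjoin into a single test against the
union of the masks. A standalone sketch that checks it exhaustively over
4-bit values (illustrative C++, not part of this patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // ((a & b) == 0 && (a & d) == 0)  <=>  ((a & (b | d)) == 0)
      for (uint32_t a = 0; a < 16; ++a)
        for (uint32_t b = 0; b < 16; ++b)
          for (uint32_t d = 0; d < 16; ++d)
            assert((((a & b) == 0) && ((a & d) == 0)) ==
                   ((a & (b | d)) == 0));
      return 0;
    }
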
/// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
@@ -813,7 +813,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
- if (LHSCst == 0 || RHSCst == 0) return 0;
+ if (!LHSCst || !RHSCst) return nullptr;
if (LHSCst == RHSCst && LHSCC == RHSCC) {
// (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
@@ -837,7 +837,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (LHSCC == ICmpInst::ICMP_EQ && LHSCC == RHSCC &&
LHS->hasOneUse() && RHS->hasOneUse()) {
Value *V;
- ConstantInt *AndCst, *SmallCst = 0, *BigCst = 0;
+ ConstantInt *AndCst, *SmallCst = nullptr, *BigCst = nullptr;
// (trunc x) == C1 & (and x, CA) == C2
// (and x, CA) == C2 & (trunc x) == C1
@@ -868,14 +868,14 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// From here on, we only handle:
// (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
- if (Val != Val2) return 0;
+ if (Val != Val2) return nullptr;
// ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
- return 0;
+ return nullptr;
// Make a constant range that's the intersection of the two icmp ranges.
// If the intersection is empty, we know that the result is false.
@@ -889,7 +889,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// We can't fold (ugt x, C) & (sgt x, C2).
if (!PredicatesFoldable(LHSCC, RHSCC))
- return 0;
+ return nullptr;
// Ensure that the larger constant is on the RHS.
bool ShouldSwap;
@@ -1018,7 +1018,7 @@ Value *InstCombiner::FoldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
break;
}
- return 0;
+ return nullptr;
}
/// FoldAndOfFCmps - Optimize (fcmp)&(fcmp). NOTE: Unlike the rest of
@@ -1028,7 +1028,7 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
RHS->getPredicate() == FCmpInst::FCMP_ORD) {
if (LHS->getOperand(0)->getType() != RHS->getOperand(0)->getType())
- return 0;
+ return nullptr;
// (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
@@ -1045,7 +1045,7 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
isa<ConstantAggregateZero>(RHS->getOperand(1)))
return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
- return 0;
+ return nullptr;
}
Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
@@ -1098,7 +1098,7 @@ Value *InstCombiner::FoldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
}
}
- return 0;
+ return nullptr;
}
@@ -1200,7 +1200,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// If this is an integer truncation, and if the source is an 'and' with
// immediate, transform it. This frequently occurs for bitfield accesses.
{
- Value *X = 0; ConstantInt *YC = 0;
+ Value *X = nullptr; ConstantInt *YC = nullptr;
if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
// Change: and (trunc (and X, YC) to T), C2
// into : and (trunc X to T), trunc(YC) & C2
@@ -1233,7 +1233,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
{
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
// (A|B) & ~(A&B) -> A^B
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
@@ -1341,7 +1341,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
}
{
- Value *X = 0;
+ Value *X = nullptr;
bool OpsSwapped = false;
// Canonicalize SExt or Not to the LHS
if (match(Op1, m_SExt(m_Value())) ||
@@ -1368,7 +1368,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
std::swap(Op0, Op1);
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
/// CollectBSwapParts - Analyze the specified subexpression and see if it is
@@ -1500,7 +1500,7 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
if (!ITy || ITy->getBitWidth() % 16 ||
// ByteMask only allows up to 32-byte values.
ITy->getBitWidth() > 32*8)
- return 0; // Can only bswap pairs of bytes. Can't do vectors.
+ return nullptr; // Can only bswap pairs of bytes. Can't do vectors.
/// ByteValues - For each byte of the result, we keep track of which value
/// defines each byte.
@@ -1510,16 +1510,16 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
// Try to find all the pieces corresponding to the bswap.
uint32_t ByteMask = ~0U >> (32-ByteValues.size());
if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
- return 0;
+ return nullptr;
// Check to see if all of the bytes come from the same value.
Value *V = ByteValues[0];
- if (V == 0) return 0; // Didn't find a byte? Must be zero.
+ if (!V) return nullptr; // Didn't find a byte? Must be zero.
// Check to make sure that all of the bytes come from the same value.
for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
if (ByteValues[i] != V)
- return 0;
+ return nullptr;
Module *M = I.getParent()->getParent()->getParent();
Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, ITy);
return CallInst::Create(F, V);
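
For context, this is the shape of source the matcher reassembles into a
single intrinsic call; a hand-written byte swap like the sketch below
(illustrative, not from this patch) is what becomes one llvm.bswap:

    #include <cassert>
    #include <cstdint>

    // A hand-rolled byte swap of the kind CollectBSwapParts recognizes.
    static uint32_t bswap32(uint32_t x) {
      return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
             ((x & 0x00FF0000u) >> 8)  | ((x & 0xFF000000u) >> 24);
    }

    int main() {
      assert(bswap32(0x12345678u) == 0x78563412u);
      return 0;
    }
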
@@ -1531,10 +1531,10 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
Value *C, Value *D) {
// If A is not a select of -1/0, this cannot match.
- Value *Cond = 0;
+ Value *Cond = nullptr;
if (!match(A, m_SExt(m_Value(Cond))) ||
!Cond->getType()->isIntegerTy(1))
- return 0;
+ return nullptr;
// ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
if (match(D, m_Not(m_SExt(m_Specific(Cond)))))
@@ -1547,7 +1547,7 @@ static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
return SelectInst::Create(Cond, C, D);
if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
return SelectInst::Create(Cond, C, D);
- return 0;
+ return nullptr;
}
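
The underlying arithmetic: a sign-extended i1 is either all-ones or
all-zeros, so the and/or pair picks out exactly one operand. A small check
of the identity (names illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int cond = 0; cond <= 1; ++cond)
        for (uint32_t c = 0; c < 8; ++c)
          for (uint32_t d = 0; d < 8; ++d) {
            uint32_t sext = cond ? ~0u : 0u;  // sext i1 -> -1 / 0
            assert(((sext & c) | (~sext & d)) == (cond ? c : d));
          }
      return 0;
    }
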
/// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
@@ -1568,8 +1568,8 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
LAnd->getOpcode() == Instruction::And &&
RAnd->getOpcode() == Instruction::And) {
- Value *Mask = 0;
- Value *Masked = 0;
+ Value *Mask = nullptr;
+ Value *Masked = nullptr;
if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
isKnownToBeAPowerOfTwo(LAnd->getOperand(1)) &&
isKnownToBeAPowerOfTwo(RAnd->getOperand(1))) {
@@ -1610,7 +1610,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (LHS->hasOneUse() || RHS->hasOneUse()) {
// (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
// (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (LHSCC == ICmpInst::ICMP_EQ && LHSCst && LHSCst->isZero()) {
B = Val;
if (RHSCC == ICmpInst::ICMP_ULT && Val == RHS->getOperand(1))
@@ -1634,7 +1634,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
}
// This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
- if (LHSCst == 0 || RHSCst == 0) return 0;
+ if (!LHSCst || !RHSCst) return nullptr;
if (LHSCst == RHSCst && LHSCC == RHSCC) {
// (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
@@ -1655,18 +1655,18 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
// From here on, we only handle:
// (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
- if (Val != Val2) return 0;
+ if (Val != Val2) return nullptr;
// ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
- return 0;
+ return nullptr;
// We can't fold (ugt x, C) | (sgt x, C2).
if (!PredicatesFoldable(LHSCC, RHSCC))
- return 0;
+ return nullptr;
// Ensure that the larger constant is on the RHS.
bool ShouldSwap;
@@ -1811,7 +1811,7 @@ Value *InstCombiner::FoldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
}
break;
}
- return 0;
+ return nullptr;
}
/// FoldOrOfFCmps - Optimize (fcmp)|(fcmp). NOTE: Unlike the rest of
@@ -1839,7 +1839,7 @@ Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
isa<ConstantAggregateZero>(RHS->getOperand(1)))
return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
- return 0;
+ return nullptr;
}
Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
@@ -1871,7 +1871,7 @@ Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
return getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS, Builder);
}
}
- return 0;
+ return nullptr;
}
/// FoldOrWithConstants - This helper function folds:
@@ -1886,21 +1886,21 @@ Value *InstCombiner::FoldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
Value *A, Value *B, Value *C) {
ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
- if (!CI1) return 0;
+ if (!CI1) return nullptr;
- Value *V1 = 0;
- ConstantInt *CI2 = 0;
- if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
+ Value *V1 = nullptr;
+ ConstantInt *CI2 = nullptr;
+ if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return nullptr;
APInt Xor = CI1->getValue() ^ CI2->getValue();
- if (!Xor.isAllOnesValue()) return 0;
+ if (!Xor.isAllOnesValue()) return nullptr;
if (V1 == A || V1 == B) {
Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
return BinaryOperator::CreateOr(NewOp, V1);
}
- return 0;
+ return nullptr;
}
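
A quick standalone check of the FoldOrWithConstants identity for the
C2 == ~C1 case, spot-checked over 8-bit operand ranges with one
illustrative mask (a sketch, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t C1 = 0x0F0Fu, C2 = ~C1;   // C1 ^ C2 is all ones
      for (uint32_t a = 0; a < 256; ++a)
        for (uint32_t b = 0; b < 256; ++b)
          assert((((a | b) & C1) | (b & C2)) == ((a & C1) | b));
      return 0;
    }
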
Instruction *InstCombiner::visitOr(BinaryOperator &I) {
@@ -1920,7 +1920,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return &I;
if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
- ConstantInt *C1 = 0; Value *X = 0;
+ ConstantInt *C1 = nullptr; Value *X = nullptr;
// (X & C1) | C2 --> (X | C2) & (C1|C2)
// iff (C1 & C2) == 0.
if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
@@ -1951,8 +1951,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return NV;
}
- Value *A = 0, *B = 0;
- ConstantInt *C1 = 0, *C2 = 0;
+ Value *A = nullptr, *B = nullptr;
+ ConstantInt *C1 = nullptr, *C2 = nullptr;
// (A | B) | C and A | (B | C) -> bswap if possible.
// (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
@@ -1983,10 +1983,10 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
// (A & C)|(B & D)
- Value *C = 0, *D = 0;
+ Value *C = nullptr, *D = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
match(Op1, m_And(m_Value(B), m_Value(D)))) {
- Value *V1 = 0, *V2 = 0;
+ Value *V1 = nullptr, *V2 = nullptr;
C1 = dyn_cast<ConstantInt>(C);
C2 = dyn_cast<ConstantInt>(D);
if (C1 && C2) { // (A & C1)|(B & C2)
@@ -2030,7 +2030,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
// iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
- ConstantInt *C3 = 0, *C4 = 0;
+ ConstantInt *C3 = nullptr, *C4 = nullptr;
if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
(C3->getValue() & ~C1->getValue()) == 0 &&
match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
@@ -2222,7 +2222,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// Since this OR statement hasn't been optimized further yet, we hope
// that this transformation will allow the new ORs to be optimized.
{
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (Op0->hasOneUse() && Op1->hasOneUse() &&
match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
@@ -2232,7 +2232,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
}
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
Instruction *InstCombiner::visitXor(BinaryOperator &I) {
@@ -2496,5 +2496,5 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
}
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 40732d2f0b..17ada47d2b 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -72,7 +72,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
// load/store.
ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
- if (MemOpLength == 0) return 0;
+ if (!MemOpLength) return nullptr;
// Source and destination pointer types are always "i8*" for intrinsic. See
// if the size is something we can handle with a single primitive load/store.
@@ -82,7 +82,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
assert(Size && "0-sized memory transferring should be removed already.");
if (Size > 8 || (Size&(Size-1)))
- return 0; // If not 1/2/4/8 bytes, exit.
+ return nullptr; // If not 1/2/4/8 bytes, exit.
// Use an integer load+store unless we can find something better.
unsigned SrcAddrSp =
@@ -101,7 +101,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
- MDNode *CopyMD = 0;
+ MDNode *CopyMD = nullptr;
if (StrippedDest != MI->getArgOperand(0)) {
Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
@@ -165,7 +165,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
- return 0;
+ return nullptr;
uint64_t Len = LenC->getLimitedValue();
Alignment = MI->getAlignment();
assert(Len && "0-sized memory setting should be removed already.");
@@ -193,7 +193,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
return MI;
}
- return 0;
+ return nullptr;
}
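
The rewrite above treats a constant-length memset as one wide store of the
fill byte splatted across an integer. A sketch of that equivalence for the
8-byte case (illustrative, not part of the patch):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint8_t Fill = 0xAB;
      uint64_t ViaStore = Fill * 0x0101010101010101ull;  // splatted byte
      uint64_t ViaMemset;
      std::memset(&ViaMemset, Fill, sizeof ViaMemset);
      assert(ViaMemset == ViaStore);
      return 0;
    }
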
/// visitCallInst - CallInst simplification. This mostly only handles folding
@@ -235,7 +235,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// No other transformations apply to volatile transfers.
if (MI->isVolatile())
- return 0;
+ return nullptr;
// If we have a memmove and the source operation is a constant global,
// then the source and dest pointers can't alias, so we can change this
@@ -278,11 +278,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
uint64_t Size;
if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
- return 0;
+ return nullptr;
}
case Intrinsic::bswap: {
Value *IIOperand = II->getArgOperand(0);
- Value *X = 0;
+ Value *X = nullptr;
// bswap(bswap(x)) -> x
if (match(IIOperand, m_BSwap(m_Value(X))))
@@ -711,8 +711,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
bool AllEltsOk = true;
for (unsigned i = 0; i != 16; ++i) {
Constant *Elt = Mask->getAggregateElement(i);
- if (Elt == 0 ||
- !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
+ if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
AllEltsOk = false;
break;
}
@@ -737,7 +736,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
Idx &= 31; // Match the hardware behavior.
- if (ExtractedElts[Idx] == 0) {
+ if (!ExtractedElts[Idx]) {
ExtractedElts[Idx] =
Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
Builder->getInt32(Idx&15));
@@ -901,14 +900,14 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
- if (CI->getCalledFunction() == 0) return 0;
+ if (!CI->getCalledFunction()) return nullptr;
if (Value *With = Simplifier->optimizeCall(CI)) {
++NumSimplified;
return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
}
- return 0;
+ return nullptr;
}
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
@@ -917,35 +916,35 @@ static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
Value *Underlying = TrampMem->stripPointerCasts();
if (Underlying != TrampMem &&
(!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
- return 0;
+ return nullptr;
if (!isa<AllocaInst>(Underlying))
- return 0;
+ return nullptr;
- IntrinsicInst *InitTrampoline = 0;
+ IntrinsicInst *InitTrampoline = nullptr;
for (User *U : TrampMem->users()) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
if (!II)
- return 0;
+ return nullptr;
if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
if (InitTrampoline)
// More than one init_trampoline writes to this value. Give up.
- return 0;
+ return nullptr;
InitTrampoline = II;
continue;
}
if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
// Allow any number of calls to adjust.trampoline.
continue;
- return 0;
+ return nullptr;
}
// No call to init.trampoline found.
if (!InitTrampoline)
- return 0;
+ return nullptr;
// Check that the alloca is being used in the expected way.
if (InitTrampoline->getOperand(0) != TrampMem)
- return 0;
+ return nullptr;
return InitTrampoline;
}
@@ -962,9 +961,9 @@ static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
II->getOperand(0) == TrampMem)
return II;
if (Inst->mayWriteToMemory())
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
// Given a call to llvm.adjust.trampoline, find and return the corresponding
@@ -976,7 +975,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
if (!AdjustTramp ||
AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
- return 0;
+ return nullptr;
Value *TrampMem = AdjustTramp->getOperand(0);
@@ -984,7 +983,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
return IT;
if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
return IT;
- return 0;
+ return nullptr;
}
// visitCallSite - Improvements for call and invoke instructions.
@@ -999,7 +998,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// arguments of the call/invoke.
Value *Callee = CS.getCalledValue();
if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
- return 0;
+ return nullptr;
if (Function *CalleeF = dyn_cast<Function>(Callee))
// If the call and callee calling conventions don't match, this call must
@@ -1024,7 +1023,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
// change the callee to a null pointer.
cast<InvokeInst>(OldCall)->setCalledFunction(
Constant::getNullValue(CalleeF->getType()));
- return 0;
+ return nullptr;
}
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
@@ -1036,7 +1035,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (isa<InvokeInst>(CS.getInstruction())) {
// Can't remove an invoke because we cannot change the CFG.
- return 0;
+ return nullptr;
}
// This instruction is not reachable, just remove it. We insert a store to
@@ -1084,7 +1083,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (I) return EraseInstFromFunction(*I);
}
- return Changed ? CS.getInstruction() : 0;
+ return Changed ? CS.getInstruction() : nullptr;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
@@ -1093,7 +1092,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Function *Callee =
dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
- if (Callee == 0)
+ if (!Callee)
return false;
Instruction *Caller = CS.getInstruction();
const AttributeSet &CallerPAL = CS.getAttributes();
@@ -1169,7 +1168,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
- if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || DL == 0)
+ if (!ParamPTy || !ParamPTy->getElementType()->isSized() || !DL)
return false;
Type *CurElTy = ActTy->getPointerElementType();
@@ -1360,7 +1359,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
if (Attrs.hasAttrSomewhere(Attribute::Nest))
- return 0;
+ return nullptr;
assert(Tramp &&
"transformCallThroughTrampoline called with incorrect CallSite.");
@@ -1372,7 +1371,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
const AttributeSet &NestAttrs = NestF->getAttributes();
if (!NestAttrs.isEmpty()) {
unsigned NestIdx = 1;
- Type *NestTy = 0;
+ Type *NestTy = nullptr;
AttributeSet NestAttr;
// Look for a parameter marked with the 'nest' attribute.
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 3e50bcd42d..de8b94d6c2 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -81,7 +81,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) {
// This requires DataLayout to get the alloca alignment and size information.
- if (!DL) return 0;
+ if (!DL) return nullptr;
PointerType *PTy = cast<PointerType>(CI.getType());
@@ -91,26 +91,26 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// Get the type really allocated and the type casted to.
Type *AllocElTy = AI.getAllocatedType();
Type *CastElTy = PTy->getElementType();
- if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
+ if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
unsigned AllocElTyAlign = DL->getABITypeAlignment(AllocElTy);
unsigned CastElTyAlign = DL->getABITypeAlignment(CastElTy);
- if (CastElTyAlign < AllocElTyAlign) return 0;
+ if (CastElTyAlign < AllocElTyAlign) return nullptr;
// If the allocation has multiple uses, only promote it if we are strictly
// increasing the alignment of the resultant allocation. If we keep it the
// same, we open the door to infinite loops of various kinds.
- if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return 0;
+ if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;
uint64_t AllocElTySize = DL->getTypeAllocSize(AllocElTy);
uint64_t CastElTySize = DL->getTypeAllocSize(CastElTy);
- if (CastElTySize == 0 || AllocElTySize == 0) return 0;
+ if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;
// If the allocation has multiple uses, only promote it if we're not
// shrinking the amount of memory being allocated.
uint64_t AllocElTyStoreSize = DL->getTypeStoreSize(AllocElTy);
uint64_t CastElTyStoreSize = DL->getTypeStoreSize(CastElTy);
- if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return 0;
+ if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;
// See if we can satisfy the modulus by pulling a scale out of the array
// size argument.
@@ -122,10 +122,10 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// If we can now satisfy the modulus, by using a non-1 scale, we really can
// do the xform.
if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
- (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
+ (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return nullptr;
unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
- Value *Amt = 0;
+ Value *Amt = nullptr;
if (Scale == 1) {
Amt = NumElements;
} else {
@@ -171,7 +171,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
// Otherwise, it must be an instruction.
Instruction *I = cast<Instruction>(V);
- Instruction *Res = 0;
+ Instruction *Res = nullptr;
unsigned Opc = I->getOpcode();
switch (Opc) {
case Instruction::Add:
@@ -247,11 +247,11 @@ isEliminableCastPair(
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opcode);
Type *SrcIntPtrTy = DL && SrcTy->isPtrOrPtrVectorTy() ?
- DL->getIntPtrType(SrcTy) : 0;
+ DL->getIntPtrType(SrcTy) : nullptr;
Type *MidIntPtrTy = DL && MidTy->isPtrOrPtrVectorTy() ?
- DL->getIntPtrType(MidTy) : 0;
+ DL->getIntPtrType(MidTy) : nullptr;
Type *DstIntPtrTy = DL && DstTy->isPtrOrPtrVectorTy() ?
- DL->getIntPtrType(DstTy) : 0;
+ DL->getIntPtrType(DstTy) : nullptr;
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
DstTy, SrcIntPtrTy, MidIntPtrTy,
DstIntPtrTy);
@@ -320,7 +320,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
return NV;
}
- return 0;
+ return nullptr;
}
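
isEliminableCastPair above decides when two casts compose into one; for
instance, zext i8 to i32 followed by trunc to i16 is just zext i8 to i16.
Checked exhaustively in a standalone sketch (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (unsigned v = 0; v < 256; ++v) {
        uint8_t x = (uint8_t)v;
        assert((uint16_t)(uint32_t)x == (uint16_t)x);  // zext+trunc == zext
      }
      return 0;
    }
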
/// CanEvaluateTruncated - Return true if we can evaluate the specified
@@ -472,7 +472,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
}
// Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
- Value *A = 0; ConstantInt *Cst = 0;
+ Value *A = nullptr; ConstantInt *Cst = nullptr;
if (Src->hasOneUse() &&
match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
// We have three types to worry about here, the type of A, the source of
@@ -504,7 +504,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
ConstantExpr::getTrunc(Cst, CI.getType()));
}
- return 0;
+ return nullptr;
}
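
The trunc(lshr(zext A), Cst) fold noted above is sound whenever the shift
amount stays below the narrow width, since the zext'd high bits are zero.
A value-level spot check (illustrative constants):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t A = 0xDEADBEEFu;
      for (unsigned C = 0; C < 32; ++C)
        assert((uint32_t)((uint64_t)A >> C) == (A >> C));
      return 0;
    }
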
/// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
@@ -629,7 +629,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
}
}
- return 0;
+ return nullptr;
}
/// CanEvaluateZExtd - Determine if the specified value can be computed in the
@@ -760,7 +760,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// If this zero extend is only used by a truncate, let the truncate be
// eliminated before we try to optimize this zext.
if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
- return 0;
+ return nullptr;
// If one of the common conversions will work, do it.
if (Instruction *Result = commonCastTransforms(CI))
@@ -885,7 +885,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
}
- return 0;
+ return nullptr;
}
/// transformSExtICmp - Transform (sext icmp) to bitwise / integer operations
@@ -969,7 +969,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
}
}
- return 0;
+ return nullptr;
}
/// CanEvaluateSExtd - Return true if we can take the specified value
@@ -1041,7 +1041,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// If this sign extend is only used by a truncate, let the truncate be
// eliminated before we try to optimize this sext.
if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
- return 0;
+ return nullptr;
if (Instruction *I = commonCastTransforms(CI))
return I;
@@ -1109,9 +1109,9 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// into:
// %a = shl i32 %i, 30
// %d = ashr i32 %a, 30
- Value *A = 0;
+ Value *A = nullptr;
// TODO: Eventually this could be subsumed by EvaluateInDifferentType.
- ConstantInt *BA = 0, *CA = 0;
+ ConstantInt *BA = nullptr, *CA = nullptr;
if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
m_ConstantInt(CA))) &&
BA == CA && A->getType() == CI.getType()) {
@@ -1123,7 +1123,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
return BinaryOperator::CreateAShr(A, ShAmtV);
}
- return 0;
+ return nullptr;
}
@@ -1135,7 +1135,7 @@ static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
(void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
if (!losesInfo)
return ConstantFP::get(CFP->getContext(), F);
- return 0;
+ return nullptr;
}
/// LookThroughFPExtensions - If this is an fp extension instruction, look
@@ -1347,7 +1347,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitFPExt(CastInst &CI) {
@@ -1356,7 +1356,7 @@ Instruction *InstCombiner::visitFPExt(CastInst &CI) {
Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
- if (OpI == 0)
+ if (!OpI)
return commonCastTransforms(FI);
// fptoui(uitofp(X)) --> X
@@ -1376,7 +1376,7 @@ Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
- if (OpI == 0)
+ if (!OpI)
return commonCastTransforms(FI);
// fptosi(sitofp(X)) --> X
@@ -1423,7 +1423,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
if (Instruction *I = commonCastTransforms(CI))
return I;
- return 0;
+ return nullptr;
}
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
@@ -1522,7 +1522,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
// there yet.
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
DestTy->getElementType()->getPrimitiveSizeInBits())
- return 0;
+ return nullptr;
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
@@ -1600,7 +1600,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
ElementIndex = Elements.size() - ElementIndex - 1;
// Fail if multiple elements are inserted into this slot.
- if (Elements[ElementIndex] != 0)
+ if (Elements[ElementIndex])
return false;
Elements[ElementIndex] = V;
@@ -1640,7 +1640,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
if (!V->hasOneUse()) return false;
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return false;
+ if (!I) return false;
switch (I->getOpcode()) {
default: return false; // Unhandled case.
case Instruction::BitCast:
@@ -1661,7 +1661,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
case Instruction::Shl: {
// Must be shifting by a constant that is a multiple of the element size.
ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
- if (CI == 0) return false;
+ if (!CI) return false;
Shift += CI->getZExtValue();
if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
return CollectInsertionElements(I->getOperand(0), Shift,
@@ -1689,7 +1689,7 @@ static bool CollectInsertionElements(Value *V, unsigned Shift,
static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
InstCombiner &IC) {
// We need to know the target byte order to perform this optimization.
- if (!IC.getDataLayout()) return 0;
+ if (!IC.getDataLayout()) return nullptr;
VectorType *DestVecTy = cast<VectorType>(CI.getType());
Value *IntInput = CI.getOperand(0);
@@ -1697,14 +1697,14 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
if (!CollectInsertionElements(IntInput, 0, Elements,
DestVecTy->getElementType(), IC))
- return 0;
+ return nullptr;
// If we succeeded, we know that all of the elements are specified by Elements
// or are zero if Elements has a null entry. Recast this as a set of
// insertions.
Value *Result = Constant::getNullValue(CI.getType());
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
- if (Elements[i] == 0) continue; // Unset element.
+ if (!Elements[i]) continue; // Unset element.
Result = IC.Builder->CreateInsertElement(Result, Elements[i],
IC.Builder->getInt32(i));
@@ -1718,14 +1718,14 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
/// bitcast. The various long double bitcasts can't get in here.
static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
// We need to know the target byte order to perform this optimization.
- if (!IC.getDataLayout()) return 0;
+ if (!IC.getDataLayout()) return nullptr;
Value *Src = CI.getOperand(0);
Type *DestTy = CI.getType();
// If this is a bitcast from int to float, check to see if the int is an
// extraction from a vector.
- Value *VecInput = 0;
+ Value *VecInput = nullptr;
// bitcast(trunc(bitcast(somevector)))
if (match(Src, m_Trunc(m_BitCast(m_Value(VecInput)))) &&
isa<VectorType>(VecInput->getType())) {
@@ -1749,7 +1749,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
}
// bitcast(trunc(lshr(bitcast(somevector), cst)))
- ConstantInt *ShAmt = 0;
+ ConstantInt *ShAmt = nullptr;
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
m_ConstantInt(ShAmt)))) &&
isa<VectorType>(VecInput->getType())) {
@@ -1771,7 +1771,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
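
The int-to-float path in OptimizeIntToFloatBitCast peels a trunc plus
bitcast down to reading the right lane of a vector. At the value level the
chain is just the following (a sketch; the endianness and lane bookkeeping
is what the real code consults DataLayout for):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint64_t Wide = 0x3F800000ull;    // low 32 bits are the bits of 1.0f
      uint32_t Low = (uint32_t)Wide;    // trunc i64 -> i32
      float F;
      std::memcpy(&F, &Low, sizeof F);  // bitcast i32 -> float
      assert(F == 1.0f);
      return 0;
    }
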
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index da3787c6a2..d803904ed3 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -220,15 +220,15 @@ Instruction *InstCombiner::
FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
CmpInst &ICI, ConstantInt *AndCst) {
// We need TD information to know the pointer size unless this is inbounds.
- if (!GEP->isInBounds() && DL == 0)
- return 0;
+ if (!GEP->isInBounds() && !DL)
+ return nullptr;
Constant *Init = GV->getInitializer();
if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
- return 0;
+ return nullptr;
uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
- if (ArrayElementCount > 1024) return 0; // Don't blow up on huge arrays.
+ if (ArrayElementCount > 1024) return nullptr; // Don't blow up on huge arrays.
// There are many forms of this optimization we can handle; for now, just do
// the simple index into a single-dimensional array.
@@ -238,7 +238,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
!isa<ConstantInt>(GEP->getOperand(1)) ||
!cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
isa<Constant>(GEP->getOperand(2)))
- return 0;
+ return nullptr;
// Check that indices after the variable are constants and in-range for the
// type they index. Collect the indices. This is typically for arrays of
@@ -248,18 +248,18 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
Type *EltTy = Init->getType()->getArrayElementType();
for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (Idx == 0) return 0; // Variable index.
+ if (!Idx) return nullptr; // Variable index.
uint64_t IdxVal = Idx->getZExtValue();
- if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
+ if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
if (StructType *STy = dyn_cast<StructType>(EltTy))
EltTy = STy->getElementType(IdxVal);
else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
- if (IdxVal >= ATy->getNumElements()) return 0;
+ if (IdxVal >= ATy->getNumElements()) return nullptr;
EltTy = ATy->getElementType();
} else {
- return 0; // Unknown type.
+ return nullptr; // Unknown type.
}
LaterIndices.push_back(IdxVal);
@@ -298,7 +298,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
Constant *Elt = Init->getAggregateElement(i);
- if (Elt == 0) return 0;
+ if (!Elt) return nullptr;
// If this is indexing an array of structures, get the structure element.
if (!LaterIndices.empty())
@@ -323,7 +323,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// If we can't compute the result for any of the elements, we have to give
// up evaluating the entire conditional.
- if (!isa<ConstantInt>(C)) return 0;
+ if (!isa<ConstantInt>(C)) return nullptr;
// Otherwise, we know if the comparison is true or false for this element,
// update our state machines.
@@ -377,7 +377,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
FalseRangeEnd == Overdefined)
- return 0;
+ return nullptr;
}
// Now that we've scanned the entire array, emit our new comparison(s). We
@@ -469,7 +469,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// of this load, replace it with computation that does:
// ((magic_cst >> i) & 1) != 0
{
- Type *Ty = 0;
+ Type *Ty = nullptr;
// Look for an appropriate type:
// - The type of Idx if the magic fits
@@ -482,7 +482,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
else if (ArrayElementCount <= 32)
Ty = Type::getInt32Ty(Init->getContext());
- if (Ty != 0) {
+ if (Ty) {
Value *V = Builder->CreateIntCast(Idx, Ty, false);
V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
@@ -490,7 +490,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
}
}
- return 0;
+ return nullptr;
}
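
The magic-bitvector emission at the end of this routine precomputes one bit
per array element recording whether that element satisfies the comparison,
then replaces the load with a shift-and-mask. A sketch with an illustrative
table (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int Table[8] = {1, 9, 1, 4, 1, 7, 1, 2};
      uint32_t Magic = 0;
      for (unsigned i = 0; i != 8; ++i)
        if (Table[i] == 1)               // element-wise "icmp eq Table[i], 1"
          Magic |= 1u << i;
      for (unsigned i = 0; i != 8; ++i)  // load replaced by shift-and-mask
        assert((((Magic >> i) & 1) != 0) == (Table[i] == 1));
      return 0;
    }
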
@@ -535,7 +535,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// If there are no variable indices, we must have a constant offset, just
// evaluate it the general way.
- if (i == e) return 0;
+ if (i == e) return nullptr;
Value *VariableIdx = GEP->getOperand(i);
// Determine the scale factor of the variable element. For example, this is
@@ -545,7 +545,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// Verify that there are no other variable indices. If so, emit the hard way.
for (++i, ++GTI; i != e; ++i, ++GTI) {
ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (!CI) return 0;
+ if (!CI) return nullptr;
// Compute the aggregate offset of constant indices.
if (CI->isZero()) continue;
@@ -589,7 +589,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// multiple of the variable scale.
int64_t NewOffs = Offset / (int64_t)VariableScale;
if (Offset != NewOffs*(int64_t)VariableScale)
- return 0;
+ return nullptr;
// Okay, we can do this evaluation. Start by converting the index to intptr.
if (VariableIdx->getType() != IntPtrTy)
@@ -610,7 +610,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
// the maximum signed value for the pointer type.
if (ICmpInst::isSigned(Cond))
- return 0;
+ return nullptr;
// Look through bitcasts.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
@@ -625,7 +625,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this);
// If not, synthesize the offset the hard way.
- if (Offset == 0)
+ if (!Offset)
Offset = EmitGEPOffset(GEPLHS);
return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
Constant::getNullValue(Offset->getType()));
@@ -663,7 +663,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
// Otherwise, the base pointers are different and the indices are
// different, bail out.
- return 0;
+ return nullptr;
}
// If one of the GEPs has all zero indices, recurse.
@@ -731,7 +731,7 @@ Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
}
}
- return 0;
+ return nullptr;
}
/// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
@@ -814,11 +814,11 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// if it finds it.
bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
- return 0;
+ return nullptr;
if (DivRHS->isZero())
- return 0; // The ProdOV computation fails on divide by zero.
+ return nullptr; // The ProdOV computation fails on divide by zero.
if (DivIsSigned && DivRHS->isAllOnesValue())
- return 0; // The overflow computation also screws up here
+ return nullptr; // The overflow computation also screws up here
if (DivRHS->isOne()) {
// This eliminates some funny cases with INT_MIN.
ICI.setOperand(0, DivI->getOperand(0)); // X/1 == X.
@@ -852,7 +852,7 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
// overflow variable is set to 0 if its corresponding bound variable is valid,
// -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
int LoOverflow = 0, HiOverflow = 0;
- Constant *LoBound = 0, *HiBound = 0;
+ Constant *LoBound = nullptr, *HiBound = nullptr;
if (!DivIsSigned) { // udiv
// e.g. X/5 op 3 --> [15, 20)
@@ -892,7 +892,7 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
HiBound = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
if (HiBound == DivRHS) { // -INTMIN = INTMIN
HiOverflow = 1; // [INTMIN+1, overflow)
- HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
+ HiBound = nullptr; // e.g. X/INTMIN = 0 --> X > INTMIN
}
} else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
// e.g. X/-5 op 3 --> [-19, -14)
@@ -966,20 +966,20 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
uint32_t TypeBits = CmpRHSV.getBitWidth();
uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
if (ShAmtVal >= TypeBits || ShAmtVal == 0)
- return 0;
+ return nullptr;
if (!ICI.isEquality()) {
// If we have an unsigned comparison and an ashr, we can't simplify this.
// Similarly for signed comparisons with lshr.
if (ICI.isSigned() != (Shr->getOpcode() == Instruction::AShr))
- return 0;
+ return nullptr;
// Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
// by a power of 2. Since we already have logic to simplify these,
// transform to div and then simplify the resultant comparison.
if (Shr->getOpcode() == Instruction::AShr &&
(!Shr->isExact() || ShAmtVal == TypeBits - 1))
- return 0;
+ return nullptr;
// Revisit the shift (to delete it).
Worklist.Add(Shr);
@@ -996,7 +996,7 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
// If the builder folded the binop, just return it.
BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
- if (TheDiv == 0)
+ if (!TheDiv)
return &ICI;
// Otherwise, fold this div/compare.
@@ -1039,7 +1039,7 @@ Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
Mask, Shr->getName()+".mask");
return new ICmpInst(ICI.getPredicate(), And, ShiftedCmpRHS);
}
- return 0;
+ return nullptr;
}
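
The divide/compare folding that the shift path above defers to rewrites
X/C == K as a range test on X. One unsigned case, spot-checked (a sketch,
not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Unsigned case: X/5 == 3 exactly when X is in [15, 20).
      for (uint32_t x = 0; x < 100; ++x)
        assert((x / 5u == 3u) == (x >= 15u && x < 20u));
      return 0;
    }
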
@@ -1183,10 +1183,10 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// access.
BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
if (Shift && !Shift->isShift())
- Shift = 0;
+ Shift = nullptr;
ConstantInt *ShAmt;
- ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
+ ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : nullptr;
// This seemingly simple opportunity to fold away a shift turns out to
// be rather complicated. See PR17827
@@ -1779,7 +1779,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
}
}
}
- return 0;
+ return nullptr;
}
/// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
@@ -1796,7 +1796,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// integer type is the same size as the pointer type.
if (DL && LHSCI->getOpcode() == Instruction::PtrToInt &&
DL->getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
- Value *RHSOp = 0;
+ Value *RHSOp = nullptr;
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
} else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
@@ -1814,7 +1814,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Enforce this.
if (LHSCI->getOpcode() != Instruction::ZExt &&
LHSCI->getOpcode() != Instruction::SExt)
- return 0;
+ return nullptr;
bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
bool isSignedCmp = ICI.isSigned();
@@ -1823,12 +1823,12 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Not an extension from the same type?
RHSCIOp = CI->getOperand(0);
if (RHSCIOp->getType() != LHSCIOp->getType())
- return 0;
+ return nullptr;
// If the signedness of the two casts doesn't agree (i.e. one is a sext
// and the other is a zext), then we can't handle this.
if (CI->getOpcode() != LHSCI->getOpcode())
- return 0;
+ return nullptr;
// Deal with equality cases early.
if (ICI.isEquality())
@@ -1846,7 +1846,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// If we aren't dealing with a constant on the RHS, exit early
ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
if (!CI)
- return 0;
+ return nullptr;
// Compute the constant that would happen if we truncated to SrcTy then
// reextended to DestTy.
@@ -1875,7 +1875,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// by SimplifyICmpInst, so only deal with the tricky case.
if (isSignedCmp || !isSignedExt)
- return 0;
+ return nullptr;
// Evaluate the comparison for LT (we invert for GT below). LE and GE cases
// should have been folded away previously and do not enter here.
@@ -1911,12 +1911,12 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// In order to eliminate the add-with-constant, the compare can be its only
// use.
Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
- if (!AddWithCst->hasOneUse()) return 0;
+ if (!AddWithCst->hasOneUse()) return nullptr;
// If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
- if (!CI2->getValue().isPowerOf2()) return 0;
+ if (!CI2->getValue().isPowerOf2()) return nullptr;
unsigned NewWidth = CI2->getValue().countTrailingZeros();
- if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return 0;
+ if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return nullptr;
// The width of the new add formed is 1 more than the bias.
++NewWidth;
@@ -1924,7 +1924,7 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// Check to see that CI1 is an all-ones value with NewWidth bits.
if (CI1->getBitWidth() == NewWidth ||
CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
- return 0;
+ return nullptr;
// This is only really a signed overflow check if the inputs have been
// sign-extended; check for that condition. For example, if CI2 is 2^31 and
@@ -1932,7 +1932,7 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
if (IC.ComputeNumSignBits(A) < NeededSignBits ||
IC.ComputeNumSignBits(B) < NeededSignBits)
- return 0;
+ return nullptr;
// In order to replace the original add with a narrower
// llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
@@ -1948,8 +1948,8 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
// original add had another add which was then immediately truncated, we
// could still do the transformation.
TruncInst *TI = dyn_cast<TruncInst>(U);
- if (TI == 0 ||
- TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0;
+ if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
+ return nullptr;
}
// If the pattern matches, truncate the inputs to the narrower type and
@@ -1985,11 +1985,11 @@ static Instruction *ProcessUAddIdiom(Instruction &I, Value *OrigAddV,
InstCombiner &IC) {
// Don't bother doing this transformation for pointers, don't do it for
// vectors.
- if (!isa<IntegerType>(OrigAddV->getType())) return 0;
+ if (!isa<IntegerType>(OrigAddV->getType())) return nullptr;
// If the add is a constant expr, then we don't bother transforming it.
Instruction *OrigAdd = dyn_cast<Instruction>(OrigAddV);
- if (OrigAdd == 0) return 0;
+ if (!OrigAdd) return nullptr;
Value *LHS = OrigAdd->getOperand(0), *RHS = OrigAdd->getOperand(1);
@@ -2063,19 +2063,19 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
// Check if truncation ignores bits above MulWidth.
unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
if (TruncWidth > MulWidth)
- return 0;
+ return nullptr;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
// Check if AND ignores bits above MulWidth.
if (BO->getOpcode() != Instruction::And)
- return 0;
+ return nullptr;
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
const APInt &CVal = CI->getValue();
if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
- return 0;
+ return nullptr;
}
} else {
// Other uses prohibit this transformation.
- return 0;
+ return nullptr;
}
}
@@ -2101,7 +2101,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
Value *ValToMask;
if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
if (ValToMask != MulVal)
- return 0;
+ return nullptr;
const APInt &CVal = CI->getValue() + 1;
if (CVal.isPowerOf2()) {
unsigned MaskWidth = CVal.logBase2();
@@ -2109,7 +2109,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
break; // Recognized
}
}
- return 0;
+ return nullptr;
case ICmpInst::ICMP_UGT:
// Recognize pattern:
@@ -2121,7 +2121,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
if (MaxVal.eq(CI->getValue()))
break; // Recognized
}
- return 0;
+ return nullptr;
case ICmpInst::ICMP_UGE:
// Recognize pattern:
@@ -2132,7 +2132,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
if (MaxVal.eq(CI->getValue()))
break; // Recognized
}
- return 0;
+ return nullptr;
case ICmpInst::ICMP_ULE:
// Recognize pattern:
@@ -2144,7 +2144,7 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
if (MaxVal.eq(CI->getValue()))
break; // Recognized
}
- return 0;
+ return nullptr;
case ICmpInst::ICMP_ULT:
// Recognize pattern:
@@ -2155,10 +2155,10 @@ static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
if (MaxVal.eq(CI->getValue()))
break; // Recognized
}
- return 0;
+ return nullptr;
default:
- return 0;
+ return nullptr;
}
InstCombiner::BuilderTy *Builder = IC.Builder;
@@ -2410,7 +2410,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// See if we are doing a comparison with a constant.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
// Match the following pattern, which is a common idiom when writing
// overflow-safe integer arithmetic functions. The source performs an
@@ -2525,15 +2525,15 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
APInt Op0KnownZeroInverted = ~Op0KnownZero;
if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
// If the LHS is an AND with the same constant, look through it.
- Value *LHS = 0;
- ConstantInt *LHSC = 0;
+ Value *LHS = nullptr;
+ ConstantInt *LHSC = nullptr;
if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
LHSC->getValue() != Op0KnownZeroInverted)
LHS = Op0;
// If the LHS is 1 << x, and we know the result is a power of 2 like 8,
// then turn "((1 << x)&8) == 0" into "x != 3".
- Value *X = 0;
+ Value *X = nullptr;
if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
return new ICmpInst(ICmpInst::ICMP_NE, X,
@@ -2562,15 +2562,15 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
APInt Op0KnownZeroInverted = ~Op0KnownZero;
if (~Op1KnownZero == 0 && Op0KnownZeroInverted.isPowerOf2()) {
// If the LHS is an AND with the same constant, look through it.
- Value *LHS = 0;
- ConstantInt *LHSC = 0;
+ Value *LHS = nullptr;
+ ConstantInt *LHSC = nullptr;
if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
LHSC->getValue() != Op0KnownZeroInverted)
LHS = Op0;
// If the LHS is 1 << x, and we know the result is a power of 2 like 8,
// then turn "((1 << x)&8) != 0" into "x == 3".
- Value *X = 0;
+ Value *X = nullptr;
if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
unsigned CmpVal = Op0KnownZeroInverted.countTrailingZeros();
return new ICmpInst(ICmpInst::ICMP_EQ, X,
@@ -2702,7 +2702,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
(SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
- return 0;
+ return nullptr;
// See if we are doing a comparison between a constant and an instruction that
// can be folded into the comparison.
@@ -2738,7 +2738,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// If either operand of the select is a constant, we can fold the
// comparison into the select arms, which will cause one to be
// constant folded and the select turned into a bitwise or.
- Value *Op1 = 0, *Op2 = 0;
+ Value *Op1 = nullptr, *Op2 = nullptr;
if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1)))
Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2)))
@@ -2850,7 +2850,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// Analyze the case when either Op0 or Op1 is an add instruction.
// Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
if (BO0 && BO0->getOpcode() == Instruction::Add)
A = BO0->getOperand(0), B = BO0->getOperand(1);
if (BO1 && BO1->getOpcode() == Instruction::Add)
@@ -2945,7 +2945,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// Analyze the case when either Op0 or Op1 is a sub instruction.
// Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
- A = 0; B = 0; C = 0; D = 0;
+ A = nullptr; B = nullptr; C = nullptr; D = nullptr;
if (BO0 && BO0->getOpcode() == Instruction::Sub)
A = BO0->getOperand(0), B = BO0->getOperand(1);
if (BO1 && BO1->getOpcode() == Instruction::Sub)
@@ -2971,7 +2971,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
BO0->hasOneUse() && BO1->hasOneUse())
return new ICmpInst(Pred, D, B);
- BinaryOperator *SRem = NULL;
+ BinaryOperator *SRem = nullptr;
// icmp (srem X, Y), Y
if (BO0 && BO0->getOpcode() == Instruction::SRem &&
Op1 == BO0->getOperand(1))
@@ -3160,7 +3160,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
// (X&Z) == (Y&Z) -> (X^Y) & Z == 0
if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
- Value *X = 0, *Y = 0, *Z = 0;
+ Value *X = nullptr, *Y = nullptr, *Z = nullptr;
if (A == C) {
X = B; Y = D; Z = A;
@@ -3251,7 +3251,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate());
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
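
One of the folds quoted in the comments above, ((1 << x) & 8) == 0 becoming
x != 3, is easy to sanity-check standalone (in-range shift amounts assumed;
illustrative code, not from the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x = 0; x < 32; ++x)
        assert((((1u << x) & 8u) == 0u) == (x != 3u));
      return 0;
    }
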
/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
@@ -3259,13 +3259,13 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
Instruction *LHSI,
Constant *RHSC) {
- if (!isa<ConstantFP>(RHSC)) return 0;
+ if (!isa<ConstantFP>(RHSC)) return nullptr;
const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
// Get the width of the mantissa. We don't want to hack on conversions that
// might lose information from the integer, e.g. "i64 -> float"
int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
- if (MantissaWidth == -1) return 0; // Unknown.
+ if (MantissaWidth == -1) return nullptr; // Unknown.
// Check to see that the input is converted from an integer type that is small
// enough that it preserves all bits. TODO: check here for "known" sign bits.
@@ -3279,7 +3279,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
// If the conversion would lose info, don't hack on this.
if ((int)InputSize > MantissaWidth)
- return 0;
+ return nullptr;
// Otherwise, we can potentially simplify the comparison. We know that it
// will always come through as an integer value and we know the constant is
@@ -3625,5 +3625,5 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
RHSExt->getOperand(0));
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
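Aside: the 0 -> nullptr substitutions above are not purely cosmetic. A small standalone illustration, independent of the patch, of why an integral null constant is hazardous in C++ overload resolution:

    #include <iostream>

    void f(int)   { std::cout << "f(int)\n"; }
    void f(void*) { std::cout << "f(void*)\n"; }

    int main() {
      f(0);        // binds to f(int): 0 is an integer first, a null pointer second
      f(nullptr);  // binds to f(void*): nullptr can only become a pointer
      // f(NULL) is ambiguous or picks f(int), depending on how NULL is defined.
      return 0;
    }

nullptr also deduces as std::nullptr_t rather than int in templates, which is what makes this mechanical rewrite safe wherever a pointer is meant.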
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index dceb1a83e5..66d09388f4 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -117,7 +117,7 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
// If this isn't our memcpy/memmove, reject it as something we can't
// handle.
MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
- if (MI == 0)
+ if (!MI)
return false;
// If the transfer is using the alloca as a source of the transfer, then
@@ -153,10 +153,10 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
static MemTransferInst *
isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
SmallVectorImpl<Instruction *> &ToDelete) {
- MemTransferInst *TheCopy = 0;
+ MemTransferInst *TheCopy = nullptr;
if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete))
return TheCopy;
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
@@ -177,7 +177,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
- AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
+ AllocaInst *New = Builder->CreateAlloca(NewTy, nullptr, AI.getName());
New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of
@@ -300,7 +300,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
// If the address spaces don't match, don't eliminate the cast.
if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
- return 0;
+ return nullptr;
Type *SrcPTy = SrcTy->getElementType();
@@ -351,7 +351,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
}
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
@@ -378,7 +378,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// None of the following transforms are legal for volatile/atomic loads.
// FIXME: Some of it is okay for atomic loads; needs refactoring.
- if (!LI.isSimple()) return 0;
+ if (!LI.isSimple()) return nullptr;
// Do really simple store-to-load forwarding and load CSE, to catch cases
// where there are several consecutive memory accesses to the same location,
@@ -460,7 +460,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
}
}
}
- return 0;
+ return nullptr;
}
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
@@ -472,12 +472,12 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
- if (SrcTy == 0) return 0;
+ if (!SrcTy) return nullptr;
Type *SrcPTy = SrcTy->getElementType();
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
- return 0;
+ return nullptr;
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
/// to its first element. This allows us to handle things like:
@@ -511,20 +511,20 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
}
if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
- return 0;
+ return nullptr;
// If the pointers point into different address spaces don't do the
// transformation.
if (SrcTy->getAddressSpace() !=
cast<PointerType>(CI->getType())->getAddressSpace())
- return 0;
+ return nullptr;
// If the pointers point to values of different sizes don't do the
// transformation.
if (!IC.getDataLayout() ||
IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
IC.getDataLayout()->getTypeSizeInBits(DestPTy))
- return 0;
+ return nullptr;
// If the pointers point to pointers to different address spaces don't do the
// transformation. It is not safe to introduce an addrspacecast instruction in
@@ -532,7 +532,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
// cast.
if (SrcPTy->isPointerTy() && DestPTy->isPointerTy() &&
SrcPTy->getPointerAddressSpace() != DestPTy->getPointerAddressSpace())
- return 0;
+ return nullptr;
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before
@@ -612,7 +612,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// Don't hack volatile/atomic stores.
// FIXME: Some bits are legal for atomic stores; needs refactoring.
- if (!SI.isSimple()) return 0;
+ if (!SI.isSimple()) return nullptr;
// If the RHS is an alloca with a single use, zapify the store, making the
// alloca dead.
@@ -679,7 +679,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (Instruction *U = dyn_cast<Instruction>(Val))
Worklist.Add(U); // Dropped a use.
}
- return 0; // Do not modify these!
+ return nullptr; // Do not modify these!
}
// store undef, Ptr -> noop
@@ -708,9 +708,9 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
if (BI->isUnconditional())
if (SimplifyStoreAtEndOfBlock(SI))
- return 0; // xform done!
+ return nullptr; // xform done!
- return 0;
+ return nullptr;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
@@ -733,7 +733,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
// the other predecessor.
pred_iterator PI = pred_begin(DestBB);
BasicBlock *P = *PI;
- BasicBlock *OtherBB = 0;
+ BasicBlock *OtherBB = nullptr;
if (P != StoreBB)
OtherBB = P;
@@ -763,7 +763,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
// If the other block ends in an unconditional branch, check for the 'if then
// else' case: there is an instruction before the branch.
- StoreInst *OtherStore = 0;
+ StoreInst *OtherStore = nullptr;
if (OtherBr->isUnconditional()) {
--BBI;
// Skip over debugging info.
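The SimplifyStoreAtEndOfBlock hunks above implement the classic if/then/else store sink described in the function's comment. At the source level the transform corresponds to the following sketch (illustrative C++, not part of the patch):

    // Before: one store at the end of each predecessor block.
    void before(bool c, int a, int b, int *p) {
      if (c)
        *p = a;
      else
        *p = b;
    }

    // After: a single store in the join block, fed by the value
    // selected on the branch condition (a phi in IR terms).
    void after(bool c, int a, int b, int *p) {
      *p = c ? a : b;
    }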
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index bbd4b2edc1..ab705a4056 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -29,13 +29,13 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) {
// If V has multiple uses, then we would have to do more analysis to determine
// if this is safe. For example, the use could be in dynamically unreached
// code.
- if (!V->hasOneUse()) return 0;
+ if (!V->hasOneUse()) return nullptr;
bool MadeChange = false;
// ((1 << A) >>u B) --> (1 << (A-B))
// Because V cannot be zero, we know that B is less than A.
- Value *A = 0, *B = 0, *PowerOf2 = 0;
+ Value *A = nullptr, *B = nullptr, *PowerOf2 = nullptr;
if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(PowerOf2), m_Value(A))),
m_Value(B))) &&
// The "1" can be any value known to be a power of 2.
@@ -70,7 +70,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) {
// If V is a phi node, we can call this on each of its operands.
// "select cond, X, 0" can simplify to "X".
- return MadeChange ? V : 0;
+ return MadeChange ? V : nullptr;
}
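The ((1 << A) >>u B) --> (1 << (A-B)) rewrite in the hunk above is justified by the known-nonzero precondition: if any bit survives the unsigned right shift, B cannot exceed A. A quick exhaustive self-check in plain C++ (hypothetical helper name, not LLVM API):

    #include <cassert>
    #include <cstdint>

    // Assuming (1u << a) >> b is known to be nonzero, the surviving bit
    // sits at position a - b, so the whole expression is 1u << (a - b).
    uint32_t foldedShift(uint32_t a, uint32_t b) {
      return 1u << (a - b);
    }

    int main() {
      for (uint32_t a = 0; a < 32; ++a)
        for (uint32_t b = 0; b <= a; ++b)   // b <= a <=> value is nonzero
          assert(foldedShift(a, b) == ((1u << a) >> b));
      return 0;
    }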
@@ -109,7 +109,7 @@ static Constant *getLogBase2Vector(ConstantDataVector *CV) {
for (unsigned I = 0, E = CV->getNumElements(); I != E; ++I) {
Constant *Elt = CV->getElementAsConstant(I);
if (!match(Elt, m_APInt(IVal)) || !IVal->isPowerOf2())
- return 0;
+ return nullptr;
Elts.push_back(ConstantInt::get(Elt->getType(), IVal->logBase2()));
}
@@ -141,7 +141,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
return BinaryOperator::CreateMul(NewOp, ConstantExpr::getShl(C1, C2));
if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
- Constant *NewCst = 0;
+ Constant *NewCst = nullptr;
if (match(C1, m_APInt(IVal)) && IVal->isPowerOf2())
// Replace X*(2^C) with X << C, where C is either a scalar or a splat.
NewCst = ConstantInt::get(NewOp->getType(), IVal->logBase2());
@@ -167,10 +167,10 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
const APInt & Val = CI->getValue();
const APInt &PosVal = Val.abs();
if (Val.isNegative() && PosVal.isPowerOf2()) {
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (Op0->hasOneUse()) {
ConstantInt *C1;
- Value *Sub = 0;
+ Value *Sub = nullptr;
if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
Sub = Builder->CreateSub(X, Y, "suba");
else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
@@ -270,7 +270,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
// -2 is "-1 << 1" so it is all bits set except the low one.
APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
- Value *BoolCast = 0, *OtherOp = 0;
+ Value *BoolCast = nullptr, *OtherOp = nullptr;
if (MaskedValueIsZero(Op0, Negative2))
BoolCast = Op0, OtherOp = Op1;
else if (MaskedValueIsZero(Op1, Negative2))
@@ -283,7 +283,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
//
@@ -386,7 +386,7 @@ Value *InstCombiner::foldFMulConst(Instruction *FMulOrDiv, Constant *C,
Constant *C0 = dyn_cast<Constant>(Opnd0);
Constant *C1 = dyn_cast<Constant>(Opnd1);
- BinaryOperator *R = 0;
+ BinaryOperator *R = nullptr;
// (X * C0) * C => X * (C0*C)
if (FMulOrDiv->getOpcode() == Instruction::FMul) {
@@ -485,7 +485,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *M1 = ConstantExpr::getFMul(C1, C);
Value *M0 = isNormalFp(cast<Constant>(M1)) ?
foldFMulConst(cast<Instruction>(Opnd0), C, &I) :
- 0;
+ nullptr;
if (M0 && M1) {
if (Swap && FAddSub->getOpcode() == Instruction::FSub)
std::swap(M0, M1);
@@ -505,8 +505,8 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
// Under unsafe algebra do:
// X * log2(0.5*Y) = X*log2(Y) - X
if (I.hasUnsafeAlgebra()) {
- Value *OpX = NULL;
- Value *OpY = NULL;
+ Value *OpX = nullptr;
+ Value *OpY = nullptr;
IntrinsicInst *Log2;
detectLog2OfHalf(Op0, OpY, Log2);
if (OpY) {
@@ -569,7 +569,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Value *Opnd0_0, *Opnd0_1;
if (Opnd0->hasOneUse() &&
match(Opnd0, m_FMul(m_Value(Opnd0_0), m_Value(Opnd0_1)))) {
- Value *Y = 0;
+ Value *Y = nullptr;
if (Opnd0_0 == Opnd1 && Opnd0_1 != Opnd1)
Y = Opnd0_1;
else if (Opnd0_1 == Opnd1 && Opnd0_0 != Opnd1)
@@ -623,7 +623,7 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
break;
}
- return Changed ? &I : 0;
+ return Changed ? &I : nullptr;
}
/// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
@@ -684,12 +684,12 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
// If we are past the instruction, quit looking for it.
if (&*BBI == SI)
- SI = 0;
+ SI = nullptr;
if (&*BBI == SelectCond)
- SelectCond = 0;
+ SelectCond = nullptr;
// If we ran out of things to eliminate, break out of the loop.
- if (SelectCond == 0 && SI == 0)
+ if (!SelectCond && !SI)
break;
}
@@ -742,7 +742,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
return &I;
// (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
- Value *X = 0, *Z = 0;
+ Value *X = nullptr, *Z = nullptr;
if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) { // (X - Z) / Y; Y = Op1
bool isSigned = I.getOpcode() == Instruction::SDiv;
if ((isSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
@@ -750,7 +750,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
return BinaryOperator::Create(I.getOpcode(), X, Op1);
}
- return 0;
+ return nullptr;
}
/// dyn_castZExtVal - Checks if V is a zext or constant that can
@@ -763,7 +763,7 @@ static Value *dyn_castZExtVal(Value *V, Type *Ty) {
if (C->getValue().getActiveBits() <= cast<IntegerType>(Ty)->getBitWidth())
return ConstantExpr::getTrunc(C, Ty);
}
- return 0;
+ return nullptr;
}
namespace {
@@ -788,7 +788,7 @@ struct UDivFoldAction {
};
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
- : FoldAction(FA), OperandToFold(InputOperand), FoldResult(0) {}
+ : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
: FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};
@@ -867,7 +867,8 @@ static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
if (size_t LHSIdx = visitUDivOperand(Op0, SI->getOperand(1), I, Actions))
if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions)) {
- Actions.push_back(UDivFoldAction((FoldUDivOperandCb)0, Op1, LHSIdx-1));
+ Actions.push_back(UDivFoldAction((FoldUDivOperandCb)nullptr, Op1,
+ LHSIdx-1));
return Actions.size();
}
@@ -930,7 +931,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
return Inst;
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
@@ -985,7 +986,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
/// CvtFDivConstToReciprocal tries to convert X/C into X*1/C if C is not a special
@@ -999,7 +1000,7 @@ static Instruction *CvtFDivConstToReciprocal(Value *Dividend,
Constant *Divisor,
bool AllowReciprocal) {
if (!isa<ConstantFP>(Divisor)) // TODO: handle vectors.
- return 0;
+ return nullptr;
const APFloat &FpVal = cast<ConstantFP>(Divisor)->getValueAPF();
APFloat Reciprocal(FpVal.getSemantics());
@@ -1012,7 +1013,7 @@ static Instruction *CvtFDivConstToReciprocal(Value *Dividend,
}
if (!Cvt)
- return 0;
+ return nullptr;
ConstantFP *R;
R = ConstantFP::get(Dividend->getType()->getContext(), Reciprocal);
@@ -1039,10 +1040,10 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
return R;
if (AllowReassociate) {
- Constant *C1 = 0;
+ Constant *C1 = nullptr;
Constant *C2 = Op1C;
Value *X;
- Instruction *Res = 0;
+ Instruction *Res = nullptr;
if (match(Op0, m_FMul(m_Value(X), m_Constant(C1)))) {
// (X*C1)/C2 => X * (C1/C2)
@@ -1073,12 +1074,12 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
return T;
}
- return 0;
+ return nullptr;
}
if (AllowReassociate && isa<Constant>(Op0)) {
Constant *C1 = cast<Constant>(Op0), *C2;
- Constant *Fold = 0;
+ Constant *Fold = nullptr;
Value *X;
bool CreateDiv = true;
@@ -1100,13 +1101,13 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
R->setFastMathFlags(I.getFastMathFlags());
return R;
}
- return 0;
+ return nullptr;
}
if (AllowReassociate) {
Value *X, *Y;
- Value *NewInst = 0;
- Instruction *SimpR = 0;
+ Value *NewInst = nullptr;
+ Instruction *SimpR = nullptr;
if (Op0->hasOneUse() && match(Op0, m_FDiv(m_Value(X), m_Value(Y)))) {
// (X/Y) / Z => X / (Y*Z)
@@ -1142,7 +1143,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
/// This function implements the transforms common to both integer remainder
@@ -1178,7 +1179,7 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitURem(BinaryOperator &I) {
@@ -1210,7 +1211,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Ext);
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
@@ -1252,7 +1253,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
bool hasMissing = false;
for (unsigned i = 0; i != VWidth; ++i) {
Constant *Elt = C->getAggregateElement(i);
- if (Elt == 0) {
+ if (!Elt) {
hasMissing = true;
break;
}
@@ -1281,7 +1282,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
@@ -1294,5 +1295,5 @@ Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
return &I;
- return 0;
+ return nullptr;
}
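Several of the visitMul hunks in this file touch the power-of-two strength reduction X*(2^C) ==> X << C driven by logBase2. A minimal standalone version of that idea (illustrative names):

    #include <cassert>
    #include <cstdint>

    bool isPowerOf2(uint32_t c) { return c != 0 && (c & (c - 1)) == 0; }

    // Position of the single set bit of a power of two.
    unsigned logBase2(uint32_t c) {
      unsigned n = 0;
      while (c >>= 1) ++n;
      return n;
    }

    int main() {
      uint32_t x = 12345;
      for (uint32_t c = 1; c != 0; c <<= 1) { // every power of two fits
        assert(isPowerOf2(c));
        assert(x * c == (x << logBase2(c))); // X*(2^C) == X << C mod 2^32
      }
      return 0;
    }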
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 292164271c..46f7b8a095 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -50,12 +50,12 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
// types.
I->getOperand(0)->getType() != LHSType ||
I->getOperand(1)->getType() != RHSType)
- return 0;
+ return nullptr;
// If they are CmpInst instructions, check their predicates
if (CmpInst *CI = dyn_cast<CmpInst>(I))
if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
- return 0;
+ return nullptr;
if (isNUW)
isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
@@ -65,8 +65,8 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
isExact = cast<PossiblyExactOperator>(I)->isExact();
// Keep track of which operand needs a phi node.
- if (I->getOperand(0) != LHSVal) LHSVal = 0;
- if (I->getOperand(1) != RHSVal) RHSVal = 0;
+ if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
+ if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
}
// If both LHS and RHS would need a PHI, don't do this transformation,
@@ -74,14 +74,14 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
// which leads to higher register pressure. This is especially
// bad when the PHIs are in the header of a loop.
if (!LHSVal && !RHSVal)
- return 0;
+ return nullptr;
// Otherwise, this is safe to transform!
Value *InLHS = FirstInst->getOperand(0);
Value *InRHS = FirstInst->getOperand(1);
- PHINode *NewLHS = 0, *NewRHS = 0;
- if (LHSVal == 0) {
+ PHINode *NewLHS = nullptr, *NewRHS = nullptr;
+ if (!LHSVal) {
NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
FirstInst->getOperand(0)->getName() + ".pn");
NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
@@ -89,7 +89,7 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
LHSVal = NewLHS;
}
- if (RHSVal == 0) {
+ if (!RHSVal) {
NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
FirstInst->getOperand(1)->getName() + ".pn");
NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
@@ -150,7 +150,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
GEP->getNumOperands() != FirstInst->getNumOperands())
- return 0;
+ return nullptr;
AllInBounds &= GEP->isInBounds();
@@ -172,19 +172,19 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
// for struct indices, which must always be constant.
if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
isa<ConstantInt>(GEP->getOperand(op)))
- return 0;
+ return nullptr;
if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
- return 0;
+ return nullptr;
// If we already needed a PHI for an earlier operand, and another operand
// also requires a PHI, we'd be introducing more PHIs than we're
// eliminating, which increases register pressure on entry to the PHI's
// block.
if (NeededPhi)
- return 0;
+ return nullptr;
- FixedOperands[op] = 0; // Needs a PHI.
+ FixedOperands[op] = nullptr; // Needs a PHI.
NeededPhi = true;
}
}
@@ -196,7 +196,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
// load up into the predecessors so that we have a load of a gep of an alloca,
// which can usually all be folded into the load.
if (AllBasePointersAreAllocas)
- return 0;
+ return nullptr;
// Otherwise, this is safe to transform. Insert PHI nodes for each operand
// that is variable.
@@ -290,7 +290,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
// FIXME: This is overconservative; this transform is allowed in some cases
// for atomic operations.
if (FirstLI->isAtomic())
- return 0;
+ return nullptr;
// When processing loads, we need to propagate two bits of information to the
// sunk load: whether it is volatile, and what its alignment is. We currently
@@ -305,20 +305,20 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
// load and the PHI.
if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
!isSafeAndProfitableToSinkLoad(FirstLI))
- return 0;
+ return nullptr;
// If the PHI is of volatile loads and the load block has multiple
// successors, sinking it would remove a load of the volatile value from
// the path through the other successor.
if (isVolatile &&
FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
+ return nullptr;
// Check to see if all arguments are the same operation.
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
if (!LI || !LI->hasOneUse())
- return 0;
+ return nullptr;
// We can't sink the load if the loaded value could be modified between
// the load and the PHI.
@@ -326,12 +326,12 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
LI->getParent() != PN.getIncomingBlock(i) ||
LI->getPointerAddressSpace() != LoadAddrSpace ||
!isSafeAndProfitableToSinkLoad(LI))
- return 0;
+ return nullptr;
// If some of the loads have an alignment specified but not all of them,
// we can't do the transformation.
if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
- return 0;
+ return nullptr;
LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
@@ -340,7 +340,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
// the path through the other successor.
if (isVolatile &&
LI->getParent()->getTerminator()->getNumSuccessors() != 1)
- return 0;
+ return nullptr;
}
// Okay, they are all the same operation. Create a new PHI node of the
@@ -356,7 +356,7 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
if (NewInVal != InVal)
- InVal = 0;
+ InVal = nullptr;
NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
}
@@ -400,8 +400,8 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// If all input operands to the phi are the same instruction (e.g. a cast from
// the same type or "+42") we can pull the operation through the PHI, reducing
// code size and simplifying code.
- Constant *ConstantOp = 0;
- Type *CastSrcTy = 0;
+ Constant *ConstantOp = nullptr;
+ Type *CastSrcTy = nullptr;
bool isNUW = false, isNSW = false, isExact = false;
if (isa<CastInst>(FirstInst)) {
@@ -411,13 +411,13 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// the code by turning an i32 into an i1293.
if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
if (!ShouldChangeType(PN.getType(), CastSrcTy))
- return 0;
+ return nullptr;
}
} else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
// Can fold binop, compare or shift here if the RHS is a constant,
// otherwise call FoldPHIArgBinOpIntoPHI.
ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
- if (ConstantOp == 0)
+ if (!ConstantOp)
return FoldPHIArgBinOpIntoPHI(PN);
if (OverflowingBinaryOperator *BO =
@@ -428,19 +428,19 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
dyn_cast<PossiblyExactOperator>(FirstInst))
isExact = PEO->isExact();
} else {
- return 0; // Cannot fold this operation.
+ return nullptr; // Cannot fold this operation.
}
// Check to see if all arguments are the same operation.
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
- if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
- return 0;
+ if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
+ return nullptr;
if (CastSrcTy) {
if (I->getOperand(0)->getType() != CastSrcTy)
- return 0; // Cast operation must match.
+ return nullptr; // Cast operation must match.
} else if (I->getOperand(1) != ConstantOp) {
- return 0;
+ return nullptr;
}
if (isNUW)
@@ -464,7 +464,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
if (NewInVal != InVal)
- InVal = 0;
+ InVal = nullptr;
NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
}
@@ -589,10 +589,10 @@ namespace llvm {
template<>
struct DenseMapInfo<LoweredPHIRecord> {
static inline LoweredPHIRecord getEmptyKey() {
- return LoweredPHIRecord(0, 0);
+ return LoweredPHIRecord(nullptr, 0);
}
static inline LoweredPHIRecord getTombstoneKey() {
- return LoweredPHIRecord(0, 1);
+ return LoweredPHIRecord(nullptr, 1);
}
static unsigned getHashValue(const LoweredPHIRecord &Val) {
return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
@@ -639,14 +639,14 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// bail out.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
- if (II == 0) continue;
+ if (!II) continue;
if (II->getParent() != PN->getIncomingBlock(i))
continue;
// If we have a phi, and if it's directly in the predecessor, then we have
// a critical edge where we need to put the truncate. Since we can't
// split the edge in instcombine, we have to bail out.
- return 0;
+ return nullptr;
}
for (User *U : PN->users()) {
@@ -669,7 +669,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
if (UserI->getOpcode() != Instruction::LShr ||
!UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
!isa<ConstantInt>(UserI->getOperand(1)))
- return 0;
+ return nullptr;
unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
@@ -707,7 +707,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
// If we've already lowered a user like this, reuse the previously lowered
// value.
- if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {
+ if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
// Otherwise, Create the new PHI node for this user.
EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
@@ -896,5 +896,5 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
return Res;
- return 0;
+ return nullptr;
}
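FoldPHIArgOpIntoPHI and its siblings above hoist an operation shared by every incoming value through the phi, so it is performed once instead of once per predecessor. The same reassociation at source level, with a ternary standing in for a two-predecessor phi (illustrative only):

    #include <cstdint>

    // Before: each incoming value carries its own truncation.
    int8_t before(bool c, int32_t a, int32_t b) {
      return c ? (int8_t)a : (int8_t)b;
    }

    // After: one truncation of the merged value.
    int8_t after(bool c, int32_t a, int32_t b) {
      return (int8_t)(c ? a : b);
    }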
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index a366d6c416..795e645840 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -26,10 +26,10 @@ using namespace PatternMatch;
static SelectPatternFlavor
MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
- if (SI == 0) return SPF_UNKNOWN;
+ if (!SI) return SPF_UNKNOWN;
ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
- if (ICI == 0) return SPF_UNKNOWN;
+ if (!ICI) return SPF_UNKNOWN;
LHS = ICI->getOperand(0);
RHS = ICI->getOperand(1);
@@ -131,15 +131,15 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
if (TI->isCast()) {
Type *FIOpndTy = FI->getOperand(0)->getType();
if (TI->getOperand(0)->getType() != FIOpndTy)
- return 0;
+ return nullptr;
// The select condition may be a vector. We may only change the operand
// type if the vector width remains the same (and matches the condition).
Type *CondTy = SI.getCondition()->getType();
if (CondTy->isVectorTy() && (!FIOpndTy->isVectorTy() ||
CondTy->getVectorNumElements() != FIOpndTy->getVectorNumElements()))
- return 0;
+ return nullptr;
} else {
- return 0; // unknown unary op.
+ return nullptr; // unknown unary op.
}
// Fold this by inserting a select from the input values.
@@ -151,7 +151,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
// Only handle binary operators here.
if (!isa<BinaryOperator>(TI))
- return 0;
+ return nullptr;
// Figure out if the operations have any operands in common.
Value *MatchOp, *OtherOpT, *OtherOpF;
@@ -167,7 +167,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
OtherOpF = FI->getOperand(0);
MatchIsOpZero = false;
} else if (!TI->isCommutative()) {
- return 0;
+ return nullptr;
} else if (TI->getOperand(0) == FI->getOperand(1)) {
MatchOp = TI->getOperand(0);
OtherOpT = TI->getOperand(1);
@@ -179,7 +179,7 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
OtherOpF = FI->getOperand(1);
MatchIsOpZero = true;
} else {
- return 0;
+ return nullptr;
}
// If we reach here, they do have operations in common.
@@ -284,7 +284,7 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
}
}
- return 0;
+ return nullptr;
}
/// SimplifyWithOpReplaced - See if V simplifies when its operand Op is
@@ -298,7 +298,7 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
Instruction *I = dyn_cast<Instruction>(V);
if (!I)
- return 0;
+ return nullptr;
// If this is a binary operator, try to simplify it with the replaced op.
if (BinaryOperator *B = dyn_cast<BinaryOperator>(I)) {
@@ -349,7 +349,7 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
}
}
- return 0;
+ return nullptr;
}
/// foldSelectICmpAndOr - We want to turn:
@@ -370,18 +370,18 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
InstCombiner::BuilderTy *Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy())
- return 0;
+ return nullptr;
Value *CmpLHS = IC->getOperand(0);
Value *CmpRHS = IC->getOperand(1);
if (!match(CmpRHS, m_Zero()))
- return 0;
+ return nullptr;
Value *X;
const APInt *C1;
if (!match(CmpLHS, m_And(m_Value(X), m_Power2(C1))))
- return 0;
+ return nullptr;
const APInt *C2;
bool OrOnTrueVal = false;
@@ -390,7 +390,7 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2)));
if (!OrOnFalseVal && !OrOnTrueVal)
- return 0;
+ return nullptr;
Value *V = CmpLHS;
Value *Y = OrOnFalseVal ? TrueVal : FalseVal;
@@ -529,7 +529,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
if (IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
if (TrueVal->getType() == Ty) {
if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
- ConstantInt *C1 = NULL, *C2 = NULL;
+ ConstantInt *C1 = nullptr, *C2 = nullptr;
if (Pred == ICmpInst::ICMP_SGT && Cmp->isAllOnesValue()) {
C1 = dyn_cast<ConstantInt>(TrueVal);
C2 = dyn_cast<ConstantInt>(FalseVal);
@@ -588,7 +588,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
if (Value *V = foldSelectICmpAndOr(SI, TrueVal, FalseVal, Builder))
return ReplaceInstUsesWith(SI, V);
- return Changed ? &SI : 0;
+ return Changed ? &SI : nullptr;
}
@@ -608,7 +608,7 @@ static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
// If the value is a non-instruction value like a constant or argument, it
// can always be mapped.
const Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return true;
+ if (!I) return true;
// If V is a PHI node defined in the same block as the condition PHI, we can
// map the arguments.
@@ -652,7 +652,7 @@ Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
}
// TODO: MIN(MIN(A, 23), 97)
- return 0;
+ return nullptr;
}
@@ -665,27 +665,27 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
InstCombiner::BuilderTy *Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy())
- return 0;
+ return nullptr;
if (!match(IC->getOperand(1), m_Zero()))
- return 0;
+ return nullptr;
ConstantInt *AndRHS;
Value *LHS = IC->getOperand(0);
if (!match(LHS, m_And(m_Value(), m_ConstantInt(AndRHS))))
- return 0;
+ return nullptr;
// If both select arms are non-zero see if we have a select of the form
// 'x ? 2^n + C : C'. Then we can offset both arms by C, use the logic
// for 'x ? 2^n : 0' and fix the thing up at the end.
- ConstantInt *Offset = 0;
+ ConstantInt *Offset = nullptr;
if (!TrueVal->isZero() && !FalseVal->isZero()) {
if ((TrueVal->getValue() - FalseVal->getValue()).isPowerOf2())
Offset = FalseVal;
else if ((FalseVal->getValue() - TrueVal->getValue()).isPowerOf2())
Offset = TrueVal;
else
- return 0;
+ return nullptr;
// Adjust TrueVal and FalseVal to the offset.
TrueVal = ConstantInt::get(Builder->getContext(),
@@ -698,7 +698,7 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
if (!AndRHS->getValue().isPowerOf2() ||
(!TrueVal->getValue().isPowerOf2() &&
!FalseVal->getValue().isPowerOf2()))
- return 0;
+ return nullptr;
// Determine which shift is needed to transform result of the 'and' into the
// desired result.
@@ -710,7 +710,7 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, ConstantInt *TrueVal,
// or a trunc of the 'and'. The trunc case requires that all of the truncated
// bits are zero, we can figure that out by looking at the 'and' mask.
if (AndZeros >= ValC->getBitWidth())
- return 0;
+ return nullptr;
Value *V = Builder->CreateZExtOrTrunc(LHS, SI.getType());
if (ValZeros > AndZeros)
@@ -868,7 +868,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
if (TI->hasOneUse() && FI->hasOneUse()) {
- Instruction *AddOp = 0, *SubOp = 0;
+ Instruction *AddOp = nullptr, *SubOp = nullptr;
// Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
if (TI->getOpcode() == FI->getOpcode())
@@ -890,7 +890,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
if (AddOp) {
- Value *OtherAddOp = 0;
+ Value *OtherAddOp = nullptr;
if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
OtherAddOp = AddOp->getOperand(1);
} else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
@@ -971,7 +971,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
if (TrueSI->getCondition() == CondVal) {
if (SI.getTrueValue() == TrueSI->getTrueValue())
- return 0;
+ return nullptr;
SI.setOperand(1, TrueSI->getTrueValue());
return &SI;
}
@@ -979,7 +979,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
if (FalseSI->getCondition() == CondVal) {
if (SI.getFalseValue() == FalseSI->getFalseValue())
- return 0;
+ return nullptr;
SI.setOperand(2, FalseSI->getFalseValue());
return &SI;
}
@@ -1007,5 +1007,5 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
}
- return 0;
+ return nullptr;
}
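foldSelectICmpAnd, rewritten above, turns a select keyed on a single tested bit into a shift of the 'and' itself. One concrete instance of the pattern, checked exhaustively over a byte (constants chosen for illustration):

    #include <cassert>
    #include <cstdint>

    // select ((x & 8) == 0), 0, 16  --->  (x & 8) << 1
    // The tested bit (value 8) is shifted into the result bit (value 16).
    int main() {
      for (uint32_t x = 0; x < 256; ++x)
        assert((((x & 8) != 0) ? 16u : 0u) == ((x & 8) << 1));
      return 0;
    }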
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 1f98a0d22e..5504cb2ef8 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -52,7 +52,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
return &I;
}
- return 0;
+ return nullptr;
}
/// CanEvaluateShifted - See if we can compute the specified value, but shifted
@@ -80,7 +80,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
// if the needed bits are already zero in the input. This allows us to reuse
// the value which means that we don't care if the shift has multiple uses.
// TODO: Handle opposite shift by exact value.
- ConstantInt *CI = 0;
+ ConstantInt *CI = nullptr;
if ((isLeftShift && match(I, m_LShr(m_Value(), m_ConstantInt(CI)))) ||
(!isLeftShift && match(I, m_Shl(m_Value(), m_ConstantInt(CI))))) {
if (CI->getZExtValue() == NumBits) {
@@ -117,7 +117,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::Shl: {
// We can often fold the shift into shifts-by-a-constant.
CI = dyn_cast<ConstantInt>(I->getOperand(1));
- if (CI == 0) return false;
+ if (!CI) return false;
// We can always fold shl(c1)+shl(c2) -> shl(c1+c2).
if (isLeftShift) return true;
@@ -141,7 +141,7 @@ static bool CanEvaluateShifted(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::LShr: {
// We can often fold the shift into shifts-by-a-constant.
CI = dyn_cast<ConstantInt>(I->getOperand(1));
- if (CI == 0) return false;
+ if (!CI) return false;
// We can always fold lshr(c1)+lshr(c2) -> lshr(c1+c2).
if (!isLeftShift) return true;
@@ -534,7 +534,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
// Find out if this is a shift of a shift by a constant.
BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
if (ShiftOp && !ShiftOp->isShift())
- ShiftOp = 0;
+ ShiftOp = nullptr;
if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
@@ -554,7 +554,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
uint32_t ShiftAmt2 = COp1->getLimitedValue(TypeBits);
assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
- if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
+ if (ShiftAmt1 == 0) return nullptr; // Will be simplified in the future.
Value *X = ShiftOp->getOperand(0);
IntegerType *Ty = cast<IntegerType>(I.getType());
@@ -682,7 +682,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
}
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitShl(BinaryOperator &I) {
@@ -720,7 +720,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
match(I.getOperand(1), m_Constant(C2)))
return BinaryOperator::CreateShl(ConstantExpr::getShl(C1, C2), A);
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
@@ -760,7 +760,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
@@ -816,5 +816,5 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
if (NumSignBits == Op0->getType()->getScalarSizeInBits())
return ReplaceInstUsesWith(I, Op0);
- return 0;
+ return nullptr;
}
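FoldShiftByConstant above handles shift-of-shift pairs; the simplest guarantee quoted in its comments, shl(c1)+shl(c2) -> shl(c1+c2), holds whenever the combined amount stays below the bit width. A quick check (illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0xDEADBEEF;
      for (unsigned c1 = 0; c1 < 32; ++c1)
        for (unsigned c2 = 0; c1 + c2 < 32; ++c2) // keep the total in range
          assert(((x << c1) << c2) == (x << (c1 + c2)));
      return 0;
    }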
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 248c5a0039..9f09584b45 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -58,7 +58,7 @@ bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
KnownZero, KnownOne, 0);
- if (V == 0) return false;
+ if (!V) return false;
if (V == &Inst) return true;
ReplaceInstUsesWith(Inst, V);
return true;
@@ -72,7 +72,7 @@ bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
unsigned Depth) {
Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
KnownZero, KnownOne, Depth);
- if (NewVal == 0) return false;
+ if (!NewVal) return false;
U = NewVal;
return true;
}
@@ -119,25 +119,25 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// We know all of the bits for a constant!
KnownOne = CI->getValue() & DemandedMask;
KnownZero = ~KnownOne & DemandedMask;
- return 0;
+ return nullptr;
}
if (isa<ConstantPointerNull>(V)) {
// We know all of the bits for a constant!
KnownOne.clearAllBits();
KnownZero = DemandedMask;
- return 0;
+ return nullptr;
}
KnownZero.clearAllBits();
KnownOne.clearAllBits();
if (DemandedMask == 0) { // Not demanding any bits from V.
if (isa<UndefValue>(V))
- return 0;
+ return nullptr;
return UndefValue::get(VTy);
}
if (Depth == 6) // Limit search depth.
- return 0;
+ return nullptr;
APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
@@ -145,7 +145,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
- return 0; // Only analyze instructions.
+ return nullptr; // Only analyze instructions.
}
// If there are multiple uses of this value and we aren't at the root, then
@@ -218,7 +218,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// Compute the KnownZero/KnownOne bits to simplify things downstream.
ComputeMaskedBits(I, KnownZero, KnownOne, Depth);
- return 0;
+ return nullptr;
}
// If this is the root being simplified, allow it to have multiple uses,
@@ -410,20 +410,20 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
case Instruction::BitCast:
if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
- return 0; // vector->int or fp->int?
+ return nullptr; // vector->int or fp->int?
if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
if (VectorType *SrcVTy =
dyn_cast<VectorType>(I->getOperand(0)->getType())) {
if (DstVTy->getNumElements() != SrcVTy->getNumElements())
// Don't touch a bitcast between vectors of different element counts.
- return 0;
+ return nullptr;
} else
// Don't touch a scalar-to-vector bitcast.
- return 0;
+ return nullptr;
} else if (I->getOperand(0)->getType()->isVectorTy())
// Don't touch a vector-to-scalar bitcast.
- return 0;
+ return nullptr;
if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
KnownZero, KnownOne, Depth+1))
@@ -811,7 +811,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
}
case Intrinsic::x86_sse42_crc32_64_64:
KnownZero = APInt::getHighBitsSet(64, 32);
- return 0;
+ return nullptr;
}
}
ComputeMaskedBits(V, KnownZero, KnownOne, Depth);
@@ -822,7 +822,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
// constant.
if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
return Constant::getIntegerValue(VTy, KnownOne);
- return 0;
+ return nullptr;
}
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
@@ -848,13 +848,13 @@ Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
const APInt &ShlOp1 = cast<ConstantInt>(Shl->getOperand(1))->getValue();
const APInt &ShrOp1 = cast<ConstantInt>(Shr->getOperand(1))->getValue();
if (!ShlOp1 || !ShrOp1)
- return 0; // Noop.
+ return nullptr; // Noop.
Value *VarX = Shr->getOperand(0);
Type *Ty = VarX->getType();
unsigned BitWidth = Ty->getIntegerBitWidth();
if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
- return 0; // Undef.
+ return nullptr; // Undef.
unsigned ShlAmt = ShlOp1.getZExtValue();
unsigned ShrAmt = ShrOp1.getZExtValue();
@@ -883,7 +883,7 @@ Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
return VarX;
if (!Shr->hasOneUse())
- return 0;
+ return nullptr;
BinaryOperator *New;
if (ShrAmt < ShlAmt) {
@@ -903,7 +903,7 @@ Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
return InsertNewInstWith(New, *Shl);
}
- return 0;
+ return nullptr;
}
/// SimplifyDemandedVectorElts - The specified value produces a vector with
@@ -924,7 +924,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (isa<UndefValue>(V)) {
// If the entire vector is undefined, just return this info.
UndefElts = EltMask;
- return 0;
+ return nullptr;
}
if (DemandedElts == 0) { // If nothing is demanded, provide undef.
@@ -939,7 +939,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// Check if this is identity. If so, return 0 since we are not simplifying
// anything.
if (DemandedElts.isAllOnesValue())
- return 0;
+ return nullptr;
Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
@@ -953,7 +953,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
Constant *Elt = C->getAggregateElement(i);
- if (Elt == 0) return 0;
+ if (!Elt) return nullptr;
if (isa<UndefValue>(Elt)) { // Already undef.
Elts.push_back(Undef);
@@ -965,12 +965,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// If we changed the constant, return it.
Constant *NewCV = ConstantVector::get(Elts);
- return NewCV != C ? NewCV : 0;
+ return NewCV != C ? NewCV : nullptr;
}
// Limit search depth.
if (Depth == 10)
- return 0;
+ return nullptr;
// If multiple users are using the root value, proceed with
// simplification conservatively assuming that all elements
@@ -981,14 +981,14 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// the main instcombine process.
if (Depth != 0)
// TODO: Just compute the UndefElts information recursively.
- return 0;
+ return nullptr;
// Conservatively assume that all elements are needed.
DemandedElts = EltMask;
}
Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return 0; // Only analyze instructions.
+ if (!I) return nullptr; // Only analyze instructions.
bool MadeChange = false;
APInt UndefElts2(VWidth, 0);
@@ -1000,7 +1000,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
// If this is a variable index, we don't know which element it overwrites.
// We demand exactly the same input as we produce.
ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
- if (Idx == 0) {
+ if (!Idx) {
// Note that we can't propagate undef elt info, because we don't know
// which elt is getting updated.
TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
@@ -1282,5 +1282,5 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
break;
}
}
- return MadeChange ? I : 0;
+ return MadeChange ? I : nullptr;
}
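The exit condition of SimplifyDemandedUseBits above, (DemandedMask & (KnownZero|KnownOne)) == DemandedMask, says that once every demanded bit is known the whole value collapses to a constant. A toy rendition of that test for y = x | 0xF0 (hypothetical variable names):

    #include <cassert>
    #include <cstdint>

    int main() {
      // For y = x | 0xF0: bits 4..7 are known one; nothing is known zero.
      uint32_t knownOne = 0xF0, knownZero = 0;
      uint32_t demanded = 0xF0;            // the caller only reads bits 4..7

      // Every demanded bit is known, so y may be replaced by a constant.
      assert((demanded & (knownZero | knownOne)) == demanded);
      uint32_t constant = knownOne & demanded;

      for (uint32_t x = 0; x < 4096; ++x)
        assert(((x | 0xF0) & demanded) == constant);
      return 0;
    }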
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 12837eed16..b63c3a6b26 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -75,7 +75,7 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
// If this is an insert to a variable element, we don't know what it is.
if (!isa<ConstantInt>(III->getOperand(2)))
- return 0;
+ return nullptr;
unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
// If this is an insert to the element we are looking for, return the
@@ -99,14 +99,14 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
}
// Extract a value from a vector add operation with a constant zero.
- Value *Val = 0; Constant *Con = 0;
+ Value *Val = nullptr; Constant *Con = nullptr;
if (match(V, m_Add(m_Value(Val), m_Constant(Con)))) {
if (Con->getAggregateElement(EltNo)->isNullValue())
return FindScalarElement(Val, EltNo);
}
// Otherwise, we don't know.
- return 0;
+ return nullptr;
}
// If we have a PHI node with a vector type that has only 2 uses: feed
@@ -115,7 +115,7 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
// Verify that the PHI node has exactly 2 uses. Otherwise return NULL.
if (!PN->hasNUses(2))
- return NULL;
+ return nullptr;
// If so, it's known at this point that one operand is PHI and the other is
// an extractelement node. Find the PHI user that is not the extractelement
@@ -130,7 +130,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
// otherwise return NULL.
if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
!(isa<BinaryOperator>(PHIUser)) || !CheapToScalarize(PHIUser, true))
- return NULL;
+ return nullptr;
// Create a scalar PHI node that will replace the vector PHI node
// just before the current PHI node.
@@ -320,7 +320,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
}
}
}
- return 0;
+ return nullptr;
}
/// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
@@ -442,7 +442,7 @@ static ShuffleOps CollectShuffleElements(Value *V,
// Either the extracted from or inserted into vector must be RHSVec,
// otherwise we'd end up with a shuffle of three inputs.
- if (EI->getOperand(0) == PermittedRHS || PermittedRHS == 0) {
+ if (EI->getOperand(0) == PermittedRHS || PermittedRHS == nullptr) {
Value *RHS = EI->getOperand(0);
ShuffleOps LR = CollectShuffleElements(VecOp, Mask, RHS);
assert(LR.second == 0 || LR.second == RHS);
@@ -525,13 +525,14 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
// (and any insertelements it points to), into one big shuffle.
if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.user_back())) {
SmallVector<Constant*, 16> Mask;
- ShuffleOps LR = CollectShuffleElements(&IE, Mask, 0);
+ ShuffleOps LR = CollectShuffleElements(&IE, Mask, nullptr);
// The proposed shuffle may be trivial, in which case we shouldn't
// perform the combine.
if (LR.first != &IE && LR.second != &IE) {
// We now have a shuffle of LHS, RHS, Mask.
- if (LR.second == 0) LR.second = UndefValue::get(LR.first->getType());
+ if (LR.second == nullptr)
+ LR.second = UndefValue::get(LR.first->getType());
return new ShuffleVectorInst(LR.first, LR.second,
ConstantVector::get(Mask));
}
@@ -548,7 +549,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
return &IE;
}
- return 0;
+ return nullptr;
}
/// Return true if we can evaluate the specified expression tree if the vector
@@ -934,16 +935,16 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
ShuffleVectorInst* RHSShuffle = dyn_cast<ShuffleVectorInst>(RHS);
if (LHSShuffle)
if (!isa<UndefValue>(LHSShuffle->getOperand(1)) && !isa<UndefValue>(RHS))
- LHSShuffle = NULL;
+ LHSShuffle = nullptr;
if (RHSShuffle)
if (!isa<UndefValue>(RHSShuffle->getOperand(1)))
- RHSShuffle = NULL;
+ RHSShuffle = nullptr;
if (!LHSShuffle && !RHSShuffle)
- return MadeChange ? &SVI : 0;
+ return MadeChange ? &SVI : nullptr;
- Value* LHSOp0 = NULL;
- Value* LHSOp1 = NULL;
- Value* RHSOp0 = NULL;
+ Value* LHSOp0 = nullptr;
+ Value* LHSOp1 = nullptr;
+ Value* RHSOp0 = nullptr;
unsigned LHSOp0Width = 0;
unsigned RHSOp0Width = 0;
if (LHSShuffle) {
@@ -975,11 +976,11 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// case 4
if (LHSOp0 == RHSOp0) {
newLHS = LHSOp0;
- newRHS = NULL;
+ newRHS = nullptr;
}
if (newLHS == LHS && newRHS == RHS)
- return MadeChange ? &SVI : 0;
+ return MadeChange ? &SVI : nullptr;
SmallVector<int, 16> LHSMask;
SmallVector<int, 16> RHSMask;
@@ -1039,7 +1040,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// If newRHS == newLHS, we want to remap any references from newRHS to
// newLHS so that we can properly identify splats that may occur due to
// obfuscation across the two vectors.
- if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
+ if (eltMask >= 0 && newRHS != nullptr && newLHS != newRHS)
eltMask += newLHSWidth;
}
@@ -1065,10 +1066,10 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
Elts.push_back(ConstantInt::get(Int32Ty, newMask[i]));
}
}
- if (newRHS == NULL)
+ if (!newRHS)
newRHS = UndefValue::get(newLHS->getType());
return new ShuffleVectorInst(newLHS, newRHS, ConstantVector::get(Elts));
}
- return MadeChange ? &SVI : 0;
+ return MadeChange ? &SVI : nullptr;
}
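FindScalarElement, touched at the top of this file, looks through insertelement chains so that extracting a just-written lane folds to the inserted scalar. A scalar analogue of that bookkeeping, with arrays standing in for IR vectors (purely illustrative):

    #include <array>
    #include <cassert>

    int main() {
      std::array<int, 4> v = {10, 20, 30, 40};

      std::array<int, 4> w = v; // "insertelement v, 99, 2"
      w[2] = 99;

      assert(w[2] == 99);   // extract of the written lane yields the scalar
      assert(w[1] == v[1]); // other lanes look through to the base vector
      return 0;
    }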
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index bc9a43fa7e..f0964e8d39 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -513,7 +513,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
}
}
- return 0;
+ return nullptr;
}
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
@@ -531,7 +531,7 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const {
if (C->getType()->getElementType()->isIntegerTy())
return ConstantExpr::getNeg(C);
- return 0;
+ return nullptr;
}
// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
@@ -550,7 +550,7 @@ Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
if (C->getType()->getElementType()->isFloatingPointTy())
return ConstantExpr::getFNeg(C);
- return 0;
+ return nullptr;
}
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
@@ -596,13 +596,13 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
// not have a second operand.
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
// Don't modify shared select instructions
- if (!SI->hasOneUse()) return 0;
+ if (!SI->hasOneUse()) return nullptr;
Value *TV = SI->getOperand(1);
Value *FV = SI->getOperand(2);
if (isa<Constant>(TV) || isa<Constant>(FV)) {
// Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isIntegerTy(1)) return 0;
+ if (SI->getType()->isIntegerTy(1)) return nullptr;
// If it's a bitcast involving vectors, make sure it has the same number of
// elements on both sides.
@@ -611,10 +611,10 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
// Verify that either both or neither are vectors.
- if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
+ if ((SrcTy == nullptr) != (DestTy == nullptr)) return nullptr;
// If vectors, verify that they have the same number of elements.
if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
- return 0;
+ return nullptr;
}
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
@@ -623,7 +623,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
return SelectInst::Create(SI->getCondition(),
SelectTrueVal, SelectFalseVal);
}
- return 0;
+ return nullptr;
}
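FoldOpIntoSelect, whose early-outs are rewritten above, distributes a single-use operation over both arms of a select so that the constant arm folds away. At source level (illustrative):

    #include <cstdint>

    // Before: the add sits after the select; neither arm folds on its own.
    int32_t before(bool c, int32_t x) {
      return (c ? 4 : x) + 16;
    }

    // After: the add is pushed into both arms; the constant arm becomes 20.
    int32_t after(bool c, int32_t x) {
      return c ? 20 : (x + 16);
    }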
@@ -635,7 +635,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
PHINode *PN = cast<PHINode>(I.getOperand(0));
unsigned NumPHIValues = PN->getNumIncomingValues();
if (NumPHIValues == 0)
- return 0;
+ return nullptr;
// We normally only transform phis with a single use. However, if a PHI has
// multiple uses and they are all the same operation, we can fold *all* of the
@@ -645,7 +645,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
for (User *U : PN->users()) {
Instruction *UI = cast<Instruction>(U);
if (UI != &I && !I.isIdenticalTo(UI))
- return 0;
+ return nullptr;
}
// Otherwise, we can replace *all* users with the new PHI we form.
}
@@ -655,14 +655,14 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
// remember the BB it is in. If there is more than one or if *it* is a PHI,
// bail out. We don't do arbitrary constant expressions here because moving
// their computation can be expensive without a cost model.
- BasicBlock *NonConstBB = 0;
+ BasicBlock *NonConstBB = nullptr;
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InVal = PN->getIncomingValue(i);
if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
continue;
- if (isa<PHINode>(InVal)) return 0; // Itself a phi.
- if (NonConstBB) return 0; // More than one non-const value.
+ if (isa<PHINode>(InVal)) return nullptr; // Itself a phi.
+ if (NonConstBB) return nullptr; // More than one non-const value.
NonConstBB = PN->getIncomingBlock(i);
@@ -670,22 +670,22 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
// insert a computation after it without breaking the edge.
if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
if (II->getParent() == NonConstBB)
- return 0;
+ return nullptr;
// If the incoming non-constant value is in I's block, we will remove one
// instruction, but insert another equivalent one, leading to infinite
// instcombine.
if (NonConstBB == I.getParent())
- return 0;
+ return nullptr;
}
// If there is exactly one non-constant value, we can insert a copy of the
// operation in that block. However, if this is a critical edge, we would be
// inserting the computation on some other paths (e.g. inside a loop). Only
// do this if the pred block is unconditionally branching into the phi block.
- if (NonConstBB != 0) {
+ if (NonConstBB != nullptr) {
BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
- if (!BI || !BI->isUnconditional()) return 0;
+ if (!BI || !BI->isUnconditional()) return nullptr;
}
// Okay, we can do the transformation: create the new PHI node.
@@ -709,7 +709,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
BasicBlock *ThisBB = PN->getIncomingBlock(i);
Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
- Value *InV = 0;
+ Value *InV = nullptr;
// Beware of ConstantExpr: it may eventually evaluate to getNullValue,
// even if currently isNullValue gives false.
Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i));
@@ -723,7 +723,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
} else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
Constant *C = cast<Constant>(I.getOperand(1));
for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = 0;
+ Value *InV = nullptr;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
else if (isa<ICmpInst>(CI))
@@ -737,7 +737,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
} else if (I.getNumOperands() == 2) {
Constant *C = cast<Constant>(I.getOperand(1));
for (unsigned i = 0; i != NumPHIValues; ++i) {
- Value *InV = 0;
+ Value *InV = nullptr;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::get(I.getOpcode(), InC, C);
else
@@ -777,11 +777,11 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
assert(PtrTy->isPtrOrPtrVectorTy());
if (!DL)
- return 0;
+ return nullptr;
Type *Ty = PtrTy->getPointerElementType();
if (!Ty->isSized())
- return 0;
+ return nullptr;
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
@@ -807,7 +807,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
while (Offset) {
// Indexing into tail padding between struct/array elements.
if (uint64_t(Offset*8) >= DL->getTypeSizeInBits(Ty))
- return 0;
+ return nullptr;
if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = DL->getStructLayout(STy);
@@ -828,7 +828,7 @@ Type *InstCombiner::FindElementAtOffset(Type *PtrTy, int64_t Offset,
Ty = AT->getElementType();
} else {
// Otherwise, we can't index into the middle of this atomic type, bail.
- return 0;
+ return nullptr;
}
}
@@ -860,7 +860,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// If Scale is zero then it does not divide Val.
if (Scale.isMinValue())
- return 0;
+ return nullptr;
// Look through chains of multiplications, searching for a constant that is
// divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4
@@ -903,7 +903,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder);
if (!Remainder.isMinValue())
// Not divisible by Scale.
- return 0;
+ return nullptr;
// Replace with the quotient in the parent.
Op = ConstantInt::get(CI->getType(), Quotient);
NoSignedWrap = true;
@@ -916,7 +916,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Multiplication.
NoSignedWrap = BO->hasNoSignedWrap();
if (RequireNoSignedWrap && !NoSignedWrap)
- return 0;
+ return nullptr;
// There are three cases for multiplication: multiplication by exactly
// the scale, multiplication by a constant different to the scale, and
@@ -935,7 +935,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Otherwise drill down into the constant.
if (!Op->hasOneUse())
- return 0;
+ return nullptr;
Parent = std::make_pair(BO, 1);
continue;
@@ -944,7 +944,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Multiplication by something else. Drill down into the left-hand side
// since that's where the reassociate pass puts the good stuff.
if (!Op->hasOneUse())
- return 0;
+ return nullptr;
Parent = std::make_pair(BO, 0);
continue;
@@ -955,7 +955,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Multiplication by a power of 2.
NoSignedWrap = BO->hasNoSignedWrap();
if (RequireNoSignedWrap && !NoSignedWrap)
- return 0;
+ return nullptr;
Value *LHS = BO->getOperand(0);
int32_t Amt = cast<ConstantInt>(BO->getOperand(1))->
@@ -969,7 +969,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
break;
}
if (Amt < logScale || !Op->hasOneUse())
- return 0;
+ return nullptr;
// Multiplication by more than the scale. Reduce the multiplying amount
// by the scale in the parent.
@@ -980,7 +980,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
}
if (!Op->hasOneUse())
- return 0;
+ return nullptr;
if (CastInst *Cast = dyn_cast<CastInst>(Op)) {
if (Cast->getOpcode() == Instruction::SExt) {
@@ -994,7 +994,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Scale and the multiplication Y * SmallScale should not overflow.
if (SmallScale.sext(Scale.getBitWidth()) != Scale)
// SmallScale does not sign-extend to Scale.
- return 0;
+ return nullptr;
assert(SmallScale.exactLogBase2() == logScale);
// Require that Y * SmallScale must not overflow.
RequireNoSignedWrap = true;
@@ -1013,7 +1013,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// trunc (Y * sext Scale) does not, so nsw flags need to be cleared
// from this point up in the expression (see later).
if (RequireNoSignedWrap)
- return 0;
+ return nullptr;
// Drill down through the cast.
unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits();
@@ -1027,7 +1027,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
}
// Unsupported expression, bail out.
- return 0;
+ return nullptr;
}
// We know that we can successfully descale, so from here on we can safely
@@ -1131,7 +1131,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
//
if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
- return 0;
+ return nullptr;
// Note that if our source is a gep chain itself then we wait for that
// chain to be resolved before we perform this transformation. This
@@ -1139,7 +1139,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (GEPOperator *SrcGEP =
dyn_cast<GEPOperator>(Src->getOperand(0)))
if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
- return 0; // Wait until our source is folded to completion.
+ return nullptr; // Wait until our source is folded to completion.
SmallVector<Value*, 8> Indices;
@@ -1167,7 +1167,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// intptr_t). Just avoid transforming this until the input has been
// normalized.
if (SO1->getType() != GO1->getType())
- return 0;
+ return nullptr;
Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
}
@@ -1217,7 +1217,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// We do not handle pointer-vector geps here.
if (!StrippedPtrTy)
- return 0;
+ return nullptr;
if (StrippedPtr != PtrOp) {
bool HasZeroPointerIndex = false;
@@ -1385,7 +1385,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
if (!DL)
- return 0;
+ return nullptr;
/// See if we can simplify:
/// X = bitcast A* to B*
@@ -1437,7 +1437,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
}
}
- return 0;
+ return nullptr;
}
static bool
@@ -1552,7 +1552,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
}
return EraseInstFromFunction(MI);
}
- return 0;
+ return nullptr;
}
/// \brief Move the call to free before a NULL test.
@@ -1581,30 +1581,30 @@ tryToMoveFreeBeforeNullTest(CallInst &FI) {
// would duplicate the call to free in each predecessor and it may
// not be profitable even for code size.
if (!PredBB)
- return 0;
+ return nullptr;
// Validate constraint #2: Does this block contain only the call to
// free and an unconditional branch?
// FIXME: We could check if we can speculate everything in the
// predecessor block
if (FreeInstrBB->size() != 2)
- return 0;
+ return nullptr;
BasicBlock *SuccBB;
if (!match(FreeInstrBB->getTerminator(), m_UnconditionalBr(SuccBB)))
- return 0;
+ return nullptr;
// Validate the rest of constraint #1 by matching on the pred branch.
TerminatorInst *TI = PredBB->getTerminator();
BasicBlock *TrueBB, *FalseBB;
ICmpInst::Predicate Pred;
if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Op), m_Zero()), TrueBB, FalseBB)))
- return 0;
+ return nullptr;
if (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
- return 0;
+ return nullptr;
// Validate constraint #3: Ensure the null case just falls through.
if (SuccBB != (Pred == ICmpInst::ICMP_EQ ? TrueBB : FalseBB))
- return 0;
+ return nullptr;
assert(FreeInstrBB == (Pred == ICmpInst::ICMP_EQ ? FalseBB : TrueBB) &&
"Broken CFG: missing edge from predecessor to successor");
@@ -1639,14 +1639,14 @@ Instruction *InstCombiner::visitFree(CallInst &FI) {
if (Instruction *I = tryToMoveFreeBeforeNullTest(FI))
return I;
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
// Change br (not X), label True, label False to: br X, label False, True
- Value *X = 0;
+ Value *X = nullptr;
BasicBlock *TrueDest;
BasicBlock *FalseDest;
if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
@@ -1689,7 +1689,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
return &BI;
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
@@ -1713,7 +1713,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
return &SI;
}
}
- return 0;
+ return nullptr;
}
Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
@@ -1730,7 +1730,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// first index
return ExtractValueInst::Create(C2, EV.getIndices().slice(1));
}
- return 0; // Can't handle other constants
+ return nullptr; // Can't handle other constants
}
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
@@ -1863,7 +1863,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// and if again single-use then via load (gep (gep)) to load (gep).
// However, double extracts from e.g. function arguments or return values
// aren't handled yet.
- return 0;
+ return nullptr;
}
enum Personality_Type {
@@ -2202,7 +2202,7 @@ Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
return &LI;
}
- return 0;
+ return nullptr;
}
@@ -2295,7 +2295,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
- if (CE == 0) continue;
+ if (CE == nullptr) continue;
Constant*& FoldRes = FoldedConstants[CE];
if (!FoldRes)
@@ -2399,7 +2399,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
while (!Worklist.isEmpty()) {
Instruction *I = Worklist.RemoveOne();
- if (I == 0) continue; // skip null values.
+ if (I == nullptr) continue; // skip null values.
// Check to see if we can DCE the instruction.
if (isInstructionTriviallyDead(I, TLI)) {
@@ -2541,7 +2541,7 @@ bool InstCombiner::runOnFunction(Function &F) {
return false;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// Minimizing size?
MinimizeSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
@@ -2568,7 +2568,7 @@ bool InstCombiner::runOnFunction(Function &F) {
while (DoOneIteration(F, Iteration++))
EverMadeChange = true;
- Builder = 0;
+ Builder = nullptr;
return EverMadeChange;
}
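Note on the InstructionCombining.cpp hunks above: a null return from these visitors means "no rewrite performed". A minimal standalone sketch, not part of this patch and with illustrative names, of why nullptr is the safer sentinel than the literal 0: the literal is an int first and a pointer only by conversion, so it can silently select integer overloads.

    #include <cstdio>
    void report(int)    { std::puts("int overload"); }
    void report(char *) { std::puts("pointer overload"); }
    int main() {
      report(0);        // binds to report(int), even if a null pointer was meant
      report(nullptr);  // unambiguously binds to report(char *)
    }

nullptr has its own type, std::nullptr_t, which converts to any pointer type but never to an integer, which is exactly what a "no instruction" sentinel wants.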
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 27e5c83a4d..66d6ea0d65 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -606,7 +606,7 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) {
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(MI);
- Instruction *Call = 0;
+ Instruction *Call = nullptr;
if (isa<MemTransferInst>(MI)) {
Call = IRB.CreateCall3(
isa<MemMoveInst>(MI) ? AsanMemmove : AsanMemcpy,
@@ -628,26 +628,26 @@ void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
// and set IsWrite. Otherwise return nullptr.
static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
- if (!ClInstrumentReads) return NULL;
+ if (!ClInstrumentReads) return nullptr;
*IsWrite = false;
return LI->getPointerOperand();
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (!ClInstrumentWrites) return NULL;
+ if (!ClInstrumentWrites) return nullptr;
*IsWrite = true;
return SI->getPointerOperand();
}
if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
- if (!ClInstrumentAtomics) return NULL;
+ if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
return RMW->getPointerOperand();
}
if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
- if (!ClInstrumentAtomics) return NULL;
+ if (!ClInstrumentAtomics) return nullptr;
*IsWrite = true;
return XCHG->getPointerOperand();
}
- return NULL;
+ return nullptr;
}
static bool isPointerOperand(Value *V) {
@@ -732,7 +732,7 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
// Instrument a 1-, 2-, 4-, 8-, or 16-byte access with one check.
if (TypeSize == 8 || TypeSize == 16 ||
TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
- return instrumentAddress(I, I, Addr, TypeSize, IsWrite, 0, UseCalls);
+ return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
// Instrument unusual size (but still multiple of 8).
// We cannot do it with a single check, so we do a 1-byte check for the first
// and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
@@ -821,7 +821,7 @@ void AddressSanitizer::instrumentAddress(Instruction *OrigIns,
Value *Cmp = IRB.CreateICmpNE(ShadowValue, CmpVal);
size_t Granularity = 1 << Mapping.Scale;
- TerminatorInst *CrashTerm = 0;
+ TerminatorInst *CrashTerm = nullptr;
if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) {
TerminatorInst *CheckTerm =
@@ -1341,7 +1341,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
}
}
- Function *UninstrumentedDuplicate = 0;
+ Function *UninstrumentedDuplicate = nullptr;
bool LikelyToInstrument =
!NoReturnCalls.empty() || !ToInstrument.empty() || (NumAllocas > 0);
if (ClKeepUninstrumented && LikelyToInstrument) {
@@ -1685,7 +1685,7 @@ void FunctionStackPoisoner::poisonAlloca(Value *V, uint64_t Size,
AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(V))
// We're interested only in allocas we can handle.
- return isInterestingAlloca(*AI) ? AI : 0;
+ return isInterestingAlloca(*AI) ? AI : nullptr;
// See if we've already calculated (or started to calculate) alloca for a
// given value.
AllocaForValueMapTy::iterator I = AllocaForValue.find(V);
@@ -1693,8 +1693,8 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
return I->second;
// Store nullptr while we're calculating the alloca for value V to avoid
// infinite recursion if the value references itself.
- AllocaForValue[V] = 0;
- AllocaInst *Res = 0;
+ AllocaForValue[V] = nullptr;
+ AllocaInst *Res = nullptr;
if (CastInst *CI = dyn_cast<CastInst>(V))
Res = findAllocaForValue(CI->getOperand(0));
else if (PHINode *PN = dyn_cast<PHINode>(V)) {
@@ -1704,12 +1704,12 @@ AllocaInst *FunctionStackPoisoner::findAllocaForValue(Value *V) {
if (IncValue == PN) continue;
AllocaInst *IncValueAI = findAllocaForValue(IncValue);
// AI for incoming values should exist and should all be equal.
- if (IncValueAI == 0 || (Res != 0 && IncValueAI != Res))
- return 0;
+ if (IncValueAI == nullptr || (Res != nullptr && IncValueAI != Res))
+ return nullptr;
Res = IncValueAI;
}
}
- if (Res != 0)
+ if (Res)
AllocaForValue[V] = Res;
return Res;
}
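Note on the last AddressSanitizer.cpp hunk: besides the spelled-out replacements, `if (Res != 0)` becomes plain `if (Res)`, relying on the contextual conversion of pointers to bool. A small sketch with hypothetical names:

    struct Alloca {};
    Alloca *pick(bool Found, Alloca *Candidate) {
      Alloca *Res = Found ? Candidate : nullptr;
      if (Res)          // same meaning as Res != nullptr
        return Res;
      return nullptr;
    }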
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index fcf3a50a25..9a5cea81f3 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -62,7 +62,7 @@ namespace {
BasicBlock *TrapBB;
BasicBlock *getTrapBB();
- void emitBranchToTrap(Value *Cmp = 0);
+ void emitBranchToTrap(Value *Cmp = nullptr);
bool instrument(Value *Ptr, Value *Val);
};
}
@@ -104,7 +104,7 @@ void BoundsChecking::emitBranchToTrap(Value *Cmp) {
if (!C->getZExtValue())
return;
else
- Cmp = 0; // unconditional branch
+ Cmp = nullptr; // unconditional branch
}
++ChecksAdded;
@@ -168,7 +168,7 @@ bool BoundsChecking::runOnFunction(Function &F) {
DL = &getAnalysis<DataLayoutPass>().getDataLayout();
TLI = &getAnalysis<TargetLibraryInfo>();
- TrapBB = 0;
+ TrapBB = nullptr;
BuilderTy TheBuilder(F.getContext(), TargetFolder(DL));
Builder = &TheBuilder;
ObjectSizeOffsetEvaluator TheObjSizeEval(DL, TLI, F.getContext(),
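Note on BoundsChecking.cpp: `emitBranchToTrap(Value *Cmp = nullptr)` is the default-argument form of the change. A self-contained sketch, with names invented for illustration rather than taken from this pass:

    #include <cstdio>
    void emitBranch(int *Cond = nullptr);  // the default lives on the declaration
    void emitBranch(int *Cond) {
      if (!Cond) std::puts("unconditional branch");
      else       std::puts("conditional branch");
    }
    int main() { int C = 1; emitBranch(); emitBranch(&C); }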
diff --git a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index b90c7b74a6..5094771450 100644
--- a/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -211,7 +211,8 @@ class DataFlowSanitizer : public ModulePass {
public:
DataFlowSanitizer(StringRef ABIListFile = StringRef(),
- void *(*getArgTLS)() = 0, void *(*getRetValTLS)() = 0);
+ void *(*getArgTLS)() = nullptr,
+ void *(*getRetValTLS)() = nullptr);
static char ID;
bool doInitialization(Module &M) override;
bool runOnModule(Module &M) override;
@@ -233,8 +234,8 @@ struct DFSanFunction {
DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
: DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
- IsNativeABI(IsNativeABI), ArgTLSPtr(0), RetvalTLSPtr(0),
- LabelReturnAlloca(0) {}
+ IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
+ LabelReturnAlloca(nullptr) {}
Value *getArgTLSPtr();
Value *getArgTLS(unsigned Index, Instruction *Pos);
Value *getRetvalTLS();
@@ -303,7 +304,7 @@ FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
ArgTypes.push_back(ShadowPtrTy);
Type *RetType = T->getReturnType();
if (!RetType->isVoidTy())
- RetType = StructType::get(RetType, ShadowTy, (Type *)0);
+ RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr);
return FunctionType::get(RetType, ArgTypes, T->isVarArg());
}
@@ -373,18 +374,20 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
if (GetArgTLSPtr) {
Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
- ArgTLS = 0;
+ ArgTLS = nullptr;
GetArgTLS = ConstantExpr::getIntToPtr(
ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
PointerType::getUnqual(
- FunctionType::get(PointerType::getUnqual(ArgTLSTy), (Type *)0)));
+ FunctionType::get(PointerType::getUnqual(ArgTLSTy),
+ (Type *)nullptr)));
}
if (GetRetvalTLSPtr) {
- RetvalTLS = 0;
+ RetvalTLS = nullptr;
GetRetvalTLS = ConstantExpr::getIntToPtr(
ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
PointerType::getUnqual(
- FunctionType::get(PointerType::getUnqual(ShadowTy), (Type *)0)));
+ FunctionType::get(PointerType::getUnqual(ShadowTy),
+ (Type *)nullptr)));
}
ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
@@ -629,7 +632,7 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
// function... yet.
} else if (FT->isVarArg()) {
UnwrappedFnMap[&F] = &F;
- *i = 0;
+ *i = nullptr;
} else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
// Build a wrapper function for F. The wrapper simply calls F, and is
// added to FnsToInstrument so that any instrumentation according to its
@@ -1312,7 +1315,7 @@ void DFSanVisitor::visitCallSite(CallSite CS) {
}
}
- Instruction *Next = 0;
+ Instruction *Next = nullptr;
if (!CS.getType()->isVoidTy()) {
if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
if (II->getNormalDest()->getSinglePredecessor()) {
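Note on the `(Type *)nullptr` casts in DataFlowSanitizer.cpp: these calls end a C-style variadic argument list with a null terminator, and the callee reads the list back with va_arg at a concrete pointer type, so the terminator should be passed at that type as well. A generic sketch of the convention, using a hypothetical function rather than LLVM's API:

    #include <cstdarg>
    #include <cstdio>
    // Prints C strings until it reaches a null terminator argument.
    void printAll(const char *First, ...) {
      va_list AP;
      va_start(AP, First);
      for (const char *S = First; S; S = va_arg(AP, const char *))
        std::printf("%s\n", S);
      va_end(AP);
    }
    int main() {
      printAll("a", "b", (const char *)nullptr);  // cast matches va_arg's type
    }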
diff --git a/lib/Transforms/Instrumentation/DebugIR.cpp b/lib/Transforms/Instrumentation/DebugIR.cpp
index ff57834b19..18bda1a320 100644
--- a/lib/Transforms/Instrumentation/DebugIR.cpp
+++ b/lib/Transforms/Instrumentation/DebugIR.cpp
@@ -118,7 +118,7 @@ public:
void visitInstruction(Instruction &I) {
if (I.getMetadata(LLVMContext::MD_dbg))
- I.setMetadata(LLVMContext::MD_dbg, 0);
+ I.setMetadata(LLVMContext::MD_dbg, nullptr);
}
void run(Module *M) {
@@ -168,11 +168,11 @@ class DIUpdater : public InstVisitor<DIUpdater> {
public:
DIUpdater(Module &M, StringRef Filename = StringRef(),
- StringRef Directory = StringRef(), const Module *DisplayM = 0,
- const ValueToValueMapTy *VMap = 0)
+ StringRef Directory = StringRef(), const Module *DisplayM = nullptr,
+ const ValueToValueMapTy *VMap = nullptr)
: Builder(M), Layout(&M), LineTable(DisplayM ? DisplayM : &M), VMap(VMap),
- Finder(), Filename(Filename), Directory(Directory), FileNode(0),
- LexicalBlockFileNode(0), CUNode(0) {
+ Finder(), Filename(Filename), Directory(Directory), FileNode(nullptr),
+ LexicalBlockFileNode(nullptr), CUNode(nullptr) {
Finder.processModule(M);
visit(&M);
}
@@ -184,7 +184,7 @@ public:
report_fatal_error("DebugIR pass supports only a signle compile unit per "
"Module.");
createCompileUnit(Finder.compile_unit_count() == 1 ?
- (MDNode*)*Finder.compile_units().begin() : 0);
+ (MDNode*)*Finder.compile_units().begin() : nullptr);
}
void visitFunction(Function &F) {
@@ -232,7 +232,7 @@ public:
/// If a ValueToValueMap is provided, use it to get the real instruction as
/// the line table was generated on a clone of the module on which we are
/// operating.
- Value *RealInst = 0;
+ Value *RealInst = nullptr;
if (VMap)
RealInst = VMap->lookup(&I);
@@ -256,7 +256,7 @@ public:
NewLoc = DebugLoc::get(Line, Col, Loc.getScope(RealInst->getContext()),
Loc.getInlinedAt(RealInst->getContext()));
else if (MDNode *scope = findScope(&I))
- NewLoc = DebugLoc::get(Line, Col, scope, 0);
+ NewLoc = DebugLoc::get(Line, Col, scope, nullptr);
else {
DEBUG(dbgs() << "WARNING: no valid scope for instruction " << &I
<< ". no DebugLoc will be present."
@@ -334,7 +334,7 @@ private:
}
DEBUG(dbgs() << "unable to find DISubprogram node for function "
<< F->getName().str() << "\n");
- return 0;
+ return nullptr;
}
/// Sets Line to the line number on which V appears and returns true. If a
@@ -366,7 +366,7 @@ private:
TypeNodeIter i = TypeDescriptors.find(T);
if (i != TypeDescriptors.end())
return i->second;
- return 0;
+ return nullptr;
}
/// Returns a DebugInfo type from an LLVM type T.
@@ -375,12 +375,12 @@ private:
if (N)
return DIDerivedType(N);
else if (T->isVoidTy())
- return DIDerivedType(0);
+ return DIDerivedType(nullptr);
else if (T->isStructTy()) {
N = Builder.createStructType(
DIScope(LexicalBlockFileNode), T->getStructName(), DIFile(FileNode),
0, Layout.getTypeSizeInBits(T), Layout.getABITypeAlignment(T), 0,
- DIType(0), DIArray(0)); // filled in later
+ DIType(nullptr), DIArray(nullptr)); // filled in later
// N is added to the map (early) so that element search below can find it,
// so as to avoid infinite recursion for structs that contain pointers to
@@ -535,7 +535,7 @@ void DebugIR::writeDebugBitcode(const Module *M, int *fd) {
Out.reset(new raw_fd_ostream(*fd, true));
}
- M->print(*Out, 0);
+ M->print(*Out, nullptr);
Out->close();
}
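Note on the DebugIR.cpp hunks: `I.setMetadata(LLVMContext::MD_dbg, nullptr)` uses the "null detaches" convention, where storing a null node removes the entry instead of attaching a null one. A sketch of that convention with stand-in types, not LLVM's actual implementation:

    #include <map>
    struct Node {};
    struct Inst {
      std::map<unsigned, Node *> MD;
      void setMetadata(unsigned Kind, Node *N) {
        if (N) MD[Kind] = N;
        else   MD.erase(Kind);  // null means "remove this kind"
      }
    };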
diff --git a/lib/Transforms/Instrumentation/DebugIR.h b/lib/Transforms/Instrumentation/DebugIR.h
index 3f57da570e..02831eda2d 100644
--- a/lib/Transforms/Instrumentation/DebugIR.h
+++ b/lib/Transforms/Instrumentation/DebugIR.h
@@ -90,7 +90,7 @@ private:
/// Write M to disk, optionally passing in an fd to an open file which is
/// closed by this function after writing. If no fd is specified, a new file
/// is opened, written, and closed.
- void writeDebugBitcode(const llvm::Module *M, int *fd = 0);
+ void writeDebugBitcode(const llvm::Module *M, int *fd = nullptr);
};
} // llvm namespace
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index f3a8bf7a3a..98d372b3f0 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -215,8 +215,8 @@ class MemorySanitizer : public FunctionPass {
StringRef BlacklistFile = StringRef())
: FunctionPass(ID),
TrackOrigins(std::max(TrackOrigins, (int)ClTrackOrigins)),
- DL(0),
- WarningFn(0),
+ DL(nullptr),
+ WarningFn(nullptr),
BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile : BlacklistFile),
WrapIndirectCalls(!ClWrapIndirectCalls.empty()) {}
const char *getPassName() const override { return "MemorySanitizer"; }
@@ -371,31 +371,32 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
// Create globals.
RetvalTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 8), false,
- GlobalVariable::ExternalLinkage, 0, "__msan_retval_tls", 0,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
RetvalOriginTLS = new GlobalVariable(
- M, OriginTy, false, GlobalVariable::ExternalLinkage, 0,
- "__msan_retval_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
+ M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr,
+ "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
ParamTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
- GlobalVariable::ExternalLinkage, 0, "__msan_param_tls", 0,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
ParamOriginTLS = new GlobalVariable(
M, ArrayType::get(OriginTy, 1000), false, GlobalVariable::ExternalLinkage,
- 0, "__msan_param_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
+ nullptr, "__msan_param_origin_tls", nullptr,
+ GlobalVariable::InitialExecTLSModel);
VAArgTLS = new GlobalVariable(
M, ArrayType::get(IRB.getInt64Ty(), 1000), false,
- GlobalVariable::ExternalLinkage, 0, "__msan_va_arg_tls", 0,
+ GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
VAArgOverflowSizeTLS = new GlobalVariable(
- M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, 0,
- "__msan_va_arg_overflow_size_tls", 0,
+ M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
+ "__msan_va_arg_overflow_size_tls", nullptr,
GlobalVariable::InitialExecTLSModel);
OriginTLS = new GlobalVariable(
- M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, 0,
- "__msan_origin_tls", 0, GlobalVariable::InitialExecTLSModel);
+ M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr,
+ "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel);
// We insert an empty inline asm after __msan_report* to avoid callback merge.
EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
@@ -412,11 +413,11 @@ void MemorySanitizer::initializeCallbacks(Module &M) {
if (ClWrapIndirectCallsFast) {
MsandrModuleStart = new GlobalVariable(
M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
- 0, "__executable_start");
+ nullptr, "__executable_start");
MsandrModuleStart->setVisibility(GlobalVariable::HiddenVisibility);
MsandrModuleEnd = new GlobalVariable(
M, IRB.getInt32Ty(), false, GlobalValue::ExternalLinkage,
- 0, "_end");
+ nullptr, "_end");
MsandrModuleEnd->setVisibility(GlobalVariable::HiddenVisibility);
}
}
@@ -734,7 +735,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
for (size_t i = 0, n = ShadowPHINodes.size(); i < n; i++) {
PHINode *PN = ShadowPHINodes[i];
PHINode *PNS = cast<PHINode>(getShadow(PN));
- PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : 0;
+ PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
size_t NumValues = PN->getNumIncomingValues();
for (size_t v = 0; v < NumValues; v++) {
PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
@@ -770,7 +771,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Compute the shadow type that corresponds to a given Type.
Type *getShadowTy(Type *OrigTy) {
if (!OrigTy->isSized()) {
- return 0;
+ return nullptr;
}
// For integer type, shadow is the same as the original type.
// This may return weird-sized types like i1.
@@ -850,7 +851,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Compute the origin address for a given function argument.
Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
int ArgOffset) {
- if (!MS.TrackOrigins) return 0;
+ if (!MS.TrackOrigins) return nullptr;
Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
@@ -891,7 +892,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Constant *getCleanShadow(Value *V) {
Type *ShadowTy = getShadowTy(V);
if (!ShadowTy)
- return 0;
+ return nullptr;
return Constant::getNullValue(ShadowTy);
}
@@ -911,7 +912,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
Constant *getPoisonedShadow(Value *V) {
Type *ShadowTy = getShadowTy(V);
if (!ShadowTy)
- return 0;
+ return nullptr;
return getPoisonedShadow(ShadowTy);
}
@@ -1002,7 +1003,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Get the origin for a value.
Value *getOrigin(Value *V) {
- if (!MS.TrackOrigins) return 0;
+ if (!MS.TrackOrigins) return nullptr;
if (isa<Instruction>(V) || isa<Argument>(V)) {
Value *Origin = OriginMap[V];
if (!Origin) {
@@ -1300,7 +1301,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
public:
Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) :
- Shadow(0), Origin(0), IRB(IRB), MSV(MSV) {}
+ Shadow(nullptr), Origin(nullptr), IRB(IRB), MSV(MSV) {}
/// \brief Add a pair of shadow and origin values to the mix.
Combiner &Add(Value *OpShadow, Value *OpOrigin) {
@@ -1331,7 +1332,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
/// \brief Add an application value to the mix.
Combiner &Add(Value *V) {
Value *OpShadow = MSV->getShadow(V);
- Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : 0;
+ Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
return Add(OpShadow, OpOrigin);
}
@@ -1546,7 +1547,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
void handleSignedRelationalComparison(ICmpInst &I) {
Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
- Value* op = NULL;
+ Value* op = nullptr;
CmpInst::Predicate pre = I.getPredicate();
if (constOp0 && constOp0->isNullValue() &&
(pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE)) {
@@ -1855,7 +1856,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
break;
case 1:
ConvertOp = I.getArgOperand(0);
- CopyOp = NULL;
+ CopyOp = nullptr;
break;
default:
llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
@@ -1869,7 +1870,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// FIXME: consider propagating shadow of ConvertOp, at least in the case of
// int->any conversion.
Value *ConvertShadow = getShadow(ConvertOp);
- Value *AggShadow = 0;
+ Value *AggShadow = nullptr;
if (ConvertOp->getType()->isVectorTy()) {
AggShadow = IRB.CreateExtractElement(
ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
@@ -2121,7 +2122,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
continue;
}
unsigned Size = 0;
- Value *Store = 0;
+ Value *Store = nullptr;
// Compute the Shadow for arg even if it is ByVal, because
// in that case getShadow() will copy the actual arg shadow to
// __msan_param_tls.
@@ -2164,7 +2165,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Until we have full dynamic coverage, make sure the retval shadow is 0.
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
- Instruction *NextInsn = 0;
+ Instruction *NextInsn = nullptr;
if (CS.isCall()) {
NextInsn = I.getNextNode();
} else {
@@ -2384,7 +2385,8 @@ struct VarArgAMD64Helper : public VarArgHelper {
VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
MemorySanitizerVisitor &MSV)
- : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(0), VAArgOverflowSize(0) { }
+ : F(F), MS(MS), MSV(MSV), VAArgTLSCopy(nullptr),
+ VAArgOverflowSize(nullptr) {}
enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
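Note on the VarArgAMD64Helper hunk above: this is the constructor-initializer form of the change. A minimal sketch with a hypothetical struct:

    struct Helper {
      int  *TLSCopy;
      long *OverflowSize;
      // Members are initialized in the init list, not assigned in the body.
      Helper() : TLSCopy(nullptr), OverflowSize(nullptr) {}
    };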
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index a2b24e54b4..8fe9bcae69 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -78,7 +78,7 @@ namespace {
struct ThreadSanitizer : public FunctionPass {
ThreadSanitizer(StringRef BlacklistFile = StringRef())
: FunctionPass(ID),
- DL(0),
+ DL(nullptr),
BlacklistFile(BlacklistFile.empty() ? ClBlacklistFile
: BlacklistFile) { }
const char *getPassName() const override;
@@ -174,8 +174,8 @@ void ThreadSanitizer::initializeCallbacks(Module &M) {
for (int op = AtomicRMWInst::FIRST_BINOP;
op <= AtomicRMWInst::LAST_BINOP; ++op) {
- TsanAtomicRMW[op][i] = NULL;
- const char *NamePart = NULL;
+ TsanAtomicRMW[op][i] = nullptr;
+ const char *NamePart = nullptr;
if (op == AtomicRMWInst::Xchg)
NamePart = "_exchange";
else if (op == AtomicRMWInst::Add)
@@ -518,7 +518,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) {
if (Idx < 0)
return false;
Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx];
- if (F == NULL)
+ if (!F)
return false;
const size_t ByteSize = 1 << Idx;
const size_t BitSize = ByteSize * 8;
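Note on ThreadSanitizer.cpp: here the replaced spelling is NULL rather than 0. NULL is a macro whose expansion is implementation-defined (typically 0, 0L, or a builtin such as GCC's __null), so it remains an integer-flavored null pointer constant, while nullptr is a keyword with the distinct type std::nullptr_t. A compile-time sketch:

    #include <cstddef>
    #include <type_traits>
    static_assert(std::is_same<decltype(nullptr), std::nullptr_t>::value,
                  "nullptr carries its own distinct type");
    int main() {
      int *P = NULL;     // still legal: NULL is a null pointer constant
      int *Q = nullptr;  // preferred: can never be mistaken for an integer
      return P == Q ? 0 : 1;
    }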
diff --git a/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index db8e4c32ad..08c884293c 100644
--- a/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -224,7 +224,7 @@ llvm::objcarc::FindDependencies(DependenceKind Flavor,
pred_iterator PI(LocalStartBB), PE(LocalStartBB, false);
if (PI == PE)
// If we've reached the function entry, produce a null dependence.
- DependingInsts.insert(0);
+ DependingInsts.insert(nullptr);
else
// Add the predecessors to the worklist.
do {
diff --git a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
index eb1cb1739e..9d36b723b9 100644
--- a/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -94,7 +94,7 @@ bool ObjCARCAPElim::MayAutorelease(ImmutableCallSite CS, unsigned Depth) {
bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
bool Changed = false;
- Instruction *Push = 0;
+ Instruction *Push = nullptr;
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
Instruction *Inst = I++;
switch (GetBasicInstructionClass(Inst)) {
@@ -113,11 +113,11 @@ bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
Inst->eraseFromParent();
Push->eraseFromParent();
}
- Push = 0;
+ Push = nullptr;
break;
case IC_CallOrUser:
if (MayAutorelease(ImmutableCallSite(Inst)))
- Push = 0;
+ Push = nullptr;
break;
default:
break;
diff --git a/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
index c441d861ef..f48d53d11b 100644
--- a/lib/Transforms/ObjCARC/ObjCARCContract.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -158,7 +158,7 @@ ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease,
// Check that there are no instructions between the retain and the autorelease
// (such as an autorelease_pop) which may change the count.
- CallInst *Retain = 0;
+ CallInst *Retain = nullptr;
if (Class == IC_AutoreleaseRV)
FindDependencies(RetainAutoreleaseRVDep, Arg,
Autorelease->getParent(), Autorelease,
@@ -219,7 +219,7 @@ void ObjCARCContract::ContractRelease(Instruction *Release,
BasicBlock::iterator I = Load, End = BB->end();
++I;
AliasAnalysis::Location Loc = AA->getLocation(Load);
- StoreInst *Store = 0;
+ StoreInst *Store = nullptr;
bool SawRelease = false;
for (; !Store || !SawRelease; ++I) {
if (I == End)
@@ -301,7 +301,7 @@ bool ObjCARCContract::doInitialization(Module &M) {
EP.Initialize(&M);
// Initialize RetainRVMarker.
- RetainRVMarker = 0;
+ RetainRVMarker = nullptr;
if (NamedMDNode *NMD =
M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker"))
if (NMD->getNumOperands() == 1) {
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index d64006fd24..f29cb2d91a 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -157,7 +157,7 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
return FindSingleUseIdentifiedObject(
cast<CallInst>(Arg)->getArgOperand(0));
if (!IsObjCIdentifiedObject(Arg))
- return 0;
+ return nullptr;
return Arg;
}
@@ -166,12 +166,12 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
if (IsObjCIdentifiedObject(Arg)) {
for (const User *U : Arg->users())
if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
- return 0;
+ return nullptr;
return Arg;
}
- return 0;
+ return nullptr;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
@@ -374,7 +374,7 @@ namespace {
bool CFGHazardAfflicted;
RRInfo() :
- KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
+ KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
CFGHazardAfflicted(false) {}
void clear();
@@ -389,7 +389,7 @@ namespace {
void RRInfo::clear() {
KnownSafe = false;
IsTailCallRelease = false;
- ReleaseMetadata = 0;
+ ReleaseMetadata = nullptr;
Calls.clear();
ReverseInsertPts.clear();
CFGHazardAfflicted = false;
@@ -398,7 +398,7 @@ void RRInfo::clear() {
bool RRInfo::Merge(const RRInfo &Other) {
// Conservatively merge the ReleaseMetadata information.
if (ReleaseMetadata != Other.ReleaseMetadata)
- ReleaseMetadata = 0;
+ ReleaseMetadata = nullptr;
// Conservatively merge the boolean state.
KnownSafe &= Other.KnownSafe;
@@ -457,7 +457,7 @@ namespace {
}
bool IsTrackingImpreciseReleases() const {
- return RRI.ReleaseMetadata != 0;
+ return RRI.ReleaseMetadata != nullptr;
}
const MDNode *GetReleaseMetadata() const {
@@ -1719,7 +1719,7 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ const Value *Arg = nullptr;
DEBUG(dbgs() << "Class: " << Class << "\n");
@@ -1975,7 +1975,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ const Value *Arg = nullptr;
switch (Class) {
case IC_RetainBlock:
@@ -2027,7 +2027,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
switch (OldSeq) {
case S_Retain:
case S_CanRelease:
- if (OldSeq == S_Retain || ReleaseMetadata != 0)
+ if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
S.ClearReverseInsertPts();
// FALL THROUGH
case S_Use:
@@ -2433,7 +2433,7 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
} else {
if (ReleasesToMove.ReleaseMetadata !=
NewRetainReleaseRRI.ReleaseMetadata)
- ReleasesToMove.ReleaseMetadata = 0;
+ ReleasesToMove.ReleaseMetadata = nullptr;
if (ReleasesToMove.IsTailCallRelease !=
NewRetainReleaseRRI.IsTailCallRelease)
ReleasesToMove.IsTailCallRelease = false;
@@ -2885,7 +2885,7 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
FindDependencies(CanChangeRetainCount, Arg,
BB, Autorelease, DepInsts, Visited, PA);
if (DepInsts.size() != 1)
- return 0;
+ return nullptr;
CallInst *Retain =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
@@ -2894,7 +2894,7 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
if (!Retain ||
!IsRetain(GetBasicInstructionClass(Retain)) ||
GetObjCArg(Retain) != Arg) {
- return 0;
+ return nullptr;
}
return Retain;
@@ -2912,17 +2912,17 @@ FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
FindDependencies(NeedsPositiveRetainCount, Arg,
BB, Ret, DepInsts, V, PA);
if (DepInsts.size() != 1)
- return 0;
+ return nullptr;
CallInst *Autorelease =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Autorelease)
- return 0;
+ return nullptr;
InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
if (!IsAutorelease(AutoreleaseClass))
- return 0;
+ return nullptr;
if (GetObjCArg(Autorelease) != Arg)
- return 0;
+ return nullptr;
return Autorelease;
}
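Note on FindPredecessorAutoreleaseWithSafePath above: the function is a chain of guard clauses, where each failed precondition returns nullptr so the caller only has to test a single pointer result. A compact sketch of that shape with hypothetical types:

    struct Call { int Kind; };
    // Returns the call only if every precondition holds; nullptr otherwise.
    Call *matchAutorelease(Call *C, int WantedKind) {
      if (!C)                    return nullptr;  // no candidate at all
      if (C->Kind != WantedKind) return nullptr;  // wrong instruction class
      return C;
    }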
diff --git a/lib/Transforms/Scalar/ConstantHoisting.cpp b/lib/Transforms/Scalar/ConstantHoisting.cpp
index af0450729a..7c3a260947 100644
--- a/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -118,7 +118,8 @@ class ConstantHoisting : public FunctionPass {
SmallVector<ConstantInfo, 8> ConstantVec;
public:
static char ID; // Pass identification, replacement for typeid
- ConstantHoisting() : FunctionPass(ID), TTI(0), DT(0), Entry(0) {
+ ConstantHoisting() : FunctionPass(ID), TTI(nullptr), DT(nullptr),
+ Entry(nullptr) {
initializeConstantHoistingPass(*PassRegistry::getPassRegistry());
}
diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp
index 7a0c556570..dd51ce1bc2 100644
--- a/lib/Transforms/Scalar/ConstantProp.cpp
+++ b/lib/Transforms/Scalar/ConstantProp.cpp
@@ -69,7 +69,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
}
bool Changed = false;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
while (!WorkList.empty()) {
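Note on `DL = DLP ? &DLP->getDataLayout() : nullptr;`: the conditional operator must find a common type for its two arms, and nullptr converts to the pointer type of the other arm. A sketch with stand-in types:

    struct Layout {};
    struct Pass { Layout L; Layout *getLayout() { return &L; } };
    Layout *pickLayout(Pass *P) {
      return P ? P->getLayout() : nullptr;  // common type deduced as Layout *
    }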
diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 7884620eef..082946229b 100644
--- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -139,7 +139,7 @@ bool CorrelatedValuePropagation::processPHI(PHINode *P) {
}
bool CorrelatedValuePropagation::processMemAccess(Instruction *I) {
- Value *Pointer = 0;
+ Value *Pointer = nullptr;
if (LoadInst *L = dyn_cast<LoadInst>(I))
Pointer = L->getPointerOperand();
else
diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp
index ac4b56bcbb..3af8ee7546 100644
--- a/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -50,7 +50,7 @@ namespace {
const TargetLibraryInfo *TLI;
static char ID; // Pass identification, replacement for typeid
- DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) {
+ DSE() : FunctionPass(ID), AA(nullptr), MD(nullptr), DT(nullptr) {
initializeDSEPass(*PassRegistry::getPassRegistry());
}
@@ -70,7 +70,7 @@ namespace {
if (DT->isReachableFromEntry(I))
Changed |= runOnBasicBlock(*I);
- AA = 0; MD = 0; DT = 0;
+ AA = nullptr; MD = nullptr; DT = nullptr;
return Changed;
}
@@ -112,9 +112,9 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
static void DeleteDeadInstruction(Instruction *I,
- MemoryDependenceAnalysis &MD,
- const TargetLibraryInfo *TLI,
- SmallSetVector<Value*, 16> *ValueSet = 0) {
+ MemoryDependenceAnalysis &MD,
+ const TargetLibraryInfo *TLI,
+ SmallSetVector<Value*, 16> *ValueSet = nullptr) {
SmallVector<Instruction*, 32> NowDeadInsts;
NowDeadInsts.push_back(I);
@@ -132,7 +132,7 @@ static void DeleteDeadInstruction(Instruction *I,
for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, 0);
+ DeadInst->setOperand(op, nullptr);
// If this operand just became dead, add it to the NowDeadInsts list.
if (!Op->use_empty()) continue;
@@ -204,13 +204,13 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// memset/memcpy, which write more than an i8.
- if (Loc.Size == AliasAnalysis::UnknownSize && DL == 0)
+ if (Loc.Size == AliasAnalysis::UnknownSize && DL == nullptr)
return AliasAnalysis::Location();
return Loc;
}
IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
- if (II == 0) return AliasAnalysis::Location();
+ if (!II) return AliasAnalysis::Location();
switch (II->getIntrinsicID()) {
default: return AliasAnalysis::Location(); // Unhandled intrinsic.
@@ -218,7 +218,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
// If we don't have target data around, an unknown size in Location means
// that we should use the size of the pointee type. This isn't valid for
// init.trampoline, which writes more than an i8.
- if (DL == 0) return AliasAnalysis::Location();
+ if (!DL) return AliasAnalysis::Location();
// FIXME: We don't know the size of the trampoline, so we can't really
// handle it here.
@@ -360,7 +360,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// If we have no DataLayout information around, then the size of the store
// is inferrable from the pointee type. If they are the same type, then
// we know that the store is safe.
- if (DL == 0 && Later.Ptr->getType() == Earlier.Ptr->getType())
+ if (DL == nullptr && Later.Ptr->getType() == Earlier.Ptr->getType())
return OverwriteComplete;
return OverwriteUnknown;
@@ -374,7 +374,7 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later,
// Otherwise, we have to have size information, and the later store has to be
// larger than the earlier one.
if (Later.Size == AliasAnalysis::UnknownSize ||
- Earlier.Size == AliasAnalysis::UnknownSize || DL == 0)
+ Earlier.Size == AliasAnalysis::UnknownSize || DL == nullptr)
return OverwriteUnknown;
// Check to see if the later store is to the entire object (either a global,
@@ -462,7 +462,7 @@ static bool isPossibleSelfRead(Instruction *Inst,
// Self reads can only happen for instructions that read memory. Get the
// location read.
AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
- if (InstReadLoc.Ptr == 0) return false; // Not a reading instruction.
+ if (!InstReadLoc.Ptr) return false; // Not a reading instruction.
// If the read and written loc obviously don't alias, it isn't a read.
if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
@@ -529,7 +529,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
DeleteDeadInstruction(SI, *MD, TLI);
- if (NextInst == 0) // Next instruction deleted.
+ if (!NextInst) // Next instruction deleted.
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
@@ -544,7 +544,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
// If we didn't get a useful location, fail.
- if (Loc.Ptr == 0)
+ if (!Loc.Ptr)
continue;
while (InstDep.isDef() || InstDep.isClobber()) {
@@ -558,7 +558,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *DepWrite = InstDep.getInst();
AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
// If we didn't get a useful location, or if it isn't a size, bail out.
- if (DepLoc.Ptr == 0)
+ if (!DepLoc.Ptr)
break;
// If we find a write that is a) removable (i.e., non-volatile), b) is
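Note on DeleteDeadInstruction above: the `ValueSet = nullptr` default is the optional out-parameter idiom, where callers that do not care pass nothing and the callee records results only when given somewhere to put them. A sketch with a hypothetical signature:

    #include <set>
    void eraseRecorded(int V, std::set<int> *Erased = nullptr) {
      // ... remove V from wherever it lives ...
      if (Erased)  // only callers that asked for the record receive it
        Erased->insert(V);
    }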
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index e97bfccda0..735f5c194c 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -208,7 +208,7 @@ namespace {
return false;
CallInst *CI = dyn_cast<CallInst>(Inst);
- if (CI == 0 || !CI->onlyReadsMemory())
+ if (!CI || !CI->onlyReadsMemory())
return false;
return true;
}
@@ -406,14 +406,14 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// have invalidated the live-out memory values of our parent value. For now,
// just be conservative and invalidate memory if this block has multiple
// predecessors.
- if (BB->getSinglePredecessor() == 0)
+ if (!BB->getSinglePredecessor())
++CurrentGeneration;
/// LastStore - Keep track of the last non-volatile store that we saw... for
/// as long as there is no instruction that reads memory. If we see a store
/// to the same location, we delete the dead store. This zaps trivial dead
/// stores which can occur in bitfield code among other things.
- StoreInst *LastStore = 0;
+ StoreInst *LastStore = nullptr;
bool Changed = false;
@@ -463,7 +463,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
// Ignore volatile loads.
if (!LI->isSimple()) {
- LastStore = 0;
+ LastStore = nullptr;
continue;
}
@@ -471,7 +471,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// generation, replace this instruction.
std::pair<Value*, unsigned> InVal =
AvailableLoads->lookup(Inst->getOperand(0));
- if (InVal.first != 0 && InVal.second == CurrentGeneration) {
+ if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst << " to: "
<< *InVal.first << '\n');
if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
@@ -484,20 +484,20 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// Otherwise, remember that we have this instruction.
AvailableLoads->insert(Inst->getOperand(0),
std::pair<Value*, unsigned>(Inst, CurrentGeneration));
- LastStore = 0;
+ LastStore = nullptr;
continue;
}
// If this instruction may read from memory, forget LastStore.
if (Inst->mayReadFromMemory())
- LastStore = 0;
+ LastStore = nullptr;
// If this is a read-only call, process it.
if (CallValue::canHandle(Inst)) {
// If we have an available version of this call, and if it is the right
// generation, replace this instruction.
std::pair<Value*, unsigned> InVal = AvailableCalls->lookup(Inst);
- if (InVal.first != 0 && InVal.second == CurrentGeneration) {
+ if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst << " to: "
<< *InVal.first << '\n');
if (!Inst->use_empty()) Inst->replaceAllUsesWith(InVal.first);
@@ -529,7 +529,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
LastStore->eraseFromParent();
Changed = true;
++NumDSE;
- LastStore = 0;
+ LastStore = nullptr;
continue;
}
@@ -559,7 +559,7 @@ bool EarlyCSE::runOnFunction(Function &F) {
std::vector<StackNode *> nodesToProcess;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index 5686f08959..e7156fd6b6 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -220,7 +220,7 @@ Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
e.opcode = 0;
IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
- if (I != 0 && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
+ if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0 ) {
// EI might be an extract from one of our recognised intrinsics. If it
// is, we'll synthesize a semantically equivalent expression instead of
// an extract value expression.
@@ -328,7 +328,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
MD->getNonLocalCallDependency(CallSite(C));
// FIXME: Move the checking logic to MemDep!
- CallInst* cdep = 0;
+ CallInst* cdep = nullptr;
// Check to see if we have a single dominating call instruction that is
// identical to C.
@@ -339,8 +339,8 @@ uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
// We don't handle non-definitions. If we already have a call, reject
// instruction dependencies.
- if (!I->getResult().isDef() || cdep != 0) {
- cdep = 0;
+ if (!I->getResult().isDef() || cdep != nullptr) {
+ cdep = nullptr;
break;
}
@@ -351,7 +351,7 @@ uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
continue;
}
- cdep = 0;
+ cdep = nullptr;
break;
}
@@ -552,7 +552,7 @@ namespace {
static AvailableValueInBlock getUndef(BasicBlock *BB) {
AvailableValueInBlock Res;
Res.BB = BB;
- Res.Val.setPointer(0);
+ Res.Val.setPointer(nullptr);
Res.Val.setInt(UndefVal);
Res.Offset = 0;
return Res;
@@ -612,7 +612,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
explicit GVN(bool noloads = false)
- : FunctionPass(ID), NoLoads(noloads), MD(0) {
+ : FunctionPass(ID), NoLoads(noloads), MD(nullptr) {
initializeGVNPass(*PassRegistry::getPassRegistry());
}
@@ -650,7 +650,7 @@ namespace {
/// removeFromLeaderTable - Scan the list of values corresponding to a given
/// value number, and remove the given instruction if encountered.
void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
- LeaderTableEntry* Prev = 0;
+ LeaderTableEntry* Prev = nullptr;
LeaderTableEntry* Curr = &LeaderTable[N];
while (Curr->Val != I || Curr->BB != BB) {
@@ -662,8 +662,8 @@ namespace {
Prev->Next = Curr->Next;
} else {
if (!Curr->Next) {
- Curr->Val = 0;
- Curr->BB = 0;
+ Curr->Val = nullptr;
+ Curr->BB = nullptr;
} else {
LeaderTableEntry* Next = Curr->Next;
Curr->Val = Next->Val;
@@ -856,7 +856,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
Instruction *InsertPt,
const DataLayout &DL) {
if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
- return 0;
+ return nullptr;
// If this is already the right type, just return it.
Type *StoredValTy = StoredVal->getType();
@@ -1061,7 +1061,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
const DataLayout &DL) {
// If the mem operation is a non-constant size, we can't handle it.
ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
- if (SizeCst == 0) return -1;
+ if (!SizeCst) return -1;
uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;
// If this is memset, we just need to see if the offset is valid in the size
@@ -1076,10 +1076,10 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
MemTransferInst *MTI = cast<MemTransferInst>(MI);
Constant *Src = dyn_cast<Constant>(MTI->getSource());
- if (Src == 0) return -1;
+ if (!Src) return -1;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, &DL));
- if (GV == 0 || !GV->isConstant()) return -1;
+ if (!GV || !GV->isConstant()) return -1;
// See if the access is within the bounds of the transfer.
int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
@@ -1470,8 +1470,8 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (S->getValueOperand()->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (DL == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
- LI->getType(), *DL)) {
+ if (!DL || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
+ LI->getType(), *DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1487,7 +1487,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
if (LD->getType() != LI->getType()) {
// If the stored value is larger or equal to the loaded value, we can
// reuse it.
- if (DL == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)){
+ if (!DL || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*DL)) {
UnavailableBlocks.push_back(DepBB);
continue;
}
@@ -1554,7 +1554,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
continue;
}
- PredLoads[Pred] = 0;
+ PredLoads[Pred] = nullptr;
if (Pred->getTerminator()->getNumSuccessors() != 1) {
if (isa<IndirectBrInst>(Pred->getTerminator())) {
@@ -1592,7 +1592,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
BasicBlock *OrigPred = *I;
BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
PredLoads.erase(OrigPred);
- PredLoads[NewPred] = 0;
+ PredLoads[NewPred] = nullptr;
DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
<< LoadBB->getName() << '\n');
}
@@ -1611,13 +1611,13 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
// the load on the pred (?!?), so we can insert code to materialize the
// pointer if it is not available.
PHITransAddr Address(LI->getPointerOperand(), DL);
- Value *LoadPtr = 0;
+ Value *LoadPtr = nullptr;
LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
*DT, NewInsts);
// If we couldn't find or insert a computation of this phi translated value,
// we fail PRE.
- if (LoadPtr == 0) {
+ if (!LoadPtr) {
DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
<< *LI->getPointerOperand() << "\n");
CanDoPRE = false;
@@ -1777,7 +1777,7 @@ static void patchReplacementInstruction(Instruction *I, Value *Repl) {
MDNode *ReplMD = Metadata[i].second;
switch(Kind) {
default:
- ReplInst->setMetadata(Kind, NULL); // Remove unknown metadata
+ ReplInst->setMetadata(Kind, nullptr); // Remove unknown metadata
break;
case LLVMContext::MD_dbg:
llvm_unreachable("getAllMetadataOtherThanDebugLoc returned a MD_dbg");
@@ -1833,7 +1833,7 @@ bool GVN::processLoad(LoadInst *L) {
// a common base + constant offset, and if the previous store (or memset)
// completely covers this load. This sort of thing can happen in bitfield
// access code.
- Value *AvailVal = 0;
+ Value *AvailVal = nullptr;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) {
int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
L->getPointerOperand(),
@@ -1921,7 +1921,7 @@ bool GVN::processLoad(LoadInst *L) {
if (DL) {
StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
L, *DL);
- if (StoredVal == 0)
+ if (!StoredVal)
return false;
DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
@@ -1950,7 +1950,7 @@ bool GVN::processLoad(LoadInst *L) {
if (DL) {
AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
L, *DL);
- if (AvailableVal == 0)
+ if (!AvailableVal)
return false;
DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
@@ -2000,9 +2000,9 @@ bool GVN::processLoad(LoadInst *L) {
// a few comparisons of DFS numbers.
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
LeaderTableEntry Vals = LeaderTable[num];
- if (!Vals.Val) return 0;
+ if (!Vals.Val) return nullptr;
- Value *Val = 0;
+ Value *Val = nullptr;
if (DT->dominates(Vals.BB, BB)) {
Val = Vals.Val;
if (isa<Constant>(Val)) return Val;
@@ -2053,7 +2053,7 @@ static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
const BasicBlock *Src = E.getStart();
assert((!Pred || Pred == Src) && "No edge between these basic blocks!");
(void)Src;
- return Pred != 0;
+ return Pred != nullptr;
}
/// propagateEquality - The given values are known to be equal in every block
@@ -2297,7 +2297,7 @@ bool GVN::processInstruction(Instruction *I) {
// Perform fast-path value-number based elimination of values inherited from
// dominators.
Value *repl = findLeader(I->getParent(), Num);
- if (repl == 0) {
+ if (!repl) {
// Failure, just remember this instance for future use.
addToLeaderTable(Num, I, I->getParent());
return false;
@@ -2320,7 +2320,7 @@ bool GVN::runOnFunction(Function& F) {
MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
@@ -2462,7 +2462,7 @@ bool GVN::performPRE(Function &F) {
// more complicated to get right.
unsigned NumWith = 0;
unsigned NumWithout = 0;
- BasicBlock *PREPred = 0;
+ BasicBlock *PREPred = nullptr;
predMap.clear();
for (pred_iterator PI = pred_begin(CurrentBlock),
@@ -2480,8 +2480,8 @@ bool GVN::performPRE(Function &F) {
}
Value* predV = findLeader(P, ValNo);
- if (predV == 0) {
- predMap.push_back(std::make_pair(static_cast<Value *>(0), P));
+ if (!predV) {
+ predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
PREPred = P;
++NumWithout;
} else if (predV == CurInst) {
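Aside on the GVN hunks above: they reconcile three spellings of the same null test. All are equivalent for a pointer p — !p uses the contextual conversion to bool, p == nullptr compares against the typed null, and the old p == 0 only compiled because an integer literal 0 is a null pointer constant, which is exactly the ambiguity nullptr removes. A minimal standalone sketch (plain C++; Widget is a hypothetical stand-in for an LLVM value type):

#include <cassert>

struct Widget {};

int main() {
  Widget *p = nullptr;
  assert(!p);                    // contextual conversion to bool
  assert(p == nullptr);          // comparison against the typed null
  assert(p == 0);                // pre-C++11 spelling: 0 is a null pointer constant
  bool present = (p != nullptr); // explicit form used when assigning to a bool
  assert(!present);
  return 0;
}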
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp
index 8c54d3aece..dd9c3784cc 100644
--- a/lib/Transforms/Scalar/GlobalMerge.cpp
+++ b/lib/Transforms/Scalar/GlobalMerge.cpp
@@ -108,7 +108,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid.
- explicit GlobalMerge(const TargetMachine *TM = 0)
+ explicit GlobalMerge(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM) {
initializeGlobalMergePass(*PassRegistry::getPassRegistry());
}
@@ -174,7 +174,8 @@ bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
GlobalVariable *MergedGV = new GlobalVariable(M, MergedTy, isConst,
GlobalValue::InternalLinkage,
MergedInit, "_MergedGlobals",
- 0, GlobalVariable::NotThreadLocal,
+ nullptr,
+ GlobalVariable::NotThreadLocal,
AddrSpace);
for (size_t k = i; k < j; ++k) {
Constant *Idx[2] = {
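On the TM = nullptr default argument above: nullptr has its own type, std::nullptr_t, which converts to any pointer type but never to an integer, so it can never be captured by an integer overload the way a literal 0 can. A short sketch of the difference, not LLVM code:

#include <iostream>

void report(int)          { std::cout << "int overload\n"; }
void report(const char *) { std::cout << "pointer overload\n"; }

int main() {
  report(0);        // picks the int overload: 0 is an integer first
  report(nullptr);  // picks the pointer overload: nullptr_t never converts to int
  return 0;
}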
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 4f2d0a6d36..e83a5c421b 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -80,8 +80,8 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid
- IndVarSimplify() : LoopPass(ID), LI(0), SE(0), DT(0), DL(0),
- Changed(false) {
+ IndVarSimplify() : LoopPass(ID), LI(nullptr), SE(nullptr), DT(nullptr),
+ DL(nullptr), Changed(false) {
initializeIndVarSimplifyPass(*PassRegistry::getPassRegistry());
}
@@ -197,7 +197,7 @@ static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
if (!PHI)
return User;
- Instruction *InsertPt = 0;
+ Instruction *InsertPt = nullptr;
for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
if (PHI->getIncomingValue(i) != Def)
continue;
@@ -258,13 +258,13 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// an add or increment value cannot be represented by an integer.
BinaryOperator *Incr =
dyn_cast<BinaryOperator>(PN->getIncomingValue(BackEdge));
- if (Incr == 0 || Incr->getOpcode() != Instruction::FAdd) return;
+ if (Incr == nullptr || Incr->getOpcode() != Instruction::FAdd) return;
// If this is not an add of the PHI with a constantfp, or if the constant fp
// is not an integer, bail out.
ConstantFP *IncValueVal = dyn_cast<ConstantFP>(Incr->getOperand(1));
int64_t IncValue;
- if (IncValueVal == 0 || Incr->getOperand(0) != PN ||
+ if (IncValueVal == nullptr || Incr->getOperand(0) != PN ||
!ConvertToSInt(IncValueVal->getValueAPF(), IncValue))
return;
@@ -281,7 +281,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
FCmpInst *Compare = dyn_cast<FCmpInst>(U1);
if (!Compare)
Compare = dyn_cast<FCmpInst>(U2);
- if (Compare == 0 || !Compare->hasOneUse() ||
+ if (!Compare || !Compare->hasOneUse() ||
!isa<BranchInst>(Compare->user_back()))
return;
@@ -302,7 +302,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// transform it.
ConstantFP *ExitValueVal = dyn_cast<ConstantFP>(Compare->getOperand(1));
int64_t ExitValue;
- if (ExitValueVal == 0 ||
+ if (ExitValueVal == nullptr ||
!ConvertToSInt(ExitValueVal->getValueAPF(), ExitValue))
return;
@@ -652,7 +652,8 @@ namespace {
Type *WidestNativeType; // Widest integer type created by [sz]ext
bool IsSigned; // Was an sext user seen before a zext?
- WideIVInfo() : NarrowIV(0), WidestNativeType(0), IsSigned(false) {}
+ WideIVInfo() : NarrowIV(nullptr), WidestNativeType(nullptr),
+ IsSigned(false) {}
};
}
@@ -694,7 +695,7 @@ struct NarrowIVDefUse {
Instruction *NarrowUse;
Instruction *WideDef;
- NarrowIVDefUse(): NarrowDef(0), NarrowUse(0), WideDef(0) {}
+ NarrowIVDefUse(): NarrowDef(nullptr), NarrowUse(nullptr), WideDef(nullptr) {}
NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD):
NarrowDef(ND), NarrowUse(NU), WideDef(WD) {}
@@ -737,9 +738,9 @@ public:
L(LI->getLoopFor(OrigPhi->getParent())),
SE(SEv),
DT(DTree),
- WidePhi(0),
- WideInc(0),
- WideIncExpr(0),
+ WidePhi(nullptr),
+ WideInc(nullptr),
+ WideIncExpr(nullptr),
DeadInsts(DI) {
assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
}
@@ -794,7 +795,7 @@ Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
unsigned Opcode = DU.NarrowUse->getOpcode();
switch (Opcode) {
default:
- return 0;
+ return nullptr;
case Instruction::Add:
case Instruction::Mul:
case Instruction::UDiv:
@@ -839,14 +840,14 @@ Instruction *WidenIV::CloneIVUser(NarrowIVDefUse DU) {
const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
// Handle the common case of add<nsw/nuw>
if (DU.NarrowUse->getOpcode() != Instruction::Add)
- return 0;
+ return nullptr;
// One operand (NarrowDef) has already been extended to WideDef. Now determine
// if extending the other will lead to a recurrence.
unsigned ExtendOperIdx = DU.NarrowUse->getOperand(0) == DU.NarrowDef ? 1 : 0;
assert(DU.NarrowUse->getOperand(1-ExtendOperIdx) == DU.NarrowDef && "bad DU");
- const SCEV *ExtendOperExpr = 0;
+ const SCEV *ExtendOperExpr = nullptr;
const OverflowingBinaryOperator *OBO =
cast<OverflowingBinaryOperator>(DU.NarrowUse);
if (IsSigned && OBO->hasNoSignedWrap())
@@ -856,7 +857,7 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
ExtendOperExpr = SE->getZeroExtendExpr(
SE->getSCEV(DU.NarrowUse->getOperand(ExtendOperIdx)), WideType);
else
- return 0;
+ return nullptr;
// When creating this AddExpr, don't apply the current operation's NSW or NUW
// flags. This instruction may be guarded by control flow that the no-wrap
@@ -867,7 +868,7 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
SE->getAddExpr(SE->getSCEV(DU.WideDef), ExtendOperExpr));
if (!AddRec || AddRec->getLoop() != L)
- return 0;
+ return nullptr;
return AddRec;
}
@@ -878,14 +879,14 @@ const SCEVAddRecExpr* WidenIV::GetExtendedOperandRecurrence(NarrowIVDefUse DU) {
/// recurrence. Otherwise return NULL.
const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
if (!SE->isSCEVable(NarrowUse->getType()))
- return 0;
+ return nullptr;
const SCEV *NarrowExpr = SE->getSCEV(NarrowUse);
if (SE->getTypeSizeInBits(NarrowExpr->getType())
>= SE->getTypeSizeInBits(WideType)) {
// NarrowUse implicitly widens its operand. e.g. a gep with a narrow
// index. So don't follow this use.
- return 0;
+ return nullptr;
}
const SCEV *WideExpr = IsSigned ?
@@ -893,7 +894,7 @@ const SCEVAddRecExpr *WidenIV::GetWideRecurrence(Instruction *NarrowUse) {
SE->getZeroExtendExpr(NarrowExpr, WideType);
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
if (!AddRec || AddRec->getLoop() != L)
- return 0;
+ return nullptr;
return AddRec;
}
@@ -931,7 +932,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi
<< " to " << *WidePhi << "\n");
}
- return 0;
+ return nullptr;
}
}
// Our raison d'etre! Eliminate sign and zero extension.
@@ -969,7 +970,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// push the uses of WideDef here.
// No further widening is needed. The deceased [sz]ext had done it for us.
- return 0;
+ return nullptr;
}
// Does this user itself evaluate to a recurrence after widening?
@@ -982,7 +983,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// follow it. Instead insert a Trunc to kill off the original use,
// eventually isolating the original narrow IV so it can be removed.
truncateIVUse(DU, DT);
- return 0;
+ return nullptr;
}
// Assume block terminators cannot evaluate to a recurrence. We can't
// insert a Trunc after a terminator if there happens to be a critical edge.
@@ -991,14 +992,14 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
// Reuse the IV increment that SCEVExpander created as long as it dominates
// NarrowUse.
- Instruction *WideUse = 0;
+ Instruction *WideUse = nullptr;
if (WideAddRec == WideIncExpr
&& Rewriter.hoistIVInc(WideInc, DU.NarrowUse))
WideUse = WideInc;
else {
WideUse = CloneIVUser(DU);
if (!WideUse)
- return 0;
+ return nullptr;
}
// Evaluation of WideAddRec ensured that the narrow expression could be
// extended outside the loop without overflow. This suggests that the wide use
@@ -1009,7 +1010,7 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse
<< ": " << *SE->getSCEV(WideUse) << " != " << *WideAddRec << "\n");
DeadInsts.push_back(WideUse);
- return 0;
+ return nullptr;
}
// Returning WideUse pushes it on the worklist.
@@ -1044,7 +1045,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
// Is this phi an induction variable?
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
if (!AddRec)
- return NULL;
+ return nullptr;
// Widen the induction variable expression.
const SCEV *WideIVExpr = IsSigned ?
@@ -1057,7 +1058,7 @@ PHINode *WidenIV::CreateWideIV(SCEVExpander &Rewriter) {
// Can the IV be extended outside the loop without overflow?
AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
if (!AddRec || AddRec->getLoop() != L)
- return NULL;
+ return nullptr;
// An AddRec must have loop-invariant operands. Since this AddRec is
// materialized by a loop header phi, the expression cannot have any post-loop
@@ -1283,7 +1284,7 @@ static bool canExpandBackedgeTakenCount(Loop *L, ScalarEvolution *SE) {
static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
Instruction *IncI = dyn_cast<Instruction>(IncV);
if (!IncI)
- return 0;
+ return nullptr;
switch (IncI->getOpcode()) {
case Instruction::Add:
@@ -1294,17 +1295,17 @@ static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
if (IncI->getNumOperands() == 2)
break;
default:
- return 0;
+ return nullptr;
}
PHINode *Phi = dyn_cast<PHINode>(IncI->getOperand(0));
if (Phi && Phi->getParent() == L->getHeader()) {
if (isLoopInvariant(IncI->getOperand(1), L, DT))
return Phi;
- return 0;
+ return nullptr;
}
if (IncI->getOpcode() == Instruction::GetElementPtr)
- return 0;
+ return nullptr;
// Allow add/sub to be commuted.
Phi = dyn_cast<PHINode>(IncI->getOperand(1));
@@ -1312,7 +1313,7 @@ static PHINode *getLoopPhiForCounter(Value *IncV, Loop *L, DominatorTree *DT) {
if (isLoopInvariant(IncI->getOperand(0), L, DT))
return Phi;
}
- return 0;
+ return nullptr;
}
/// Return the compare guarding the loop latch, or NULL for unrecognized tests.
@@ -1322,7 +1323,7 @@ static ICmpInst *getLoopTest(Loop *L) {
BasicBlock *LatchBlock = L->getLoopLatch();
// Don't bother with LFTR if the loop is not properly simplified.
if (!LatchBlock)
- return 0;
+ return nullptr;
BranchInst *BI = dyn_cast<BranchInst>(L->getExitingBlock()->getTerminator());
assert(BI && "expected exit branch");
@@ -1447,8 +1448,8 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
cast<BranchInst>(L->getExitingBlock()->getTerminator())->getCondition();
// Loop over all of the PHI nodes, looking for a simple counter.
- PHINode *BestPhi = 0;
- const SCEV *BestInit = 0;
+ PHINode *BestPhi = nullptr;
+ const SCEV *BestInit = nullptr;
BasicBlock *LatchBlock = L->getLoopLatch();
assert(LatchBlock && "needsLFTR should guarantee a loop latch");
@@ -1572,7 +1573,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
// IVInit integer and IVCount pointer would only occur if a canonical IV
// were generated on top of case #2, which is not expected.
- const SCEV *IVLimit = 0;
+ const SCEV *IVLimit = nullptr;
// For unit stride, IVCount = Start + BECount with 2's complement overflow.
// For non-zero Start, compute IVCount here.
if (AR->getStart()->isZero())
@@ -1814,7 +1815,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
DeadInsts.clear();
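The IndVarSimplify hunks above leave both spellings in place — Incr == nullptr and !Compare are interchangeable. These checks are pervasive because of the dyn_cast convention of signalling a failed downcast with a null return. A rough standalone imitation of that convention using C++ RTTI (hypothetical Shape/Circle types; LLVM's dyn_cast uses its own isa<> machinery rather than dynamic_cast):

#include <iostream>

struct Shape { virtual ~Shape() = default; };
struct Circle : Shape { double radius = 1.0; };

// Imitates the dyn_cast convention: a null return means "not that kind".
Circle *asCircle(Shape *s) { return dynamic_cast<Circle *>(s); }

int main() {
  Circle circle;
  Shape other;
  if (Circle *c = asCircle(&circle))  // non-null: the downcast succeeded
    std::cout << "radius " << c->radius << "\n";
  if (!asCircle(&other))              // null: bail out, as the passes above do
    std::cout << "not a circle\n";
  return 0;
}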
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 9465952a62..230a381593 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -154,7 +154,7 @@ bool JumpThreading::runOnFunction(Function &F) {
DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
LVI = &getAnalysis<LazyValueInfo>();
@@ -309,7 +309,7 @@ void JumpThreading::FindLoopHeaders(Function &F) {
/// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
if (!Val)
- return 0;
+ return nullptr;
// Undef is "known" enough.
if (UndefValue *U = dyn_cast<UndefValue>(Val))
@@ -353,7 +353,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
// If V is a non-instruction value, or an instruction in a different block,
// then it can't be derived from a PHI.
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0 || I->getParent() != BB) {
+ if (!I || I->getParent() != BB) {
// Okay, if this is a live-in value, see if it has a known value at the end
// of any of our predecessors.
@@ -496,7 +496,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);
Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
- if (Res == 0) {
+ if (!Res) {
if (!isa<Constant>(RHS))
continue;
@@ -582,7 +582,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB, PredValueInfo &Result,
// Either operand will do, so be sure to pick the one that's a known
// constant.
// FIXME: Do this more cleverly if both values are known constants?
- KnownCond = (TrueVal != 0);
+ KnownCond = (TrueVal != nullptr);
}
// See if the select has a known constant value for this predecessor.
@@ -738,7 +738,7 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
Instruction *CondInst = dyn_cast<Instruction>(Condition);
// All the rest of our checks depend on the condition being an instruction.
- if (CondInst == 0) {
+ if (!CondInst) {
// FIXME: Unify this with code below.
if (ProcessThreadableEdges(Condition, BB, Preference))
return true;
@@ -891,7 +891,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
SmallPtrSet<BasicBlock*, 8> PredsScanned;
typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
AvailablePredsTy AvailablePreds;
- BasicBlock *OneUnavailablePred = 0;
+ BasicBlock *OneUnavailablePred = nullptr;
// If we got here, the loaded value is transparent through to the start of the
// block. Check to see if it is available in any of the predecessor blocks.
@@ -905,16 +905,16 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// Scan the predecessor to see if the value is available in the pred.
BBIt = PredBB->end();
- MDNode *ThisTBAATag = 0;
+ MDNode *ThisTBAATag = nullptr;
Value *PredAvailable = FindAvailableLoadedValue(LoadedPtr, PredBB, BBIt, 6,
- 0, &ThisTBAATag);
+ nullptr, &ThisTBAATag);
if (!PredAvailable) {
OneUnavailablePred = PredBB;
continue;
}
// If tbaa tags disagree or are not present, forget about them.
- if (TBAATag != ThisTBAATag) TBAATag = 0;
+ if (TBAATag != ThisTBAATag) TBAATag = nullptr;
// If so, this load is partially redundant. Remember this info so that we
// can create a PHI node.
@@ -930,7 +930,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
// predecessor, we want to insert a merge block for those common predecessors.
// This ensures that we only have to insert one reload, thus not increasing
// code size.
- BasicBlock *UnavailablePred = 0;
+ BasicBlock *UnavailablePred = nullptr;
// If there is exactly one predecessor where the value is unavailable, the
// already computed 'OneUnavailablePred' block is it. If it ends in an
@@ -997,7 +997,7 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
BasicBlock *P = *PI;
AvailablePredsTy::iterator I =
std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
- std::make_pair(P, (Value*)0));
+ std::make_pair(P, (Value*)nullptr));
assert(I != AvailablePreds.end() && I->first == P &&
"Didn't find entry for predecessor!");
@@ -1104,7 +1104,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
SmallPtrSet<BasicBlock*, 16> SeenPreds;
SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
- BasicBlock *OnlyDest = 0;
+ BasicBlock *OnlyDest = nullptr;
BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
@@ -1121,7 +1121,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
BasicBlock *DestBB;
if (isa<UndefValue>(Val))
- DestBB = 0;
+ DestBB = nullptr;
else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
@@ -1172,7 +1172,7 @@ bool JumpThreading::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
// If the threadable edges are branching on an undefined value, we get to pick
// the destination that these predecessors should get to.
- if (MostPopularDest == 0)
+ if (!MostPopularDest)
MostPopularDest = BB->getTerminator()->
getSuccessor(GetBestDestForJumpOnUndef(BB));
@@ -1274,7 +1274,7 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
}
// Determine which value to split on, true, false, or undef if neither.
- ConstantInt *SplitVal = 0;
+ ConstantInt *SplitVal = nullptr;
if (NumTrue > NumFalse)
SplitVal = ConstantInt::getTrue(BB->getContext());
else if (NumTrue != 0 || NumFalse != 0)
@@ -1295,7 +1295,7 @@ bool JumpThreading::ProcessBranchOnXOR(BinaryOperator *BO) {
// help us. However, we can just replace the LHS or RHS with the constant.
if (BlocksToFoldInto.size() ==
cast<PHINode>(BB->front()).getNumIncomingValues()) {
- if (SplitVal == 0) {
+ if (!SplitVal) {
// If all preds provide undef, just nuke the xor, because it is undef too.
BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
BO->eraseFromParent();
@@ -1532,7 +1532,7 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
// can just clone the bits from BB into the end of the new PredBB.
BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());
- if (OldPredBranch == 0 || !OldPredBranch->isUnconditional()) {
+ if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
PredBB = SplitEdge(PredBB, BB, this);
OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
}
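One cast in the JumpThreading hunks deliberately survives the conversion: std::make_pair(P, (Value*)nullptr). With a bare nullptr, make_pair deduces std::nullptr_t, and std::lower_bound would then have to order a pair<..., nullptr_t> probe against the container's pair<..., Value*> elements, for which no operator< exists. A minimal sketch of the constraint (plain C++; Widget standing in for Value):

#include <algorithm>
#include <utility>
#include <vector>

struct Widget {};

int main() {
  std::vector<std::pair<int, Widget *>> v = {{1, nullptr}, {3, nullptr}};
  // The cast keeps the probe's type identical to the element type, so the
  // pair operator< applies; std::make_pair(2, nullptr) would not compile here.
  auto it = std::lower_bound(v.begin(), v.end(),
                             std::make_pair(2, static_cast<Widget *>(nullptr)));
  return (it == v.begin() + 1) ? 0 : 1;
}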
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 6e29e3545f..4b7303c4ef 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -224,7 +224,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
@@ -316,8 +316,8 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
"Parent loop not left in LCSSA form after LICM!");
// Clear out the loop's state information for the next iteration
- CurLoop = 0;
- Preheader = 0;
+ CurLoop = nullptr;
+ Preheader = nullptr;
// If this loop is nested inside of another one, save the alias information
// for when we process the outer loop.
@@ -775,7 +775,7 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// We start with an alignment of one and try to find instructions that allow
// us to prove better alignment.
unsigned Alignment = 1;
- MDNode *TBAATag = 0;
+ MDNode *TBAATag = nullptr;
// Check that all of the pointers in the alias set have the same type. We
// cannot (yet) promote a memory location that is loaded and stored in
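The DLP ? &DLP->getDataLayout() : nullptr pattern, repeated across these passes, is well-formed because std::nullptr_t converts implicitly to any object pointer type, so the conditional expression as a whole takes the pointer type of the other arm. A standalone sketch with hypothetical Analysis/Result types:

#include <iostream>

struct Result { int Value = 7; };
struct Analysis {
  Result R;
  Result &getResult() { return R; }
};

// Yields the cached result if the analysis ran, or null if it did not.
static Result *queryResult(Analysis *A) {
  return A ? &A->getResult() : nullptr;  // the ?: expression has type Result *
}

int main() {
  Analysis A;
  Result *R = queryResult(&A);
  std::cout << (R ? R->Value : -1) << "\n";             // prints 7
  std::cout << (queryResult(nullptr) ? 1 : -1) << "\n"; // prints -1
  return 0;
}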
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 47ae9917f9..eaa73572e2 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -139,7 +139,7 @@ namespace {
static char ID;
explicit LoopIdiomRecognize() : LoopPass(ID) {
initializeLoopIdiomRecognizePass(*PassRegistry::getPassRegistry());
- DL = 0; DT = 0; SE = 0; TLI = 0; TTI = 0;
+ DL = nullptr; DT = nullptr; SE = nullptr; TLI = nullptr; TTI = nullptr;
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override;
@@ -183,7 +183,7 @@ namespace {
if (DL)
return DL;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
return DL;
}
@@ -248,7 +248,7 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE,
for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
Value *Op = DeadInst->getOperand(op);
- DeadInst->setOperand(op, 0);
+ DeadInst->setOperand(op, nullptr);
// If this operand just became dead, add it to the NowDeadInsts list.
if (!Op->use_empty()) continue;
@@ -293,9 +293,9 @@ bool LIRUtil::isAlmostEmpty(BasicBlock *BB) {
BasicBlock *LIRUtil::getPrecondBb(BasicBlock *PreHead) {
if (BasicBlock *BB = PreHead->getSinglePredecessor()) {
BranchInst *Br = getBranch(BB);
- return Br && Br->isConditional() ? BB : 0;
+ return Br && Br->isConditional() ? BB : nullptr;
}
- return 0;
+ return nullptr;
}
//===----------------------------------------------------------------------===//
@@ -305,7 +305,7 @@ BasicBlock *LIRUtil::getPrecondBb(BasicBlock *PreHead) {
//===----------------------------------------------------------------------===//
NclPopcountRecognize::NclPopcountRecognize(LoopIdiomRecognize &TheLIR):
- LIR(TheLIR), CurLoop(TheLIR.getLoop()), PreCondBB(0) {
+ LIR(TheLIR), CurLoop(TheLIR.getLoop()), PreCondBB(nullptr) {
}
bool NclPopcountRecognize::preliminaryScreen() {
@@ -345,22 +345,22 @@ bool NclPopcountRecognize::preliminaryScreen() {
Value *NclPopcountRecognize::matchCondition (BranchInst *Br,
BasicBlock *LoopEntry) const {
if (!Br || !Br->isConditional())
- return 0;
+ return nullptr;
ICmpInst *Cond = dyn_cast<ICmpInst>(Br->getCondition());
if (!Cond)
- return 0;
+ return nullptr;
ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
if (!CmpZero || !CmpZero->isZero())
- return 0;
+ return nullptr;
ICmpInst::Predicate Pred = Cond->getPredicate();
if ((Pred == ICmpInst::ICMP_NE && Br->getSuccessor(0) == LoopEntry) ||
(Pred == ICmpInst::ICMP_EQ && Br->getSuccessor(1) == LoopEntry))
return Cond->getOperand(0);
- return 0;
+ return nullptr;
}
bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
@@ -391,9 +391,9 @@ bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
Value *VarX1, *VarX0;
PHINode *PhiX, *CountPhi;
- DefX2 = CountInst = 0;
- VarX1 = VarX0 = 0;
- PhiX = CountPhi = 0;
+ DefX2 = CountInst = nullptr;
+ VarX1 = VarX0 = nullptr;
+ PhiX = CountPhi = nullptr;
LoopEntry = *(CurLoop->block_begin());
// step 1: Check if the loop-back branch is in desirable form.
@@ -440,7 +440,7 @@ bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
// step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
{
- CountInst = NULL;
+ CountInst = nullptr;
for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI(),
IterE = LoopEntry->end(); Iter != IterE; Iter++) {
Instruction *Inst = Iter;
@@ -745,7 +745,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
// If processing the store invalidated our iterator, start over from the
// top of the block.
- if (InstPtr == 0)
+ if (!InstPtr)
I = BB->begin();
continue;
}
@@ -758,7 +758,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
// If processing the memset invalidated our iterator, start over from the
// top of the block.
- if (InstPtr == 0)
+ if (!InstPtr)
I = BB->begin();
continue;
}
@@ -785,7 +785,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
// random store we can't handle.
const SCEVAddRecExpr *StoreEv =
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
- if (StoreEv == 0 || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
+ if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
return false;
// Check to see if the stride matches the size of the store. If so, then we
@@ -793,7 +793,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
unsigned StoreSize = (unsigned)SizeInBits >> 3;
const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
- if (Stride == 0 || StoreSize != Stride->getValue()->getValue()) {
+ if (!Stride || StoreSize != Stride->getValue()->getValue()) {
// TODO: Could also handle negative stride here someday; that will require
// the validity check in mayLoopAccessLocation to be updated though.
// Enable this to print exact negative strides.
@@ -842,7 +842,7 @@ processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
// loop, which indicates a strided store. If we have something else, it's a
// random store we can't handle.
const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
- if (Ev == 0 || Ev->getLoop() != CurLoop || !Ev->isAffine())
+ if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
return false;
// Reject memsets that are so large that they overflow an unsigned.
@@ -856,7 +856,7 @@ processLoopMemSet(MemSetInst *MSI, const SCEV *BECount) {
// TODO: Could also handle negative stride here someday; that will require the
// validity check in mayLoopAccessLocation to be updated though.
- if (Stride == 0 || MSI->getLength() != Stride->getValue())
+ if (!Stride || MSI->getLength() != Stride->getValue())
return false;
return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
@@ -909,23 +909,23 @@ static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
Constant *C = dyn_cast<Constant>(V);
- if (C == 0) return 0;
+ if (!C) return nullptr;
// Only handle simple values that are a power of two bytes in size.
uint64_t Size = DL.getTypeSizeInBits(V->getType());
if (Size == 0 || (Size & 7) || (Size & (Size-1)))
- return 0;
+ return nullptr;
// Don't care enough about darwin/ppc to implement this.
if (DL.isBigEndian())
- return 0;
+ return nullptr;
// Convert to size in bytes.
Size /= 8;
// TODO: If CI is larger than 16 bytes, we can try slicing it in half to see
// if the top and bottom are the same (e.g. for vectors and large integers).
- if (Size > 16) return 0;
+ if (Size > 16) return nullptr;
// If the constant is exactly 16 bytes, just use it.
if (Size == 16) return C;
@@ -950,7 +950,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// are stored. A store of i32 0x01020304 can never be turned into a memset,
// but it can be turned into memset_pattern if the target supports it.
Value *SplatValue = isBytewiseValue(StoredVal);
- Constant *PatternValue = 0;
+ Constant *PatternValue = nullptr;
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
@@ -961,13 +961,13 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// promote the memset.
CurLoop->isLoopInvariant(SplatValue)) {
// Keep and use SplatValue.
- PatternValue = 0;
+ PatternValue = nullptr;
} else if (DestAS == 0 &&
TLI->has(LibFunc::memset_pattern16) &&
(PatternValue = getMemSetPatternValue(StoredVal, *DL))) {
// Don't create memset_pattern16s with address spaces.
// It looks like we can use PatternValue!
- SplatValue = 0;
+ SplatValue = nullptr;
} else {
// Otherwise, this isn't an idiom we can transform. For example, we can't
// do anything with a 3-byte store.
@@ -1034,7 +1034,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
Int8PtrTy,
Int8PtrTy,
IntPtr,
- (void*)0);
+ (void*)nullptr);
// Otherwise we should form a memset_pattern16. PatternValue is known to be
// a constant array of 16 bytes. Plop the value into a mergeable global.
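The (void*)nullptr kept in the getOrInsertFunction call above is the classic C-variadic terminator. Arguments passed through ... get no type checking: a bare literal 0 goes through as an int, which need not match pointer width, while nullptr itself is specified to pass as void*; the explicit cast simply pins the terminator's type either way. A small sketch of the same convention (hypothetical countArgs):

#include <cstdarg>
#include <iostream>

// Counts C-string arguments until a null pointer terminates the list.
static int countArgs(const char *first, ...) {
  if (!first)
    return 0;
  int n = 1;
  va_list ap;
  va_start(ap, first);
  while (va_arg(ap, const char *) != nullptr)
    ++n;
  va_end(ap);
  return n;
}

int main() {
  // The cast guarantees a pointer-typed terminator; a bare literal 0 here
  // would be passed as an int, which is not portable.
  std::cout << countArgs("a", "b", "c", (const char *)nullptr) << "\n"; // 3
  return 0;
}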
diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp
index 39eddd9be0..a61923cabf 100644
--- a/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -71,10 +71,10 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- DominatorTree *DT = DTWP ? &DTWP->getDomTree() : 0;
+ DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = &getAnalysis<LoopInfo>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallVector<BasicBlock*, 8> ExitBlocks;
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index bda755e495..8b5e036dbe 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -1134,7 +1134,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) {
SE = &getAnalysis<ScalarEvolution>();
TLI = &getAnalysis<TargetLibraryInfo>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
BasicBlock *Header = L->getHeader();
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index 923d851138..5c747e1ac5 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -291,7 +291,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
BasicBlock *OrigLatch = L->getLoopLatch();
BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
- if (BI == 0 || BI->isUnconditional())
+ if (!BI || BI->isUnconditional())
return false;
// If the loop header is not one of the loop exiting blocks then
@@ -302,7 +302,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// If the loop latch already contains a branch that leaves the loop then the
// loop is already rotated.
- if (OrigLatch == 0)
+ if (!OrigLatch)
return false;
// Rotate if either the loop latch does *not* exit the loop, or if the loop
@@ -329,7 +329,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
// If the loop could not be converted to canonical form, it must have an
// indirectbr in it, just give up.
- if (OrigPreheader == 0)
+ if (!OrigPreheader)
return false;
// Anything ScalarEvolution may know about this loop or the PHI nodes
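The if (!BI || BI->isUnconditional()) guards in the LoopRotation hunks rely on || short-circuiting: the null test is evaluated first, so the member call on the right is never reached through a null pointer. A tiny sketch (hypothetical Branch type):

#include <iostream>

struct Branch {
  bool Conditional;
  bool isUnconditional() const { return !Conditional; }
};

// The null check runs first; short-circuiting keeps the method call safe.
static bool canRotate(const Branch *BI) {
  if (!BI || BI->isUnconditional())
    return false;
  return true;
}

int main() {
  Branch B{true};
  std::cout << canRotate(nullptr) << canRotate(&B) << "\n";  // prints 01
  return 0;
}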
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 15a4e8e3c6..16a001ad93 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -251,8 +251,8 @@ struct Formula {
int64_t UnfoldedOffset;
Formula()
- : BaseGV(0), BaseOffset(0), HasBaseReg(false), Scale(0), ScaledReg(0),
- UnfoldedOffset(0) {}
+ : BaseGV(nullptr), BaseOffset(0), HasBaseReg(false), Scale(0),
+ ScaledReg(nullptr), UnfoldedOffset(0) {}
void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);
@@ -361,7 +361,7 @@ Type *Formula::getType() const {
return !BaseRegs.empty() ? BaseRegs.front()->getType() :
ScaledReg ? ScaledReg->getType() :
BaseGV ? BaseGV->getType() :
- 0;
+ nullptr;
}
/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
@@ -488,11 +488,11 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
// Check for a division of a constant by a constant.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
if (!RC)
- return 0;
+ return nullptr;
const APInt &LA = C->getValue()->getValue();
const APInt &RA = RC->getValue()->getValue();
if (LA.srem(RA) != 0)
- return 0;
+ return nullptr;
return SE.getConstant(LA.sdiv(RA));
}
@@ -501,16 +501,16 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
IgnoreSignificantBits);
- if (!Step) return 0;
+ if (!Step) return nullptr;
const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
IgnoreSignificantBits);
- if (!Start) return 0;
+ if (!Start) return nullptr;
// FlagNW is independent of the start value, step direction, and is
// preserved with smaller magnitude steps.
// FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
}
- return 0;
+ return nullptr;
}
// Distribute the sdiv over add operands, if the add doesn't overflow.
@@ -521,12 +521,12 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
I != E; ++I) {
const SCEV *Op = getExactSDiv(*I, RHS, SE,
IgnoreSignificantBits);
- if (!Op) return 0;
+ if (!Op) return nullptr;
Ops.push_back(Op);
}
return SE.getAddExpr(Ops);
}
- return 0;
+ return nullptr;
}
// Check for a multiply operand that we can pull RHS out of.
@@ -545,13 +545,13 @@ static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
}
Ops.push_back(S);
}
- return Found ? SE.getMulExpr(Ops) : 0;
+ return Found ? SE.getMulExpr(Ops) : nullptr;
}
- return 0;
+ return nullptr;
}
// Otherwise we don't know.
- return 0;
+ return nullptr;
}
/// ExtractImmediate - If S involves the addition of a constant integer value,
@@ -605,7 +605,7 @@ static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
SCEV::FlagAnyWrap);
return Result;
}
- return 0;
+ return nullptr;
}
/// isAddressUse - Returns true if the specified instruction is using the
@@ -756,12 +756,12 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
Value *V = DeadInsts.pop_back_val();
Instruction *I = dyn_cast_or_null<Instruction>(V);
- if (I == 0 || !isInstructionTriviallyDead(I))
+ if (!I || !isInstructionTriviallyDead(I))
continue;
for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
if (Instruction *U = dyn_cast<Instruction>(*OI)) {
- *OI = 0;
+ *OI = nullptr;
if (U->use_empty())
DeadInsts.push_back(U);
}
@@ -829,7 +829,7 @@ public:
const SmallVectorImpl<int64_t> &Offsets,
ScalarEvolution &SE, DominatorTree &DT,
const LSRUse &LU,
- SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);
+ SmallPtrSet<const SCEV *, 16> *LoserRegs = nullptr);
void print(raw_ostream &OS) const;
void dump() const;
@@ -1048,7 +1048,8 @@ struct LSRFixup {
}
LSRFixup::LSRFixup()
- : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}
+ : UserInst(nullptr), OperandValToReplace(nullptr), LUIdx(~size_t(0)),
+ Offset(0) {}
/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
@@ -1184,7 +1185,7 @@ public:
MaxOffset(INT64_MIN),
AllFixupsOutsideLoop(true),
RigidFormula(false),
- WidestFixupType(0) {}
+ WidestFixupType(nullptr) {}
bool HasFormulaWithSameRegs(const Formula &F) const;
bool InsertFormula(const Formula &F);
@@ -1516,7 +1517,7 @@ struct IVChain {
SmallVector<IVInc,1> Incs;
const SCEV *ExprBase;
- IVChain() : ExprBase(0) {}
+ IVChain() : ExprBase(nullptr) {}
IVChain(const IVInc &Head, const SCEV *Base)
: Incs(1, Head), ExprBase(Base) {}
@@ -1722,7 +1723,7 @@ void LSRInstance::OptimizeShadowIV() {
IVUsers::const_iterator CandidateUI = UI;
++UI;
Instruction *ShadowUse = CandidateUI->getUser();
- Type *DestTy = 0;
+ Type *DestTy = nullptr;
bool IsSigned = false;
/* If shadow use is an int->float cast, then insert a second IV
@@ -1784,7 +1785,7 @@ void LSRInstance::OptimizeShadowIV() {
continue;
/* Initialize new IV, double d = 0.0 in above example. */
- ConstantInt *C = 0;
+ ConstantInt *C = nullptr;
if (Incr->getOperand(0) == PH)
C = dyn_cast<ConstantInt>(Incr->getOperand(1));
else if (Incr->getOperand(1) == PH)
@@ -1906,7 +1907,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// for ICMP_ULE here because the comparison would be with zero, which
// isn't interesting.
CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
- const SCEVNAryExpr *Max = 0;
+ const SCEVNAryExpr *Max = nullptr;
if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
Pred = ICmpInst::ICMP_SLE;
Max = S;
@@ -1949,7 +1950,7 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// Check the right operand of the select, and remember it, as it will
// be used in the new comparison instruction.
- Value *NewRHS = 0;
+ Value *NewRHS = nullptr;
if (ICmpInst::isTrueWhenEqual(Pred)) {
// Look for n+1, and grab n.
if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
@@ -2019,7 +2020,7 @@ LSRInstance::OptimizeLoopTermCond() {
continue;
// Search IVUsesByStride to find Cond's IVUse if there is one.
- IVStrideUse *CondUse = 0;
+ IVStrideUse *CondUse = nullptr;
ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
if (!FindIVUserForCond(Cond, CondUse))
continue;
@@ -2072,12 +2073,12 @@ LSRInstance::OptimizeLoopTermCond() {
// Check for possible scaled-address reuse.
Type *AccessTy = getAccessType(UI->getUser());
int64_t Scale = C->getSExtValue();
- if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0,
+ if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr,
/*BaseOffset=*/ 0,
/*HasBaseReg=*/ false, Scale))
goto decline_post_inc;
Scale = -Scale;
- if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ 0,
+ if (TTI.isLegalAddressingMode(AccessTy, /*BaseGV=*/ nullptr,
/*BaseOffset=*/ 0,
/*HasBaseReg=*/ false, Scale))
goto decline_post_inc;
@@ -2149,12 +2150,12 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
return false;
// Conservatively assume HasBaseReg is true for now.
if (NewOffset < LU.MinOffset) {
- if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+ if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
LU.MaxOffset - NewOffset, HasBaseReg))
return false;
NewMinOffset = NewOffset;
} else if (NewOffset > LU.MaxOffset) {
- if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+ if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
NewOffset - LU.MinOffset, HasBaseReg))
return false;
NewMaxOffset = NewOffset;
@@ -2184,7 +2185,7 @@ LSRInstance::getUse(const SCEV *&Expr,
int64_t Offset = ExtractImmediate(Expr, SE);
// Basic uses can't accept any offset, for example.
- if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ 0,
+ if (!isAlwaysFoldable(TTI, Kind, AccessTy, /*BaseGV=*/ nullptr,
Offset, /*HasBaseReg=*/ true)) {
Expr = Copy;
Offset = 0;
@@ -2268,7 +2269,7 @@ LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
}
// Nothing looked good.
- return 0;
+ return nullptr;
}
void LSRInstance::CollectInterestingTypesAndFactors() {
@@ -2386,7 +2387,7 @@ static const SCEV *getExprBase(const SCEV *S) {
default: // including scUnknown.
return S;
case scConstant:
- return 0;
+ return nullptr;
case scTruncate:
return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
case scZeroExtend:
@@ -2477,7 +2478,7 @@ isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
&& SE.getSCEV(Chain.tailUserInst()) == Chain.Incs[0].IncExpr) {
--cost;
}
- const SCEV *LastIncExpr = 0;
+ const SCEV *LastIncExpr = nullptr;
unsigned NumConstIncrements = 0;
unsigned NumVarIncrements = 0;
unsigned NumReusedIncrements = 0;
@@ -2536,7 +2537,7 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
// Visit all existing chains. Check if its IVOper can be computed as a
// profitable loop invariant increment from the last link in the Chain.
unsigned ChainIdx = 0, NChains = IVChainVec.size();
- const SCEV *LastIncExpr = 0;
+ const SCEV *LastIncExpr = nullptr;
for (; ChainIdx < NChains; ++ChainIdx) {
IVChain &Chain = IVChainVec[ChainIdx];
@@ -2756,7 +2757,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
int64_t IncOffset = IncConst->getValue()->getSExtValue();
if (!isAlwaysFoldable(TTI, LSRUse::Address,
- getAccessType(UserInst), /*BaseGV=*/ 0,
+ getAccessType(UserInst), /*BaseGV=*/ nullptr,
IncOffset, /*HasBaseReg=*/ false))
return false;
@@ -2774,7 +2775,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
// findIVOperand returns IVOpEnd if it can no longer find a valid IV user.
User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
IVOpEnd, L, SE);
- Value *IVSrc = 0;
+ Value *IVSrc = nullptr;
while (IVOpIter != IVOpEnd) {
IVSrc = getWideOperand(*IVOpIter);
@@ -2801,7 +2802,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
Type *IVTy = IVSrc->getType();
Type *IntTy = SE.getEffectiveSCEVType(IVTy);
- const SCEV *LeftOverExpr = 0;
+ const SCEV *LeftOverExpr = nullptr;
for (IVChain::const_iterator IncI = Chain.begin(),
IncE = Chain.end(); IncI != IncE; ++IncI) {
@@ -2832,7 +2833,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
TTI)) {
assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
IVSrc = IVOper;
- LeftOverExpr = 0;
+ LeftOverExpr = nullptr;
}
}
Type *OperTy = IncI->IVOperand->getType();
@@ -2887,7 +2888,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
LF.PostIncLoops = UI->getPostIncLoops();
LSRUse::KindType Kind = LSRUse::Basic;
- Type *AccessTy = 0;
+ Type *AccessTy = nullptr;
if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
Kind = LSRUse::Address;
AccessTy = getAccessType(LF.UserInst);
@@ -2918,7 +2919,7 @@ void LSRInstance::CollectFixupsAndInitialFormulae() {
if (SE.isLoopInvariant(N, L) && isSafeToExpand(N, SE)) {
// S is normalized, so normalize N before folding it into S
// to keep the result normalized.
- N = TransformForPostIncUse(Normalize, N, CI, 0,
+ N = TransformForPostIncUse(Normalize, N, CI, nullptr,
LF.PostIncLoops, SE, DT);
Kind = LSRUse::ICmpZero;
S = SE.getMinusSCEV(N, S);
@@ -3069,7 +3070,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
LSRFixup &LF = getNewFixup();
LF.UserInst = const_cast<Instruction *>(UserInst);
LF.OperandValToReplace = U;
- std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
+ std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, nullptr);
LF.LUIdx = P.first;
LF.Offset = P.second;
LSRUse &LU = Uses[LF.LUIdx];
@@ -3108,7 +3109,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
if (Remainder)
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
}
- return 0;
+ return nullptr;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
// Split a non-zero base out of an addrec.
if (AR->getStart()->isZero())
@@ -3120,7 +3121,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
// does not pertain to this loop.
if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
- Remainder = 0;
+ Remainder = nullptr;
}
if (Remainder != AR->getStart()) {
if (!Remainder)
@@ -3142,7 +3143,7 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
CollectSubexprs(Mul->getOperand(1), C, Ops, L, SE, Depth+1);
if (Remainder)
Ops.push_back(SE.getMulExpr(C, Remainder));
- return 0;
+ return nullptr;
}
}
return S;
@@ -3160,7 +3161,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
const SCEV *BaseReg = Base.BaseRegs[i];
SmallVector<const SCEV *, 8> AddOps;
- const SCEV *Remainder = CollectSubexprs(BaseReg, 0, AddOps, L, SE);
+ const SCEV *Remainder = CollectSubexprs(BaseReg, nullptr, AddOps, L, SE);
if (Remainder)
AddOps.push_back(Remainder);
@@ -4034,7 +4035,7 @@ void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
// Pick the register which is used by the most LSRUses, which is likely
// to be a good reuse register candidate.
- const SCEV *Best = 0;
+ const SCEV *Best = nullptr;
unsigned BestNum = 0;
for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
I != E; ++I) {
@@ -4241,7 +4242,7 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
}
bool AllDominate = true;
- Instruction *BetterPos = 0;
+ Instruction *BetterPos = nullptr;
Instruction *Tentative = IDom->getTerminator();
for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
E = Inputs.end(); I != E; ++I) {
@@ -4380,11 +4381,11 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
LF.UserInst, LF.OperandValToReplace,
Loops, SE, DT);
- Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
+ Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, IP)));
}
// Expand the ScaledReg portion.
- Value *ICmpScaledV = 0;
+ Value *ICmpScaledV = nullptr;
if (F.Scale != 0) {
const SCEV *ScaledS = F.ScaledReg;
@@ -4400,7 +4401,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
// of the icmp.
assert(F.Scale == -1 &&
"The only scale supported by ICmpZero uses is -1!");
- ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
+ ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, IP);
} else {
// Otherwise just expand the scaled register and an explicit scale,
// which is expected to be matched as part of the address.
@@ -4411,7 +4412,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
Ops.clear();
Ops.push_back(SE.getUnknown(FullV));
}
- ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
+ ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP));
ScaledS = SE.getMulExpr(ScaledS,
SE.getConstant(ScaledS->getType(), F.Scale));
Ops.push_back(ScaledS);
@@ -4532,7 +4533,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
Loop *PNLoop = LI.getLoopFor(Parent);
if (!PNLoop || Parent != PNLoop->getHeader()) {
// Split the critical edge.
- BasicBlock *NewBB = 0;
+ BasicBlock *NewBB = nullptr;
if (!Parent->isLandingPad()) {
NewBB = SplitCriticalEdge(BB, Parent, P,
/*MergeIdenticalEdges=*/true,
@@ -4561,7 +4562,7 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
}
std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
- Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
+ Inserted.insert(std::make_pair(BB, static_cast<Value *>(nullptr)));
if (!Pair.second)
PN->setIncomingValue(i, Pair.first->second);
else {
@@ -4671,7 +4672,7 @@ LSRInstance::LSRInstance(Loop *L, Pass *P)
DT(P->getAnalysis<DominatorTreeWrapperPass>().getDomTree()),
LI(P->getAnalysis<LoopInfo>()),
TTI(P->getAnalysis<TargetTransformInfo>()), L(L), Changed(false),
- IVIncInsertPos(0) {
+ IVIncInsertPos(nullptr) {
// If LoopSimplify form is not available, stay out of trouble.
if (!L->isLoopSimplifyForm())
return;
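LoserRegs = nullptr above, like the Constant **Val = nullptr defaults elsewhere in this patch, is the optional out-parameter idiom: a caller that wants the extra results passes an address, everyone else takes the default and the function skips the bookkeeping. A sketch of that API shape with hypothetical names and std containers:

#include <iostream>
#include <set>

// Computes a cost; optionally records which inputs were rejected.
static int rateInputs(const std::set<int> &Inputs,
                      std::set<int> *Rejected = nullptr) {
  int Cost = 0;
  for (int V : Inputs) {
    if (V < 0) {
      if (Rejected)        // only do the extra bookkeeping when asked
        Rejected->insert(V);
      continue;
    }
    Cost += V;
  }
  return Cost;
}

int main() {
  std::set<int> Inputs = {3, -1, 4};
  std::cout << rateInputs(Inputs) << "\n";  // 7, no tracking requested
  std::set<int> Rejected;
  rateInputs(Inputs, &Rejected);
  std::cout << Rejected.size() << "\n";     // 1
  return 0;
}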
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 0becadceda..977c53a3bc 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -97,7 +97,7 @@ namespace {
public:
LUAnalysisCache() :
- CurLoopInstructions(0), CurrentLoopProperties(0),
+ CurLoopInstructions(nullptr), CurrentLoopProperties(nullptr),
MaxSize(Threshold)
{}
@@ -152,8 +152,8 @@ namespace {
static char ID; // Pass ID, replacement for typeid
explicit LoopUnswitch(bool Os = false) :
LoopPass(ID), OptimizeForSize(Os), redoLoop(false),
- currentLoop(0), DT(0), loopHeader(0),
- loopPreheader(0) {
+ currentLoop(nullptr), DT(nullptr), loopHeader(nullptr),
+ loopPreheader(nullptr) {
initializeLoopUnswitchPass(*PassRegistry::getPassRegistry());
}
@@ -204,8 +204,8 @@ namespace {
Instruction *InsertPt);
void SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L);
- bool IsTrivialUnswitchCondition(Value *Cond, Constant **Val = 0,
- BasicBlock **LoopExit = 0);
+ bool IsTrivialUnswitchCondition(Value *Cond, Constant **Val = nullptr,
+ BasicBlock **LoopExit = nullptr);
};
}
@@ -274,8 +274,8 @@ void LUAnalysisCache::forgetLoop(const Loop *L) {
LoopsProperties.erase(LIt);
}
- CurrentLoopProperties = 0;
- CurLoopInstructions = 0;
+ CurrentLoopProperties = nullptr;
+ CurLoopInstructions = nullptr;
}
// Mark case value as unswitched.
@@ -346,10 +346,10 @@ static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
// We can never unswitch on vector conditions.
if (Cond->getType()->isVectorTy())
- return 0;
+ return nullptr;
// Constants should be folded, not unswitched on!
- if (isa<Constant>(Cond)) return 0;
+ if (isa<Constant>(Cond)) return nullptr;
// TODO: Handle: br (VARIANT|INVARIANT).
@@ -369,7 +369,7 @@ static Value *FindLIVLoopCondition(Value *Cond, Loop *L, bool &Changed) {
return RHS;
}
- return 0;
+ return nullptr;
}
bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
@@ -380,7 +380,7 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
LPM = &LPM_Ref;
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- DT = DTWP ? &DTWP->getDomTree() : 0;
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
currentLoop = L;
Function *F = currentLoop->getHeader()->getParent();
bool Changed = false;
@@ -452,7 +452,7 @@ bool LoopUnswitch::processCurrentLoop() {
// Find a value to unswitch on:
// FIXME: this should choose the most expensive case!
// FIXME: scan for a case with a non-critical edge?
- Constant *UnswitchVal = 0;
+ Constant *UnswitchVal = nullptr;
// Do not process the same value again and again.
// At this point we have some cases already unswitched and
@@ -509,7 +509,7 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
if (!L->contains(BB)) {
// Otherwise, this is a loop exit; this is fine so long as this is the
// first exit.
- if (ExitBB != 0) return false;
+ if (ExitBB) return false;
ExitBB = BB;
return true;
}
@@ -536,10 +536,10 @@ static bool isTrivialLoopExitBlockHelper(Loop *L, BasicBlock *BB,
static BasicBlock *isTrivialLoopExitBlock(Loop *L, BasicBlock *BB) {
std::set<BasicBlock*> Visited;
Visited.insert(L->getHeader()); // Branches to header make infinite loops.
- BasicBlock *ExitBB = 0;
+ BasicBlock *ExitBB = nullptr;
if (isTrivialLoopExitBlockHelper(L, BB, ExitBB, Visited))
return ExitBB;
- return 0;
+ return nullptr;
}
/// IsTrivialUnswitchCondition - Check to see if this unswitch condition is
@@ -560,7 +560,7 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
TerminatorInst *HeaderTerm = Header->getTerminator();
LLVMContext &Context = Header->getContext();
- BasicBlock *LoopExitBB = 0;
+ BasicBlock *LoopExitBB = nullptr;
if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) {
// If the header block doesn't end with a conditional branch on Cond, we
// can't handle it.
@@ -630,8 +630,8 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
/// unswitch the loop, reprocess the pieces, then return true.
bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
Function *F = loopHeader->getParent();
- Constant *CondVal = 0;
- BasicBlock *ExitBlock = 0;
+ Constant *CondVal = nullptr;
+ BasicBlock *ExitBlock = nullptr;
if (IsTrivialUnswitchCondition(LoopCond, &CondVal, &ExitBlock)) {
// If the condition is trivial, always unswitch. There is no code growth
@@ -1000,7 +1000,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// If we know that LIC is not Val, use this info to simplify code.
SwitchInst *SI = dyn_cast<SwitchInst>(UI);
- if (SI == 0 || !isa<ConstantInt>(Val)) continue;
+ if (!SI || !isa<ConstantInt>(Val)) continue;
SwitchInst::CaseIt DeadCase = SI->findCaseValue(cast<ConstantInt>(Val));
// Default case is live for multiple values.
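forgetLoop in the LoopUnswitch hunks above nulls its cached pointers instead of leaving them dangling, so a stale access fails a cheap null check rather than touching freed memory. A compact sketch of that invalidate-by-null discipline (hypothetical Cache type; std::map standing in for whatever map the pass keeps):

#include <iostream>
#include <map>
#include <string>

struct Properties { int Size = 0; };

struct Cache {
  std::map<std::string, Properties> Table;
  Properties *Current = nullptr;   // cached pointer into Table

  void remember(const std::string &Key, int Size) {
    Current = &Table[Key];
    Current->Size = Size;
  }
  void forget(const std::string &Key) {
    auto It = Table.find(Key);
    if (It == Table.end())
      return;
    if (Current == &It->second)
      Current = nullptr;           // invalidate before erasing the entry
    Table.erase(It);
  }
};

int main() {
  Cache C;
  C.remember("loop", 42);
  C.forget("loop");
  std::cout << (C.Current ? C.Current->Size : -1) << "\n";  // -1: safely null
  return 0;
}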
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
index 89963fcfe4..4251ac47ed 100644
--- a/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -43,7 +43,7 @@ static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) {
Value *Val = RMWI->getValOperand();
LoadInst *Orig = Builder.CreateLoad(Ptr);
- Value *Res = NULL;
+ Value *Res = nullptr;
switch (RMWI->getOperation()) {
default: llvm_unreachable("Unexpected RMW operation");
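Value *Res = nullptr; followed by the switch is the initialize-then-select shape used throughout this patch: starting from a typed null means a path that forgets to assign trips an assertion (or crashes deterministically) instead of reading an uninitialized pointer. A minimal sketch with hypothetical op codes:

#include <cassert>
#include <iostream>

enum class Op { Add, Max };

static const char *describe(Op O) {
  const char *Res = nullptr;              // deliberate null start
  switch (O) {
  case Op::Add: Res = "add"; break;
  case Op::Max: Res = "max"; break;
  }
  assert(Res && "unexpected operation");  // a missed case fails loudly
  return Res;
}

int main() {
  std::cout << describe(Op::Add) << "\n";
  return 0;
}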
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index b5114541d3..b6bc792288 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -50,7 +50,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
int64_t Offset = 0;
for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (OpC == 0)
+ if (!OpC)
return VariableIdxFound = true;
if (OpC->isZero()) continue; // No offset.
@@ -90,12 +90,12 @@ static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
// If one pointer is a GEP and the other isn't, then see if the GEP is a
// constant offset from the base, as in "P" and "gep P, 1".
- if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
+ if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
return !VariableIdxFound;
}
- if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
+ if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
return !VariableIdxFound;
}
@@ -318,9 +318,9 @@ namespace {
static char ID; // Pass identification, replacement for typeid
MemCpyOpt() : FunctionPass(ID) {
initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
- MD = 0;
- TLI = 0;
- DL = 0;
+ MD = nullptr;
+ TLI = nullptr;
+ DL = nullptr;
}
bool runOnFunction(Function &F) override;
@@ -374,7 +374,7 @@ INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
Value *StartPtr, Value *ByteVal) {
- if (DL == 0) return 0;
+ if (!DL) return nullptr;
// Okay, so we now have a single store that is splatable. Scan to find
// all subsequent stores of the same value to offset from the same pointer.
@@ -427,7 +427,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// If we have no ranges, then we just had a single store with nothing that
// could be merged in. This is a very common case of course.
if (Ranges.empty())
- return 0;
+ return nullptr;
// If we had at least one store that could be merged in, add the starting
// store as well. We try to avoid this unless there is at least something
@@ -441,7 +441,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
// Now that we have full information about ranges, loop over the ranges and
// emit memset's for anything big enough to be worthwhile.
- Instruction *AMemSet = 0;
+ Instruction *AMemSet = nullptr;
for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
I != E; ++I) {
const MemsetRange &Range = *I;
@@ -492,7 +492,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (!SI->isSimple()) return false;
- if (DL == 0) return false;
+ if (!DL) return false;
// Detect cases where we're performing call slot forwarding, but
// happen to be using a load-store pair to implement it, rather than
@@ -501,7 +501,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
if (LI->isSimple() && LI->hasOneUse() &&
LI->getParent() == SI->getParent()) {
MemDepResult ldep = MD->getDependency(LI);
- CallInst *C = 0;
+ CallInst *C = nullptr;
if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
C = dyn_cast<CallInst>(ldep.getInst());
@@ -513,7 +513,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
E = C; I != E; --I) {
if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
- C = 0;
+ C = nullptr;
break;
}
}
@@ -604,7 +604,7 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
return false;
// Check that all of src is copied to dest.
- if (DL == 0) return false;
+ if (!DL) return false;
ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
if (!srcArraySize)
@@ -847,7 +847,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
// The optimizations after this point require the memcpy size.
ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
- if (CopySize == 0) return false;
+ if (!CopySize) return false;
// There are three possible optimizations we can do for memcpy:
// a) memcpy-memcpy xform which exposes redundancy for DSE.
@@ -930,7 +930,7 @@ bool MemCpyOpt::processMemMove(MemMoveInst *M) {
/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
- if (DL == 0) return false;
+ if (!DL) return false;
// Find out what feeds this byval argument.
Value *ByValArg = CS.getArgument(ArgNo);
@@ -947,13 +947,13 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
// a memcpy, see if we can byval from the source of the memcpy instead of the
// result.
MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
- if (MDep == 0 || MDep->isVolatile() ||
+ if (!MDep || MDep->isVolatile() ||
ByValArg->stripPointerCasts() != MDep->getDest())
return false;
// The length of the memcpy must be larger than or equal to the size of the byval.
ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
- if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
+ if (!C1 || C1->getValue().getZExtValue() < ByValSize)
return false;
// Get the alignment of the byval. If the call doesn't specify the alignment,
@@ -1044,7 +1044,7 @@ bool MemCpyOpt::runOnFunction(Function &F) {
bool MadeChange = false;
MD = &getAnalysis<MemoryDependenceAnalysis>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// If we don't have at least memset and memcpy, there is little point of doing
@@ -1059,6 +1059,6 @@ bool MemCpyOpt::runOnFunction(Function &F) {
MadeChange = true;
}
- MD = 0;
+ MD = nullptr;
return MadeChange;
}
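
Most of the conditions rewritten in this file guard a dyn_cast, which returns a null pointer when the cast fails, so '!Ptr' says exactly what 'Ptr == 0' used to. A freestanding sketch of the idiom against the LLVM API of this era (constantOrZero is a made-up helper):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/Casting.h"

    // dyn_cast yields nullptr on failure, so negation is the natural guard.
    static int64_t constantOrZero(llvm::Value *V) {
      llvm::ConstantInt *CI = llvm::dyn_cast<llvm::ConstantInt>(V);
      if (!CI)             // previously written as 'if (CI == 0)'
        return 0;
      return CI->getSExtValue();
    }
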
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index 91eb65a360..986d6a4bae 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -123,14 +123,14 @@ namespace {
public:
XorOpnd(Value *V);
- bool isInvalid() const { return SymbolicPart == 0; }
+ bool isInvalid() const { return SymbolicPart == nullptr; }
bool isOrExpr() const { return isOr; }
Value *getValue() const { return OrigVal; }
Value *getSymbolicPart() const { return SymbolicPart; }
unsigned getSymbolicRank() const { return SymbolicRank; }
const APInt &getConstPart() const { return ConstPart; }
- void Invalidate() { SymbolicPart = OrigVal = 0; }
+ void Invalidate() { SymbolicPart = OrigVal = nullptr; }
void setSymbolicRank(unsigned R) { SymbolicRank = R; }
// Sort the XorOpnd-Pointer in ascending order of symbolic-value-rank.
@@ -237,7 +237,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
if (V->hasOneUse() && isa<Instruction>(V) &&
cast<Instruction>(V)->getOpcode() == Opcode)
return cast<BinaryOperator>(V);
- return 0;
+ return nullptr;
}
static bool isUnmovableInstruction(Instruction *I) {
@@ -285,7 +285,7 @@ void Reassociate::BuildRankMap(Function &F) {
unsigned Reassociate::getRank(Value *V) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) {
+ if (!I) {
if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument.
return 0; // Otherwise it's a global or constant, rank 0.
}
@@ -706,7 +706,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
// ExpressionChanged - Non-null if the rewritten expression differs from the
// original in some non-trivial way, requiring the clearing of optional flags.
// Flags are cleared from the operator in ExpressionChanged up to I inclusive.
- BinaryOperator *ExpressionChanged = 0;
+ BinaryOperator *ExpressionChanged = nullptr;
for (unsigned i = 0; ; ++i) {
// The last operation (which comes earliest in the IR) is special as both
// operands will come from Ops, rather than just one with the other being
@@ -996,7 +996,7 @@ static Value *EmitAddTreeOfValues(Instruction *I,
/// remove Factor from the tree and return the new tree.
Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
BinaryOperator *BO = isReassociableOp(V, Instruction::Mul);
- if (!BO) return 0;
+ if (!BO) return nullptr;
SmallVector<RepeatedValue, 8> Tree;
MadeChange |= LinearizeExprTree(BO, Tree);
@@ -1030,7 +1030,7 @@ Value *Reassociate::RemoveFactorFromExpression(Value *V, Value *Factor) {
if (!FoundFactor) {
// Make sure to restore the operands to the expression tree.
RewriteExprTree(BO, Factors);
- return 0;
+ return nullptr;
}
BasicBlock::iterator InsertPt = BO; ++InsertPt;
@@ -1115,7 +1115,7 @@ static Value *OptimizeAndOrXor(unsigned Opcode,
++NumAnnihil;
}
}
- return 0;
+ return nullptr;
}
/// Helper function of CombineXorOpnd(). It creates a bitwise-and
@@ -1136,7 +1136,7 @@ static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd,
}
return Opnd;
}
- return 0;
+ return nullptr;
}
// Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd"
@@ -1262,7 +1262,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
return V;
if (Ops.size() == 1)
- return 0;
+ return nullptr;
SmallVector<XorOpnd, 8> Opnds;
SmallVector<XorOpnd*, 8> OpndPtrs;
@@ -1295,7 +1295,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
std::stable_sort(OpndPtrs.begin(), OpndPtrs.end(), XorOpnd::PtrSortFunctor());
// Step 3: Combine adjacent operands
- XorOpnd *PrevOpnd = 0;
+ XorOpnd *PrevOpnd = nullptr;
bool Changed = false;
for (unsigned i = 0, e = Opnds.size(); i < e; i++) {
XorOpnd *CurrOpnd = OpndPtrs[i];
@@ -1329,7 +1329,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
PrevOpnd = CurrOpnd;
} else {
CurrOpnd->Invalidate();
- PrevOpnd = 0;
+ PrevOpnd = nullptr;
}
Changed = true;
}
@@ -1359,7 +1359,7 @@ Value *Reassociate::OptimizeXor(Instruction *I,
}
}
- return 0;
+ return nullptr;
}
/// OptimizeAdd - Optimize a series of operands to an 'add' instruction. This
@@ -1446,7 +1446,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
// Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4)
// where they are actually the same multiply.
unsigned MaxOcc = 0;
- Value *MaxOccVal = 0;
+ Value *MaxOccVal = nullptr;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
BinaryOperator *BOp = isReassociableOp(Ops[i].Op, Instruction::Mul);
if (!BOp)
@@ -1544,7 +1544,7 @@ Value *Reassociate::OptimizeAdd(Instruction *I,
Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
}
- return 0;
+ return nullptr;
}
/// \brief Build up a vector of value/power pairs factoring a product.
@@ -1689,14 +1689,14 @@ Value *Reassociate::OptimizeMul(BinaryOperator *I,
// We can only optimize the multiplies when there is a chain of more than
// three, such that a balanced tree might require fewer total multiplies.
if (Ops.size() < 4)
- return 0;
+ return nullptr;
// Try to turn linear trees of multiplies without other uses of the
// intermediate stages into minimal multiply DAGs with perfect sub-expression
// re-use.
SmallVector<Factor, 4> Factors;
if (!collectMultiplyFactors(Ops, Factors))
- return 0; // All distinct factors, so nothing left for us to do.
+ return nullptr; // All distinct factors, so nothing left for us to do.
IRBuilder<> Builder(I);
Value *V = buildMinimalMultiplyDAG(Builder, Factors);
@@ -1705,14 +1705,14 @@ Value *Reassociate::OptimizeMul(BinaryOperator *I,
ValueEntry NewEntry = ValueEntry(getRank(V), V);
Ops.insert(std::lower_bound(Ops.begin(), Ops.end(), NewEntry), NewEntry);
- return 0;
+ return nullptr;
}
Value *Reassociate::OptimizeExpression(BinaryOperator *I,
SmallVectorImpl<ValueEntry> &Ops) {
// Now that we have the linearized expression tree, try to optimize it.
// Start by folding any constants that we found.
- Constant *Cst = 0;
+ Constant *Cst = nullptr;
unsigned Opcode = I->getOpcode();
while (!Ops.empty() && isa<Constant>(Ops.back().Op)) {
Constant *C = cast<Constant>(Ops.pop_back_val().Op);
@@ -1762,7 +1762,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
if (Ops.size() != NumOps)
return OptimizeExpression(I, Ops);
- return 0;
+ return nullptr;
}
/// EraseInst - Zap the given instruction, adding interesting operands to the
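
XorOpnd::Invalidate above nulls two members through one chained assignment; nullptr flows through chained pointer assignments just as 0 did, while keeping the 'null means dead' sentinel explicit. A generic sketch of that invalidation pattern (toy types, not the pass's):

    struct Operand {
      int *Orig;
      int *Sym;
      bool isInvalid() const { return Sym == nullptr; }
      void invalidate() { Sym = Orig = nullptr; }  // chained null assignment
    };

    int main() {
      int X = 1;
      Operand Op = {&X, &X};
      Op.invalidate();
      return Op.isInvalid() ? 0 : 1;  // exits 0: both members are now null
    }
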
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 99d757da78..c4ce074ded 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -82,7 +82,7 @@ class LatticeVal {
}
public:
- LatticeVal() : Val(0, undefined) {}
+ LatticeVal() : Val(nullptr, undefined) {}
bool isUndefined() const { return getLatticeValue() == undefined; }
bool isConstant() const {
@@ -134,7 +134,7 @@ public:
ConstantInt *getConstantInt() const {
if (isConstant())
return dyn_cast<ConstantInt>(getConstant());
- return 0;
+ return nullptr;
}
void markForcedConstant(Constant *V) {
@@ -404,7 +404,7 @@ private:
if (Constant *C = dyn_cast<Constant>(V)) {
Constant *Elt = C->getAggregateElement(i);
- if (Elt == 0)
+ if (!Elt)
LV.markOverdefined(); // Unknown sort of constant.
else if (isa<UndefValue>(Elt))
; // Undef values remain undefined.
@@ -523,7 +523,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
LatticeVal BCValue = getValueState(BI->getCondition());
ConstantInt *CI = BCValue.getConstantInt();
- if (CI == 0) {
+ if (!CI) {
// Overdefined condition variables, and branches on unfoldable constant
// conditions, mean the branch could go either way.
if (!BCValue.isUndefined())
@@ -550,7 +550,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
LatticeVal SCValue = getValueState(SI->getCondition());
ConstantInt *CI = SCValue.getConstantInt();
- if (CI == 0) { // Overdefined or undefined condition?
+ if (!CI) { // Overdefined or undefined condition?
// All destinations are executable!
if (!SCValue.isUndefined())
Succs.assign(TI.getNumSuccessors(), true);
@@ -595,7 +595,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
// Overdefined condition variables mean the branch could go either way,
// undef conditions mean that neither edge is feasible yet.
ConstantInt *CI = BCValue.getConstantInt();
- if (CI == 0)
+ if (!CI)
return !BCValue.isUndefined();
// Constant condition variables mean the branch can only go a single way.
@@ -613,7 +613,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
LatticeVal SCValue = getValueState(SI->getCondition());
ConstantInt *CI = SCValue.getConstantInt();
- if (CI == 0)
+ if (!CI)
return !SCValue.isUndefined();
return SI->findCaseValue(CI).getCaseSuccessor() == To;
@@ -668,7 +668,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
// constant. If they are constant and don't agree, the PHI is overdefined.
// If there are no executable operands, the PHI remains undefined.
//
- Constant *OperandVal = 0;
+ Constant *OperandVal = nullptr;
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
LatticeVal IV = getValueState(PN.getIncomingValue(i));
if (IV.isUndefined()) continue; // Doesn't influence PHI node.
@@ -679,7 +679,7 @@ void SCCPSolver::visitPHINode(PHINode &PN) {
if (IV.isOverdefined()) // PHI node becomes overdefined!
return markOverdefined(&PN);
- if (OperandVal == 0) { // Grab the first value.
+ if (!OperandVal) { // Grab the first value.
OperandVal = IV.getConstant();
continue;
}
@@ -775,7 +775,7 @@ void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
StructType *STy = dyn_cast<StructType>(IVI.getType());
- if (STy == 0)
+ if (!STy)
return markOverdefined(&IVI);
// If this has more than one index, we can't handle it, drive all results to
@@ -863,7 +863,7 @@ void SCCPSolver::visitBinaryOperator(Instruction &I) {
// If this is an AND or OR with 0 or -1, it doesn't matter that the other
// operand is overdefined.
if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Or) {
- LatticeVal *NonOverdefVal = 0;
+ LatticeVal *NonOverdefVal = nullptr;
if (!V1State.isOverdefined())
NonOverdefVal = &V1State;
else if (!V2State.isOverdefined())
@@ -1082,7 +1082,7 @@ void SCCPSolver::visitCallSite(CallSite CS) {
// The common case is that we aren't tracking the callee, either because we
// are not doing interprocedural analysis or the callee is indirect, or is
// external. Handle these cases first.
- if (F == 0 || F->isDeclaration()) {
+ if (!F || F->isDeclaration()) {
CallOverdefined:
// Void return and not tracking callee, just bail.
if (I->getType()->isVoidTy()) return;
@@ -1556,7 +1556,7 @@ bool SCCP::runOnFunction(Function &F) {
DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
const DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SCCPSolver Solver(DL, TLI);
@@ -1685,7 +1685,7 @@ static bool AddressIsTaken(const GlobalValue *GV) {
bool IPSCCP::runOnModule(Module &M) {
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SCCPSolver Solver(DL, TLI);
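
LatticeVal packs its constant and its lattice state into a PointerIntPair, so changing Val(0, undefined) to Val(nullptr, undefined) documents which half is the pointer. A compressed-pair sketch along the same lines, assuming llvm/ADT/PointerIntPair.h and a 4-byte-aligned pointee (the enum is a stand-in, not SCCP's real lattice):

    #include "llvm/ADT/PointerIntPair.h"

    enum State { undefined, forcedconstant, overdefined };  // fits in 2 bits

    struct Lattice {
      // The low bits of the aligned pointer store the state tag.
      llvm::PointerIntPair<int *, 2, State> Val;
      Lattice() : Val(nullptr, undefined) {}   // was: Val(0, undefined)
      bool isUndefined() const { return Val.getInt() == undefined; }
    };
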
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 784840e0b5..04bf4f8dfc 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -160,8 +160,8 @@ public:
Use *getUse() const { return UseAndIsSplittable.getPointer(); }
- bool isDead() const { return getUse() == 0; }
- void kill() { UseAndIsSplittable.setPointer(0); }
+ bool isDead() const { return getUse() == nullptr; }
+ void kill() { UseAndIsSplittable.setPointer(nullptr); }
/// \brief Support for ordering ranges.
///
@@ -321,7 +321,7 @@ static Value *foldSelectInst(SelectInst &SI) {
if (SI.getOperand(1) == SI.getOperand(2))
return SI.getOperand(1);
- return 0;
+ return nullptr;
}
/// \brief Builder for the alloca slices.
@@ -643,7 +643,7 @@ private:
Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
- return 0;
+ return nullptr;
}
void visitPHINode(PHINode &PN) {
@@ -725,7 +725,7 @@ AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
AI(AI),
#endif
- PointerEscapingInstr(0) {
+ PointerEscapingInstr(nullptr) {
SliceBuilder PB(DL, AI, *this);
SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
if (PtrI.isEscaped() || PtrI.isAborted()) {
@@ -874,7 +874,7 @@ public:
for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
- Value *Arg = 0;
+ Value *Arg = nullptr;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
@@ -970,7 +970,7 @@ class SROA : public FunctionPass {
public:
SROA(bool RequiresDomTree = true)
: FunctionPass(ID), RequiresDomTree(RequiresDomTree),
- C(0), DL(0), DT(0) {
+ C(nullptr), DL(nullptr), DT(nullptr) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
@@ -1012,9 +1012,9 @@ INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
static Type *findCommonType(AllocaSlices::const_iterator B,
AllocaSlices::const_iterator E,
uint64_t EndOffset) {
- Type *Ty = 0;
+ Type *Ty = nullptr;
bool TyIsCommon = true;
- IntegerType *ITy = 0;
+ IntegerType *ITy = nullptr;
// Note that we need to look at *every* alloca slice's Use to ensure we
// always get consistent results regardless of the order of slices.
@@ -1025,7 +1025,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
if (I->beginOffset() != B->beginOffset() || I->endOffset() != EndOffset)
continue;
- Type *UserTy = 0;
+ Type *UserTy = nullptr;
if (LoadInst *LI = dyn_cast<LoadInst>(U->getUser())) {
UserTy = LI->getType();
} else if (StoreInst *SI = dyn_cast<StoreInst>(U->getUser())) {
@@ -1075,7 +1075,7 @@ static Type *findCommonType(AllocaSlices::const_iterator B,
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN,
- const DataLayout *DL = 0) {
+ const DataLayout *DL = nullptr) {
// For now, we can only do this promotion if the load is in the same block
// as the PHI, and if there are no stores between the phi and load.
// TODO: Allow recursive phi users.
@@ -1085,7 +1085,7 @@ static bool isSafePHIToSpeculate(PHINode &PN,
bool HaveLoad = false;
for (User *U : PN.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
- if (LI == 0 || !LI->isSimple())
+ if (!LI || !LI->isSimple())
return false;
// For now we only allow loads in the same block as the PHI. This is
@@ -1192,7 +1192,8 @@ static void speculatePHINodeLoads(PHINode &PN) {
///
/// We can do this to a select if its only uses are loads and if the operand
/// to the select can be loaded unconditionally.
-static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
+static bool isSafeSelectToSpeculate(SelectInst &SI,
+ const DataLayout *DL = nullptr) {
Value *TValue = SI.getTrueValue();
Value *FValue = SI.getFalseValue();
bool TDerefable = TValue->isDereferenceablePointer();
@@ -1200,7 +1201,7 @@ static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
for (User *U : SI.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
- if (LI == 0 || !LI->isSimple())
+ if (!LI || !LI->isSimple())
return false;
// Both operands to the select need to be dereferenceable, either
@@ -1333,19 +1334,21 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
// We can't recurse through pointer types.
if (Ty->isPointerTy())
- return 0;
+ return nullptr;
// We try to analyze GEPs over vectors here, but note that these GEPs are
// extremely poorly defined currently. The long-term goal is to remove GEPing
// over a vector from the IR completely.
if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
unsigned ElementSizeInBits = DL.getTypeSizeInBits(VecTy->getScalarType());
- if (ElementSizeInBits % 8)
- return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+ if (ElementSizeInBits % 8 != 0) {
+ // GEPs over non-multiple of 8 size vector elements are invalid.
+ return nullptr;
+ }
APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
@@ -1357,7 +1360,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
APInt NumSkippedElements = Offset.sdiv(ElementSize);
if (NumSkippedElements.ugt(ArrTy->getNumElements()))
- return 0;
+ return nullptr;
Offset -= NumSkippedElements * ElementSize;
Indices.push_back(IRB.getInt(NumSkippedElements));
@@ -1367,17 +1370,17 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
if (StructOffset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
Type *ElementTy = STy->getElementType(Index);
if (Offset.uge(DL.getTypeAllocSize(ElementTy)))
- return 0; // The offset points into alignment padding.
+ return nullptr; // The offset points into alignment padding.
Indices.push_back(IRB.getInt32(Index));
return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
@@ -1403,14 +1406,14 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
// Don't consider any GEPs through an i8* as natural unless the TargetTy is
// an i8.
if (Ty == IRB.getInt8PtrTy(Ty->getAddressSpace()) && TargetTy->isIntegerTy(8))
- return 0;
+ return nullptr;
Type *ElementTy = Ty->getElementType();
if (!ElementTy->isSized())
- return 0; // We can't GEP through an unsized element.
+ return nullptr; // We can't GEP through an unsized element.
APInt ElementSize(Offset.getBitWidth(), DL.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
- return 0; // Zero-length arrays can't help us build a natural GEP.
+ return nullptr; // Zero-length arrays can't help us build a natural GEP.
APInt NumSkippedElements = Offset.sdiv(ElementSize);
Offset -= NumSkippedElements * ElementSize;
@@ -1446,11 +1449,11 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
// We may end up computing an offset pointer that has the wrong type. If we
// never are able to compute one directly that has the correct type, we'll
// fall back to it, so keep it around here.
- Value *OffsetPtr = 0;
+ Value *OffsetPtr = nullptr;
// Remember any i8 pointer we come across to re-use if we need to do a raw
// byte offset.
- Value *Int8Ptr = 0;
+ Value *Int8Ptr = nullptr;
APInt Int8PtrOffset(Offset.getBitWidth(), 0);
Type *TargetTy = PointerTy->getPointerElementType();
@@ -2044,14 +2047,14 @@ public:
NewAllocaBeginOffset(NewAllocaBeginOffset),
NewAllocaEndOffset(NewAllocaEndOffset),
NewAllocaTy(NewAI.getAllocatedType()),
- VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
- ElementTy(VecTy ? VecTy->getElementType() : 0),
+ VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : nullptr),
+ ElementTy(VecTy ? VecTy->getElementType() : nullptr),
ElementSize(VecTy ? DL.getTypeSizeInBits(ElementTy) / 8 : 0),
IntTy(IsIntegerPromotable
? Type::getIntNTy(
NewAI.getContext(),
DL.getTypeSizeInBits(NewAI.getAllocatedType()))
- : 0),
+ : nullptr),
BeginOffset(), EndOffset(), IsSplittable(), IsSplit(), OldUse(),
OldPtr(), PHIUsers(PHIUsers), SelectUsers(SelectUsers),
IRB(NewAI.getContext(), ConstantFolder()) {
@@ -2145,7 +2148,7 @@ private:
///
/// You can optionally pass a type to this routine and if that type's ABI
/// alignment is itself suitable, this will return zero.
- unsigned getSliceAlign(Type *Ty = 0) {
+ unsigned getSliceAlign(Type *Ty = nullptr) {
unsigned NewAIAlign = NewAI.getAlignment();
if (!NewAIAlign)
NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
@@ -2595,7 +2598,7 @@ private:
unsigned EndIndex = VecTy ? getIndex(NewEndOffset) : 0;
unsigned NumElements = EndIndex - BeginIndex;
IntegerType *SubIntTy
- = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0;
+ = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : nullptr;
// Reset the other pointer type to match the register type we're going to
// use, but using the address space of the original other pointer.
@@ -2993,22 +2996,22 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
return stripAggregateTypeWrapping(DL, Ty);
if (Offset > DL.getTypeAllocSize(Ty) ||
(DL.getTypeAllocSize(Ty) - Offset) < Size)
- return 0;
+ return nullptr;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(Ty)) {
// We can't partition pointers...
if (SeqTy->isPointerTy())
- return 0;
+ return nullptr;
Type *ElementTy = SeqTy->getElementType();
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
uint64_t NumSkippedElements = Offset / ElementSize;
if (ArrayType *ArrTy = dyn_cast<ArrayType>(SeqTy)) {
if (NumSkippedElements >= ArrTy->getNumElements())
- return 0;
+ return nullptr;
} else if (VectorType *VecTy = dyn_cast<VectorType>(SeqTy)) {
if (NumSkippedElements >= VecTy->getNumElements())
- return 0;
+ return nullptr;
}
Offset -= NumSkippedElements * ElementSize;
@@ -3016,7 +3019,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
if (Offset > 0 || Size < ElementSize) {
// Bail if the partition ends in a different array element.
if ((Offset + Size) > ElementSize)
- return 0;
+ return nullptr;
// Recurse through the element type trying to peel off offset bytes.
return getTypePartition(DL, ElementTy, Offset, Size);
}
@@ -3027,20 +3030,20 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
assert(Size > ElementSize);
uint64_t NumElements = Size / ElementSize;
if (NumElements * ElementSize != Size)
- return 0;
+ return nullptr;
return ArrayType::get(ElementTy, NumElements);
}
StructType *STy = dyn_cast<StructType>(Ty);
if (!STy)
- return 0;
+ return nullptr;
const StructLayout *SL = DL.getStructLayout(STy);
if (Offset >= SL->getSizeInBytes())
- return 0;
+ return nullptr;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
- return 0;
+ return nullptr;
unsigned Index = SL->getElementContainingOffset(Offset);
Offset -= SL->getElementOffset(Index);
@@ -3048,12 +3051,12 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
Type *ElementTy = STy->getElementType(Index);
uint64_t ElementSize = DL.getTypeAllocSize(ElementTy);
if (Offset >= ElementSize)
- return 0; // The offset points into alignment padding.
+ return nullptr; // The offset points into alignment padding.
// See if any partition must be contained by the element.
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
- return 0;
+ return nullptr;
return getTypePartition(DL, ElementTy, Offset, Size);
}
assert(Offset == 0);
@@ -3066,14 +3069,14 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
if (EndOffset < SL->getSizeInBytes()) {
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
- return 0; // Within a single element and its padding.
+ return nullptr; // Within a single element and its padding.
// Don't try to form "natural" types if the elements don't line up with the
// expected size.
// FIXME: We could potentially recurse down through the last element in the
// sub-struct to find a natural end point.
if (SL->getElementOffset(EndIndex) != EndOffset)
- return 0;
+ return nullptr;
assert(Index < EndIndex);
EE = STy->element_begin() + EndIndex;
@@ -3084,7 +3087,7 @@ static Type *getTypePartition(const DataLayout &DL, Type *Ty,
STy->isPacked());
const StructLayout *SubSL = DL.getStructLayout(SubTy);
if (Size != SubSL->getSizeInBytes())
- return 0; // The sub-struct doesn't have quite the size needed.
+ return nullptr; // The sub-struct doesn't have quite the size needed.
return SubTy;
}
@@ -3109,7 +3112,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
- Type *SliceTy = 0;
+ Type *SliceTy = nullptr;
if (Type *CommonUseTy = findCommonType(B, E, EndOffset))
if (DL->getTypeAllocSize(CommonUseTy) >= SliceSize)
SliceTy = CommonUseTy;
@@ -3156,7 +3159,7 @@ bool SROA::rewritePartition(AllocaInst &AI, AllocaSlices &S,
// the alloca's alignment unconstrained.
if (Alignment <= DL->getABITypeAlignment(SliceTy))
Alignment = 0;
- NewAI = new AllocaInst(SliceTy, 0, Alignment,
+ NewAI = new AllocaInst(SliceTy, nullptr, Alignment,
AI.getName() + ".sroa." + Twine(B - S.begin()), &AI);
++NumNewAllocas;
}
@@ -3495,7 +3498,7 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
for (Use &Operand : I->operands())
if (Instruction *U = dyn_cast<Instruction>(Operand)) {
// Zero out the operand and see if it becomes trivially dead.
- Operand = 0;
+ Operand = nullptr;
if (isInstructionTriviallyDead(U))
DeadInsts.insert(U);
}
@@ -3613,7 +3616,7 @@ bool SROA::runOnFunction(Function &F) {
DL = &DLP->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- DT = DTWP ? &DTWP->getDomTree() : 0;
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
BasicBlock &EntryBB = F.getEntryBlock();
for (BasicBlock::iterator I = EntryBB.begin(), E = std::prev(EntryBB.end());
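
Several SROA helpers now spell optional parameters as 'const DataLayout *DL = nullptr', so a call that omits the argument reads as 'no layout available'. A small sketch of that optional-pointer-parameter shape (the names are illustrative only):

    // Passing no alignment hint means "fall back to the base alignment".
    static unsigned sliceAlign(unsigned Base, const unsigned *Hint = nullptr) {
      if (!Hint)                        // default case: no extra information
        return Base;
      return *Hint < Base ? *Hint : Base;
    }

    int main() {
      unsigned ABI = 4;
      return (sliceAlign(8) == 8 && sliceAlign(8, &ABI) == 4) ? 0 : 1;
    }
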
diff --git a/lib/Transforms/Scalar/SampleProfile.cpp b/lib/Transforms/Scalar/SampleProfile.cpp
index 2f03edda18..8e557aaa2f 100644
--- a/lib/Transforms/Scalar/SampleProfile.cpp
+++ b/lib/Transforms/Scalar/SampleProfile.cpp
@@ -120,8 +120,8 @@ typedef DenseMap<BasicBlock *, SmallVector<BasicBlock *, 8>> BlockEdgeMap;
class SampleFunctionProfile {
public:
SampleFunctionProfile()
- : TotalSamples(0), TotalHeadSamples(0), HeaderLineno(0), DT(0), PDT(0),
- LI(0), Ctx(0) {}
+ : TotalSamples(0), TotalHeadSamples(0), HeaderLineno(0), DT(nullptr),
+ PDT(nullptr), LI(nullptr), Ctx(nullptr) {}
unsigned getFunctionLoc(Function &F);
bool emitAnnotations(Function &F, DominatorTree *DomTree,
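
The constructor above shows the rule the whole patch follows: numeric counters keep the literal 0 and only pointer members move to nullptr. The same split in miniature (toy members, not the profiler's):

    struct FunctionProfile {
      unsigned TotalSamples;
      int *DomTree;                 // stand-ins for the pass's analysis pointers
      int *Loops;
      FunctionProfile()
          : TotalSamples(0),                       // numeric zero stays 0
            DomTree(nullptr), Loops(nullptr) {}    // pointers become nullptr
    };
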
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index a56df4bd5f..58192fc02b 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -305,7 +305,7 @@ public:
explicit ConvertToScalarInfo(unsigned Size, const DataLayout &DL,
unsigned SLT)
: AllocaSize(Size), DL(DL), ScalarLoadThreshold(SLT), IsNotTrivial(false),
- ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false),
+ ScalarKind(Unknown), VectorTy(nullptr), HadNonMemTransferAccess(false),
HadDynamicAccess(false) { }
AllocaInst *TryConvert(AllocaInst *AI);
@@ -333,8 +333,8 @@ private:
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// If we can't convert this scalar, or if mem2reg can trivially do it, bail
// out.
- if (!CanConvertToScalar(AI, 0, 0) || !IsNotTrivial)
- return 0;
+ if (!CanConvertToScalar(AI, 0, nullptr) || !IsNotTrivial)
+ return nullptr;
// If an alloca has only memset / memcpy uses, it may still have an Unknown
// ScalarKind. Treat it as an Integer below.
@@ -362,23 +362,24 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
// Do not convert to scalar integer if the alloca size exceeds the
// scalar load threshold.
if (BitWidth > ScalarLoadThreshold)
- return 0;
+ return nullptr;
if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
!HadNonMemTransferAccess && !DL.fitsInLegalInteger(BitWidth))
- return 0;
+ return nullptr;
// Dynamic accesses on integers aren't yet supported. They need us to shift
// by a dynamic amount which could be difficult to work out as we might not
// know whether to use a left or right shift.
if (ScalarKind == Integer && HadDynamicAccess)
- return 0;
+ return nullptr;
DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
// Create and insert the integer alloca.
NewTy = IntegerType::get(AI->getContext(), BitWidth);
}
- AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
- ConvertUsesToScalar(AI, NewAI, 0, 0);
+ AllocaInst *NewAI = new AllocaInst(NewTy, nullptr, "",
+ AI->getParent()->begin());
+ ConvertUsesToScalar(AI, NewAI, 0, nullptr);
return NewAI;
}
@@ -509,7 +510,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- Value *GEPNonConstantIdx = 0;
+ Value *GEPNonConstantIdx = nullptr;
if (!GEP->hasAllConstantIndices()) {
if (!isa<VectorType>(PtrTy->getElementType()))
return false;
@@ -565,7 +566,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
if (NonConstantIdx)
return false;
ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
- if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0)
+ if (!Len || Len->getZExtValue() != AllocaSize || Offset != 0)
return false;
IsNotTrivial = true; // Can't be mem2reg'd.
@@ -609,7 +610,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
// Compute the offset that this GEP adds to the pointer.
SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
- Value* GEPNonConstantIdx = 0;
+ Value* GEPNonConstantIdx = nullptr;
if (!GEP->hasAllConstantIndices()) {
assert(!NonConstantIdx &&
"Dynamic GEP reading from dynamic GEP unsupported");
@@ -672,7 +673,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
Value *New = ConvertScalar_InsertValue(
ConstantInt::get(User->getContext(), APVal),
- Old, Offset, 0, Builder);
+ Old, Offset, nullptr, Builder);
Builder.CreateStore(New, NewAI);
// If the load we just inserted is now dead, then the memset overwrote
@@ -810,7 +811,7 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
Offset+Layout.getElementOffsetInBits(i),
- 0, Builder);
+ nullptr, Builder);
Res = Builder.CreateInsertValue(Res, Elt, i);
}
return Res;
@@ -823,7 +824,8 @@ ConvertScalar_ExtractValue(Value *FromVal, Type *ToType,
Value *Res = UndefValue::get(AT);
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
- Offset+i*EltSize, 0, Builder);
+ Offset+i*EltSize, nullptr,
+ Builder);
Res = Builder.CreateInsertValue(Res, Elt, i);
}
return Res;
@@ -939,7 +941,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
Value *Elt = Builder.CreateExtractValue(SV, i);
Old = ConvertScalar_InsertValue(Elt, Old,
Offset+Layout.getElementOffsetInBits(i),
- 0, Builder);
+ nullptr, Builder);
}
return Old;
}
@@ -950,7 +952,8 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
uint64_t EltSize = DL.getTypeAllocSizeInBits(AT->getElementType());
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
Value *Elt = Builder.CreateExtractValue(SV, i);
- Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, 0, Builder);
+ Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, nullptr,
+ Builder);
}
return Old;
}
@@ -1025,7 +1028,7 @@ bool SROA::runOnFunction(Function &F) {
return false;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
bool Changed = performPromotion(F);
@@ -1055,7 +1058,7 @@ class AllocaPromoter : public LoadAndStorePromoter {
public:
AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
DIBuilder *DB)
- : LoadAndStorePromoter(Insts, S), AI(0), DIB(DB) {}
+ : LoadAndStorePromoter(Insts, S), AI(nullptr), DIB(DB) {}
void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
// Remember which alloca we're promoting (for isInstInList).
@@ -1101,7 +1104,7 @@ public:
for (SmallVectorImpl<DbgValueInst *>::const_iterator I = DVIs.begin(),
E = DVIs.end(); I != E; ++I) {
DbgValueInst *DVI = *I;
- Value *Arg = NULL;
+ Value *Arg = nullptr;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// If an argument is zero extended then use argument directly. The ZExt
// may be zapped by an optimization pass in future.
@@ -1144,7 +1147,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
for (User *U : SI->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
- if (LI == 0 || !LI->isSimple()) return false;
+ if (!LI || !LI->isSimple()) return false;
// Both operands to the select need to be dereferenceable, either absolutely
// (e.g. allocas) or at this point because we can see other accesses to it.
@@ -1184,7 +1187,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
unsigned MaxAlign = 0;
for (User *U : PN->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
- if (LI == 0 || !LI->isSimple()) return false;
+ if (!LI || !LI->isSimple()) return false;
// For now we only allow loads in the same block as the PHI. This is a
// common case that happens when instcombine merges two loads through a PHI.
@@ -1381,7 +1384,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *Pred = PN->getIncomingBlock(i);
LoadInst *&Load = InsertedLoads[Pred];
- if (Load == 0) {
+ if (!Load) {
Load = new LoadInst(PN->getIncomingValue(i),
PN->getName() + "." + Pred->getName(),
Pred->getTerminator());
@@ -1401,7 +1404,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
bool SROA::performPromotion(Function &F) {
std::vector<AllocaInst*> Allocas;
- DominatorTree *DT = 0;
+ DominatorTree *DT = nullptr;
if (HasDomTree)
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -1538,7 +1541,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
if (StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
ElementAllocas.reserve(ST->getNumContainedTypes());
for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
- AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
+ AllocaInst *NA = new AllocaInst(ST->getContainedType(i), nullptr,
AI->getAlignment(),
AI->getName() + "." + Twine(i), AI);
ElementAllocas.push_back(NA);
@@ -1549,7 +1552,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
ElementAllocas.reserve(AT->getNumElements());
Type *ElTy = AT->getElementType();
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
- AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
+ AllocaInst *NA = new AllocaInst(ElTy, nullptr, AI->getAlignment(),
AI->getName() + "." + Twine(i), AI);
ElementAllocas.push_back(NA);
WorkList.push_back(NA); // Add to worklist for recursive processing
@@ -1578,7 +1581,7 @@ void SROA::DeleteDeadInstructions() {
// Zero out the operand and see if it becomes trivially dead.
// (But, don't add allocas to the dead instruction list -- they are
// already on the worklist and will be deleted separately.)
- *OI = 0;
+ *OI = nullptr;
if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
DeadInsts.push_back(U);
}
@@ -1605,12 +1608,10 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
isSafeForScalarRepl(GEPI, GEPOffset, Info);
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
- if (Length == 0)
- return MarkUnsafe(Info, User);
- if (Length->isNegative())
+ if (!Length || Length->isNegative())
return MarkUnsafe(Info, User);
- isSafeMemAccess(Offset, Length->getZExtValue(), 0,
+ isSafeMemAccess(Offset, Length->getZExtValue(), nullptr,
U.getOperandNo() == 0, Info, MI,
true /*AllowWholeAccess*/);
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
@@ -1745,12 +1746,12 @@ static bool isHomogeneousAggregate(Type *T, unsigned &NumElts,
Type *&EltTy) {
if (ArrayType *AT = dyn_cast<ArrayType>(T)) {
NumElts = AT->getNumElements();
- EltTy = (NumElts == 0 ? 0 : AT->getElementType());
+ EltTy = (NumElts == 0 ? nullptr : AT->getElementType());
return true;
}
if (StructType *ST = dyn_cast<StructType>(T)) {
NumElts = ST->getNumContainedTypes();
- EltTy = (NumElts == 0 ? 0 : ST->getContainedType(0));
+ EltTy = (NumElts == 0 ? nullptr : ST->getContainedType(0));
for (unsigned n = 1; n < NumElts; ++n) {
if (ST->getContainedType(n) != EltTy)
return false;
@@ -2039,7 +2040,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
// In this case, it must be the last GEP operand which is dynamic so keep that
// aside until we've found the constant GEP offset then add it back in at the
// end.
- Value* NonConstantIdx = 0;
+ Value* NonConstantIdx = nullptr;
if (!GEPI->hasAllConstantIndices())
NonConstantIdx = Indices.pop_back_val();
Offset += DL->getIndexedOffset(GEPI->getPointerOperandType(), Indices);
@@ -2157,7 +2158,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// appropriate type. The "Other" pointer is the pointer that goes to memory
// that doesn't have anything to do with the alloca that we are promoting. For
// memset, this Value* stays null.
- Value *OtherPtr = 0;
+ Value *OtherPtr = nullptr;
unsigned MemAlignment = MI->getAlignment();
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
if (Inst == MTI->getRawDest())
@@ -2209,7 +2210,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
- Value *OtherElt = 0;
+ Value *OtherElt = nullptr;
unsigned OtherEltAlign = MemAlignment;
if (OtherPtr) {
@@ -2451,7 +2452,7 @@ SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
// There are two forms here: AI could be an array or struct. Both cases
// have different ways to compute the element offset.
- const StructLayout *Layout = 0;
+ const StructLayout *Layout = nullptr;
uint64_t ArrayEltBitOffset = 0;
if (StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
Layout = DL->getStructLayout(EltSTy);
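
The AllocaInst constructor calls in this file pass nullptr for the ArraySize operand, which previously appeared as a bare 0 wedged between a type and an alignment. A sketch of how such a call reads after the change, using the same five-argument constructor the hunks above exercise:

    #include "llvm/IR/Instructions.h"

    // ArraySize == nullptr requests a single element; the old literal 0 meant
    // the same thing but looked like a size.
    static llvm::AllocaInst *makeScalarSlot(llvm::Type *Ty, unsigned Align,
                                            llvm::Instruction *InsertBefore) {
      return new llvm::AllocaInst(Ty, /*ArraySize=*/nullptr, Align, "slot",
                                  InsertBefore);
    }
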
diff --git a/lib/Transforms/Scalar/Scalarizer.cpp b/lib/Transforms/Scalar/Scalarizer.cpp
index 37866d74d4..7a73f113b1 100644
--- a/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/lib/Transforms/Scalar/Scalarizer.cpp
@@ -49,7 +49,7 @@ public:
// insert them before BBI in BB. If Cache is nonnull, use it to cache
// the results.
Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
- ValueVector *cachePtr = 0);
+ ValueVector *cachePtr = nullptr);
// Return component I, creating a new Value for it if necessary.
Value *operator[](unsigned I);
@@ -102,7 +102,7 @@ struct BinarySplitter {
// Information about a load or store that we're scalarizing.
struct VectorLayout {
- VectorLayout() : VecTy(0), ElemTy(0), VecAlign(0), ElemSize(0) {}
+ VectorLayout() : VecTy(nullptr), ElemTy(nullptr), VecAlign(0), ElemSize(0) {}
// Return the alignment of element I.
uint64_t getElemAlign(unsigned I) {
@@ -187,9 +187,9 @@ Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
Ty = PtrTy->getElementType();
Size = Ty->getVectorNumElements();
if (!CachePtr)
- Tmp.resize(Size, 0);
+ Tmp.resize(Size, nullptr);
else if (CachePtr->empty())
- CachePtr->resize(Size, 0);
+ CachePtr->resize(Size, nullptr);
else
assert(Size == CachePtr->size() && "Inconsistent vector sizes");
}
@@ -242,7 +242,7 @@ bool Scalarizer::doInitialization(Module &M) {
bool Scalarizer::runOnFunction(Function &F) {
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
BasicBlock *BB = BBI;
for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
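
Scatterer's caches are vectors of Value pointers, so resize takes nullptr as its fill value. The standard-library shape of that call, in isolation:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int *> Cache;
      Cache.resize(8, nullptr);   // was: Cache.resize(8, 0); fills with nulls
      assert(Cache.size() == 8 && Cache[0] == nullptr);
      return 0;
    }
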
diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
index 30d98b790c..5d5606ba47 100644
--- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp
+++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp
@@ -72,7 +72,7 @@ FunctionPass *llvm::createCFGSimplificationPass() {
static bool mergeEmptyReturnBlocks(Function &F) {
bool Changed = false;
- BasicBlock *RetBlock = 0;
+ BasicBlock *RetBlock = nullptr;
// Scan all the blocks in the function, looking for empty return blocks.
for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; ) {
@@ -80,7 +80,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
// Only look at return blocks.
ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator());
- if (Ret == 0) continue;
+ if (!Ret) continue;
// Only look at the block if it is empty or the only other thing in it is a
// single PHI node that is the operand to the return.
@@ -99,7 +99,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
}
// If this is the first returning block, remember it and keep going.
- if (RetBlock == 0) {
+ if (!RetBlock) {
RetBlock = &BB;
continue;
}
@@ -120,7 +120,7 @@ static bool mergeEmptyReturnBlocks(Function &F) {
// If the canonical return block has no PHI node, create one now.
PHINode *RetBlockPHI = dyn_cast<PHINode>(RetBlock->begin());
- if (RetBlockPHI == 0) {
+ if (!RetBlockPHI) {
Value *InVal = cast<ReturnInst>(RetBlock->getTerminator())->getOperand(0);
pred_iterator PB = pred_begin(RetBlock), PE = pred_end(RetBlock);
RetBlockPHI = PHINode::Create(Ret->getOperand(0)->getType(),
@@ -174,7 +174,7 @@ bool CFGSimplifyPass::runOnFunction(Function &F) {
const TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
bool EverChanged = removeUnreachableBlocks(F);
EverChanged |= mergeEmptyReturnBlocks(F);
EverChanged |= iterativelySimplifyCFG(F, TTI, DL);
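
The recurring 'DLP ? &DLP->getDataLayout() : nullptr' guard works because both arms of a conditional must converge on one type, and nullptr converts to any pointer type exactly as the literal 0 did, while naming the intent. A reduced sketch (Layout is a toy, not llvm::DataLayout):

    struct Layout { unsigned PointerSize; };

    struct Pass {
      Layout L;
      Layout &getLayout() { return L; }
    };

    // Both arms of ?: must share a type; nullptr converts to Layout *.
    static const Layout *layoutIfAvailable(Pass *P) {
      return P ? &P->getLayout() : nullptr;   // was: ': 0'
    }
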
diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp
index 121ee0923b..482c33aa6e 100644
--- a/lib/Transforms/Scalar/Sink.cpp
+++ b/lib/Transforms/Scalar/Sink.cpp
@@ -204,7 +204,7 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
// Don't sink instructions into a loop.
Loop *succ = LI->getLoopFor(SuccToSinkTo);
Loop *cur = LI->getLoopFor(Inst->getParent());
- if (succ != 0 && succ != cur)
+ if (succ != nullptr && succ != cur)
return false;
}
@@ -238,14 +238,14 @@ bool Sinking::SinkInstruction(Instruction *Inst,
// SuccToSinkTo - This is the successor to sink this instruction to, once we
// decide.
- BasicBlock *SuccToSinkTo = 0;
+ BasicBlock *SuccToSinkTo = nullptr;
// Instructions can only be sunk if all their uses are in blocks
// dominated by one of the successors.
// Look at all the postdominators and see if we can sink it in one.
DomTreeNode *DTN = DT->getNode(Inst->getParent());
for (DomTreeNode::iterator I = DTN->begin(), E = DTN->end();
- I != E && SuccToSinkTo == 0; ++I) {
+ I != E && SuccToSinkTo == nullptr; ++I) {
BasicBlock *Candidate = (*I)->getBlock();
if ((*I)->getIDom()->getBlock() == Inst->getParent() &&
IsAcceptableTarget(Inst, Candidate))
@@ -255,13 +255,13 @@ bool Sinking::SinkInstruction(Instruction *Inst,
// If no suitable postdominator was found, look at all the successors and
// decide which one we should sink to, if any.
for (succ_iterator I = succ_begin(Inst->getParent()),
- E = succ_end(Inst->getParent()); I != E && SuccToSinkTo == 0; ++I) {
+ E = succ_end(Inst->getParent()); I != E && !SuccToSinkTo; ++I) {
if (IsAcceptableTarget(Inst, *I))
SuccToSinkTo = *I;
}
// If we couldn't find a block to sink to, ignore this instruction.
- if (SuccToSinkTo == 0)
+ if (!SuccToSinkTo)
return false;
DEBUG(dbgs() << "Sink" << *Inst << " (";
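
SinkInstruction's search loops use a null pointer as the 'nothing found yet' sentinel, so the conditions now read 'I != E && !SuccToSinkTo'. The same shape on a plain container:

    #include <vector>

    // Null means "no candidate accepted yet"; the loop stops once one is.
    static const int *firstEven(const std::vector<int> &V) {
      const int *Found = nullptr;
      for (auto I = V.begin(), E = V.end(); I != E && !Found; ++I)
        if (*I % 2 == 0)
          Found = &*I;
      return Found;   // may still be nullptr: the caller must check
    }
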
diff --git a/lib/Transforms/Scalar/StructurizeCFG.cpp b/lib/Transforms/Scalar/StructurizeCFG.cpp
index b7a9216a79..65e1a8b1d4 100644
--- a/lib/Transforms/Scalar/StructurizeCFG.cpp
+++ b/lib/Transforms/Scalar/StructurizeCFG.cpp
@@ -65,14 +65,14 @@ public:
/// \brief Start a new query
NearestCommonDominator(DominatorTree *DomTree) {
DT = DomTree;
- Result = 0;
+ Result = nullptr;
}
/// \brief Add BB to the resulting dominator
void addBlock(BasicBlock *BB, bool Remember = true) {
DomTreeNode *Node = DT->getNode(BB);
- if (Result == 0) {
+ if (!Result) {
unsigned Numbering = 0;
for (;Node;Node = Node->getIDom())
IndexMap[Node] = ++Numbering;
@@ -473,7 +473,7 @@ void StructurizeCFG::insertConditions(bool Loops) {
NearestCommonDominator Dominator(DT);
Dominator.addBlock(Parent, false);
- Value *ParentValue = 0;
+ Value *ParentValue = nullptr;
for (BBPredicates::iterator PI = Preds.begin(), PE = Preds.end();
PI != PE; ++PI) {
@@ -592,7 +592,7 @@ void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
if (Node->isSubRegion()) {
Region *SubRegion = Node->getNodeAs<Region>();
BasicBlock *OldExit = SubRegion->getExit();
- BasicBlock *Dominator = 0;
+ BasicBlock *Dominator = nullptr;
// Find all the edges from the sub region to the exit
for (pred_iterator I = pred_begin(OldExit), E = pred_end(OldExit);
@@ -679,7 +679,8 @@ BasicBlock *StructurizeCFG::needPostfix(BasicBlock *Flow,
/// \brief Set the previous node
void StructurizeCFG::setPrevNode(BasicBlock *BB) {
- PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB) : 0;
+ PrevNode = ParentRegion->contains(BB) ? ParentRegion->getBBNode(BB)
+ : nullptr;
}
/// \brief Does BB dominate all the predicates of Node?
@@ -700,7 +701,7 @@ bool StructurizeCFG::isPredictableTrue(RegionNode *Node) {
bool Dominated = false;
// Region entry is always true
- if (PrevNode == 0)
+ if (!PrevNode)
return true;
for (BBPredicates::iterator I = Preds.begin(), E = Preds.end();
@@ -807,11 +808,11 @@ void StructurizeCFG::createFlow() {
Conditions.clear();
LoopConds.clear();
- PrevNode = 0;
+ PrevNode = nullptr;
Visited.clear();
while (!Order.empty()) {
- handleLoops(EntryDominatesExit, 0);
+ handleLoops(EntryDominatesExit, nullptr);
}
if (PrevNode)
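
handleLoops is now called with an explicit nullptr argument. Beyond readability, nullptr survives template argument deduction where a literal 0 deduces int; a classic reduced case (forwardToPtr is illustrative):

    static void takePtr(int *) {}

    template <typename T> static void forwardToPtr(T V) { takePtr(V); }

    int main() {
      forwardToPtr(nullptr);  // ok: T = std::nullptr_t, which converts to int *
      // forwardToPtr(0);     // error: T = int, and int does not convert to int *
      return 0;
    }
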
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 469ab6ebfe..8b2fb1b3f9 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -180,7 +180,7 @@ bool TailCallElim::runOnFunction(Function &F) {
if (F.getFunctionType()->isVarArg()) return false;
TTI = &getAnalysis<TargetTransformInfo>();
- BasicBlock *OldEntry = 0;
+ BasicBlock *OldEntry = nullptr;
bool TailCallsAreMarkedTail = false;
SmallVector<PHINode*, 8> ArgumentPHIs;
bool MadeChange = false;
@@ -353,11 +353,11 @@ static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
//
static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
Function *F = CI->getParent()->getParent();
- Value *ReturnedValue = 0;
+ Value *ReturnedValue = nullptr;
for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI) {
ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator());
- if (RI == 0 || RI == IgnoreRI) continue;
+ if (RI == nullptr || RI == IgnoreRI) continue;
// We can only perform this transformation if the value returned is
// evaluatable at the start of the initial invocation of the function,
@@ -365,10 +365,10 @@ static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
//
Value *RetOp = RI->getOperand(0);
if (!isDynamicConstant(RetOp, CI, RI))
- return 0;
+ return nullptr;
if (ReturnedValue && RetOp != ReturnedValue)
- return 0; // Cannot transform if differing values are returned.
+ return nullptr; // Cannot transform if differing values are returned.
ReturnedValue = RetOp;
}
return ReturnedValue;
@@ -380,18 +380,18 @@ static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
///
Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
CallInst *CI) {
- if (!I->isAssociative() || !I->isCommutative()) return 0;
+ if (!I->isAssociative() || !I->isCommutative()) return nullptr;
assert(I->getNumOperands() == 2 &&
"Associative/commutative operations should have 2 args!");
// Exactly one operand should be the result of the call instruction.
if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
(I->getOperand(0) != CI && I->getOperand(1) != CI))
- return 0;
+ return nullptr;
// The only user of this instruction we allow is a single return instruction.
if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
- return 0;
+ return nullptr;
// Ok, now we have to check all of the other return instructions in this
// function. If they return non-constants or differing values, then we cannot
@@ -412,11 +412,11 @@ TailCallElim::FindTRECandidate(Instruction *TI,
Function *F = BB->getParent();
if (&BB->front() == TI) // Make sure there is something before the terminator.
- return 0;
+ return nullptr;
// Scan backwards from the return, checking to see if there is a tail call in
// this block. If so, set CI to it.
- CallInst *CI = 0;
+ CallInst *CI = nullptr;
BasicBlock::iterator BBI = TI;
while (true) {
CI = dyn_cast<CallInst>(BBI);
@@ -424,14 +424,14 @@ TailCallElim::FindTRECandidate(Instruction *TI,
break;
if (BBI == BB->begin())
- return 0; // Didn't find a potential tail call.
+ return nullptr; // Didn't find a potential tail call.
--BBI;
}
// If this call is marked as a tail call, and if there are dynamic allocas in
// the function, we cannot perform this optimization.
if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
- return 0;
+ return nullptr;
// As a special case, detect code like this:
// double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
@@ -451,7 +451,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
for (; I != E && FI != FE; ++I, ++FI)
if (*I != &*FI) break;
if (I == E && FI == FE)
- return 0;
+ return nullptr;
}
return CI;
@@ -472,8 +472,8 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
// which is different to the constant returned by other return instructions
// (which is recorded in AccumulatorRecursionEliminationInitVal). This is a
// special case of accumulator recursion, the operation being "return C".
- Value *AccumulatorRecursionEliminationInitVal = 0;
- Instruction *AccumulatorRecursionInstr = 0;
+ Value *AccumulatorRecursionEliminationInitVal = nullptr;
+ Instruction *AccumulatorRecursionInstr = nullptr;
// Ok, we found a potential tail call. We can currently only transform the
// tail call if all of the instructions between the call and the return are
@@ -503,8 +503,8 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
// accumulator recursion variable eliminated.
if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
!isa<UndefValue>(Ret->getReturnValue()) &&
- AccumulatorRecursionEliminationInitVal == 0 &&
- !getCommonReturnValue(0, CI)) {
+ AccumulatorRecursionEliminationInitVal == nullptr &&
+ !getCommonReturnValue(nullptr, CI)) {
// One case remains that we are able to handle: the current return
// instruction returns a constant, and all other return instructions
// return a different constant.
@@ -522,7 +522,7 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
// OK! We can transform this tail call. If this is the first one found,
// create the new entry block, allowing us to branch back to the old entry.
- if (OldEntry == 0) {
+ if (!OldEntry) {
OldEntry = &F->getEntryBlock();
BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
NewEntry->takeName(OldEntry);
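Aside: the motivation for this whole patch, as a minimal standalone sketch (not part of the diff). The literal 0 is an int first and a pointer second, so it can silently select the wrong overload; nullptr cannot:

    #include <cstdio>

    static void take(int)          { std::puts("int overload"); }
    static void take(const char *) { std::puts("pointer overload"); }

    int main() {
      take(0);        // binds to take(int): 0 is an integer literal first
      take(nullptr);  // unambiguously binds to take(const char *)
      // take(NULL);  // NULL may expand to 0 or 0L; depending on the
      //              // platform this picks the int overload or is ambiguous
      return 0;
    }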
diff --git a/lib/Transforms/Utils/AddDiscriminators.cpp b/lib/Transforms/Utils/AddDiscriminators.cpp
index 3edb46d34c..196ac79aaf 100644
--- a/lib/Transforms/Utils/AddDiscriminators.cpp
+++ b/lib/Transforms/Utils/AddDiscriminators.cpp
@@ -99,7 +99,7 @@ FunctionPass *llvm::createAddDiscriminatorsPass() {
static bool hasDebugInfo(const Function &F) {
NamedMDNode *CUNodes = F.getParent()->getNamedMetadata("llvm.dbg.cu");
- return CUNodes != 0;
+ return CUNodes != nullptr;
}
/// \brief Assign DWARF discriminators.
diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp
index b3cd5cedb6..dbe4897213 100644
--- a/lib/Transforms/Utils/BasicBlockUtils.cpp
+++ b/lib/Transforms/Utils/BasicBlockUtils.cpp
@@ -68,8 +68,8 @@ void llvm::DeleteDeadBlock(BasicBlock *BB) {
void llvm::FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P) {
if (!isa<PHINode>(BB->begin())) return;
- AliasAnalysis *AA = 0;
- MemoryDependenceAnalysis *MemDep = 0;
+ AliasAnalysis *AA = nullptr;
+ MemoryDependenceAnalysis *MemDep = nullptr;
if (P) {
AA = P->getAnalysisIfAvailable<AliasAnalysis>();
MemDep = P->getAnalysisIfAvailable<MemoryDependenceAnalysis>();
@@ -130,7 +130,7 @@ bool llvm::MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P) {
BasicBlock *OnlySucc = BB;
for (; SI != SE; ++SI)
if (*SI != OnlySucc) {
- OnlySucc = 0; // There are multiple distinct successors!
+ OnlySucc = nullptr; // There are multiple distinct successors!
break;
}
@@ -254,7 +254,7 @@ BasicBlock *llvm::SplitEdge(BasicBlock *BB, BasicBlock *Succ, Pass *P) {
// If the successor only has a single pred, split the top of the successor
// block.
assert(SP == BB && "CFG broken");
- SP = NULL;
+ SP = nullptr;
return SplitBlock(Succ, Succ->begin(), P);
}
@@ -310,7 +310,7 @@ static void UpdateAnalysisInformation(BasicBlock *OldBB, BasicBlock *NewBB,
if (!P) return;
LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
- Loop *L = LI ? LI->getLoopFor(OldBB) : 0;
+ Loop *L = LI ? LI->getLoopFor(OldBB) : nullptr;
// If we need to preserve loop analyses, collect some information about how
// this split will affect loops.
@@ -351,7 +351,7 @@ static void UpdateAnalysisInformation(BasicBlock *OldBB, BasicBlock *NewBB,
// loop). To find this, examine each of the predecessors and determine which
// loops enclose them, and select the most-nested loop which contains the
// loop containing the block being split.
- Loop *InnermostPredLoop = 0;
+ Loop *InnermostPredLoop = nullptr;
for (ArrayRef<BasicBlock*>::iterator
i = Preds.begin(), e = Preds.end(); i != e; ++i) {
BasicBlock *Pred = *i;
@@ -384,18 +384,18 @@ static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB,
ArrayRef<BasicBlock*> Preds, BranchInst *BI,
Pass *P, bool HasLoopExit) {
// Otherwise, create a new PHI node in NewBB for each PHI node in OrigBB.
- AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : 0;
+ AliasAnalysis *AA = P ? P->getAnalysisIfAvailable<AliasAnalysis>() : nullptr;
for (BasicBlock::iterator I = OrigBB->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I++);
// Check to see if all of the values coming in are the same. If so, we
// don't need to create a new PHI node, unless it's needed for LCSSA.
- Value *InVal = 0;
+ Value *InVal = nullptr;
if (!HasLoopExit) {
InVal = PN->getIncomingValueForBlock(Preds[0]);
for (unsigned i = 1, e = Preds.size(); i != e; ++i)
if (InVal != PN->getIncomingValueForBlock(Preds[i])) {
- InVal = 0;
+ InVal = nullptr;
break;
}
}
@@ -542,7 +542,7 @@ void llvm::SplitLandingPadPredecessors(BasicBlock *OrigBB,
e = pred_end(OrigBB);
}
- BasicBlock *NewBB2 = 0;
+ BasicBlock *NewBB2 = nullptr;
if (!NewBB2Preds.empty()) {
// Create another basic block for the rest of OrigBB's predecessors.
NewBB2 = BasicBlock::Create(OrigBB->getContext(),
@@ -607,7 +607,7 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
for (User::op_iterator i = NewRet->op_begin(), e = NewRet->op_end();
i != e; ++i) {
Value *V = *i;
- Instruction *NewBC = 0;
+ Instruction *NewBC = nullptr;
if (BitCastInst *BCI = dyn_cast<BitCastInst>(V)) {
// Return value might be bitcasted. Clone and insert it before the
// return instruction.
@@ -724,32 +724,32 @@ void llvm::SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
BasicBlock *&IfFalse) {
PHINode *SomePHI = dyn_cast<PHINode>(BB->begin());
- BasicBlock *Pred1 = NULL;
- BasicBlock *Pred2 = NULL;
+ BasicBlock *Pred1 = nullptr;
+ BasicBlock *Pred2 = nullptr;
if (SomePHI) {
if (SomePHI->getNumIncomingValues() != 2)
- return NULL;
+ return nullptr;
Pred1 = SomePHI->getIncomingBlock(0);
Pred2 = SomePHI->getIncomingBlock(1);
} else {
pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
if (PI == PE) // No predecessor
- return NULL;
+ return nullptr;
Pred1 = *PI++;
if (PI == PE) // Only one predecessor
- return NULL;
+ return nullptr;
Pred2 = *PI++;
if (PI != PE) // More than two predecessors
- return NULL;
+ return nullptr;
}
// We can only handle branches. Other control flow will be lowered to
// branches if possible anyway.
BranchInst *Pred1Br = dyn_cast<BranchInst>(Pred1->getTerminator());
BranchInst *Pred2Br = dyn_cast<BranchInst>(Pred2->getTerminator());
- if (Pred1Br == 0 || Pred2Br == 0)
- return 0;
+ if (!Pred1Br || !Pred2Br)
+ return nullptr;
// Eliminate code duplication by ensuring that Pred1Br is conditional if
// either is.
@@ -759,7 +759,7 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// required anyway, we stand no chance of eliminating it, so the xform is
// probably not profitable.
if (Pred1Br->isConditional())
- return 0;
+ return nullptr;
std::swap(Pred1, Pred2);
std::swap(Pred1Br, Pred2Br);
@@ -769,8 +769,8 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// The only thing we have to watch out for here is to make sure that Pred2
// doesn't have incoming edges from other blocks. If it does, the condition
// doesn't dominate BB.
- if (Pred2->getSinglePredecessor() == 0)
- return 0;
+ if (!Pred2->getSinglePredecessor())
+ return nullptr;
// If we found a conditional branch predecessor, make sure that it branches
// to BB and Pred2Br. If it doesn't, this isn't an "if statement".
@@ -785,7 +785,7 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
} else {
// We know that one arm of the conditional goes to BB, so the other must
// go somewhere unrelated, and this must not be an "if statement".
- return 0;
+ return nullptr;
}
return Pred1Br->getCondition();
@@ -795,12 +795,12 @@ Value *llvm::GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
// BB. Don't panic! If both blocks only have a single (identical)
// predecessor, and THAT is a conditional branch, then we're all ok!
BasicBlock *CommonPred = Pred1->getSinglePredecessor();
- if (CommonPred == 0 || CommonPred != Pred2->getSinglePredecessor())
- return 0;
+ if (CommonPred == nullptr || CommonPred != Pred2->getSinglePredecessor())
+ return nullptr;
// Otherwise, if this is a conditional branch, then we can use it!
BranchInst *BI = dyn_cast<BranchInst>(CommonPred->getTerminator());
- if (BI == 0) return 0;
+ if (!BI) return nullptr;
assert(BI->isConditional() && "Two successors but not conditional?");
if (BI->getSuccessor(0) == Pred1) {
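Aside: the GetIfCondition hunks above use nullptr as the "no result" answer while counting predecessors with an iterator. The same exactly-two idiom, reduced to a standalone sketch over a plain container (BasicBlock here is a stand-in type, not the LLVM class):

    #include <utility>
    #include <vector>

    struct BasicBlock {};

    // Returns the exactly-two predecessors, or {nullptr, nullptr} when there
    // are fewer or more -- the same early-out shape as GetIfCondition.
    static std::pair<BasicBlock *, BasicBlock *>
    exactlyTwoPreds(const std::vector<BasicBlock *> &Preds) {
      auto PI = Preds.begin(), PE = Preds.end();
      if (PI == PE) return {nullptr, nullptr};   // no predecessor
      BasicBlock *Pred1 = *PI++;
      if (PI == PE) return {nullptr, nullptr};   // only one predecessor
      BasicBlock *Pred2 = *PI++;
      if (PI != PE) return {nullptr, nullptr};   // more than two predecessors
      return {Pred1, Pred2};
    }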
diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp
index 4e2a6fc73a..80bd516375 100644
--- a/lib/Transforms/Utils/BreakCriticalEdges.cpp
+++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp
@@ -142,7 +142,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
Pass *P, bool MergeIdenticalEdges,
bool DontDeleteUselessPhis,
bool SplitLandingPads) {
- if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return 0;
+ if (!isCriticalEdge(TI, SuccNum, MergeIdenticalEdges)) return nullptr;
assert(!isa<IndirectBrInst>(TI) &&
"Cannot split critical edge from IndirectBrInst");
@@ -152,7 +152,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// Splitting the critical edge to a landing pad block is non-trivial. Don't do
// it in this generic function.
- if (DestBB->isLandingPad()) return 0;
+ if (DestBB->isLandingPad()) return nullptr;
// Create a new basic block, linking it into the CFG.
BasicBlock *NewBB = BasicBlock::Create(TI->getContext(),
@@ -208,15 +208,15 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
// If we don't have a pass object, we can't update anything...
- if (P == 0) return NewBB;
+ if (!P) return NewBB;
DominatorTreeWrapperPass *DTWP =
P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- DominatorTree *DT = DTWP ? &DTWP->getDomTree() : 0;
+ DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = P->getAnalysisIfAvailable<LoopInfo>();
// If we have nothing to update, just return.
- if (DT == 0 && LI == 0)
+ if (!DT && !LI)
return NewBB;
// Now update analysis information. Since the only predecessor of NewBB is
@@ -252,7 +252,7 @@ BasicBlock *llvm::SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
//
if (TINode) { // Don't break unreachable code!
DomTreeNode *NewBBNode = DT->addNewBlock(NewBB, TIBB);
- DomTreeNode *DestBBNode = 0;
+ DomTreeNode *DestBBNode = nullptr;
// If NewBBDominatesDestBB hasn't been computed yet, do so with DT.
if (!OtherPreds.empty()) {
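Aside: the SplitCriticalEdge hunk shows the optional-analysis idiom this patch converts in many places: fetch an analysis that may be absent, keep nullptr when it is, and bail out early if there is nothing to update. Reduced shape under hypothetical names:

    struct DominatorTree {};
    struct LoopInfo {};

    // Either analysis may be missing; nullptr means "not computed, skip it".
    static void updateAnalyses(DominatorTree *DT, LoopInfo *LI) {
      if (!DT && !LI)
        return;                      // nothing to update
      if (DT) { /* update the dominator tree */ }
      if (LI) { /* update the loop info */ }
    }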
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index 1539cc2d9a..be00b69561 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -36,7 +36,7 @@ Value *llvm::CastToCStr(Value *V, IRBuilder<> &B) {
Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strlen))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -65,7 +65,7 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strnlen))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -95,7 +95,7 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strchr))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
Attribute::AttrKind AVs[2] = { Attribute::ReadOnly, Attribute::NoUnwind };
@@ -121,7 +121,7 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::strncmp))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
@@ -154,7 +154,7 @@ Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name) {
if (!TLI->has(LibFunc::strcpy))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -178,7 +178,7 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI, StringRef Name) {
if (!TLI->has(LibFunc::strncpy))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -205,7 +205,7 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memcpy_chk))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS;
@@ -233,7 +233,7 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
Value *Len, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memchr))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS;
@@ -261,7 +261,7 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
Value *Len, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::memcmp))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
@@ -348,7 +348,7 @@ Value *llvm::EmitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::putchar))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
Value *PutChar = M->getOrInsertFunction("putchar", B.getInt32Ty(),
@@ -370,7 +370,7 @@ Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::puts))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -394,7 +394,7 @@ Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fputc))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[2];
@@ -427,7 +427,7 @@ Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fputs))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
@@ -460,7 +460,7 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc::fwrite))
- return 0;
+ return nullptr;
Module *M = B.GetInsertBlock()->getParent()->getParent();
AttributeSet AS[3];
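Aside: every Emit* helper above follows the same contract: nullptr is the "this libcall is unavailable on the target" answer, and callers must test the result before using it. A standalone sketch of that contract (hypothetical names, not the BuildLibCalls API):

    struct TargetLibInfoStub {         // stand-in for TargetLibraryInfo
      bool HasStrlen;
    };

    static const char *emitStrLenName(const TargetLibInfoStub &TLI) {
      if (!TLI.HasStrlen)
        return nullptr;                // caller falls back to another lowering
      return "strlen";
    }

    // Caller side: if (const char *N = emitStrLenName(TLI)) { /* use N */ }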
diff --git a/lib/Transforms/Utils/BypassSlowDivision.cpp b/lib/Transforms/Utils/BypassSlowDivision.cpp
index 4942460fdc..f2d5e07450 100644
--- a/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -54,11 +54,11 @@ namespace llvm {
}
static DivOpInfo getEmptyKey() {
- return DivOpInfo(false, 0, 0);
+ return DivOpInfo(false, nullptr, nullptr);
}
static DivOpInfo getTombstoneKey() {
- return DivOpInfo(true, 0, 0);
+ return DivOpInfo(true, nullptr, nullptr);
}
static unsigned getHashValue(const DivOpInfo &Val) {
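Aside: DenseMap reserves two key values that can never be real entries; the hunk above builds both from nullptr operand pointers and distinguishes them by the bool flag. The sentinel idea in self-contained form (plain C++, no LLVM types):

    #include <cassert>

    struct DivKey {                    // stand-in for DivOpInfo
      bool SignedOp;
      const int *Dividend;
      const int *Divisor;
      bool operator==(const DivKey &O) const {
        return SignedOp == O.SignedOp && Dividend == O.Dividend &&
               Divisor == O.Divisor;
      }
    };

    // Real keys always carry non-null operands, so these never collide with
    // live entries; the flag keeps the two sentinels distinct from each other.
    static DivKey emptyKey()     { return {false, nullptr, nullptr}; }
    static DivKey tombstoneKey() { return {true,  nullptr, nullptr}; }

    int main() {
      assert(!(emptyKey() == tombstoneKey()));
      return 0;
    }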
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index a199086aed..5c8f20d5f8 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -159,7 +159,7 @@ static MDNode* FindSubprogram(const Function *F, DebugInfoFinder &Finder) {
for (DISubprogram Subprogram : Finder.subprograms()) {
if (Subprogram.describes(F)) return Subprogram;
}
- return NULL;
+ return nullptr;
}
// Add an operand to an existing MDNode. The new operand will be added at the
@@ -359,7 +359,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
// If the condition was a known constant in the callee...
ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
// Or is a known constant in the caller...
- if (Cond == 0) {
+ if (!Cond) {
Value *V = VMap[BI->getCondition()];
Cond = dyn_cast_or_null<ConstantInt>(V);
}
@@ -375,7 +375,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
} else if (const SwitchInst *SI = dyn_cast<SwitchInst>(OldTI)) {
// If switching on a value known constant in the caller.
ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition());
- if (Cond == 0) { // Or known constant after constant prop in the callee...
+ if (!Cond) { // Or known constant after constant prop in the callee...
Value *V = VMap[SI->getCondition()];
Cond = dyn_cast_or_null<ConstantInt>(V);
}
@@ -454,7 +454,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
BI != BE; ++BI) {
Value *V = VMap[BI];
BasicBlock *NewBB = cast_or_null<BasicBlock>(V);
- if (NewBB == 0) continue; // Dead block.
+ if (!NewBB) continue; // Dead block.
// Add the new block to the new function.
NewFunc->getBasicBlockList().push_back(NewBB);
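Aside: most of the `X == 0` to `!X` rewrites in this file guard the result of dyn_cast or dyn_cast_or_null, whose contract is "nullptr on failure". A conceptual stand-in using RTTI (LLVM's real casting machinery avoids RTTI, but the nullptr contract is identical):

    struct Value { virtual ~Value() {} };
    struct ConstantInt : Value { int Val; };

    template <typename To, typename From>
    To *dynCastOrNull(From *V) {
      if (!V)
        return nullptr;               // tolerates null input, unlike dyn_cast
      return dynamic_cast<To *>(V);   // nullptr when V is not a To
    }

    // Shape of the CloneBlock hunks above:
    //   ConstantInt *Cond = dynCastOrNull<ConstantInt>(/* VMap lookup */);
    //   if (!Cond) { /* condition is not a known constant */ }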
diff --git a/lib/Transforms/Utils/CloneModule.cpp b/lib/Transforms/Utils/CloneModule.cpp
index 64df089e1b..5d180a5478 100644
--- a/lib/Transforms/Utils/CloneModule.cpp
+++ b/lib/Transforms/Utils/CloneModule.cpp
@@ -47,8 +47,8 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
GlobalVariable *GV = new GlobalVariable(*New,
I->getType()->getElementType(),
I->isConstant(), I->getLinkage(),
- (Constant*) 0, I->getName(),
- (GlobalVariable*) 0,
+ (Constant*) nullptr, I->getName(),
+ (GlobalVariable*) nullptr,
I->getThreadLocalMode(),
I->getType()->getAddressSpace());
GV->copyAttributesFrom(I);
@@ -68,7 +68,7 @@ Module *llvm::CloneModule(const Module *M, ValueToValueMapTy &VMap) {
for (Module::const_alias_iterator I = M->alias_begin(), E = M->alias_end();
I != E; ++I) {
GlobalAlias *GA = new GlobalAlias(I->getType(), I->getLinkage(),
- I->getName(), NULL, New);
+ I->getName(), nullptr, New);
GA->copyAttributesFrom(I);
VMap[I] = GA;
}
diff --git a/lib/Transforms/Utils/CmpInstAnalysis.cpp b/lib/Transforms/Utils/CmpInstAnalysis.cpp
index 8fa412a18b..3b15a0a3e6 100644
--- a/lib/Transforms/Utils/CmpInstAnalysis.cpp
+++ b/lib/Transforms/Utils/CmpInstAnalysis.cpp
@@ -84,7 +84,7 @@ Value *llvm::getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
case 7: // True.
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
}
- return NULL;
+ return nullptr;
}
/// PredicatesFoldable - Return true if both predicates match sign or if at
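Aside: getICmpValue above decodes a 3-bit predicate code, and the hunk shows code 7 is "always true". Assuming the bit layout implied by the surrounding switch (1 = greater, 2 = equal, 4 = less), the same decoding over plain ints:

    static bool evalPredCode(unsigned Code, int L, int R) {
      bool Gt = L > R, Eq = L == R, Lt = L < R;
      return ((Code & 1) && Gt) || ((Code & 2) && Eq) || ((Code & 4) && Lt);
    }
    // evalPredCode(7, x, y) is always true and evalPredCode(0, x, y) always
    // false, matching the constant-result cases in the switch.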
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index 4bdff5dfb3..e70a7d6e76 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -120,7 +120,7 @@ buildExtractionBlockSet(const RegionNode &RN) {
}
CodeExtractor::CodeExtractor(BasicBlock *BB, bool AggregateArgs)
- : DT(0), AggregateArgs(AggregateArgs||AggregateArgsOpt),
+ : DT(nullptr), AggregateArgs(AggregateArgs||AggregateArgsOpt),
Blocks(buildExtractionBlockSet(BB)), NumExitBlocks(~0U) {}
CodeExtractor::CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT,
@@ -412,7 +412,7 @@ static BasicBlock* FindPhiPredForUseInBlock(Value* Used, BasicBlock* BB) {
return P->getIncomingBlock(U);
}
- return 0;
+ return nullptr;
}
/// emitCallAndSwitchStatement - This method sets up the caller side by adding
@@ -440,14 +440,14 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
StructValues.push_back(*i);
} else {
AllocaInst *alloca =
- new AllocaInst((*i)->getType(), 0, (*i)->getName()+".loc",
+ new AllocaInst((*i)->getType(), nullptr, (*i)->getName()+".loc",
codeReplacer->getParent()->begin()->begin());
ReloadOutputs.push_back(alloca);
params.push_back(alloca);
}
}
- AllocaInst *Struct = 0;
+ AllocaInst *Struct = nullptr;
if (AggregateArgs && (inputs.size() + outputs.size() > 0)) {
std::vector<Type*> ArgTypes;
for (ValueSet::iterator v = StructValues.begin(),
@@ -457,7 +457,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Allocate a struct at the beginning of this function
Type *StructArgTy = StructType::get(newFunction->getContext(), ArgTypes);
Struct =
- new AllocaInst(StructArgTy, 0, "structArg",
+ new AllocaInst(StructArgTy, nullptr, "structArg",
codeReplacer->getParent()->begin()->begin());
params.push_back(Struct);
@@ -486,7 +486,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Reload the outputs passed in by reference
for (unsigned i = 0, e = outputs.size(); i != e; ++i) {
- Value *Output = 0;
+ Value *Output = nullptr;
if (AggregateArgs) {
Value *Idx[2];
Idx[0] = Constant::getNullValue(Type::getInt32Ty(Context));
@@ -539,7 +539,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
newFunction);
unsigned SuccNum = switchVal++;
- Value *brVal = 0;
+ Value *brVal = nullptr;
switch (NumExitBlocks) {
case 0:
case 1: break; // No value needed.
@@ -635,7 +635,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
// Check if the function should return a value
if (OldFnRetTy->isVoidTy()) {
- ReturnInst::Create(Context, 0, TheSwitch); // Return void
+ ReturnInst::Create(Context, nullptr, TheSwitch); // Return void
} else if (OldFnRetTy == TheSwitch->getCondition()->getType()) {
// return what we have
ReturnInst::Create(Context, TheSwitch->getCondition(), TheSwitch);
@@ -687,7 +687,7 @@ void CodeExtractor::moveCodeToFunction(Function *newFunction) {
Function *CodeExtractor::extractCodeRegion() {
if (!isEligible())
- return 0;
+ return nullptr;
ValueSet inputs, outputs;
diff --git a/lib/Transforms/Utils/DemoteRegToStack.cpp b/lib/Transforms/Utils/DemoteRegToStack.cpp
index ac6926fe9e..9972b22a07 100644
--- a/lib/Transforms/Utils/DemoteRegToStack.cpp
+++ b/lib/Transforms/Utils/DemoteRegToStack.cpp
@@ -25,17 +25,17 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
Instruction *AllocaPoint) {
if (I.use_empty()) {
I.eraseFromParent();
- return 0;
+ return nullptr;
}
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
- Slot = new AllocaInst(I.getType(), 0,
+ Slot = new AllocaInst(I.getType(), nullptr,
I.getName()+".reg2mem", AllocaPoint);
} else {
Function *F = I.getParent()->getParent();
- Slot = new AllocaInst(I.getType(), 0, I.getName()+".reg2mem",
+ Slot = new AllocaInst(I.getType(), nullptr, I.getName()+".reg2mem",
F->getEntryBlock().begin());
}
@@ -56,7 +56,7 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
if (PN->getIncomingValue(i) == &I) {
Value *&V = Loads[PN->getIncomingBlock(i)];
- if (V == 0) {
+ if (!V) {
// Insert the load into the predecessor block
V = new LoadInst(Slot, I.getName()+".reload", VolatileLoads,
PN->getIncomingBlock(i)->getTerminator());
@@ -110,17 +110,17 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,
AllocaInst *llvm::DemotePHIToStack(PHINode *P, Instruction *AllocaPoint) {
if (P->use_empty()) {
P->eraseFromParent();
- return 0;
+ return nullptr;
}
// Create a stack slot to hold the value.
AllocaInst *Slot;
if (AllocaPoint) {
- Slot = new AllocaInst(P->getType(), 0,
+ Slot = new AllocaInst(P->getType(), nullptr,
P->getName()+".reg2mem", AllocaPoint);
} else {
Function *F = P->getParent()->getParent();
- Slot = new AllocaInst(P->getType(), 0, P->getName()+".reg2mem",
+ Slot = new AllocaInst(P->getType(), nullptr, P->getName()+".reg2mem",
F->getEntryBlock().begin());
}
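Aside: in the AllocaInst calls above, the second argument is the array size, and nullptr means "a single element", i.e. a scalar stack slot. The optional-pointer shape, standalone (hypothetical helper, not the AllocaInst API):

    #include <cstddef>
    #include <cstdlib>

    static void *allocSlot(std::size_t ElemSize, const std::size_t *ArraySize) {
      std::size_t N = ArraySize ? *ArraySize : 1;   // nullptr -> scalar slot
      return std::calloc(N, ElemSize);
    }

    // allocSlot(sizeof(int), nullptr) reserves one int-sized slot, the
    // analogue of new AllocaInst(Ty, nullptr, ...).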
diff --git a/lib/Transforms/Utils/FlattenCFG.cpp b/lib/Transforms/Utils/FlattenCFG.cpp
index ffdfacdc59..51ead40c91 100644
--- a/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/lib/Transforms/Utils/FlattenCFG.cpp
@@ -28,11 +28,12 @@ class FlattenCFGOpt {
AliasAnalysis *AA;
/// \brief Use parallel-and or parallel-or to generate conditions for
/// conditional branches.
- bool FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder, Pass *P = 0);
+ bool FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
+ Pass *P = nullptr);
/// \brief If \p BB is the merge block of an if-region, attempt to merge
/// the if-region with an adjacent if-region upstream if two if-regions
/// contain identical instructions.
- bool MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder, Pass *P = 0);
+ bool MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder, Pass *P = nullptr);
/// \brief Compare a pair of blocks: \p Block1 and \p Block2, which
/// are from two if-regions whose entry blocks are \p Head1 and \p
/// Head2. \returns true if \p Block1 and \p Block2 contain identical
@@ -127,9 +128,9 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
if (PHI)
return false; // For simplicity, avoid cases containing PHI nodes.
- BasicBlock *LastCondBlock = NULL;
- BasicBlock *FirstCondBlock = NULL;
- BasicBlock *UnCondBlock = NULL;
+ BasicBlock *LastCondBlock = nullptr;
+ BasicBlock *FirstCondBlock = nullptr;
+ BasicBlock *UnCondBlock = nullptr;
int Idx = -1;
// Check predecessors of \p BB.
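Aside: FlattenParallelAndOr collapses a chain of conditional branches into one branch on a combined condition. The scalar analogy, as a sketch (legal only when the second condition is safe to evaluate unconditionally):

    static int nested(bool A, bool B) {
      if (A)
        if (B)
          return 1;     // two branches, two condition blocks
      return 0;
    }

    static int flattened(bool A, bool B) {
      if (A & B)        // non-short-circuit '&': both conditions evaluated,
        return 1;       // but only a single branch remains
      return 0;
    }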
diff --git a/lib/Transforms/Utils/GlobalStatus.cpp b/lib/Transforms/Utils/GlobalStatus.cpp
index e9ebc452a0..12057e4b92 100644
--- a/lib/Transforms/Utils/GlobalStatus.cpp
+++ b/lib/Transforms/Utils/GlobalStatus.cpp
@@ -61,7 +61,7 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
} else if (const Instruction *I = dyn_cast<Instruction>(UR)) {
if (!GS.HasMultipleAccessingFunctions) {
const Function *F = I->getParent()->getParent();
- if (GS.AccessingFunction == 0)
+ if (!GS.AccessingFunction)
GS.AccessingFunction = F;
else if (GS.AccessingFunction != F)
GS.HasMultipleAccessingFunctions = true;
@@ -176,6 +176,6 @@ bool GlobalStatus::analyzeGlobal(const Value *V, GlobalStatus &GS) {
GlobalStatus::GlobalStatus()
: IsCompared(false), IsLoaded(false), StoredType(NotStored),
- StoredOnceValue(0), AccessingFunction(0),
+ StoredOnceValue(nullptr), AccessingFunction(nullptr),
HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
Ordering(NotAtomic) {}
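Aside: analyzeGlobalAux uses the nullptr-sentinel "unique value" scan that recurs throughout this patch (AccessingFunction here, OnlyBlock and OnlySucc earlier, UniqueValue and SingularValue later). The standalone shape:

    #include <vector>

    struct Function {};   // stand-in type

    static const Function *
    uniqueAccessor(const std::vector<const Function *> &Fs, bool &Multiple) {
      const Function *Unique = nullptr;
      Multiple = false;
      for (const Function *F : Fs) {
        if (!Unique)
          Unique = F;              // first accessor seen
        else if (Unique != F)
          Multiple = true;         // a second, different accessor exists
      }
      return Unique;
    }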
diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp
index 1b2b992caf..adc10fc317 100644
--- a/lib/Transforms/Utils/InlineFunction.cpp
+++ b/lib/Transforms/Utils/InlineFunction.cpp
@@ -51,8 +51,8 @@ namespace {
public:
InvokeInliningInfo(InvokeInst *II)
- : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
- CallerLPad(0), InnerEHValuesPHI(0) {
+ : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
+ CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
// If there are PHI nodes in the unwind destination block, we need to keep
// track of which values came into them from the invoke before removing
// the edge from this block.
@@ -289,13 +289,13 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
- if (VMI == VMap.end() || VMI->second == 0)
+ if (VMI == VMap.end() || VMI->second == nullptr)
continue;
// If the call was inlined, but then constant folded, there is no edge to
// add. Check for this case.
Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
- if (NewCall == 0) continue;
+ if (!NewCall) continue;
// Remember that this call site got inlined for the client of
// InlineFunction.
@@ -306,7 +306,7 @@ static void UpdateCallGraphAfterInlining(CallSite CS,
// happens, set the callee of the new call site to a more precise
// destination. This can also happen if the call graph node of the caller
// was just unnecessarily imprecise.
- if (I->second->getFunction() == 0)
+ if (!I->second->getFunction())
if (Function *F = CallSite(NewCall).getCalledFunction()) {
// Indirect call site resolved to direct call.
CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
@@ -335,7 +335,7 @@ static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");
Value *Size;
- if (IFI.DL == 0)
+ if (IFI.DL == nullptr)
Size = ConstantExpr::getSizeOf(AggTy);
else
Size = ConstantInt::get(Type::getInt64Ty(Context),
@@ -393,7 +393,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
Function *Caller = TheCall->getParent()->getParent();
- Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
+ Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
&*Caller->begin()->begin());
IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
@@ -497,7 +497,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
IFI.reset();
const Function *CalledFunc = CS.getCalledFunction();
- if (CalledFunc == 0 || // Can't inline external function or indirect
+ if (!CalledFunc || // Can't inline external function or indirect
CalledFunc->isDeclaration() || // call, or call to a vararg function!
CalledFunc->getFunctionType()->isVarArg()) return false;
@@ -525,7 +525,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
}
// Get the personality function from the callee if it contains a landing pad.
- Value *CalleePersonality = 0;
+ Value *CalleePersonality = nullptr;
for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
I != E; ++I)
if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
@@ -629,7 +629,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
for (BasicBlock::iterator I = FirstNewBlock->begin(),
E = FirstNewBlock->end(); I != E; ) {
AllocaInst *AI = dyn_cast<AllocaInst>(I++);
- if (AI == 0) continue;
+ if (!AI) continue;
// If the alloca is now dead, remove it. This often occurs due to code
// specialization.
@@ -674,7 +674,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
continue;
// Try to determine the size of the allocation.
- ConstantInt *AllocaSize = 0;
+ ConstantInt *AllocaSize = nullptr;
if (ConstantInt *AIArraySize =
dyn_cast<ConstantInt>(AI->getArraySize())) {
if (IFI.DL) {
@@ -784,7 +784,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// "starter" and "ender" blocks. How we accomplish this depends on whether
// this is an invoke instruction or a call instruction.
BasicBlock *AfterCallBB;
- BranchInst *CreatedBranchToNormalDest = NULL;
+ BranchInst *CreatedBranchToNormalDest = nullptr;
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
// Add an unconditional branch to make this look like the CallInst case...
@@ -823,7 +823,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// any users of the original call/invoke instruction.
Type *RTy = CalledFunc->getReturnType();
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
if (Returns.size() > 1) {
// The PHI node should go at the front of the new basic block to merge all
// possible incoming values.
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index abd73eb5c9..b3fc3dd6bb 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -161,7 +161,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// Otherwise, check to see if the switch only branches to one destination.
// We do this by resetting "TheOnlyDest" to null when we find two non-equal
// destinations.
- if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = 0;
+ if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = nullptr;
}
if (CI && !TheOnlyDest) {
@@ -182,7 +182,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
// Found case matching a constant operand?
BasicBlock *Succ = SI->getSuccessor(i);
if (Succ == TheOnlyDest)
- TheOnlyDest = 0; // Don't modify the first branch to TheOnlyDest
+ TheOnlyDest = nullptr; // Don't modify the first branch to TheOnlyDest
else
Succ->removePredecessor(BB);
}
@@ -235,7 +235,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
if (IBI->getDestination(i) == TheOnlyDest)
- TheOnlyDest = 0;
+ TheOnlyDest = nullptr;
else
IBI->getDestination(i)->removePredecessor(IBI->getParent());
}
@@ -333,7 +333,7 @@ llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
// dead as we go.
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
Value *OpV = I->getOperand(i);
- I->setOperand(i, 0);
+ I->setOperand(i, nullptr);
if (!OpV->use_empty()) continue;
@@ -983,10 +983,10 @@ bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
if (LdStHasDebugValue(DIVar, SI))
return true;
- Instruction *DbgVal = NULL;
+ Instruction *DbgVal = nullptr;
// If an argument is zero extended then use the argument directly. The ZExt
// may be zapped by an optimization pass in the future.
- Argument *ExtendedArg = NULL;
+ Argument *ExtendedArg = nullptr;
if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
@@ -1078,7 +1078,7 @@ DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
return DDI;
- return 0;
+ return nullptr;
}
bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index ae8ec426a7..bd825eb864 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -86,7 +86,7 @@ static void placeSplitBlockCarefully(BasicBlock *NewBB,
// Figure out *which* outside block to put this after. Prefer an outside
// block that neighbors a BB actually in the loop.
- BasicBlock *FoundBB = 0;
+ BasicBlock *FoundBB = nullptr;
for (unsigned i = 0, e = SplitPreds.size(); i != e; ++i) {
Function::iterator BBI = SplitPreds[i];
if (++BBI != NewBB->getParent()->end() &&
@@ -120,7 +120,7 @@ BasicBlock *llvm::InsertPreheaderForLoop(Loop *L, Pass *PP) {
// If the loop is branched to from an indirect branch, we won't
// be able to fully transform the loop, because it prohibits
// edge splitting.
- if (isa<IndirectBrInst>(P->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;
// Keep track of it.
OutsideBlocks.push_back(P);
@@ -161,14 +161,14 @@ static BasicBlock *rewriteLoopExitBlock(Loop *L, BasicBlock *Exit, Pass *PP) {
BasicBlock *P = *I;
if (L->contains(P)) {
// Don't do this if the loop is exited via an indirect branch.
- if (isa<IndirectBrInst>(P->getTerminator())) return 0;
+ if (isa<IndirectBrInst>(P->getTerminator())) return nullptr;
LoopBlocks.push_back(P);
}
}
assert(!LoopBlocks.empty() && "No edges coming in from outside the loop?");
- BasicBlock *NewExitBB = 0;
+ BasicBlock *NewExitBB = nullptr;
if (Exit->isLandingPad()) {
SmallVector<BasicBlock*, 2> NewBBs;
@@ -212,7 +212,7 @@ static PHINode *findPHIToPartitionLoops(Loop *L, AliasAnalysis *AA,
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ) {
PHINode *PN = cast<PHINode>(I);
++I;
- if (Value *V = SimplifyInstruction(PN, 0, 0, DT)) {
+ if (Value *V = SimplifyInstruction(PN, nullptr, nullptr, DT)) {
// This is a degenerate PHI already, don't modify it!
PN->replaceAllUsesWith(V);
if (AA) AA->deleteValue(PN);
@@ -227,7 +227,7 @@ static PHINode *findPHIToPartitionLoops(Loop *L, AliasAnalysis *AA,
// We found something tasty to remove.
return PN;
}
- return 0;
+ return nullptr;
}
/// \brief If this loop has multiple backedges, try to pull one of them out into
@@ -254,14 +254,14 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
LoopInfo *LI, ScalarEvolution *SE, Pass *PP) {
// Don't try to separate loops without a preheader.
if (!Preheader)
- return 0;
+ return nullptr;
// The header is not a landing pad; preheader insertion should ensure this.
assert(!L->getHeader()->isLandingPad() &&
"Can't insert backedge to landing pad");
PHINode *PN = findPHIToPartitionLoops(L, AA, DT);
- if (PN == 0) return 0; // No known way to partition.
+ if (!PN) return nullptr; // No known way to partition.
// Pull out all predecessors that have varying values in the loop. This
// handles the case when a PHI node has multiple instances of itself as
@@ -272,7 +272,7 @@ static Loop *separateNestedLoop(Loop *L, BasicBlock *Preheader,
!L->contains(PN->getIncomingBlock(i))) {
// We can't split indirectbr edges.
if (isa<IndirectBrInst>(PN->getIncomingBlock(i)->getTerminator()))
- return 0;
+ return nullptr;
OuterLoopPreds.push_back(PN->getIncomingBlock(i));
}
}
@@ -363,7 +363,7 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
// Unique backedge insertion currently depends on having a preheader.
if (!Preheader)
- return 0;
+ return nullptr;
// The header is not a landing pad; preheader insertion should ensure this.
assert(!Header->isLandingPad() && "Can't insert backedge to landing pad");
@@ -375,7 +375,7 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
// Indirectbr edges cannot be split, so we must fail if we find one.
if (isa<IndirectBrInst>(P->getTerminator()))
- return 0;
+ return nullptr;
if (P != Preheader) BackedgeBlocks.push_back(P);
}
@@ -404,7 +404,7 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
// preheader over to the new PHI node.
unsigned PreheaderIdx = ~0U;
bool HasUniqueIncomingValue = true;
- Value *UniqueValue = 0;
+ Value *UniqueValue = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
BasicBlock *IBB = PN->getIncomingBlock(i);
Value *IV = PN->getIncomingValue(i);
@@ -413,7 +413,7 @@ static BasicBlock *insertUniqueBackedgeBlock(Loop *L, BasicBlock *Preheader,
} else {
NewPN->addIncoming(IV, IBB);
if (HasUniqueIncomingValue) {
- if (UniqueValue == 0)
+ if (!UniqueValue)
UniqueValue = IV;
else if (UniqueValue != IV)
HasUniqueIncomingValue = false;
@@ -610,7 +610,7 @@ ReprocessLoop:
PHINode *PN;
for (BasicBlock::iterator I = L->getHeader()->begin();
(PN = dyn_cast<PHINode>(I++)); )
- if (Value *V = SimplifyInstruction(PN, 0, 0, DT)) {
+ if (Value *V = SimplifyInstruction(PN, nullptr, nullptr, DT)) {
if (AA) AA->deleteValue(PN);
if (SE) SE->forgetValue(PN);
PN->replaceAllUsesWith(V);
@@ -654,7 +654,8 @@ ReprocessLoop:
if (Inst == CI)
continue;
if (!L->makeLoopInvariant(Inst, AnyInvariant,
- Preheader ? Preheader->getTerminator() : 0)) {
+ Preheader ? Preheader->getTerminator()
+ : nullptr)) {
AllInvariant = false;
break;
}
diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp
index 543f774c64..668324881e 100644
--- a/lib/Transforms/Utils/LoopUnroll.cpp
+++ b/lib/Transforms/Utils/LoopUnroll.cpp
@@ -69,10 +69,10 @@ static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI,
// pred, and if there is only one distinct successor of the predecessor, and
// if there are no PHI nodes.
BasicBlock *OnlyPred = BB->getSinglePredecessor();
- if (!OnlyPred) return 0;
+ if (!OnlyPred) return nullptr;
if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
- return 0;
+ return nullptr;
DEBUG(dbgs() << "Merging: " << *BB << " into: " << *OnlyPred);
@@ -412,7 +412,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
}
}
- DominatorTree *DT = 0;
+ DominatorTree *DT = nullptr;
if (PP) {
// FIXME: Reconstruct dom info, because it is not preserved properly.
// Incrementally updating domtree after loop unrolling would be easy.
@@ -459,7 +459,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
Loop *OuterL = L->getParentLoop();
// Remove the loop from the LoopPassManager if it's completely removed.
- if (CompletelyUnroll && LPM != NULL)
+ if (CompletelyUnroll && LPM != nullptr)
LPM->deleteLoopFromQueue(L);
// If we have a pass and a DominatorTree we should re-simplify impacted loops
@@ -471,7 +471,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
OuterL = L;
if (OuterL) {
ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
- simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ 0, SE);
+ simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE);
formLCSSARecursively(*OuterL, *DT, SE);
}
}
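Aside: when a nullptr argument stands in for an entire analysis, as in the simplifyLoop call above, the patch keeps an inline /*ParamName*/ comment so the call site stays readable. The convention in miniature:

    struct AliasAnalysis {};

    static void simplify(int Depth, AliasAnalysis *AA) {
      (void)Depth;
      if (!AA) { /* run without alias information */ }
    }

    int main() {
      simplify(2, /*AliasAnalysis*/ nullptr);   // name the null argument inline
      return 0;
    }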
diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
index 67f6eb5086..d1ea2d6be6 100644
--- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp
+++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp
@@ -233,7 +233,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
// Make sure the loop is in canonical form and has exactly one exit block.
- if (!L->isLoopSimplifyForm() || L->getUniqueExitBlock() == 0)
+ if (!L->isLoopSimplifyForm() || !L->getUniqueExitBlock())
return false;
// Use Scalar Evolution to compute the trip count. This allows more
@@ -241,7 +241,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
if (!LPM)
return false;
ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
- if (SE == 0)
+ if (!SE)
return false;
// Only unroll loops with a computable trip count and the trip count needs
@@ -302,7 +302,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, LoopInfo *LI,
ValueToValueMapTy LVMap;
Function *F = Header->getParent();
// These variables are used to update the CFG links in each iteration
- BasicBlock *CompareBB = 0;
+ BasicBlock *CompareBB = nullptr;
BasicBlock *LastLoopBB = PH;
// Get an ordered list of blocks in the loop to help with the ordering of the
// cloned blocks in the prolog code
diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp
index 6d5cd78095..9ef694c4ce 100644
--- a/lib/Transforms/Utils/LowerSwitch.cpp
+++ b/lib/Transforms/Utils/LowerSwitch.cpp
@@ -53,7 +53,8 @@ namespace {
Constant* High;
BasicBlock* BB;
- CaseRange(Constant *low = 0, Constant *high = 0, BasicBlock *bb = 0) :
+ CaseRange(Constant *low = nullptr, Constant *high = nullptr,
+ BasicBlock *bb = nullptr) :
Low(low), High(high), BB(bb) { }
};
@@ -184,7 +185,7 @@ BasicBlock* LowerSwitch::newLeafBlock(CaseRange& Leaf, Value* Val,
F->getBasicBlockList().insert(++FI, NewLeaf);
// Emit comparison
- ICmpInst* Comp = NULL;
+ ICmpInst* Comp = nullptr;
if (Leaf.Low == Leaf.High) {
// Make the seteq instruction...
Comp = new ICmpInst(*NewLeaf, ICmpInst::ICMP_EQ, Val,
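Aside: CaseRange above moves its default arguments from 0 to nullptr. Defaults live in the declaration, so the spelling matters there just as at call sites; reduced shape:

    struct Range {
      const int *Low;
      const int *High;
      Range(const int *low = nullptr, const int *high = nullptr)
          : Low(low), High(high) {}
    };

    // Range r;      -> both bounds null ("unbounded")
    // Range r(&x);  -> only High defaulted to nullptr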
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 13d75a8a02..06d73feb1c 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -116,11 +116,11 @@ struct AllocaInfo {
void clear() {
DefiningBlocks.clear();
UsingBlocks.clear();
- OnlyStore = 0;
- OnlyBlock = 0;
+ OnlyStore = nullptr;
+ OnlyBlock = nullptr;
OnlyUsedInOneBlock = true;
- AllocaPointerVal = 0;
- DbgDeclare = 0;
+ AllocaPointerVal = nullptr;
+ DbgDeclare = nullptr;
}
/// Scan the uses of the specified alloca, filling in the AllocaInfo used
@@ -148,7 +148,7 @@ struct AllocaInfo {
}
if (OnlyUsedInOneBlock) {
- if (OnlyBlock == 0)
+ if (!OnlyBlock)
OnlyBlock = User->getParent();
else if (OnlyBlock != User->getParent())
OnlyUsedInOneBlock = false;
@@ -164,7 +164,7 @@ class RenamePassData {
public:
typedef std::vector<Value *> ValVector;
- RenamePassData() : BB(NULL), Pred(NULL), Values() {}
+ RenamePassData() : BB(nullptr), Pred(nullptr), Values() {}
RenamePassData(BasicBlock *B, BasicBlock *P, const ValVector &V)
: BB(B), Pred(P), Values(V) {}
BasicBlock *BB;
@@ -473,7 +473,8 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// Find the nearest store that has a lower index than this load.
StoresByIndexTy::iterator I =
std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
- std::make_pair(LoadIdx, static_cast<StoreInst *>(0)),
+ std::make_pair(LoadIdx,
+ static_cast<StoreInst *>(nullptr)),
less_first());
if (I == StoresByIndex.begin())
@@ -634,7 +635,7 @@ void PromoteMem2Reg::run() {
// and inserting the phi nodes we marked as necessary
//
std::vector<RenamePassData> RenamePassWorkList;
- RenamePassWorkList.push_back(RenamePassData(F.begin(), 0, Values));
+ RenamePassWorkList.push_back(RenamePassData(F.begin(), nullptr, Values));
do {
RenamePassData RPD;
RPD.swap(RenamePassWorkList.back());
@@ -684,7 +685,7 @@ void PromoteMem2Reg::run() {
PHINode *PN = I->second;
// If this PHI node merges one value and/or undefs, get the value.
- if (Value *V = SimplifyInstruction(PN, 0, 0, &DT)) {
+ if (Value *V = SimplifyInstruction(PN, nullptr, nullptr, &DT)) {
if (AST && PN->getType()->isPointerTy())
AST->deleteValue(PN);
PN->replaceAllUsesWith(V);
@@ -992,7 +993,7 @@ NextIteration:
// Get the next phi node.
++PNI;
APN = dyn_cast<PHINode>(PNI);
- if (APN == 0)
+ if (!APN)
break;
// Verify that it is missing entries. If not, it is not being inserted
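Aside: the promoteSingleBlockAlloca hunk needs static_cast<StoreInst *>(nullptr) because less_first is templated on a single type: a bare nullptr would make make_pair produce std::pair<unsigned, std::nullptr_t>, and deduction against the stored std::pair<unsigned, StoreInst *> elements would fail. Standalone version of the same search:

    #include <algorithm>
    #include <utility>
    #include <vector>

    struct Store {};        // stand-in for StoreInst

    struct LessFirst {      // same single-type shape as llvm::less_first
      template <typename T>
      bool operator()(const T &L, const T &R) const { return L.first < R.first; }
    };

    int main() {
      std::vector<std::pair<unsigned, Store *> > StoresByIndex; // sorted by first
      unsigned LoadIdx = 42;
      std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
                       std::make_pair(LoadIdx, static_cast<Store *>(nullptr)),
                       LessFirst());  // without the cast, T would not deduce
      return 0;
    }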
diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp
index 1bf9dea1f6..44dc7b7ec8 100644
--- a/lib/Transforms/Utils/SSAUpdater.cpp
+++ b/lib/Transforms/Utils/SSAUpdater.cpp
@@ -35,14 +35,14 @@ static AvailableValsTy &getAvailableVals(void *AV) {
}
SSAUpdater::SSAUpdater(SmallVectorImpl<PHINode*> *NewPHI)
- : AV(0), ProtoType(0), ProtoName(), InsertedPHIs(NewPHI) {}
+ : AV(nullptr), ProtoType(nullptr), ProtoName(), InsertedPHIs(NewPHI) {}
SSAUpdater::~SSAUpdater() {
delete static_cast<AvailableValsTy*>(AV);
}
void SSAUpdater::Initialize(Type *Ty, StringRef Name) {
- if (AV == 0)
+ if (!AV)
AV = new AvailableValsTy();
else
getAvailableVals(AV).clear();
@@ -91,7 +91,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
// Otherwise, we have the hard case. Get the live-in values for each
// predecessor.
SmallVector<std::pair<BasicBlock*, Value*>, 8> PredValues;
- Value *SingularValue = 0;
+ Value *SingularValue = nullptr;
// We can get our predecessor info by walking the pred_iterator list, but it
// is relatively slow. If we already have PHI nodes in this block, walk one
@@ -106,7 +106,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
if (i == 0)
SingularValue = PredVal;
else if (PredVal != SingularValue)
- SingularValue = 0;
+ SingularValue = nullptr;
}
} else {
bool isFirstPred = true;
@@ -120,7 +120,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
SingularValue = PredVal;
isFirstPred = false;
} else if (PredVal != SingularValue)
- SingularValue = 0;
+ SingularValue = nullptr;
}
}
@@ -129,7 +129,7 @@ Value *SSAUpdater::GetValueInMiddleOfBlock(BasicBlock *BB) {
return UndefValue::get(ProtoType);
// Otherwise, if all the merged values are the same, just use it.
- if (SingularValue != 0)
+ if (SingularValue)
return SingularValue;
// Otherwise, we do need a PHI: check to see if we already have one available
@@ -292,7 +292,7 @@ public:
PHINode *PHI = ValueIsPHI(Val, Updater);
if (PHI && PHI->getNumIncomingValues() == 0)
return PHI;
- return 0;
+ return nullptr;
}
/// GetPHIValue - For the specified PHI instruction, return the value
@@ -402,7 +402,7 @@ run(const SmallVectorImpl<Instruction*> &Insts) const {
// the order of these instructions in the block. If the first use in the
// block is a load, then it uses the live in value. The last store defines
// the live out value. We handle this by doing a linear scan of the block.
- Value *StoredValue = 0;
+ Value *StoredValue = nullptr;
for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ++II) {
if (LoadInst *L = dyn_cast<LoadInst>(II)) {
// If this is a load from an unrelated pointer, ignore it.
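Aside: SSAUpdater keeps its map behind an opaque void* and allocates it lazily; the Initialize hunk above is the nullptr-checked lazy-init idiom. Reduced shape:

    #include <map>
    #include <string>

    class UpdaterSketch {
      void *AV = nullptr;                     // opaque storage, as in SSAUpdater
      typedef std::map<std::string, int> MapTy;
      MapTy &vals() { return *static_cast<MapTy *>(AV); }
    public:
      void initialize() {
        if (!AV)
          AV = new MapTy();                   // first use: allocate
        else
          vals().clear();                     // later uses: just reset
      }
      ~UpdaterSketch() { delete static_cast<MapTy *>(AV); }
    };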
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 89eef6de38..cb747f6f51 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -273,12 +273,12 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB,
// branch to BB, then it must be in the 'conditional' part of the "if
// statement". If not, it definitely dominates the region.
BranchInst *BI = dyn_cast<BranchInst>(PBB->getTerminator());
- if (BI == 0 || BI->isConditional() || BI->getSuccessor(0) != BB)
+ if (!BI || BI->isConditional() || BI->getSuccessor(0) != BB)
return true;
// If we aren't allowing aggressive promotion anymore, then don't consider
// instructions in the 'if region'.
- if (AggressiveInsts == 0) return false;
+ if (!AggressiveInsts) return false;
// If we have seen this instruction before, don't count it again.
if (AggressiveInsts->count(I)) return true;
@@ -333,7 +333,7 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *DL) {
return cast<ConstantInt>
(ConstantExpr::getIntegerCast(CI, PtrTy, /*isSigned=*/false));
}
- return 0;
+ return nullptr;
}
/// GatherConstantCompares - Given a potentially 'or'd or 'and'd together
@@ -344,7 +344,7 @@ static Value *
GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
const DataLayout *DL, bool isEQ, unsigned &UsedICmps) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return 0;
+ if (!I) return nullptr;
// If this is an icmp against a constant, handle this as one of the cases.
if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
@@ -391,19 +391,19 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
// If there are a ton of values, we don't want to make a ginormous switch.
if (Span.getSetSize().ugt(8) || Span.isEmptySet())
- return 0;
+ return nullptr;
for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp)
Vals.push_back(ConstantInt::get(V->getContext(), Tmp));
UsedICmps++;
return hasAdd ? RHSVal : I->getOperand(0);
}
- return 0;
+ return nullptr;
}
// Otherwise, we can only handle an | or &, depending on isEQ.
if (I->getOpcode() != (isEQ ? Instruction::Or : Instruction::And))
- return 0;
+ return nullptr;
unsigned NumValsBeforeLHS = Vals.size();
unsigned UsedICmpsBeforeLHS = UsedICmps;
@@ -421,19 +421,19 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
// The RHS of the or/and can't be folded in and we haven't used "Extra" yet,
// set it and return success.
- if (Extra == 0 || Extra == I->getOperand(1)) {
+ if (Extra == nullptr || Extra == I->getOperand(1)) {
Extra = I->getOperand(1);
return LHS;
}
Vals.resize(NumValsBeforeLHS);
UsedICmps = UsedICmpsBeforeLHS;
- return 0;
+ return nullptr;
}
// If the LHS can't be folded in, but Extra is available and RHS can, try to
// use LHS as Extra.
- if (Extra == 0 || Extra == I->getOperand(0)) {
+ if (Extra == nullptr || Extra == I->getOperand(0)) {
Value *OldExtra = Extra;
Extra = I->getOperand(0);
if (Value *RHS = GatherConstantCompares(I->getOperand(1), Vals, Extra, DL,
@@ -443,11 +443,11 @@ GatherConstantCompares(Value *V, std::vector<ConstantInt*> &Vals, Value *&Extra,
Extra = OldExtra;
}
- return 0;
+ return nullptr;
}
static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
- Instruction *Cond = 0;
+ Instruction *Cond = nullptr;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
Cond = dyn_cast<Instruction>(SI->getCondition());
} else if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
@@ -464,7 +464,7 @@ static void EraseTerminatorInstAndDCECond(TerminatorInst *TI) {
/// isValueEqualityComparison - Return true if the specified terminator checks
/// to see if a value is equal to constant integer value.
Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
- Value *CV = 0;
+ Value *CV = nullptr;
if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
// Do not permit merging of large switch instructions into their
// predecessors unless there is only one predecessor.
@@ -654,11 +654,11 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// Otherwise, TI's block must correspond to some matched value. Find out
// which value (or set of values) this is.
- ConstantInt *TIV = 0;
+ ConstantInt *TIV = nullptr;
BasicBlock *TIBB = TI->getParent();
for (unsigned i = 0, e = PredCases.size(); i != e; ++i)
if (PredCases[i].Dest == TIBB) {
- if (TIV != 0)
+ if (TIV)
return false; // Cannot handle multiple values coming to this block.
TIV = PredCases[i].Value;
}
@@ -666,7 +666,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
// Okay, we found the one constant that our value can be if we get into TI's
// BB. Find out which successor will unconditionally be branched to.
- BasicBlock *TheRealDest = 0;
+ BasicBlock *TheRealDest = nullptr;
for (unsigned i = 0, e = ThisCases.size(); i != e; ++i)
if (ThisCases[i].Value == TIV) {
TheRealDest = ThisCases[i].Dest;
@@ -674,7 +674,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
}
// If not handled by any explicit cases, it is handled by the default case.
- if (TheRealDest == 0) TheRealDest = ThisDef;
+ if (!TheRealDest) TheRealDest = ThisDef;
// Remove PHI node entries for dead edges.
BasicBlock *CheckEdge = TheRealDest;
@@ -682,7 +682,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI,
if (*SI != CheckEdge)
(*SI)->removePredecessor(TIBB);
else
- CheckEdge = 0;
+ CheckEdge = nullptr;
// Insert the new branch.
Instruction *NI = Builder.CreateBr(TheRealDest);
@@ -951,10 +951,10 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// Okay, last check. If BB is still a successor of PSI, then we must
// have an infinite loop case. If so, add an infinitely looping block
// to handle the case to preserve the behavior of the code.
- BasicBlock *InfLoopBlock = 0;
+ BasicBlock *InfLoopBlock = nullptr;
for (unsigned i = 0, e = NewSI->getNumSuccessors(); i != e; ++i)
if (NewSI->getSuccessor(i) == BB) {
- if (InfLoopBlock == 0) {
+ if (!InfLoopBlock) {
// Insert it at the end of the function, because it's either code,
// or it won't matter if it's hot. :)
InfLoopBlock = BasicBlock::Create(BB->getContext(),
@@ -1100,7 +1100,7 @@ HoistTerminator:
// These values do not agree. Insert a select instruction before NT
// that determines the right value.
SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)];
- if (SI == 0)
+ if (!SI)
SI = cast<SelectInst>
(Builder.CreateSelect(BI->getCondition(), BB1V, BB2V,
BB1V->getName()+"."+BB2V->getName()));
@@ -1145,7 +1145,7 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) {
// Gather the PHI nodes in BBEnd.
std::map<Value*, std::pair<Value*, PHINode*> > MapValueFromBB1ToBB2;
- Instruction *FirstNonPhiInBBEnd = 0;
+ Instruction *FirstNonPhiInBBEnd = nullptr;
for (BasicBlock::iterator I = BBEnd->begin(), E = BBEnd->end();
I != E; ++I) {
if (PHINode *PN = dyn_cast<PHINode>(I)) {
@@ -1223,7 +1223,7 @@ static bool SinkThenElseCodeToEnd(BranchInst *BI1) {
// The operands should be either the same or they need to be generated
// with a PHI node after sinking. We only handle the case where there is
// a single pair of different operands.
- Value *DifferentOp1 = 0, *DifferentOp2 = 0;
+ Value *DifferentOp1 = nullptr, *DifferentOp2 = nullptr;
unsigned Op1Idx = 0;
for (unsigned I = 0, E = I1->getNumOperands(); I != E; ++I) {
if (I1->getOperand(I) == I2->getOperand(I))
@@ -1319,11 +1319,11 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
BasicBlock *StoreBB, BasicBlock *EndBB) {
StoreInst *StoreToHoist = dyn_cast<StoreInst>(I);
if (!StoreToHoist)
- return 0;
+ return nullptr;
// Volatile or atomic.
if (!StoreToHoist->isSimple())
- return 0;
+ return nullptr;
Value *StorePtr = StoreToHoist->getPointerOperand();
@@ -1335,7 +1335,7 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
// Could be calling an instruction that effects memory like free().
if (CurI->mayHaveSideEffects() && !isa<StoreInst>(CurI))
- return 0;
+ return nullptr;
StoreInst *SI = dyn_cast<StoreInst>(CurI);
// Found the previous store; make sure it stores to the same location.
@@ -1343,10 +1343,10 @@ static Value *isSafeToSpeculateStore(Instruction *I, BasicBlock *BrBB,
// Found the previous store, return its value operand.
return SI->getValueOperand();
else if (SI)
- return 0; // Unknown store.
+ return nullptr; // Unknown store.
}
- return 0;
+ return nullptr;
}
/// \brief Speculate a conditional basic block flattening the CFG.
@@ -1412,8 +1412,8 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *ThenBB) {
SmallDenseMap<Instruction *, unsigned, 4> SinkCandidateUseCounts;
unsigned SpeculationCost = 0;
- Value *SpeculatedStoreValue = 0;
- StoreInst *SpeculatedStore = 0;
+ Value *SpeculatedStoreValue = nullptr;
+ StoreInst *SpeculatedStore = nullptr;
for (BasicBlock::iterator BBI = ThenBB->begin(),
BBE = std::prev(ThenBB->end());
BBI != BBE; ++BBI) {
@@ -1621,7 +1621,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *DL) {
// constants.
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantInt *CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i));
- if (CB == 0 || !CB->getType()->isIntegerTy(1)) continue;
+ if (!CB || !CB->getType()->isIntegerTy(1)) continue;
// Okay, we now know that all edges from PredBB should be revectored to
// branch to RealDest.
@@ -1746,7 +1746,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
// If we folded the first phi, PN dangles at this point. Refresh it. If
// we ran out of PHIs then we simplified them all.
PN = dyn_cast<PHINode>(BB->begin());
- if (PN == 0) return true;
+ if (!PN) return true;
// Don't fold i1 branches on PHIs which contain binary operators. These can
// often be turned into switches and other things.
@@ -1760,11 +1760,11 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
// instructions in the predecessor blocks can be promoted as well. If
// not, we won't be able to get rid of the control flow, so it's not
// worth promoting to select instructions.
- BasicBlock *DomBlock = 0;
+ BasicBlock *DomBlock = nullptr;
BasicBlock *IfBlock1 = PN->getIncomingBlock(0);
BasicBlock *IfBlock2 = PN->getIncomingBlock(1);
if (cast<BranchInst>(IfBlock1->getTerminator())->isConditional()) {
- IfBlock1 = 0;
+ IfBlock1 = nullptr;
} else {
DomBlock = *pred_begin(IfBlock1);
for (BasicBlock::iterator I = IfBlock1->begin();!isa<TerminatorInst>(I);++I)
@@ -1777,7 +1777,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *DL) {
}
if (cast<BranchInst>(IfBlock2->getTerminator())->isConditional()) {
- IfBlock2 = 0;
+ IfBlock2 = nullptr;
} else {
DomBlock = *pred_begin(IfBlock2);
for (BasicBlock::iterator I = IfBlock2->begin();!isa<TerminatorInst>(I);++I)
@@ -1960,7 +1960,7 @@ static bool checkCSEInPredecessor(Instruction *Inst, BasicBlock *PB) {
bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
BasicBlock *BB = BI->getParent();
- Instruction *Cond = 0;
+ Instruction *Cond = nullptr;
if (BI->isConditional())
Cond = dyn_cast<Instruction>(BI->getCondition());
else {
@@ -1986,12 +1986,12 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
}
}
- if (Cond == 0)
+ if (!Cond)
return false;
}
- if (Cond == 0 || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
- Cond->getParent() != BB || !Cond->hasOneUse())
+ if (!Cond || (!isa<CmpInst>(Cond) && !isa<BinaryOperator>(Cond)) ||
+ Cond->getParent() != BB || !Cond->hasOneUse())
return false;
// Only allow this if the condition is a simple instruction that can be
@@ -2006,7 +2006,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// that feeds the branch. We later ensure that any values that _it_ uses
// were also live in the predecessor, so that we don't unnecessarily create
// register pressure or inhibit out-of-order execution.
- Instruction *BonusInst = 0;
+ Instruction *BonusInst = nullptr;
if (&*FrontIt != Cond &&
FrontIt->hasOneUse() && FrontIt->user_back() == Cond &&
isSafeToSpeculativelyExecute(FrontIt)) {
@@ -2041,7 +2041,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// Finally, don't infinitely unroll conditional loops.
BasicBlock *TrueDest = BI->getSuccessor(0);
- BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : 0;
+ BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : nullptr;
if (TrueDest == BB || FalseDest == BB)
return false;
@@ -2053,7 +2053,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
// the common successor, verify that the same value flows in from both
// blocks.
SmallVector<PHINode*, 4> PHIs;
- if (PBI == 0 || PBI->isUnconditional() ||
+ if (!PBI || PBI->isUnconditional() ||
(BI->isConditional() &&
!SafeToMergeTerminators(BI, PBI)) ||
(!BI->isConditional() &&
@@ -2143,7 +2143,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
}
// If we have a bonus inst, clone it into the predecessor block.
- Instruction *NewBonus = 0;
+ Instruction *NewBonus = nullptr;
if (BonusInst) {
NewBonus = BonusInst->clone();
@@ -2219,14 +2219,14 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
MDBuilder(BI->getContext()).
createBranchWeights(MDWeights));
} else
- PBI->setMetadata(LLVMContext::MD_prof, NULL);
+ PBI->setMetadata(LLVMContext::MD_prof, nullptr);
} else {
// Update PHI nodes in the common successors.
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) {
ConstantInt *PBI_C = cast<ConstantInt>(
PHIs[i]->getIncomingValueForBlock(PBI->getParent()));
assert(PBI_C->getType()->isIntegerTy(1));
- Instruction *MergedCond = 0;
+ Instruction *MergedCond = nullptr;
if (PBI->getSuccessor(0) == TrueDest) {
// Create (PBI_Cond and PBI_C) or (!PBI_Cond and BI_Value)
// PBI_C is true: PBI_Cond or (!PBI_Cond and BI_Value)
@@ -2499,16 +2499,16 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
// If TrueBB and FalseBB are equal, only try to preserve one copy of that
// successor.
BasicBlock *KeepEdge1 = TrueBB;
- BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : 0;
+ BasicBlock *KeepEdge2 = TrueBB != FalseBB ? FalseBB : nullptr;
// Then remove the rest.
for (unsigned I = 0, E = OldTerm->getNumSuccessors(); I != E; ++I) {
BasicBlock *Succ = OldTerm->getSuccessor(I);
// Make sure only to keep exactly one copy of each edge.
if (Succ == KeepEdge1)
- KeepEdge1 = 0;
+ KeepEdge1 = nullptr;
else if (Succ == KeepEdge2)
- KeepEdge2 = 0;
+ KeepEdge2 = nullptr;
else
Succ->removePredecessor(OldTerm->getParent());
}
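The KeepEdge1/KeepEdge2 bookkeeping nulls each pointer after its first match, so any later duplicate falls through to removePredecessor. The same idea in a small self-contained sketch (Block is a stand-in type):

#include <cstdio>
#include <vector>

struct Block { const char *Name; };

int main() {
  Block T{"true.bb"}, F{"false.bb"};
  std::vector<Block *> Succs = {&T, &F, &T};   // Duplicate edge to T.

  Block *KeepEdge1 = &T;
  Block *KeepEdge2 = (&T != &F) ? &F : nullptr;

  for (Block *S : Succs) {
    if (S == KeepEdge1)
      KeepEdge1 = nullptr;   // First copy of this edge is kept.
    else if (S == KeepEdge2)
      KeepEdge2 = nullptr;
    else
      std::printf("removing duplicate edge to %s\n", S->Name);
  }
  // A KeepEdge pointer that is still non-null was never seen at all.
}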
@@ -2517,7 +2517,7 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
Builder.SetCurrentDebugLocation(OldTerm->getDebugLoc());
// Insert an appropriate new terminator.
- if ((KeepEdge1 == 0) && (KeepEdge2 == 0)) {
+ if (!KeepEdge1 && !KeepEdge2) {
if (TrueBB == FalseBB)
// We were only looking for one successor, and it was present.
// Create an unconditional branch to it.
@@ -2539,7 +2539,7 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond,
// One of the selected values was a successor, but the other wasn't.
// Insert an unconditional branch to the one that was found;
// the edge to the one that wasn't must be unreachable.
- if (KeepEdge1 == 0)
+ if (!KeepEdge1)
// Only TrueBB was found.
Builder.CreateBr(TrueBB);
else
@@ -2640,7 +2640,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
// 'V' and this block is the default case for the switch. In this case we can
// fold the compared value into the switch to simplify things.
BasicBlock *Pred = BB->getSinglePredecessor();
- if (Pred == 0 || !isa<SwitchInst>(Pred->getTerminator())) return false;
+ if (!Pred || !isa<SwitchInst>(Pred->getTerminator())) return false;
SwitchInst *SI = cast<SwitchInst>(Pred->getTerminator());
if (SI->getCondition() != V)
@@ -2682,7 +2682,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
// the block.
BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
PHINode *PHIUse = dyn_cast<PHINode>(ICI->user_back());
- if (PHIUse == 0 || PHIUse != &SuccBlock->front() ||
+ if (PHIUse == nullptr || PHIUse != &SuccBlock->front() ||
isa<PHINode>(++BasicBlock::iterator(PHIUse)))
return false;
@@ -2734,16 +2734,16 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL,
IRBuilder<> &Builder) {
Instruction *Cond = dyn_cast<Instruction>(BI->getCondition());
- if (Cond == 0) return false;
+ if (!Cond) return false;
// Change br (X == 0 | X == 1), T, F into a switch instruction.
// If this is a bunch of seteq's or'd together, or if it's a bunch of
// 'setne's and'ed together, collect them.
- Value *CompVal = 0;
+ Value *CompVal = nullptr;
std::vector<ConstantInt*> Values;
bool TrueWhenEqual = true;
- Value *ExtraCase = 0;
+ Value *ExtraCase = nullptr;
unsigned UsedICmps = 0;
if (Cond->getOpcode() == Instruction::Or) {
@@ -2756,7 +2756,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *DL,
}
// If we didn't find a multiply-compared value, fail.
- if (CompVal == 0) return false;
+ if (!CompVal) return false;
// Avoid turning single icmps into a switch.
if (UsedICmps <= 1)
@@ -3051,7 +3051,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) {
// Find the most popular block.
unsigned MaxPop = 0;
unsigned MaxIndex = 0;
- BasicBlock *MaxBlock = 0;
+ BasicBlock *MaxBlock = nullptr;
for (std::map<BasicBlock*, std::pair<unsigned, unsigned> >::iterator
I = Popularity.begin(), E = Popularity.end(); I != E; ++I) {
if (I->second.first > MaxPop ||
@@ -3242,13 +3242,13 @@ static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue,
BasicBlock *BB,
int *PhiIndex) {
if (BB->getFirstNonPHIOrDbg() != BB->getTerminator())
- return NULL; // BB must be empty to be a candidate for simplification.
+ return nullptr; // BB must be empty to be a candidate for simplification.
if (!BB->getSinglePredecessor())
- return NULL; // BB must be dominated by the switch.
+ return nullptr; // BB must be dominated by the switch.
BranchInst *Branch = dyn_cast<BranchInst>(BB->getTerminator());
if (!Branch || !Branch->isUnconditional())
- return NULL; // Terminator must be unconditional branch.
+ return nullptr; // Terminator must be unconditional branch.
BasicBlock *Succ = Branch->getSuccessor(0);
@@ -3264,7 +3264,7 @@ static PHINode *FindPHIForConditionForwarding(ConstantInt *CaseValue,
return PHI;
}
- return NULL;
+ return nullptr;
}
/// ForwardSwitchConditionToPHI - Try to forward the condition of a switch
@@ -3337,12 +3337,12 @@ ConstantFold(Instruction *I,
if (SelectInst *Select = dyn_cast<SelectInst>(I)) {
Constant *A = LookupConstant(Select->getCondition(), ConstantPool);
if (!A)
- return 0;
+ return nullptr;
if (A->isAllOnesValue())
return LookupConstant(Select->getTrueValue(), ConstantPool);
if (A->isNullValue())
return LookupConstant(Select->getFalseValue(), ConstantPool);
- return 0;
+ return nullptr;
}
SmallVector<Constant *, 4> COps;
@@ -3350,7 +3350,7 @@ ConstantFold(Instruction *I,
if (Constant *A = LookupConstant(I->getOperand(N), ConstantPool))
COps.push_back(A);
else
- return 0;
+ return nullptr;
}
if (CmpInst *Cmp = dyn_cast<CmpInst>(I))
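ConstantFold can only proceed when every operand resolves through the constant pool, so a lookup miss is reported as nullptr and propagates straight out. A sketch of that lookup with stand-in types (string/int instead of Value*/Constant*):

#include <cstdio>
#include <map>
#include <string>

static const int *lookupConstant(const std::string &Name,
                                 const std::map<std::string, int> &Pool) {
  auto It = Pool.find(Name);
  return It == Pool.end() ? nullptr : &It->second;   // Miss -> nullptr.
}

int main() {
  std::map<std::string, int> Pool = {{"x", 7}};
  if (const int *C = lookupConstant("x", Pool))
    std::printf("folded to %d\n", *C);
  if (!lookupConstant("y", Pool))
    std::puts("cannot fold: operand is not a known constant");
}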
@@ -3493,7 +3493,8 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
const DataLayout *DL)
- : SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {
+ : SingleValue(nullptr), BitMap(nullptr), BitMapElementTy(nullptr),
+ Array(nullptr) {
assert(Values.size() && "Can't build lookup table without values!");
assert(TableSize >= Values.size() && "Can't fit values in table!");
@@ -3514,7 +3515,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
TableContents[Idx] = CaseRes;
if (CaseRes != SingleValue)
- SingleValue = 0;
+ SingleValue = nullptr;
}
// Fill in any holes in the table with the default result.
@@ -3527,7 +3528,7 @@ SwitchLookupTable::SwitchLookupTable(Module &M,
}
if (DefaultValue != SingleValue)
- SingleValue = 0;
+ SingleValue = nullptr;
}
// If each element in the table contains the same value, we only need to store
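SingleValue starts at the first case result and is nulled as soon as any table entry (or the default) disagrees; a pointer that survives the scan proves the whole table holds one constant. In miniature:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Table = {5, 5, 5};
  const int *SingleValue = Table.empty() ? nullptr : &Table[0];
  for (const int &V : Table)
    if (SingleValue && V != *SingleValue)
      SingleValue = nullptr;   // Mixed contents: no single-constant shortcut.
  if (SingleValue)
    std::printf("table collapses to the constant %d\n", *SingleValue);
}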
@@ -3697,7 +3698,7 @@ static bool SwitchToLookupTable(SwitchInst *SI,
ConstantInt *MinCaseVal = CI.getCaseValue();
ConstantInt *MaxCaseVal = CI.getCaseValue();
- BasicBlock *CommonDest = 0;
+ BasicBlock *CommonDest = nullptr;
typedef SmallVector<std::pair<ConstantInt*, Constant*>, 4> ResultListTy;
SmallDenseMap<PHINode*, ResultListTy> ResultLists;
SmallDenseMap<PHINode*, Constant*> DefaultResults;
@@ -3742,8 +3743,8 @@ static bool SwitchToLookupTable(SwitchInst *SI,
SmallVector<std::pair<PHINode*, Constant*>, 4> DefaultResultsList;
bool HasDefaultResults = false;
if (TableHasHoles) {
- HasDefaultResults = GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest,
- DefaultResultsList, DL);
+ HasDefaultResults = GetCaseResults(SI, nullptr, SI->getDefaultDest(),
+ &CommonDest, DefaultResultsList, DL);
}
bool NeedMask = (TableHasHoles && !HasDefaultResults);
if (NeedMask) {
@@ -4039,8 +4040,8 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
// from BI. We know that the condbr dominates the two blocks, so see if
// there is any identical code in the "then" and "else" blocks. If so, we
// can hoist it up to the branching block.
- if (BI->getSuccessor(0)->getSinglePredecessor() != 0) {
- if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
+ if (BI->getSuccessor(0)->getSinglePredecessor()) {
+ if (BI->getSuccessor(1)->getSinglePredecessor()) {
if (HoistThenElseCodeToIf(BI))
return SimplifyCFG(BB, TTI, DL) | true;
} else {
@@ -4052,7 +4053,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) {
if (SpeculativelyExecuteBB(BI, BI->getSuccessor(0)))
return SimplifyCFG(BB, TTI, DL) | true;
}
- } else if (BI->getSuccessor(1)->getSinglePredecessor() != 0) {
+ } else if (BI->getSuccessor(1)->getSinglePredecessor()) {
// If Successor #0 has multiple preds, we may be able to conditionally
// execute Successor #1 if it branches to successor #0.
TerminatorInst *Succ1TI = BI->getSuccessor(1)->getTerminator();
diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp
index f78433bb3d..b284e6f6c1 100644
--- a/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -56,14 +56,14 @@ namespace {
public:
SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, LPPassManager *LPM,
- SmallVectorImpl<WeakVH> &Dead, IVUsers *IVU = NULL) :
+ SmallVectorImpl<WeakVH> &Dead, IVUsers *IVU = nullptr) :
L(Loop),
LI(LPM->getAnalysisIfAvailable<LoopInfo>()),
SE(SE),
DeadInsts(Dead),
Changed(false) {
DataLayoutPass *DLP = LPM->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
assert(LI && "IV simplification requires LoopInfo");
}
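The DLP ? &DLP->getDataLayout() : nullptr line is one instance of a pattern this patch touches many times: an analysis that may be absent yields either a real pointer or nullptr, and all later code guards on the pointer. A standalone sketch with stand-in types:

#include <cstdio>

struct DataLayout { unsigned PointerBits; };
struct DataLayoutPass {
  DataLayout DL;
  DataLayout &getDataLayout() { return DL; }
};

int main() {
  // Stand-in for getAnalysisIfAvailable<DataLayoutPass>(), which may
  // legitimately produce null when the pass was never scheduled.
  DataLayoutPass *DLP = nullptr;
  const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
  std::printf("DataLayout is %s\n", DL ? "available" : "absent");
}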
@@ -72,7 +72,7 @@ namespace {
/// Iteratively perform simplification on a worklist of users of the
/// specified induction variable. This is the top-level driver that applies
/// all simplifications to users of an IV.
- void simplifyUsers(PHINode *CurrIV, IVVisitor *V = NULL);
+ void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);
Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);
@@ -95,25 +95,25 @@ namespace {
/// be folded (in case more folding opportunities have been exposed).
/// Otherwise return null.
Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
- Value *IVSrc = 0;
+ Value *IVSrc = nullptr;
unsigned OperIdx = 0;
- const SCEV *FoldedExpr = 0;
+ const SCEV *FoldedExpr = nullptr;
switch (UseInst->getOpcode()) {
default:
- return 0;
+ return nullptr;
case Instruction::UDiv:
case Instruction::LShr:
// We're only interested in the case where we know something about
// the numerator and have a constant denominator.
if (IVOperand != UseInst->getOperand(OperIdx) ||
!isa<ConstantInt>(UseInst->getOperand(1)))
- return 0;
+ return nullptr;
// Attempt to fold a binary operator with constant operand.
// e.g. ((I + 1) >> 2) => I >> 2
if (!isa<BinaryOperator>(IVOperand)
|| !isa<ConstantInt>(IVOperand->getOperand(1)))
- return 0;
+ return nullptr;
IVSrc = IVOperand->getOperand(0);
// IVSrc must be the (SCEVable) IV, since the other operand is const.
@@ -124,7 +124,7 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
// Get a constant for the divisor. See createSCEV.
uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
if (D->getValue().uge(BitWidth))
- return 0;
+ return nullptr;
D = ConstantInt::get(UseInst->getContext(),
APInt::getOneBitSet(BitWidth, D->getZExtValue()));
@@ -133,11 +133,11 @@ Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand)
}
// We have something that might fold its operand. Compare SCEVs.
if (!SE->isSCEVable(UseInst->getType()))
- return 0;
+ return nullptr;
// Bypass the operand if SCEV can prove it has no effect.
if (SE->getSCEV(UseInst) != FoldedExpr)
- return 0;
+ return nullptr;
DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
<< " -> " << *UseInst << '\n');
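The UDiv/LShr arm only fires for a constant operand, and the uge(BitWidth) check above rejects shift amounts that would be undefined. The identity it relies on, checked in plain C++ as an illustration rather than the real SCEV path:

#include <cstdint>
#include <cstdio>

// For unsigned X and K < 32: X / (1u << K) == X >> K.
static bool udivToLshr(uint32_t X, uint32_t K, uint32_t &Out) {
  if (K >= 32)
    return false;   // Mirrors the uge(BitWidth) bail-out.
  Out = X >> K;
  return true;
}

int main() {
  uint32_t R;
  if (udivToLshr(100, 2, R))
    std::printf("100 udiv 4 = %u\n", R);   // Prints 25.
}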
@@ -283,8 +283,8 @@ Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser,
return IVUser;
// Find a branch guarded by the overflow check.
- BranchInst *Branch = 0;
- Instruction *AddVal = 0;
+ BranchInst *Branch = nullptr;
+ Instruction *AddVal = nullptr;
for (User *U : II->users()) {
if (ExtractValueInst *ExtractInst = dyn_cast<ExtractValueInst>(U)) {
if (ExtractInst->getNumIndices() != 1)
diff --git a/lib/Transforms/Utils/SimplifyInstructions.cpp b/lib/Transforms/Utils/SimplifyInstructions.cpp
index 8241b07eeb..c62aa663f6 100644
--- a/lib/Transforms/Utils/SimplifyInstructions.cpp
+++ b/lib/Transforms/Utils/SimplifyInstructions.cpp
@@ -48,9 +48,9 @@ namespace {
bool runOnFunction(Function &F) override {
const DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : 0;
+ const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- const DataLayout *DL = DLP ? &DLP->getDataLayout() : 0;
+ const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
const TargetLibraryInfo *TLI = &getAnalysis<TargetLibraryInfo>();
SmallPtrSet<const Instruction*, 8> S1, S2, *ToSimplify = &S1, *Next = &S2;
bool Changed = false;
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index b5bc391cc5..903fbb5899 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -75,7 +75,7 @@ public:
// We never change the calling convention.
if (!ignoreCallingConv() && CI->getCallingConv() != llvm::CallingConv::C)
- return NULL;
+ return nullptr;
return callOptimizer(CI->getCalledFunction(), CI, B);
}
@@ -186,14 +186,14 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
!FT->getParamType(1)->isPointerTy() ||
FT->getParamType(2) != DL->getIntPtrType(Context) ||
FT->getParamType(3) != DL->getIntPtrType(Context))
- return 0;
+ return nullptr;
if (isFoldable(3, 2, false)) {
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
- return 0;
+ return nullptr;
}
};
@@ -210,14 +210,14 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
!FT->getParamType(1)->isPointerTy() ||
FT->getParamType(2) != DL->getIntPtrType(Context) ||
FT->getParamType(3) != DL->getIntPtrType(Context))
- return 0;
+ return nullptr;
if (isFoldable(3, 2, false)) {
B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
- return 0;
+ return nullptr;
}
};
@@ -234,7 +234,7 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
!FT->getParamType(1)->isIntegerTy() ||
FT->getParamType(2) != DL->getIntPtrType(Context) ||
FT->getParamType(3) != DL->getIntPtrType(Context))
- return 0;
+ return nullptr;
if (isFoldable(3, 2, false)) {
Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(),
@@ -242,7 +242,7 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
- return 0;
+ return nullptr;
}
};
@@ -260,7 +260,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
FT->getParamType(2) != DL->getIntPtrType(Context))
- return 0;
+ return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) // __strcpy_chk(x,x) -> x
@@ -277,10 +277,10 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
} else {
// Maybe we can still fold __strcpy_chk to __memcpy_chk.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
// This optimization requires DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
Value *Ret =
EmitMemCpyChk(Dst, Src,
@@ -288,7 +288,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
CI->getArgOperand(2), B, DL, TLI);
return Ret;
}
- return 0;
+ return nullptr;
}
};
@@ -306,12 +306,12 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
- return 0;
+ return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
Value *StrLen = EmitStrLen(Src, B, DL, TLI);
- return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : nullptr;
}
// If a) we don't have any length information, or b) we know this will
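The stpcpy(x,x) -> x + strlen(x) fold above rests on stpcpy returning a pointer to the copied string's terminator; when source and destination alias, no copying is needed at all. Checked directly in plain C:

#include <cstdio>
#include <cstring>

int main() {
  char Buf[] = "hello";
  char *End = Buf + std::strlen(Buf);   // What the fold produces.
  std::printf("terminator at offset %td\n", End - Buf);   // Prints 5.
}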
@@ -325,10 +325,10 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
} else {
// Maybe we can still fold __stpcpy_chk to __memcpy_chk.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
// This optimization requires DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
Type *PT = FT->getParamType(0);
Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
@@ -336,10 +336,10 @@ struct StpCpyChkOpt : public InstFortifiedLibCallOptimization {
ConstantInt::get(DL->getIntPtrType(PT),
Len - 1));
if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, DL, TLI))
- return 0;
+ return nullptr;
return DstEnd;
}
- return 0;
+ return nullptr;
}
};
@@ -357,7 +357,7 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
!FT->getParamType(2)->isIntegerTy() ||
FT->getParamType(3) != DL->getIntPtrType(Context))
- return 0;
+ return nullptr;
if (isFoldable(3, 2, false)) {
Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
@@ -365,7 +365,7 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
Name.substr(2, 7));
return Ret;
}
- return 0;
+ return nullptr;
}
};
@@ -382,7 +382,7 @@ struct StrCatOpt : public LibCallOptimization {
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType())
- return 0;
+ return nullptr;
// Extract some information from the instruction
Value *Dst = CI->getArgOperand(0);
@@ -390,7 +390,7 @@ struct StrCatOpt : public LibCallOptimization {
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
--Len; // Unbias length.
// Handle the simple, do-nothing case: strcat(x, "") -> x
@@ -398,7 +398,7 @@ struct StrCatOpt : public LibCallOptimization {
return Dst;
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
return emitStrLenMemCpy(Src, Dst, Len, B);
}
@@ -409,7 +409,7 @@ struct StrCatOpt : public LibCallOptimization {
// memory is to be moved to. We just generate a call to strlen.
Value *DstLen = EmitStrLen(Dst, B, DL, TLI);
if (!DstLen)
- return 0;
+ return nullptr;
// Now that we have the destination's length, we must index into the
// destination's pointer to get the actual memcpy destination (end of
@@ -434,7 +434,7 @@ struct StrNCatOpt : public StrCatOpt {
FT->getParamType(0) != FT->getReturnType() ||
FT->getParamType(1) != FT->getReturnType() ||
!FT->getParamType(2)->isIntegerTy())
- return 0;
+ return nullptr;
// Extract some information from the instruction
Value *Dst = CI->getArgOperand(0);
@@ -445,11 +445,11 @@ struct StrNCatOpt : public StrCatOpt {
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Len = LengthArg->getZExtValue();
else
- return 0;
+ return nullptr;
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
- if (SrcLen == 0) return 0;
+ if (SrcLen == 0) return nullptr;
--SrcLen; // Unbias length.
// Handle the simple, do-nothing cases:
@@ -458,10 +458,10 @@ struct StrNCatOpt : public StrCatOpt {
if (SrcLen == 0 || Len == 0) return Dst;
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
// We don't optimize this case
- if (Len < SrcLen) return 0;
+ if (Len < SrcLen) return nullptr;
// strncat(x, s, c) -> strcat(x, s)
// s is constant so the strcat can be optimized further
@@ -478,20 +478,20 @@ struct StrChrOpt : public LibCallOptimization {
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
!FT->getParamType(1)->isIntegerTy(32))
- return 0;
+ return nullptr;
Value *SrcStr = CI->getArgOperand(0);
// If the second operand is non-constant, see if we can compute the length
// of the input string and turn this into memchr.
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
- if (CharC == 0) {
+ if (!CharC) {
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
uint64_t Len = GetStringLength(SrcStr);
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
- return 0;
+ return nullptr;
return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
ConstantInt::get(DL->getIntPtrType(*Context), Len),
@@ -504,7 +504,7 @@ struct StrChrOpt : public LibCallOptimization {
if (!getConstantStringInfo(SrcStr, Str)) {
if (DL && CharC->isZero()) // strchr(p, 0) -> p + strlen(p)
return B.CreateGEP(SrcStr, EmitStrLen(SrcStr, B, DL, TLI), "strchr");
- return 0;
+ return nullptr;
}
// Compute the offset, make sure to handle the case when we're searching for
@@ -528,21 +528,21 @@ struct StrRChrOpt : public LibCallOptimization {
FT->getReturnType() != B.getInt8PtrTy() ||
FT->getParamType(0) != FT->getReturnType() ||
!FT->getParamType(1)->isIntegerTy(32))
- return 0;
+ return nullptr;
Value *SrcStr = CI->getArgOperand(0);
ConstantInt *CharC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
// Cannot fold anything if we're not looking for a constant.
if (!CharC)
- return 0;
+ return nullptr;
StringRef Str;
if (!getConstantStringInfo(SrcStr, Str)) {
// strrchr(s, 0) -> strchr(s, 0)
if (DL && CharC->isZero())
return EmitStrChr(SrcStr, '\0', B, DL, TLI);
- return 0;
+ return nullptr;
}
// Compute the offset.
@@ -565,7 +565,7 @@ struct StrCmpOpt : public LibCallOptimization {
!FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
+ return nullptr;
Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strcmp(x,x) -> 0
@@ -591,14 +591,14 @@ struct StrCmpOpt : public LibCallOptimization {
uint64_t Len2 = GetStringLength(Str2P);
if (Len1 && Len2) {
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
return EmitMemCmp(Str1P, Str2P,
ConstantInt::get(DL->getIntPtrType(*Context),
std::min(Len1, Len2)), B, DL, TLI);
}
- return 0;
+ return nullptr;
}
};
@@ -612,7 +612,7 @@ struct StrNCmpOpt : public LibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getParamType(2)->isIntegerTy())
- return 0;
+ return nullptr;
Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1);
if (Str1P == Str2P) // strncmp(x,x,n) -> 0
@@ -623,7 +623,7 @@ struct StrNCmpOpt : public LibCallOptimization {
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(CI->getArgOperand(2)))
Length = LengthArg->getZExtValue();
else
- return 0;
+ return nullptr;
if (Length == 0) // strncmp(x,y,0) -> 0
return ConstantInt::get(CI->getType(), 0);
@@ -649,7 +649,7 @@ struct StrNCmpOpt : public LibCallOptimization {
if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x
return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType());
- return 0;
+ return nullptr;
}
};
@@ -662,18 +662,18 @@ struct StrCpyOpt : public LibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
+ return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) // strcpy(x,x) -> x
return Src;
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
@@ -692,20 +692,20 @@ struct StpCpyOpt: public LibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy())
- return 0;
+ return nullptr;
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x)
Value *StrLen = EmitStrLen(Src, B, DL, TLI);
- return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0;
+ return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : nullptr;
}
// See if we can get the length of the input string.
uint64_t Len = GetStringLength(Src);
- if (Len == 0) return 0;
+ if (Len == 0) return nullptr;
Type *PT = FT->getParamType(0);
Value *LenV = ConstantInt::get(DL->getIntPtrType(PT), Len);
@@ -728,7 +728,7 @@ struct StrNCpyOpt : public LibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getParamType(2)->isIntegerTy())
- return 0;
+ return nullptr;
Value *Dst = CI->getArgOperand(0);
Value *Src = CI->getArgOperand(1);
@@ -736,7 +736,7 @@ struct StrNCpyOpt : public LibCallOptimization {
// See if we can get the length of the input string.
uint64_t SrcLen = GetStringLength(Src);
- if (SrcLen == 0) return 0;
+ if (SrcLen == 0) return nullptr;
--SrcLen;
if (SrcLen == 0) {
@@ -749,15 +749,15 @@ struct StrNCpyOpt : public LibCallOptimization {
if (ConstantInt *LengthArg = dyn_cast<ConstantInt>(LenOp))
Len = LengthArg->getZExtValue();
else
- return 0;
+ return nullptr;
if (Len == 0) return Dst; // strncpy(x, y, 0) -> x
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
// Let strncpy handle the zero padding
- if (Len > SrcLen+1) return 0;
+ if (Len > SrcLen+1) return nullptr;
Type *PT = FT->getParamType(0);
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
@@ -776,7 +776,7 @@ struct StrLenOpt : public LibCallOptimization {
if (FT->getNumParams() != 1 ||
FT->getParamType(0) != B.getInt8PtrTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
Value *Src = CI->getArgOperand(0);
@@ -788,7 +788,7 @@ struct StrLenOpt : public LibCallOptimization {
// strlen(x) == 0 --> *x == 0
if (isOnlyUsedInZeroEqualityComparison(CI))
return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType());
- return 0;
+ return nullptr;
}
};
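The strlen(x) == 0 --> *x == 0 rewrite written out in plain C++: a C string is empty exactly when its first byte is NUL, so the linear scan becomes a single load whenever the result only feeds a zero-equality test:

#include <cstdio>

static bool isEmptyFast(const char *S) {
  return *S == '\0';   // One load instead of a full strlen scan.
}

int main() {
  std::printf("%d %d\n", isEmptyFast(""), isEmptyFast("abc"));   // 1 0
}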
@@ -800,7 +800,7 @@ struct StrPBrkOpt : public LibCallOptimization {
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
FT->getReturnType() != FT->getParamType(0))
- return 0;
+ return nullptr;
StringRef S1, S2;
bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
@@ -824,7 +824,7 @@ struct StrPBrkOpt : public LibCallOptimization {
if (DL && HasS2 && S2.size() == 1)
return EmitStrChr(CI->getArgOperand(0), S2[0], B, DL, TLI);
- return 0;
+ return nullptr;
}
};
@@ -835,7 +835,7 @@ struct StrToOpt : public LibCallOptimization {
if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy())
- return 0;
+ return nullptr;
Value *EndPtr = CI->getArgOperand(1);
if (isa<ConstantPointerNull>(EndPtr)) {
@@ -844,7 +844,7 @@ struct StrToOpt : public LibCallOptimization {
CI->addAttribute(1, Attribute::NoCapture);
}
- return 0;
+ return nullptr;
}
};
@@ -856,7 +856,7 @@ struct StrSpnOpt : public LibCallOptimization {
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
StringRef S1, S2;
bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
@@ -874,7 +874,7 @@ struct StrSpnOpt : public LibCallOptimization {
return ConstantInt::get(CI->getType(), Pos);
}
- return 0;
+ return nullptr;
}
};
@@ -886,7 +886,7 @@ struct StrCSpnOpt : public LibCallOptimization {
FT->getParamType(0) != B.getInt8PtrTy() ||
FT->getParamType(1) != FT->getParamType(0) ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
StringRef S1, S2;
bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1);
@@ -907,7 +907,7 @@ struct StrCSpnOpt : public LibCallOptimization {
if (DL && HasS2 && S2.empty())
return EmitStrLen(CI->getArgOperand(0), B, DL, TLI);
- return 0;
+ return nullptr;
}
};
@@ -919,7 +919,7 @@ struct StrStrOpt : public LibCallOptimization {
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isPointerTy())
- return 0;
+ return nullptr;
// fold strstr(x, x) -> x.
if (CI->getArgOperand(0) == CI->getArgOperand(1))
@@ -929,11 +929,11 @@ struct StrStrOpt : public LibCallOptimization {
if (DL && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) {
Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, DL, TLI);
if (!StrLen)
- return 0;
+ return nullptr;
Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1),
StrLen, B, DL, TLI);
if (!StrNCmp)
- return 0;
+ return nullptr;
for (auto UI = CI->user_begin(), UE = CI->user_end(); UI != UE;) {
ICmpInst *Old = cast<ICmpInst>(*UI++);
Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
@@ -969,9 +969,9 @@ struct StrStrOpt : public LibCallOptimization {
// fold strstr(x, "y") -> strchr(x, 'y').
if (HasStr2 && ToFindStr.size() == 1) {
Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, DL, TLI);
- return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0;
+ return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : nullptr;
}
- return 0;
+ return nullptr;
}
};
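The strstr(x, "y") -> strchr(x, 'y') fold exploits the fact that a one-character needle degenerates substring search into character search, which libc implements far more cheaply. A quick check that both calls agree:

#include <cstdio>
#include <cstring>

int main() {
  const char *Hay = "needle in haystack";
  std::printf("strstr: %s\n", std::strstr(Hay, "h"));
  std::printf("strchr: %s\n", std::strchr(Hay, 'h'));   // Same position.
}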
@@ -982,7 +982,7 @@ struct MemCmpOpt : public LibCallOptimization {
if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy(32))
- return 0;
+ return nullptr;
Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1);
@@ -991,7 +991,7 @@ struct MemCmpOpt : public LibCallOptimization {
// Make sure we have a constant length.
ConstantInt *LenC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
- if (!LenC) return 0;
+ if (!LenC) return nullptr;
uint64_t Len = LenC->getZExtValue();
if (Len == 0) // memcmp(s1,s2,0) -> 0
@@ -1012,7 +1012,7 @@ struct MemCmpOpt : public LibCallOptimization {
getConstantStringInfo(RHS, RHSStr)) {
// Make sure we're not reading out-of-bounds memory.
if (Len > LHSStr.size() || Len > RHSStr.size())
- return 0;
+ return nullptr;
// Fold the memcmp and normalize the result. This way we get consistent
// results across multiple platforms.
uint64_t Ret = 0;
@@ -1024,7 +1024,7 @@ struct MemCmpOpt : public LibCallOptimization {
return ConstantInt::get(CI->getType(), Ret);
}
- return 0;
+ return nullptr;
}
};
@@ -1032,14 +1032,14 @@ struct MemCpyOpt : public LibCallOptimization {
Value *callOptimizer(Function *Callee, CallInst *CI,
IRBuilder<> &B) override {
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
FT->getParamType(2) != DL->getIntPtrType(*Context))
- return 0;
+ return nullptr;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
@@ -1052,14 +1052,14 @@ struct MemMoveOpt : public LibCallOptimization {
Value *callOptimizer(Function *Callee, CallInst *CI,
IRBuilder<> &B) override {
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
FT->getParamType(2) != DL->getIntPtrType(*Context))
- return 0;
+ return nullptr;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
@@ -1072,14 +1072,14 @@ struct MemSetOpt : public LibCallOptimization {
Value *callOptimizer(Function *Callee, CallInst *CI,
IRBuilder<> &B) override {
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
FT->getParamType(2) != DL->getIntPtrType(FT->getParamType(0)))
- return 0;
+ return nullptr;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
@@ -1103,21 +1103,21 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() ||
!FT->getParamType(0)->isDoubleTy())
- return 0;
+ return nullptr;
if (CheckRetType) {
// Check if all the uses of a function like 'sin' are converted to float.
for (User *U : CI->users()) {
FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
- if (Cast == 0 || !Cast->getType()->isFloatTy())
- return 0;
+ if (!Cast || !Cast->getType()->isFloatTy())
+ return nullptr;
}
}
// If this is something like 'floor((double)floatval)', convert to floorf.
FPExtInst *Cast = dyn_cast<FPExtInst>(CI->getArgOperand(0));
- if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy())
- return 0;
+ if (!Cast || !Cast->getOperand(0)->getType()->isFloatTy())
+ return nullptr;
// floor((double)floatval) -> (double)floorf(floatval)
Value *V = Cast->getOperand(0);
@@ -1138,15 +1138,15 @@ struct BinaryDoubleFPOpt : public LibCallOptimization {
if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
!FT->getParamType(0)->isFloatingPointTy())
- return 0;
+ return nullptr;
if (CheckRetType) {
// Check if all the uses of a function like 'fmin/fmax' are converted to
// float.
for (User *U : CI->users()) {
FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
- if (Cast == 0 || !Cast->getType()->isFloatTy())
- return 0;
+ if (!Cast || !Cast->getType()->isFloatTy())
+ return nullptr;
}
}
@@ -1154,13 +1154,13 @@ struct BinaryDoubleFPOpt : public LibCallOptimization {
// we convert it to fminf.
FPExtInst *Cast1 = dyn_cast<FPExtInst>(CI->getArgOperand(0));
FPExtInst *Cast2 = dyn_cast<FPExtInst>(CI->getArgOperand(1));
- if (Cast1 == 0 || !Cast1->getOperand(0)->getType()->isFloatTy() ||
- Cast2 == 0 || !Cast2->getOperand(0)->getType()->isFloatTy())
- return 0;
+ if (!Cast1 || !Cast1->getOperand(0)->getType()->isFloatTy() ||
+ !Cast2 || !Cast2->getOperand(0)->getType()->isFloatTy())
+ return nullptr;
// fmin((double)floatval1, (double)floatval2)
// -> (double)fmin(floatval1, floatval2)
- Value *V = NULL;
+ Value *V = nullptr;
Value *V1 = Cast1->getOperand(0);
Value *V2 = Cast2->getOperand(0);
V = EmitBinaryFloatFnCall(V1, V2, Callee->getName(), B,
@@ -1180,7 +1180,7 @@ struct CosOpt : public UnsafeFPLibCallOptimization {
CosOpt(bool UnsafeFPShrink) : UnsafeFPLibCallOptimization(UnsafeFPShrink) {}
Value *callOptimizer(Function *Callee, CallInst *CI,
IRBuilder<> &B) override {
- Value *Ret = NULL;
+ Value *Ret = nullptr;
if (UnsafeFPShrink && Callee->getName() == "cos" &&
TLI->has(LibFunc::cosf)) {
UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
@@ -1208,7 +1208,7 @@ struct PowOpt : public UnsafeFPLibCallOptimization {
PowOpt(bool UnsafeFPShrink) : UnsafeFPLibCallOptimization(UnsafeFPShrink) {}
Value *callOptimizer(Function *Callee, CallInst *CI,
IRBuilder<> &B) override {
- Value *Ret = NULL;
+ Value *Ret = nullptr;
if (UnsafeFPShrink && Callee->getName() == "pow" &&
TLI->has(LibFunc::powf)) {
UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
@@ -1242,7 +1242,7 @@ struct PowOpt : public UnsafeFPLibCallOptimization {
}
ConstantFP *Op2C = dyn_cast<ConstantFP>(Op2);
- if (Op2C == 0) return Ret;
+ if (!Op2C) return Ret;
if (Op2C->getValueAPF().isZero()) // pow(x, 0.0) -> 1.0
return ConstantFP::get(CI->getType(), 1.0);
@@ -1275,7 +1275,7 @@ struct PowOpt : public UnsafeFPLibCallOptimization {
if (Op2C->isExactlyValue(-1.0)) // pow(x, -1.0) -> 1.0/x
return B.CreateFDiv(ConstantFP::get(CI->getType(), 1.0),
Op1, "powrecip");
- return 0;
+ return nullptr;
}
};
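The constant-exponent pow folds listed above, verified numerically (exact for these small cases, though a general libm pow may differ in the last ulp):

#include <cmath>
#include <cstdio>

int main() {
  double X = 3.5;
  std::printf("pow(x,0)=%g vs 1=%g\n", std::pow(X, 0.0), 1.0);
  std::printf("pow(x,1)=%g vs x=%g\n", std::pow(X, 1.0), X);
  std::printf("pow(x,2)=%g vs x*x=%g\n", std::pow(X, 2.0), X * X);
  std::printf("pow(x,-1)=%g vs 1/x=%g\n", std::pow(X, -1.0), 1.0 / X);
}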
@@ -1283,7 +1283,7 @@ struct Exp2Opt : public UnsafeFPLibCallOptimization {
Exp2Opt(bool UnsafeFPShrink) : UnsafeFPLibCallOptimization(UnsafeFPShrink) {}
Value *callOptimizer(Function *Callee, CallInst *CI,
IRBuilder<> &B) override {
- Value *Ret = NULL;
+ Value *Ret = nullptr;
if (UnsafeFPShrink && Callee->getName() == "exp2" &&
TLI->has(LibFunc::exp2f)) {
UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true);
@@ -1307,7 +1307,7 @@ struct Exp2Opt : public UnsafeFPLibCallOptimization {
LdExp = LibFunc::ldexp;
if (TLI->has(LdExp)) {
- Value *LdExpArg = 0;
+ Value *LdExpArg = nullptr;
if (SIToFPInst *OpC = dyn_cast<SIToFPInst>(Op)) {
if (OpC->getOperand(0)->getType()->getPrimitiveSizeInBits() <= 32)
LdExpArg = B.CreateSExt(OpC->getOperand(0), B.getInt32Ty());
@@ -1344,7 +1344,7 @@ struct SinCosPiOpt : public LibCallOptimization {
// Make sure the prototype is as expected, otherwise the rest of the
// function is probably invalid and likely to abort.
if (!isTrigLibCall(CI))
- return 0;
+ return nullptr;
Value *Arg = CI->getArgOperand(0);
SmallVector<CallInst *, 1> SinCalls;
@@ -1362,7 +1362,7 @@ struct SinCosPiOpt : public LibCallOptimization {
// It's only worthwhile if both sinpi and cospi are actually used.
if (SinCosCalls.empty() && (SinCalls.empty() || CosCalls.empty()))
- return 0;
+ return nullptr;
Value *Sin, *Cos, *SinCos;
insertSinCosCall(B, CI->getCalledFunction(), Arg, IsFloat, Sin, Cos,
@@ -1372,7 +1372,7 @@ struct SinCosPiOpt : public LibCallOptimization {
replaceTrigInsts(CosCalls, Cos);
replaceTrigInsts(SinCosCalls, SinCos);
- return 0;
+ return nullptr;
}
bool isTrigLibCall(CallInst *CI) {
@@ -1498,7 +1498,7 @@ struct FFSOpt : public LibCallOptimization {
if (FT->getNumParams() != 1 ||
!FT->getReturnType()->isIntegerTy(32) ||
!FT->getParamType(0)->isIntegerTy())
- return 0;
+ return nullptr;
Value *Op = CI->getArgOperand(0);
@@ -1531,7 +1531,7 @@ struct AbsOpt : public LibCallOptimization {
// We require integer(integer) where the types agree.
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
FT->getParamType(0) != FT->getReturnType())
- return 0;
+ return nullptr;
// abs(x) -> x >s -1 ? x : -x
Value *Op = CI->getArgOperand(0);
@@ -1549,7 +1549,7 @@ struct IsDigitOpt : public LibCallOptimization {
// We require integer(i32)
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
!FT->getParamType(0)->isIntegerTy(32))
- return 0;
+ return nullptr;
// isdigit(c) -> (c-'0') <u 10
Value *Op = CI->getArgOperand(0);
@@ -1566,7 +1566,7 @@ struct IsAsciiOpt : public LibCallOptimization {
// We require integer(i32)
if (FT->getNumParams() != 1 || !FT->getReturnType()->isIntegerTy() ||
!FT->getParamType(0)->isIntegerTy(32))
- return 0;
+ return nullptr;
// isascii(c) -> c <u 128
Value *Op = CI->getArgOperand(0);
@@ -1582,7 +1582,7 @@ struct ToAsciiOpt : public LibCallOptimization {
// We require i32(i32)
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isIntegerTy(32))
- return 0;
+ return nullptr;
// toascii(c) -> c & 0x7f
return B.CreateAnd(CI->getArgOperand(0),
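The character-class folds from the surrounding hunks, as plain expressions: isdigit(c) becomes an unsigned range check, isascii(c) a single compare, and toascii(c) a mask:

#include <cstdio>

int main() {
  int C = '7';
  std::printf("isdigit: %d\n", (unsigned)(C - '0') < 10u);   // (c-'0') <u 10
  std::printf("isascii: %d\n", (unsigned)C < 128u);          // c <u 128
  std::printf("toascii: %d\n", C & 0x7f);                    // c & 0x7f
}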
@@ -1612,7 +1612,7 @@ struct ErrorReportingOpt : public LibCallOptimization {
CI->addAttribute(AttributeSet::FunctionIndex, Attribute::Cold);
}
- return 0;
+ return nullptr;
}
protected:
@@ -1649,7 +1649,7 @@ struct PrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
StringRef FormatStr;
if (!getConstantStringInfo(CI->getArgOperand(0), FormatStr))
- return 0;
+ return nullptr;
// Empty format string -> noop.
if (FormatStr.empty()) // Tolerate printf's declared void.
@@ -1660,7 +1660,7 @@ struct PrintFOpt : public LibCallOptimization {
// is used; in general the printf return value is not compatible with either
// putchar() or puts().
if (!CI->use_empty())
- return 0;
+ return nullptr;
// printf("x") -> putchar('x'), even for '%'.
if (FormatStr.size() == 1) {
@@ -1697,7 +1697,7 @@ struct PrintFOpt : public LibCallOptimization {
CI->getArgOperand(1)->getType()->isPointerTy()) {
return EmitPutS(CI->getArgOperand(1), B, DL, TLI);
}
- return 0;
+ return nullptr;
}
Value *callOptimizer(Function *Callee, CallInst *CI,
@@ -1707,7 +1707,7 @@ struct PrintFOpt : public LibCallOptimization {
if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
!(FT->getReturnType()->isIntegerTy() ||
FT->getReturnType()->isVoidTy()))
- return 0;
+ return nullptr;
if (Value *V = optimizeFixedFormatString(Callee, CI, B)) {
return V;
@@ -1724,7 +1724,7 @@ struct PrintFOpt : public LibCallOptimization {
B.Insert(New);
return New;
}
- return 0;
+ return nullptr;
}
};
@@ -1734,7 +1734,7 @@ struct SPrintFOpt : public LibCallOptimization {
// Check for a fixed format string.
StringRef FormatStr;
if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
- return 0;
+ return nullptr;
// If we just have a format string (nothing else crazy) transform it.
if (CI->getNumArgOperands() == 2) {
@@ -1742,10 +1742,10 @@ struct SPrintFOpt : public LibCallOptimization {
// %% -> % in the future if we cared.
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
if (FormatStr[i] == '%')
- return 0; // we found a format specifier, bail out.
+ return nullptr; // we found a format specifier, bail out.
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
@@ -1758,12 +1758,12 @@ struct SPrintFOpt : public LibCallOptimization {
// and have an extra operand.
if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
CI->getNumArgOperands() < 3)
- return 0;
+ return nullptr;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// sprintf(dst, "%c", chr) --> *(i8*)dst = chr; *((i8*)dst+1) = 0
- if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return nullptr;
Value *V = B.CreateTrunc(CI->getArgOperand(2), B.getInt8Ty(), "char");
Value *Ptr = CastToCStr(CI->getArgOperand(0), B);
B.CreateStore(V, Ptr);
@@ -1775,14 +1775,14 @@ struct SPrintFOpt : public LibCallOptimization {
if (FormatStr[1] == 's') {
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
// sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1)
- if (!CI->getArgOperand(2)->getType()->isPointerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isPointerTy()) return nullptr;
Value *Len = EmitStrLen(CI->getArgOperand(2), B, DL, TLI);
if (!Len)
- return 0;
+ return nullptr;
Value *IncLen = B.CreateAdd(Len,
ConstantInt::get(Len->getType(), 1),
"leninc");
@@ -1791,7 +1791,7 @@ struct SPrintFOpt : public LibCallOptimization {
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
}
- return 0;
+ return nullptr;
}
Value *callOptimizer(Function *Callee, CallInst *CI,
@@ -1801,7 +1801,7 @@ struct SPrintFOpt : public LibCallOptimization {
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
if (Value *V = OptimizeFixedFormatString(Callee, CI, B)) {
return V;
@@ -1818,7 +1818,7 @@ struct SPrintFOpt : public LibCallOptimization {
B.Insert(New);
return New;
}
- return 0;
+ return nullptr;
}
};
@@ -1831,22 +1831,22 @@ struct FPrintFOpt : public LibCallOptimization {
// All the optimizations depend on the format string.
StringRef FormatStr;
if (!getConstantStringInfo(CI->getArgOperand(1), FormatStr))
- return 0;
+ return nullptr;
// Do not do any of the following transformations if the fprintf return
// value is used; in general the fprintf return value is not compatible
// with fwrite(), fputc() or fputs().
if (!CI->use_empty())
- return 0;
+ return nullptr;
// fprintf(F, "foo") --> fwrite("foo", 3, 1, F)
if (CI->getNumArgOperands() == 2) {
for (unsigned i = 0, e = FormatStr.size(); i != e; ++i)
if (FormatStr[i] == '%') // Could handle %% -> % if we cared.
- return 0; // We found a format specifier.
+ return nullptr; // We found a format specifier.
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
return EmitFWrite(CI->getArgOperand(1),
ConstantInt::get(DL->getIntPtrType(*Context),
@@ -1858,22 +1858,22 @@ struct FPrintFOpt : public LibCallOptimization {
// and have an extra operand.
if (FormatStr.size() != 2 || FormatStr[0] != '%' ||
CI->getNumArgOperands() < 3)
- return 0;
+ return nullptr;
// Decode the second character of the format string.
if (FormatStr[1] == 'c') {
// fprintf(F, "%c", chr) --> fputc(chr, F)
- if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return 0;
+ if (!CI->getArgOperand(2)->getType()->isIntegerTy()) return nullptr;
return EmitFPutC(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
}
if (FormatStr[1] == 's') {
// fprintf(F, "%s", str) --> fputs(str, F)
if (!CI->getArgOperand(2)->getType()->isPointerTy())
- return 0;
+ return nullptr;
return EmitFPutS(CI->getArgOperand(2), CI->getArgOperand(0), B, DL, TLI);
}
- return 0;
+ return nullptr;
}
Value *callOptimizer(Function *Callee, CallInst *CI,
@@ -1883,7 +1883,7 @@ struct FPrintFOpt : public LibCallOptimization {
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
if (Value *V = optimizeFixedFormatString(Callee, CI, B)) {
return V;
@@ -1900,7 +1900,7 @@ struct FPrintFOpt : public LibCallOptimization {
B.Insert(New);
return New;
}
- return 0;
+ return nullptr;
}
};
@@ -1917,12 +1917,12 @@ struct FWriteOpt : public LibCallOptimization {
!FT->getParamType(2)->isIntegerTy() ||
!FT->getParamType(3)->isPointerTy() ||
!FT->getReturnType()->isIntegerTy())
- return 0;
+ return nullptr;
// Get the element size and count.
ConstantInt *SizeC = dyn_cast<ConstantInt>(CI->getArgOperand(1));
ConstantInt *CountC = dyn_cast<ConstantInt>(CI->getArgOperand(2));
- if (!SizeC || !CountC) return 0;
+ if (!SizeC || !CountC) return nullptr;
uint64_t Bytes = SizeC->getZExtValue()*CountC->getZExtValue();
// If this is writing zero records, remove the call (it's a noop).
@@ -1934,10 +1934,10 @@ struct FWriteOpt : public LibCallOptimization {
if (Bytes == 1 && CI->use_empty()) { // fwrite(S,1,1,F) -> fputc(S[0],F)
Value *Char = B.CreateLoad(CastToCStr(CI->getArgOperand(0), B), "char");
Value *NewCI = EmitFPutC(Char, CI->getArgOperand(3), B, DL, TLI);
- return NewCI ? ConstantInt::get(CI->getType(), 1) : 0;
+ return NewCI ? ConstantInt::get(CI->getType(), 1) : nullptr;
}
- return 0;
+ return nullptr;
}
};
@@ -1948,18 +1948,18 @@ struct FPutsOpt : public LibCallOptimization {
(void) ER.callOptimizer(Callee, CI, B);
// These optimizations require DataLayout.
- if (!DL) return 0;
+ if (!DL) return nullptr;
// Require two pointers. Also, we can't optimize if the return value is used.
FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 || !FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
!CI->use_empty())
- return 0;
+ return nullptr;
// fputs(s,F) --> fwrite(s,1,strlen(s),F)
uint64_t Len = GetStringLength(CI->getArgOperand(0));
- if (!Len) return 0;
+ if (!Len) return nullptr;
// Known to have no uses (see above).
return EmitFWrite(CI->getArgOperand(0),
ConstantInt::get(DL->getIntPtrType(*Context), Len-1),
@@ -1975,12 +1975,12 @@ struct PutsOpt : public LibCallOptimization {
if (FT->getNumParams() < 1 || !FT->getParamType(0)->isPointerTy() ||
!(FT->getReturnType()->isIntegerTy() ||
FT->getReturnType()->isVoidTy()))
- return 0;
+ return nullptr;
// Check for a constant string.
StringRef Str;
if (!getConstantStringInfo(CI->getArgOperand(0), Str))
- return 0;
+ return nullptr;
if (Str.empty() && CI->use_empty()) {
// puts("") -> putchar('\n')
@@ -1989,7 +1989,7 @@ struct PutsOpt : public LibCallOptimization {
return B.CreateIntCast(Res, CI->getType(), true);
}
- return 0;
+ return nullptr;
}
};
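The degenerate printf folds above in action: printf("x") becomes putchar('x') and puts("") becomes putchar('\n'), removing a run-time format-string parse:

#include <cstdio>

int main() {
  std::printf("x");    // Before the fold: parses the format string.
  std::putchar('x');   // After the fold: same output, no parsing.
  std::putchar('\n');  // What puts("") reduces to.
}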
@@ -2100,7 +2100,7 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
case Intrinsic::exp2:
return &Exp2;
default:
- return 0;
+ return nullptr;
}
}
@@ -2210,7 +2210,7 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
case LibFunc::trunc:
if (hasFloatVersion(FuncName))
return &UnaryDoubleFP;
- return 0;
+ return nullptr;
case LibFunc::acos:
case LibFunc::acosh:
case LibFunc::asin:
@@ -2234,16 +2234,16 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
case LibFunc::tanh:
if (UnsafeFPShrink && hasFloatVersion(FuncName))
return &UnsafeUnaryDoubleFP;
- return 0;
+ return nullptr;
case LibFunc::fmin:
case LibFunc::fmax:
if (hasFloatVersion(FuncName))
return &BinaryDoubleFP;
- return 0;
+ return nullptr;
case LibFunc::memcpy_chk:
return &MemCpyChk;
default:
- return 0;
+ return nullptr;
}
}
@@ -2263,7 +2263,7 @@ LibCallOptimization *LibCallSimplifierImpl::lookupOptimization(CallInst *CI) {
return &StrNCpyChk;
}
- return 0;
+ return nullptr;
}
@@ -2273,7 +2273,7 @@ Value *LibCallSimplifierImpl::optimizeCall(CallInst *CI) {
IRBuilder<> Builder(CI);
return LCO->optimizeCall(CI, DL, TLI, LCS, Builder);
}
- return 0;
+ return nullptr;
}
LibCallSimplifier::LibCallSimplifier(const DataLayout *DL,
@@ -2287,7 +2287,7 @@ LibCallSimplifier::~LibCallSimplifier() {
}
Value *LibCallSimplifier::optimizeCall(CallInst *CI) {
- if (CI->isNoBuiltin()) return 0;
+ if (CI->isNoBuiltin()) return nullptr;
return Impl->optimizeCall(CI);
}
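lookupOptimization and optimizeCall share one convention: nullptr means "no simplification applies, leave the call alone". A standalone sketch of that dispatch with stand-in types:

#include <cstdio>
#include <cstring>
#include <initializer_list>

struct Optimizer { const char *Name; };

static Optimizer StrLenSimp{"strlen"};
static Optimizer MemCpySimp{"memcpy"};

static Optimizer *lookupOptimization(const char *Callee) {
  if (!std::strcmp(Callee, "strlen")) return &StrLenSimp;
  if (!std::strcmp(Callee, "memcpy")) return &MemCpySimp;
  return nullptr;   // Unknown callee: no simplification.
}

int main() {
  for (const char *F : {"strlen", "sqrt"}) {
    Optimizer *O = lookupOptimization(F);
    std::printf("%s: %s\n", F, O ? O->Name : "left unchanged");
  }
}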
diff --git a/lib/Transforms/Utils/SpecialCaseList.cpp b/lib/Transforms/Utils/SpecialCaseList.cpp
index c318560ff8..2c6fcd14b9 100644
--- a/lib/Transforms/Utils/SpecialCaseList.cpp
+++ b/lib/Transforms/Utils/SpecialCaseList.cpp
@@ -41,7 +41,7 @@ struct SpecialCaseList::Entry {
StringSet<> Strings;
Regex *RegEx;
- Entry() : RegEx(0) {}
+ Entry() : RegEx(nullptr) {}
bool match(StringRef Query) const {
return Strings.count(Query) || (RegEx && RegEx->match(Query));
@@ -57,7 +57,7 @@ SpecialCaseList *SpecialCaseList::create(
std::unique_ptr<MemoryBuffer> File;
if (error_code EC = MemoryBuffer::getFile(Path, File)) {
Error = (Twine("Can't open file '") + Path + "': " + EC.message()).str();
- return 0;
+ return nullptr;
}
return create(File.get(), Error);
}
@@ -66,7 +66,7 @@ SpecialCaseList *SpecialCaseList::create(
const MemoryBuffer *MB, std::string &Error) {
std::unique_ptr<SpecialCaseList> SCL(new SpecialCaseList());
if (!SCL->parse(MB, Error))
- return 0;
+ return nullptr;
return SCL.release();
}
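SpecialCaseList::create follows the classic factory idiom: failure is a nullptr return plus an error string filled in for the caller. A self-contained analogue:

#include <cstdio>
#include <memory>
#include <string>

struct List { std::string Data; };

static List *createList(const std::string &Text, std::string &Error) {
  if (Text.empty()) {
    Error = "Can't parse empty input";
    return nullptr;   // Failure: caller inspects Error.
  }
  return new List{Text};
}

int main() {
  std::string Err;
  std::unique_ptr<List> L(createList("", Err));
  if (!L)
    std::printf("create failed: %s\n", Err.c_str());
}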
diff --git a/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp b/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
index 560f581607..0c2fc0a972 100644
--- a/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
+++ b/lib/Transforms/Utils/UnifyFunctionExitNodes.cpp
@@ -59,7 +59,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
// Then unreachable blocks.
if (UnreachableBlocks.empty()) {
- UnreachableBlock = 0;
+ UnreachableBlock = nullptr;
} else if (UnreachableBlocks.size() == 1) {
UnreachableBlock = UnreachableBlocks.front();
} else {
@@ -77,7 +77,7 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
// Now handle return blocks.
if (ReturningBlocks.empty()) {
- ReturnBlock = 0;
+ ReturnBlock = nullptr;
return false; // No blocks return
} else if (ReturningBlocks.size() == 1) {
ReturnBlock = ReturningBlocks.front(); // Already has a single return block
@@ -91,9 +91,9 @@ bool UnifyFunctionExitNodes::runOnFunction(Function &F) {
BasicBlock *NewRetBlock = BasicBlock::Create(F.getContext(),
"UnifiedReturnBlock", &F);
- PHINode *PN = 0;
+ PHINode *PN = nullptr;
if (F.getReturnType()->isVoidTy()) {
- ReturnInst::Create(F.getContext(), NULL, NewRetBlock);
+ ReturnInst::Create(F.getContext(), nullptr, NewRetBlock);
} else {
// If the function doesn't return void... add a PHI node to the block...
PN = PHINode::Create(F.getReturnType(), ReturningBlocks.size(),
diff --git a/lib/Transforms/Utils/ValueMapper.cpp b/lib/Transforms/Utils/ValueMapper.cpp
index 457fc80e1e..0f20e6df6c 100644
--- a/lib/Transforms/Utils/ValueMapper.cpp
+++ b/lib/Transforms/Utils/ValueMapper.cpp
@@ -71,12 +71,12 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
// Check all operands to see if any need to be remapped.
for (unsigned i = 0, e = MD->getNumOperands(); i != e; ++i) {
Value *OP = MD->getOperand(i);
- if (OP == 0) continue;
+ if (!OP) continue;
Value *Mapped_OP = MapValue(OP, VM, Flags, TypeMapper, Materializer);
// Use identity map if Mapped_Op is null and we can ignore missing
// entries.
if (Mapped_OP == OP ||
- (Mapped_OP == 0 && (Flags & RF_IgnoreMissingEntries)))
+ (Mapped_OP == nullptr && (Flags & RF_IgnoreMissingEntries)))
continue;
// Ok, at least one operand needs remapping.
@@ -84,13 +84,13 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
Elts.reserve(MD->getNumOperands());
for (i = 0; i != e; ++i) {
Value *Op = MD->getOperand(i);
- if (Op == 0)
- Elts.push_back(0);
+ if (!Op)
+ Elts.push_back(nullptr);
else {
Value *Mapped_Op = MapValue(Op, VM, Flags, TypeMapper, Materializer);
// Use identity map if Mapped_Op is null and we can ignore missing
// entries.
- if (Mapped_Op == 0 && (Flags & RF_IgnoreMissingEntries))
+ if (Mapped_Op == nullptr && (Flags & RF_IgnoreMissingEntries))
Mapped_Op = Op;
Elts.push_back(Mapped_Op);
}
@@ -112,8 +112,8 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
// Okay, this either must be a constant (which may or may not be mappable) or
// is something that is not in the mapping table.
Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V));
- if (C == 0)
- return 0;
+ if (!C)
+ return nullptr;
if (BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
Function *F =
@@ -126,7 +126,7 @@ Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags,
// Otherwise, we have some other constant to remap. Start by checking to see
// if all operands have an identity remapping.
unsigned OpNo = 0, NumOperands = C->getNumOperands();
- Value *Mapped = 0;
+ Value *Mapped = nullptr;
for (; OpNo != NumOperands; ++OpNo) {
Value *Op = C->getOperand(OpNo);
Mapped = MapValue(Op, VM, Flags, TypeMapper, Materializer);
@@ -187,7 +187,7 @@ void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
Value *V = MapValue(*op, VMap, Flags, TypeMapper, Materializer);
// If we aren't ignoring missing entries, assert that something happened.
- if (V != 0)
+ if (V)
*op = V;
else
assert((Flags & RF_IgnoreMissingEntries) &&
@@ -199,7 +199,7 @@ void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = MapValue(PN->getIncomingBlock(i), VMap, Flags);
// If we aren't ignoring missing entries, assert that something happened.
- if (V != 0)
+ if (V)
PN->setIncomingBlock(i, cast<BasicBlock>(V));
else
assert((Flags & RF_IgnoreMissingEntries) &&
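Aside (editorial): the ValueMapper hunks above also switch equality tests such as "V != 0" to plain boolean tests. A minimal sketch, in plain C++ with no LLVM dependencies, of that null-test idiom; dynamic_cast stands in for llvm::dyn_cast, which likewise yields a null pointer on failure.

#include <iostream>

struct Base { virtual ~Base() = default; };
struct Derived : Base {};

Derived *asDerived(Base *B) {
  Derived *D = dynamic_cast<Derived *>(B); // nullptr when B is not a Derived
  if (!D)           // boolean test, equivalent to D == nullptr
    return nullptr; // explicit null return, not 'return 0;'
  return D;
}

int main() {
  Base B;
  Derived D;
  std::cout << (asDerived(&B) ? "derived\n" : "not derived\n");
  std::cout << (asDerived(&D) ? "derived\n" : "not derived\n");
}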
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index b68487f171..28ec83bf86 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -207,8 +207,8 @@ namespace {
DT = &P->getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &P->getAnalysis<ScalarEvolution>();
DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
- TTI = IgnoreTargetInfo ? 0 : &P->getAnalysis<TargetTransformInfo>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ TTI = IgnoreTargetInfo ? nullptr : &P->getAnalysis<TargetTransformInfo>();
}
typedef std::pair<Value *, Value *> ValuePair;
@@ -284,7 +284,7 @@ namespace {
bool trackUsesOfI(DenseSet<Value *> &Users,
AliasSetTracker &WriteSet, Instruction *I,
Instruction *J, bool UpdateUsers = true,
- DenseSet<ValuePair> *LoadMoveSetPairs = 0);
+ DenseSet<ValuePair> *LoadMoveSetPairs = nullptr);
void computePairsConnectedTo(
DenseMap<Value *, std::vector<Value *> > &CandidatePairs,
@@ -297,8 +297,8 @@ namespace {
bool pairsConflict(ValuePair P, ValuePair Q,
DenseSet<ValuePair> &PairableInstUsers,
DenseMap<ValuePair, std::vector<ValuePair> >
- *PairableInstUserMap = 0,
- DenseSet<VPPair> *PairableInstUserPairSet = 0);
+ *PairableInstUserMap = nullptr,
+ DenseSet<VPPair> *PairableInstUserPairSet = nullptr);
bool pairWillFormCycle(ValuePair P,
DenseMap<ValuePair, std::vector<ValuePair> > &PairableInstUsers,
@@ -443,8 +443,8 @@ namespace {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
- TTI = IgnoreTargetInfo ? 0 : &getAnalysis<TargetTransformInfo>();
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
+ TTI = IgnoreTargetInfo ? nullptr : &getAnalysis<TargetTransformInfo>();
return vectorizeBB(BB);
}
@@ -896,7 +896,7 @@ namespace {
}
// We can't vectorize memory operations without target data
- if (DL == 0 && IsSimpleLoadStore)
+ if (!DL && IsSimpleLoadStore)
return false;
Type *T1, *T2;
@@ -933,7 +933,7 @@ namespace {
if (T2->isX86_FP80Ty() || T2->isPPC_FP128Ty() || T2->isX86_MMXTy())
return false;
- if ((!Config.VectorizePointers || DL == 0) &&
+ if ((!Config.VectorizePointers || !DL) &&
(T1->getScalarType()->isPointerTy() ||
T2->getScalarType()->isPointerTy()))
return false;
@@ -1067,7 +1067,7 @@ namespace {
(isa<ConstantVector>(JOp) || isa<ConstantDataVector>(JOp))) {
Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
Constant *SplatValue = cast<Constant>(IOp)->getSplatValue();
- if (SplatValue != NULL &&
+ if (SplatValue != nullptr &&
SplatValue == cast<Constant>(JOp)->getSplatValue())
Op2VK = TargetTransformInfo::OK_UniformConstantValue;
}
@@ -1685,8 +1685,9 @@ namespace {
C2->first.second == C->first.first ||
C2->first.second == C->first.second ||
pairsConflict(C2->first, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
if (C2->second >= C->second) {
CanAdd = false;
break;
@@ -1706,8 +1707,9 @@ namespace {
T->second == C->first.first ||
T->second == C->first.second ||
pairsConflict(*T, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
CanAdd = false;
break;
}
@@ -1724,8 +1726,9 @@ namespace {
C2->first.second == C->first.first ||
C2->first.second == C->first.second ||
pairsConflict(C2->first, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
CanAdd = false;
break;
}
@@ -1740,8 +1743,9 @@ namespace {
ChosenPairs.begin(), E2 = ChosenPairs.end();
C2 != E2; ++C2) {
if (pairsConflict(*C2, C->first, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet
+ : nullptr)) {
CanAdd = false;
break;
}
@@ -1822,8 +1826,8 @@ namespace {
for (DenseMap<Value *, Value *>::iterator C = ChosenPairs.begin(),
E = ChosenPairs.end(); C != E; ++C) {
if (pairsConflict(*C, IJ, PairableInstUsers,
- UseCycleCheck ? &PairableInstUserMap : 0,
- UseCycleCheck ? &PairableInstUserPairSet : 0)) {
+ UseCycleCheck ? &PairableInstUserMap : nullptr,
+ UseCycleCheck ? &PairableInstUserPairSet : nullptr)) {
DoesConflict = true;
break;
}
@@ -2393,7 +2397,7 @@ namespace {
} while ((LIENext =
dyn_cast<InsertElementInst>(LIENext->getOperand(0))));
- LIENext = 0;
+ LIENext = nullptr;
Value *LIEPrev = UndefValue::get(ArgTypeH);
for (unsigned i = 0; i < numElemL; ++i) {
if (isa<UndefValue>(VectElemts[i])) continue;
@@ -2461,14 +2465,14 @@ namespace {
if ((LEE || LSV) && (HEE || HSV) && !IsSizeChangeShuffle) {
// We can have at most two unique vector inputs.
bool CanUseInputs = true;
- Value *I1, *I2 = 0;
+ Value *I1, *I2 = nullptr;
if (LEE) {
I1 = LEE->getOperand(0);
} else {
I1 = LSV->getOperand(0);
I2 = LSV->getOperand(1);
if (I2 == I1 || isa<UndefValue>(I2))
- I2 = 0;
+ I2 = nullptr;
}
if (HEE) {
@@ -2973,7 +2977,7 @@ namespace {
switch (Kind) {
default:
- K->setMetadata(Kind, 0); // Remove unknown metadata
+ K->setMetadata(Kind, nullptr); // Remove unknown metadata
break;
case LLVMContext::MD_tbaa:
K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
@@ -3144,7 +3148,7 @@ namespace {
// Instruction insertion point:
Instruction *InsertionPt = K;
- Instruction *K1 = 0, *K2 = 0;
+ Instruction *K1 = nullptr, *K2 = nullptr;
replaceOutputsOfPair(Context, L, H, K, InsertionPt, K1, K2);
// The use dag of the first original instruction must be moved to after
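Aside (editorial): several BBVectorize signatures above take optional pointer arguments (LoadMoveSetPairs, PairableInstUserMap) whose defaults change from 0 to nullptr. A minimal sketch, with hypothetical names, of one place where the two spellings genuinely diverge: template argument deduction, where a forwarded 0 deduces as int rather than as a null pointer.

#include <iostream>
#include <utility>

void consume(int *P) { std::cout << (P ? "non-null\n" : "null pointer\n"); }

template <typename T>
void forwardToConsume(T &&Arg) {
  consume(std::forward<T>(Arg)); // T is deduced at the call site
}

int main() {
  // forwardToConsume(0);    // would not compile: T deduces as int, and
                             // consume(int *) cannot accept an int
  forwardToConsume(nullptr); // fine: std::nullptr_t converts to int *
}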
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 6941bb79ff..843e9e90c2 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -230,8 +230,9 @@ public:
const TargetLibraryInfo *TLI, unsigned VecWidth,
unsigned UnrollFactor)
: OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), DL(DL), TLI(TLI),
- VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()), Induction(0),
- OldInduction(0), WidenMap(UnrollFactor), Legal(0) {}
+ VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()),
+ Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor),
+ Legal(nullptr) {}
// Perform the actual loop widening (vectorization).
void vectorize(LoopVectorizationLegality *L) {
@@ -520,8 +521,8 @@ public:
LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, const DataLayout *DL,
DominatorTree *DT, TargetLibraryInfo *TLI)
: NumLoads(0), NumStores(0), NumPredStores(0), TheLoop(L), SE(SE), DL(DL),
- DT(DT), TLI(TLI), Induction(0), WidestIndTy(0), HasFunNoNaNAttr(false),
- MaxSafeDepDistBytes(-1U) {}
+ DT(DT), TLI(TLI), Induction(nullptr), WidestIndTy(nullptr),
+ HasFunNoNaNAttr(false), MaxSafeDepDistBytes(-1U) {}
/// This enum represents the kinds of reductions that we support.
enum ReductionKind {
@@ -559,7 +560,7 @@ public:
/// This struct holds information about reduction variables.
struct ReductionDescriptor {
- ReductionDescriptor() : StartValue(0), LoopExitInstr(0),
+ ReductionDescriptor() : StartValue(nullptr), LoopExitInstr(nullptr),
Kind(RK_NoReduction), MinMaxKind(MRK_Invalid) {}
ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K,
@@ -631,7 +632,7 @@ public:
/// A struct for saving information about induction variables.
struct InductionInfo {
InductionInfo(Value *Start, InductionKind K) : StartValue(Start), IK(K) {}
- InductionInfo() : StartValue(0), IK(IK_NoInduction) {}
+ InductionInfo() : StartValue(nullptr), IK(IK_NoInduction) {}
/// Start value.
TrackingVH<Value> StartValue;
/// Induction kind.
@@ -960,7 +961,7 @@ private:
assert(LoopID->getOperand(0) == LoopID && "invalid loop id");
for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) {
- const MDString *S = 0;
+ const MDString *S = nullptr;
SmallVector<Value*, 4> Args;
// The expected hint is either a MDString or a MDNode with the first
@@ -1053,7 +1054,7 @@ struct LoopVectorize : public FunctionPass {
bool runOnFunction(Function &F) override {
SE = &getAnalysis<ScalarEvolution>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
LI = &getAnalysis<LoopInfo>();
TTI = &getAnalysis<TargetTransformInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
@@ -1070,7 +1071,7 @@ struct LoopVectorize : public FunctionPass {
if (!TTI->getNumberOfRegisters(true))
return false;
- if (DL == NULL) {
+ if (!DL) {
DEBUG(dbgs() << "\nLV: Not vectorizing " << F.getName()
<< ": Missing data layout\n");
return false;
@@ -1232,7 +1233,7 @@ static Value *stripIntegerCast(Value *V) {
/// \p Ptr.
static const SCEV *replaceSymbolicStrideSCEV(ScalarEvolution *SE,
ValueToValueMap &PtrToStride,
- Value *Ptr, Value *OrigPtr = 0) {
+ Value *Ptr, Value *OrigPtr = nullptr) {
const SCEV *OrigSCEV = SE->getSCEV(Ptr);
@@ -1399,7 +1400,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
// We can emit wide load/stores only if the last non-zero index is the
// induction variable.
- const SCEV *Last = 0;
+ const SCEV *Last = nullptr;
if (!Strides.count(Gep))
Last = SE->getSCEV(Gep->getOperand(InductionOperand));
else {
@@ -1648,17 +1649,17 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, bool IfPredic
// Does this instruction return a value ?
bool IsVoidRetTy = Instr->getType()->isVoidTy();
- Value *UndefVec = IsVoidRetTy ? 0 :
+ Value *UndefVec = IsVoidRetTy ? nullptr :
UndefValue::get(VectorType::get(Instr->getType(), VF));
// Create a new entry in the WidenMap and initialize it to Undef or Null.
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
Instruction *InsertPt = Builder.GetInsertPoint();
BasicBlock *IfBlock = Builder.GetInsertBlock();
- BasicBlock *CondBlock = 0;
+ BasicBlock *CondBlock = nullptr;
VectorParts Cond;
- Loop *VectorLp = 0;
+ Loop *VectorLp = nullptr;
if (IfPredicateStore) {
assert(Instr->getParent()->getSinglePredecessor() &&
"Only support single predecessor blocks");
@@ -1674,7 +1675,7 @@ void InnerLoopVectorizer::scalarizeInstruction(Instruction *Instr, bool IfPredic
for (unsigned Width = 0; Width < VF; ++Width) {
// Start if-block.
- Value *Cmp = 0;
+ Value *Cmp = nullptr;
if (IfPredicateStore) {
Cmp = Builder.CreateExtractElement(Cond[Part], Builder.getInt32(Width));
Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Cmp, ConstantInt::get(Cmp->getType(), 1));
@@ -1725,21 +1726,21 @@ static Instruction *getFirstInst(Instruction *FirstInst, Value *V,
if (FirstInst)
return FirstInst;
if (Instruction *I = dyn_cast<Instruction>(V))
- return I->getParent() == Loc->getParent() ? I : 0;
- return 0;
+ return I->getParent() == Loc->getParent() ? I : nullptr;
+ return nullptr;
}
std::pair<Instruction *, Instruction *>
InnerLoopVectorizer::addStrideCheck(Instruction *Loc) {
- Instruction *tnullptr = 0;
+ Instruction *tnullptr = nullptr;
if (!Legal->mustCheckStrides())
return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
IRBuilder<> ChkBuilder(Loc);
// Emit checks.
- Value *Check = 0;
- Instruction *FirstInst = 0;
+ Value *Check = nullptr;
+ Instruction *FirstInst = nullptr;
for (SmallPtrSet<Value *, 8>::iterator SI = Legal->strides_begin(),
SE = Legal->strides_end();
SI != SE; ++SI) {
@@ -1771,7 +1772,7 @@ InnerLoopVectorizer::addRuntimeCheck(Instruction *Loc) {
LoopVectorizationLegality::RuntimePointerCheck *PtrRtCheck =
Legal->getRuntimePointerCheck();
- Instruction *tnullptr = 0;
+ Instruction *tnullptr = nullptr;
if (!PtrRtCheck->Need)
return std::pair<Instruction *, Instruction *>(tnullptr, tnullptr);
@@ -1781,7 +1782,7 @@ InnerLoopVectorizer::addRuntimeCheck(Instruction *Loc) {
LLVMContext &Ctx = Loc->getContext();
SCEVExpander Exp(*SE, "induction");
- Instruction *FirstInst = 0;
+ Instruction *FirstInst = nullptr;
for (unsigned i = 0; i < NumPointers; ++i) {
Value *Ptr = PtrRtCheck->Pointers[i];
@@ -1808,7 +1809,7 @@ InnerLoopVectorizer::addRuntimeCheck(Instruction *Loc) {
IRBuilder<> ChkBuilder(Loc);
// Our instructions might fold to a constant.
- Value *MemoryRuntimeCheck = 0;
+ Value *MemoryRuntimeCheck = nullptr;
for (unsigned i = 0; i < NumPointers; ++i) {
for (unsigned j = i+1; j < NumPointers; ++j) {
// No need to check if two readonly pointers intersect.
@@ -2072,7 +2073,7 @@ void InnerLoopVectorizer::createEmptyLoop() {
// start value.
// This variable saves the new starting index for the scalar loop.
- PHINode *ResumeIndex = 0;
+ PHINode *ResumeIndex = nullptr;
LoopVectorizationLegality::InductionList::iterator I, E;
LoopVectorizationLegality::InductionList *List = Legal->getInductionVars();
// Set builder to point to last bypass block.
@@ -2088,9 +2089,9 @@ void InnerLoopVectorizer::createEmptyLoop() {
// truncated version for the scalar loop.
PHINode *TruncResumeVal = (OrigPhi == OldInduction) ?
PHINode::Create(OrigPhi->getType(), 2, "trunc.resume.val",
- MiddleBlock->getTerminator()) : 0;
+ MiddleBlock->getTerminator()) : nullptr;
- Value *EndValue = 0;
+ Value *EndValue = nullptr;
switch (II.IK) {
case LoopVectorizationLegality::IK_NoInduction:
llvm_unreachable("Unknown induction");
@@ -2675,7 +2676,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
assert(isPowerOf2_32(VF) &&
"Reduction emission only supported for pow2 vectors!");
Value *TmpVec = ReducedPartRdx;
- SmallVector<Constant*, 32> ShuffleMask(VF, 0);
+ SmallVector<Constant*, 32> ShuffleMask(VF, nullptr);
for (unsigned i = VF; i != 1; i >>= 1) {
// Move the upper half of the vector to the lower half.
for (unsigned j = 0; j != i/2; ++j)
@@ -3073,7 +3074,7 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
VectorParts &A = getVectorValue(it->getOperand(0));
VectorParts &B = getVectorValue(it->getOperand(1));
for (unsigned Part = 0; Part < UF; ++Part) {
- Value *C = 0;
+ Value *C = nullptr;
if (FCmp)
C = Builder.CreateFCmp(Cmp->getPredicate(), A[Part], B[Part]);
else
@@ -3560,14 +3561,14 @@ static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE,
///\brief Look for a cast use of the passed value.
static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
- Value *UniqueCast = 0;
+ Value *UniqueCast = nullptr;
for (User *U : Ptr->users()) {
CastInst *CI = dyn_cast<CastInst>(U);
if (CI && CI->getType() == Ty) {
if (!UniqueCast)
UniqueCast = CI;
else
- return 0;
+ return nullptr;
}
}
return UniqueCast;
@@ -3580,7 +3581,7 @@ static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
const DataLayout *DL, Loop *Lp) {
const PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
if (!PtrTy || PtrTy->isAggregateType())
- return 0;
+ return nullptr;
// Try to remove a gep instruction to make the pointer (actually index at this
// point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing the
@@ -3600,11 +3601,11 @@ static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
if (!S)
- return 0;
+ return nullptr;
V = S->getStepRecurrence(*SE);
if (!V)
- return 0;
+ return nullptr;
// Strip off the size of access multiplication if we are still analyzing the
// pointer.
@@ -3612,24 +3613,24 @@ static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
DL->getTypeAllocSize(PtrTy->getElementType());
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
if (M->getOperand(0)->getSCEVType() != scConstant)
- return 0;
+ return nullptr;
const APInt &APStepVal =
cast<SCEVConstant>(M->getOperand(0))->getValue()->getValue();
// Huge step value - give up.
if (APStepVal.getBitWidth() > 64)
- return 0;
+ return nullptr;
int64_t StepVal = APStepVal.getSExtValue();
if (PtrAccessSize != StepVal)
- return 0;
+ return nullptr;
V = M->getOperand(1);
}
}
// Strip off casts.
- Type *StripedOffRecurrenceCast = 0;
+ Type *StripedOffRecurrenceCast = nullptr;
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(V)) {
StripedOffRecurrenceCast = C->getType();
V = C->getOperand();
@@ -3638,11 +3639,11 @@ static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
// Look for the loop invariant symbolic value.
const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
if (!U)
- return 0;
+ return nullptr;
Value *Stride = U->getValue();
if (!Lp->isLoopInvariant(Stride))
- return 0;
+ return nullptr;
// If we have stripped off the recurrence cast we have to make sure that we
// return the value that is used in this loop so that we can replace it later.
@@ -3653,7 +3654,7 @@ static Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE,
}
void LoopVectorizationLegality::collectStridedAcccess(Value *MemAccess) {
- Value *Ptr = 0;
+ Value *Ptr = nullptr;
if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
Ptr = LI->getPointerOperand();
else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
@@ -4652,7 +4653,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// We only allow for a single reduction value to be used outside the loop.
// This includes users of the reduction, variables (which form a cycle
// that ends in the phi node).
- Instruction *ExitInstruction = 0;
+ Instruction *ExitInstruction = nullptr;
// Indicates that we found a reduction operation in our scan.
bool FoundReduxOp = false;
@@ -4666,7 +4667,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// the number of instructions we saw from the recognized min/max pattern,
// to make sure we only see exactly the two instructions.
unsigned NumCmpSelectPatternInst = 0;
- ReductionInstDesc ReduxDesc(false, 0);
+ ReductionInstDesc ReduxDesc(false, nullptr);
SmallPtrSet<Instruction *, 8> VisitedInsts;
SmallVector<Instruction *, 8> Worklist;
@@ -4749,7 +4750,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// being used. In this case the user uses the value of the previous
// iteration, in which case we would lose "VF-1" iterations of the
// reduction operation if we vectorize.
- if (ExitInstruction != 0 || Cur == Phi)
+ if (ExitInstruction != nullptr || Cur == Phi)
return false;
// The instruction used by an outside user must be the last instruction
@@ -4765,7 +4766,7 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
// Process instructions only once (termination). Each reduction cycle
// value must only be used once, except by phi nodes and min/max
// reductions which are represented as a cmp followed by a select.
- ReductionInstDesc IgnoredVal(false, 0);
+ ReductionInstDesc IgnoredVal(false, nullptr);
if (VisitedInsts.insert(UI)) {
if (isa<PHINode>(UI))
PHIs.push_back(UI);
@@ -4819,8 +4820,8 @@ LoopVectorizationLegality::isMinMaxSelectCmpPattern(Instruction *I,
assert((isa<ICmpInst>(I) || isa<FCmpInst>(I) || isa<SelectInst>(I)) &&
"Expect a select instruction");
- Instruction *Cmp = 0;
- SelectInst *Select = 0;
+ Instruction *Cmp = nullptr;
+ SelectInst *Select = nullptr;
// We must handle the select(cmp()) as a single instruction. Advance to the
// select.
@@ -5540,7 +5541,7 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) {
Op2VK = TargetTransformInfo::OK_UniformConstantValue;
else if (isa<ConstantVector>(Op2) || isa<ConstantDataVector>(Op2)) {
Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
- if (cast<Constant>(Op2)->getSplatValue() != NULL)
+ if (cast<Constant>(Op2)->getSplatValue() != nullptr)
Op2VK = TargetTransformInfo::OK_UniformConstantValue;
}
@@ -5754,17 +5755,17 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
// Does this instruction return a value ?
bool IsVoidRetTy = Instr->getType()->isVoidTy();
- Value *UndefVec = IsVoidRetTy ? 0 :
+ Value *UndefVec = IsVoidRetTy ? nullptr :
UndefValue::get(Instr->getType());
// Create a new entry in the WidenMap and initialize it to Undef or Null.
VectorParts &VecResults = WidenMap.splat(Instr, UndefVec);
Instruction *InsertPt = Builder.GetInsertPoint();
BasicBlock *IfBlock = Builder.GetInsertBlock();
- BasicBlock *CondBlock = 0;
+ BasicBlock *CondBlock = nullptr;
VectorParts Cond;
- Loop *VectorLp = 0;
+ Loop *VectorLp = nullptr;
if (IfPredicateStore) {
assert(Instr->getParent()->getSinglePredecessor() &&
"Only support single predecessor blocks");
@@ -5779,7 +5780,7 @@ void InnerLoopUnroller::scalarizeInstruction(Instruction *Instr,
// For each scalar that we create:
// Start an "if (pred) a[i] = ..." block.
- Value *Cmp = 0;
+ Value *Cmp = nullptr;
if (IfPredicateStore) {
if (Cond[Part]->getType()->isVectorTy())
Cond[Part] =
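Aside (editorial): the LoopVectorize hunks repeatedly use the "Cond ? &Thing : nullptr" shape for optional results. A minimal sketch, with invented names, of that optional-out-parameter pattern: the pointer defaults to nullptr, and callers opt in by passing a real address, often via a ternary as in the changes above.

#include <iostream>

void analyze(int Value, int *ExtraResult = nullptr) {
  if (ExtraResult)          // only computed when the caller asked for it
    *ExtraResult = Value * 2;
  std::cout << "analyzed " << Value << '\n';
}

int main() {
  analyze(21);              // default: no extra result requested
  int Doubled = 0;
  bool WantExtra = true;
  analyze(21, WantExtra ? &Doubled : nullptr); // ternary picks address or null
  std::cout << "doubled = " << Doubled << '\n';
}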
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index a869517a36..b15bf9c207 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -73,7 +73,7 @@ struct BlockNumbering {
BlockNumbering(BasicBlock *Bb) : BB(Bb), Valid(false) {}
- BlockNumbering() : BB(0), Valid(false) {}
+ BlockNumbering() : BB(nullptr), Valid(false) {}
void numberInstructions() {
unsigned Loc = 0;
@@ -121,15 +121,15 @@ private:
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
Instruction *I0 = dyn_cast<Instruction>(VL[0]);
if (!I0)
- return 0;
+ return nullptr;
BasicBlock *BB = I0->getParent();
for (int i = 1, e = VL.size(); i < e; i++) {
Instruction *I = dyn_cast<Instruction>(VL[i]);
if (!I)
- return 0;
+ return nullptr;
if (BB != I->getParent())
- return 0;
+ return nullptr;
}
return BB;
}
@@ -181,7 +181,7 @@ static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
switch (Kind) {
default:
- MD = 0; // Remove unknown metadata
+ MD = nullptr; // Remove unknown metadata
break;
case LLVMContext::MD_tbaa:
MD = MDNode::getMostGenericTBAA(MD, IMD);
@@ -202,7 +202,7 @@ static Type* getSameType(ArrayRef<Value *> VL) {
Type *Ty = VL[0]->getType();
for (int i = 1, e = VL.size(); i < e; i++)
if (VL[i]->getType() != Ty)
- return 0;
+ return nullptr;
return Ty;
}
@@ -447,7 +447,7 @@ private:
bool isFullyVectorizableTinyTree();
struct TreeEntry {
- TreeEntry() : Scalars(), VectorizedValue(0), LastScalarIndex(0),
+ TreeEntry() : Scalars(), VectorizedValue(nullptr), LastScalarIndex(0),
NeedToGather(0) {}
/// \returns true if the scalars in VL are equal to this entry.
@@ -1096,7 +1096,7 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
// If instead not all operands are constants, then set the operand kind
// to OK_AnyValue. If all operands are constants but not the same,
// then set the operand kind to OK_NonUniformConstantValue.
- ConstantInt *CInt = NULL;
+ ConstantInt *CInt = nullptr;
for (unsigned i = 0; i < VL.size(); ++i) {
const Instruction *I = cast<Instruction>(VL[i]);
if (!isa<ConstantInt>(I->getOperand(1))) {
@@ -1250,7 +1250,7 @@ Value *BoUpSLP::getPointerOperand(Value *I) {
return LI->getPointerOperand();
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
- return 0;
+ return nullptr;
}
unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
@@ -1324,7 +1324,7 @@ Value *BoUpSLP::getSinkBarrier(Instruction *Src, Instruction *Dst) {
if (!A.Ptr || !B.Ptr || AA->alias(A, B))
return I;
}
- return 0;
+ return nullptr;
}
int BoUpSLP::getLastIndex(ArrayRef<Value *> VL) {
@@ -1400,7 +1400,7 @@ Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
if (En->isSame(VL) && En->VectorizedValue)
return En->VectorizedValue;
}
- return 0;
+ return nullptr;
}
Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
@@ -1673,7 +1673,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
default:
llvm_unreachable("unknown inst");
}
- return 0;
+ return nullptr;
}
Value *BoUpSLP::vectorizeTree() {
@@ -1842,7 +1842,7 @@ void BoUpSLP::optimizeGatherSequence() {
DT->dominates((*v)->getParent(), In->getParent())) {
In->replaceAllUsesWith(*v);
In->eraseFromParent();
- In = 0;
+ In = nullptr;
break;
}
}
@@ -1881,7 +1881,7 @@ struct SLPVectorizer : public FunctionPass {
SE = &getAnalysis<ScalarEvolution>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TTI = &getAnalysis<TargetTransformInfo>();
AA = &getAnalysis<AliasAnalysis>();
LI = &getAnalysis<LoopInfo>();
@@ -2339,7 +2339,7 @@ class HorizontalReduction {
public:
HorizontalReduction()
- : ReductionRoot(0), ReductionPHI(0), ReductionOpcode(0),
+ : ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}
/// \brief Try to find a reduction tree.
@@ -2354,10 +2354,10 @@ public:
// In such a case start looking for a tree rooted in the first '+'.
if (Phi) {
if (B->getOperand(0) == Phi) {
- Phi = 0;
+ Phi = nullptr;
B = dyn_cast<BinaryOperator>(B->getOperand(1));
} else if (B->getOperand(1) == Phi) {
- Phi = 0;
+ Phi = nullptr;
B = dyn_cast<BinaryOperator>(B->getOperand(0));
}
}
@@ -2443,7 +2443,7 @@ public:
if (NumReducedVals < ReduxWidth)
return false;
- Value *VectorizedTree = 0;
+ Value *VectorizedTree = nullptr;
IRBuilder<> Builder(ReductionRoot);
FastMathFlags Unsafe;
Unsafe.setUnsafeAlgebra();
@@ -2492,7 +2492,7 @@ public:
} else
ReductionRoot->replaceAllUsesWith(VectorizedTree);
}
- return VectorizedTree != 0;
+ return VectorizedTree != nullptr;
}
private:
@@ -2675,7 +2675,8 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
Value *Rdx =
(P->getIncomingBlock(0) == BB
? (P->getIncomingValue(0))
- : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1) : 0));
+ : (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
+ : nullptr));
// Check if this is a Binary Operator.
BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
if (!BI)
@@ -2714,7 +2715,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
if (BinaryOperator *BinOp =
dyn_cast<BinaryOperator>(SI->getValueOperand())) {
HorizontalReduction HorRdx;
- if (((HorRdx.matchAssociativeReduction(0, BinOp, DL) &&
+ if (((HorRdx.matchAssociativeReduction(nullptr, BinOp, DL) &&
HorRdx.tryToReduce(R, TTI)) ||
tryToVectorize(BinOp, R))) {
Changed = true;