-rw-r--r--  include/llvm/Analysis/AliasSetTracker.h | 25
-rw-r--r--  include/llvm/Analysis/BranchProbabilityInfo.h | 2
-rw-r--r--  include/llvm/Analysis/CFG.h | 8
-rw-r--r--  include/llvm/Analysis/DependenceAnalysis.h | 10
-rw-r--r--  include/llvm/Analysis/DominanceFrontier.h | 2
-rw-r--r--  include/llvm/Analysis/IVUsers.h | 2
-rw-r--r--  include/llvm/Analysis/IntervalPartition.h | 6
-rw-r--r--  include/llvm/Analysis/LazyValueInfo.h | 4
-rw-r--r--  include/llvm/Analysis/LibCallAliasAnalysis.h | 2
-rw-r--r--  include/llvm/Analysis/LibCallSemantics.h | 2
-rw-r--r--  include/llvm/Analysis/Loads.h | 7
-rw-r--r--  include/llvm/Analysis/MemoryBuiltins.h | 2
-rw-r--r--  include/llvm/Analysis/MemoryDependenceAnalysis.h | 9
-rw-r--r--  include/llvm/Analysis/PHITransAddr.h | 3
-rw-r--r--  include/llvm/Analysis/PtrUseVisitor.h | 12
-rw-r--r--  include/llvm/Analysis/RegionInfo.h | 6
-rw-r--r--  include/llvm/Analysis/ScalarEvolution.h | 8
-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpander.h | 6
-rw-r--r--  include/llvm/Analysis/SparsePropagation.h | 2
-rw-r--r--  include/llvm/IR/PredIteratorCache.h | 2
-rw-r--r--  lib/Analysis/AliasAnalysis.cpp | 2
-rw-r--r--  lib/Analysis/AliasAnalysisCounter.cpp | 4
-rw-r--r--  lib/Analysis/AliasSetTracker.cpp | 18
-rw-r--r--  lib/Analysis/Analysis.cpp | 5
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp | 26
-rw-r--r--  lib/Analysis/BranchProbabilityInfo.cpp | 4
-rw-r--r--  lib/Analysis/CFG.cpp | 8
-rw-r--r--  lib/Analysis/CFGPrinter.cpp | 8
-rw-r--r--  lib/Analysis/ConstantFolding.cpp | 106
-rw-r--r--  lib/Analysis/CostModel.cpp | 20
-rw-r--r--  lib/Analysis/Delinearization.cpp | 6
-rw-r--r--  lib/Analysis/DependenceAnalysis.cpp | 76
-rw-r--r--  lib/Analysis/DominanceFrontier.cpp | 4
-rw-r--r--  lib/Analysis/IVUsers.cpp | 10
-rw-r--r--  lib/Analysis/InstCount.cpp | 2
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp | 152
-rw-r--r--  lib/Analysis/IntervalPartition.cpp | 2
-rw-r--r--  lib/Analysis/LazyValueInfo.cpp | 18
-rw-r--r--  lib/Analysis/LibCallAliasAnalysis.cpp | 2
-rw-r--r--  lib/Analysis/LibCallSemantics.cpp | 4
-rw-r--r--  lib/Analysis/Lint.cpp | 36
-rw-r--r--  lib/Analysis/Loads.cpp | 10
-rw-r--r--  lib/Analysis/LoopInfo.cpp | 30
-rw-r--r--  lib/Analysis/LoopPass.cpp | 4
-rw-r--r--  lib/Analysis/MemDepPrinter.cpp | 14
-rw-r--r--  lib/Analysis/MemoryBuiltins.cpp | 54
-rw-r--r--  lib/Analysis/MemoryDependenceAnalysis.cpp | 28
-rw-r--r--  lib/Analysis/NoAliasAnalysis.cpp | 2
-rw-r--r--  lib/Analysis/PHITransAddr.cpp | 44
-rw-r--r--  lib/Analysis/RegionInfo.cpp | 48
-rw-r--r--  lib/Analysis/RegionPass.cpp | 4
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp | 129
-rw-r--r--  lib/Analysis/ScalarEvolutionAliasAnalysis.cpp | 8
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp | 51
-rw-r--r--  lib/Analysis/ScalarEvolutionNormalization.cpp | 2
-rw-r--r--  lib/Analysis/SparsePropagation.cpp | 4
-rw-r--r--  lib/Analysis/TargetTransformInfo.cpp | 12
-rw-r--r--  lib/Analysis/TypeBasedAliasAnalysis.cpp | 22
-rw-r--r--  lib/Analysis/ValueTracking.cpp | 38
59 files changed, 574 insertions(+), 563 deletions(-)
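
Every hunk below follows the same mechanical pattern: the literal 0 (or the NULL macro) used in pointer context becomes C++11's nullptr, and pointer comparisons against 0 become direct truth tests (AS != 0 becomes AS != nullptr, Entry == 0 becomes !Entry). A minimal standalone sketch — not part of this patch — of why nullptr is the safer spelling: 0 is first and foremost an integer literal and takes part in integer overload resolution, while nullptr converts only to pointer types.

    #include <iostream>

    void report(int N)   { std::cout << "int overload\n"; }
    void report(char *P) { std::cout << "pointer overload\n"; }

    int main() {
      report(0);        // binds to report(int): 0 is an int literal
      // report(NULL);  // implementation-defined: int overload or ambiguous
      report(nullptr);  // unambiguously binds to report(char *)
    }
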
diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h
index 72e75eca7e..6117d91ec6 100644
--- a/include/llvm/Analysis/AliasSetTracker.h
+++ b/include/llvm/Analysis/AliasSetTracker.h
@@ -43,13 +43,13 @@ class AliasSet : public ilist_node<AliasSet> {
const MDNode *TBAAInfo;
public:
PointerRec(Value *V)
- : Val(V), PrevInList(0), NextInList(0), AS(0), Size(0),
+ : Val(V), PrevInList(nullptr), NextInList(nullptr), AS(nullptr), Size(0),
TBAAInfo(DenseMapInfo<const MDNode *>::getEmptyKey()) {}
Value *getValue() const { return Val; }
PointerRec *getNext() const { return NextInList; }
- bool hasAliasSet() const { return AS != 0; }
+ bool hasAliasSet() const { return AS != nullptr; }
PointerRec** setPrevInList(PointerRec **PIL) {
PrevInList = PIL;
@@ -75,7 +75,7 @@ class AliasSet : public ilist_node<AliasSet> {
// If we have missing or conflicting TBAAInfo, return null.
if (TBAAInfo == DenseMapInfo<const MDNode *>::getEmptyKey() ||
TBAAInfo == DenseMapInfo<const MDNode *>::getTombstoneKey())
- return 0;
+ return nullptr;
return TBAAInfo;
}
@@ -91,7 +91,7 @@ class AliasSet : public ilist_node<AliasSet> {
}
void setAliasSet(AliasSet *as) {
- assert(AS == 0 && "Already have an alias set!");
+ assert(!AS && "Already have an alias set!");
AS = as;
}
@@ -100,7 +100,7 @@ class AliasSet : public ilist_node<AliasSet> {
*PrevInList = NextInList;
if (AS->PtrListEnd == &NextInList) {
AS->PtrListEnd = PrevInList;
- assert(*AS->PtrListEnd == 0 && "List not terminated right!");
+ assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
}
delete this;
}
@@ -174,7 +174,7 @@ public:
class iterator;
iterator begin() const { return iterator(PtrList); }
iterator end() const { return iterator(); }
- bool empty() const { return PtrList == 0; }
+ bool empty() const { return PtrList == nullptr; }
void print(raw_ostream &OS) const;
void dump() const;
@@ -184,7 +184,7 @@ public:
PointerRec, ptrdiff_t> {
PointerRec *CurNode;
public:
- explicit iterator(PointerRec *CN = 0) : CurNode(CN) {}
+ explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
bool operator==(const iterator& x) const {
return CurNode == x.CurNode;
@@ -220,8 +220,9 @@ private:
// Can only be created by AliasSetTracker. Also, ilist creates one
// to serve as a sentinel.
friend struct ilist_sentinel_traits<AliasSet>;
- AliasSet() : PtrList(0), PtrListEnd(&PtrList), Forward(0), RefCount(0),
- AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
+ AliasSet()
+ : PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
+ AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
}
AliasSet(const AliasSet &AS) LLVM_DELETED_FUNCTION;
@@ -285,7 +286,7 @@ class AliasSetTracker {
void deleted() override;
void allUsesReplacedWith(Value *) override;
public:
- ASTCallbackVH(Value *V, AliasSetTracker *AST = 0);
+ ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
ASTCallbackVH &operator=(Value *V);
};
/// ASTCallbackVHDenseMapInfo - Traits to tell DenseMap that tell us how to
@@ -354,7 +355,7 @@ public:
/// pointer didn't alias anything).
AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
const MDNode *TBAAInfo,
- bool *New = 0);
+ bool *New = nullptr);
/// getAliasSetForPointerIfExists - Return the alias set containing the
/// location specified if one exists, otherwise return null.
@@ -408,7 +409,7 @@ private:
// entry for the pointer if it doesn't already exist.
AliasSet::PointerRec &getEntryFor(Value *V) {
AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
- if (Entry == 0)
+ if (!Entry)
Entry = new AliasSet::PointerRec(V);
return *Entry;
}
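
The getEntryFor() hunk above is a lazy-initialization idiom worth calling out: operator[] yields a reference to the map slot itself, so assigning through Entry updates the map in place. A hedged sketch of the same idiom, with standard-library stand-ins for ASTCallbackVH and AliasSet::PointerRec:

    #include <map>
    #include <string>

    struct PointerRec { unsigned RefCount = 0; };

    std::map<std::string, PointerRec *> PointerMap;

    PointerRec &getEntryFor(const std::string &Key) {
      PointerRec *&Entry = PointerMap[Key]; // slot reference, value-initialized to nullptr
      if (!Entry)                           // same test the patch rewrites from 'Entry == 0'
        Entry = new PointerRec();           // allocated on first lookup only
      return *Entry;
    }
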
diff --git a/include/llvm/Analysis/BranchProbabilityInfo.h b/include/llvm/Analysis/BranchProbabilityInfo.h
index 4a6a280d0c..4414c84f6b 100644
--- a/include/llvm/Analysis/BranchProbabilityInfo.h
+++ b/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -47,7 +47,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool runOnFunction(Function &F) override;
- void print(raw_ostream &OS, const Module *M = 0) const override;
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
/// \brief Get an edge's probability, relative to other out-edges of the Src.
///
diff --git a/include/llvm/Analysis/CFG.h b/include/llvm/Analysis/CFG.h
index 02e3b45e9a..7f92eda8cb 100644
--- a/include/llvm/Analysis/CFG.h
+++ b/include/llvm/Analysis/CFG.h
@@ -65,8 +65,8 @@ bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
/// on branchy code but not loops, and LI is most useful on code with loops but
/// does not help on branchy code outside loops.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
- const DominatorTree *DT = 0,
- const LoopInfo *LI = 0);
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
/// \brief Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
@@ -75,8 +75,8 @@ bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
/// Returns false only if we can prove that once 'From' has been reached then
/// 'To' can not be executed. Conservatively returns true.
bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
- const DominatorTree *DT = 0,
- const LoopInfo *LI = 0);
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
} // End llvm namespace
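
Both overloads take optional analyses, and those defaulted arguments are what this hunk touches. A hypothetical caller, assuming only the declarations above: leaving the defaults in place (now spelled nullptr) degrades the query to a bounded CFG walk, while passing the analyses lets it prune dominated and in-loop blocks.

    #include "llvm/Analysis/CFG.h"
    using namespace llvm;

    // Cheap form: DT and LI default to nullptr, so no analysis is consulted.
    bool quickCheck(const Instruction *From, const Instruction *To) {
      return isPotentiallyReachable(From, To);
    }

    // Precise form: both analyses supplied explicitly.
    bool preciseCheck(const Instruction *From, const Instruction *To,
                      const DominatorTree &DT, const LoopInfo &LI) {
      return isPotentiallyReachable(From, To, &DT, &LI);
    }
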
diff --git a/include/llvm/Analysis/DependenceAnalysis.h b/include/llvm/Analysis/DependenceAnalysis.h
index a142828d4f..78a03ff236 100644
--- a/include/llvm/Analysis/DependenceAnalysis.h
+++ b/include/llvm/Analysis/DependenceAnalysis.h
@@ -73,8 +73,8 @@ namespace llvm {
Instruction *Destination) :
Src(Source),
Dst(Destination),
- NextPredecessor(NULL),
- NextSuccessor(NULL) {}
+ NextPredecessor(nullptr),
+ NextSuccessor(nullptr) {}
virtual ~Dependence() {}
/// Dependence::DVEntry - Each level in the distance/direction vector
@@ -96,7 +96,7 @@ namespace llvm {
bool Splitable : 1; // Splitting the loop will break dependence.
const SCEV *Distance; // NULL implies no distance available.
DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
- PeelLast(false), Splitable(false), Distance(NULL) { }
+ PeelLast(false), Splitable(false), Distance(nullptr) { }
};
/// getSrc - Returns the source instruction for this dependence.
@@ -154,7 +154,7 @@ namespace llvm {
/// getDistance - Returns the distance (or NULL) associated with a
/// particular level.
- virtual const SCEV *getDistance(unsigned Level) const { return NULL; }
+ virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
/// isPeelFirst - Returns true if peeling the first iteration from
/// this loop will break this dependence.
@@ -921,7 +921,7 @@ namespace llvm {
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &) const override;
- void print(raw_ostream &, const Module * = 0) const override;
+ void print(raw_ostream &, const Module * = nullptr) const override;
}; // class DependenceAnalysis
/// createDependenceAnalysisPass - This creates an instance of the
diff --git a/include/llvm/Analysis/DominanceFrontier.h b/include/llvm/Analysis/DominanceFrontier.h
index 4dcea2d1e7..0fbaa13bd3 100644
--- a/include/llvm/Analysis/DominanceFrontier.h
+++ b/include/llvm/Analysis/DominanceFrontier.h
@@ -142,7 +142,7 @@ public:
/// print - Convert to human readable form
///
- void print(raw_ostream &OS, const Module* = 0) const override;
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
/// dump - Dump the dominance frontier to dbgs().
void dump() const;
diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index c6bb49402b..6038872207 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -169,7 +169,7 @@ public:
return Processed.count(Inst);
}
- void print(raw_ostream &OS, const Module* = 0) const override;
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
/// dump - This method is used for debugging.
void dump() const;
diff --git a/include/llvm/Analysis/IntervalPartition.h b/include/llvm/Analysis/IntervalPartition.h
index 05248bd0e5..274be2bdcf 100644
--- a/include/llvm/Analysis/IntervalPartition.h
+++ b/include/llvm/Analysis/IntervalPartition.h
@@ -48,7 +48,7 @@ class IntervalPartition : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
- IntervalPartition() : FunctionPass(ID), RootInterval(0) {
+ IntervalPartition() : FunctionPass(ID), RootInterval(nullptr) {
initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
}
@@ -62,7 +62,7 @@ public:
IntervalPartition(IntervalPartition &I, bool);
// print - Show contents in human readable format...
- void print(raw_ostream &O, const Module* = 0) const override;
+ void print(raw_ostream &O, const Module* = nullptr) const override;
// getRootInterval() - Return the root interval that contains the starting
// block of the function.
@@ -77,7 +77,7 @@ public:
// getBlockInterval - Return the interval that a basic block exists in.
inline Interval *getBlockInterval(BasicBlock *BB) {
IntervalMapTy::iterator I = IntervalMap.find(BB);
- return I != IntervalMap.end() ? I->second : 0;
+ return I != IntervalMap.end() ? I->second : nullptr;
}
// getAnalysisUsage - Implement the Pass API
diff --git a/include/llvm/Analysis/LazyValueInfo.h b/include/llvm/Analysis/LazyValueInfo.h
index a4cb806748..2fe7386e73 100644
--- a/include/llvm/Analysis/LazyValueInfo.h
+++ b/include/llvm/Analysis/LazyValueInfo.h
@@ -33,10 +33,10 @@ class LazyValueInfo : public FunctionPass {
void operator=(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
public:
static char ID;
- LazyValueInfo() : FunctionPass(ID), PImpl(0) {
+ LazyValueInfo() : FunctionPass(ID), PImpl(nullptr) {
initializeLazyValueInfoPass(*PassRegistry::getPassRegistry());
}
- ~LazyValueInfo() { assert(PImpl == 0 && "releaseMemory not called"); }
+ ~LazyValueInfo() { assert(!PImpl && "releaseMemory not called"); }
/// Tristate - This is used to return true/false/dunno results.
enum Tristate {
diff --git a/include/llvm/Analysis/LibCallAliasAnalysis.h b/include/llvm/Analysis/LibCallAliasAnalysis.h
index 481015e2c1..4c03c92244 100644
--- a/include/llvm/Analysis/LibCallAliasAnalysis.h
+++ b/include/llvm/Analysis/LibCallAliasAnalysis.h
@@ -27,7 +27,7 @@ namespace llvm {
LibCallInfo *LCI;
- explicit LibCallAliasAnalysis(LibCallInfo *LC = 0)
+ explicit LibCallAliasAnalysis(LibCallInfo *LC = nullptr)
: FunctionPass(ID), LCI(LC) {
initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
diff --git a/include/llvm/Analysis/LibCallSemantics.h b/include/llvm/Analysis/LibCallSemantics.h
index 0f0bc23e00..8bd747f039 100644
--- a/include/llvm/Analysis/LibCallSemantics.h
+++ b/include/llvm/Analysis/LibCallSemantics.h
@@ -130,7 +130,7 @@ namespace llvm {
mutable const LibCallLocationInfo *Locations;
mutable unsigned NumLocations;
public:
- LibCallInfo() : Impl(0), Locations(0), NumLocations(0) {}
+ LibCallInfo() : Impl(nullptr), Locations(nullptr), NumLocations(0) {}
virtual ~LibCallInfo();
//===------------------------------------------------------------------===//
diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h
index ebcb762541..25c59288f3 100644
--- a/include/llvm/Analysis/Loads.h
+++ b/include/llvm/Analysis/Loads.h
@@ -27,7 +27,8 @@ class MDNode;
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
- unsigned Align, const DataLayout *TD = 0);
+ unsigned Align,
+ const DataLayout *TD = nullptr);
/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
/// the instruction before ScanFrom) checking to see if we have the value at
@@ -49,8 +50,8 @@ bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
BasicBlock::iterator &ScanFrom,
unsigned MaxInstsToScan = 6,
- AliasAnalysis *AA = 0,
- MDNode **TBAATag = 0);
+ AliasAnalysis *AA = nullptr,
+ MDNode **TBAATag = nullptr);
}
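
A hypothetical use of FindAvailableLoadedValue as declared above, keeping the new nullptr defaults for AA and TBAATag (so no alias queries are issued and no TBAA tag is reported back):

    #include "llvm/Analysis/Loads.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Try to forward an earlier store/load of the same pointer to LI.
    // Returns the available value, or nullptr if the scan found nothing.
    Value *tryForward(LoadInst *LI) {
      BasicBlock::iterator ScanFrom = LI;   // scan backwards from the load
      return FindAvailableLoadedValue(LI->getPointerOperand(), LI->getParent(),
                                      ScanFrom, /*MaxInstsToScan=*/6);
    }
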
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index ff4bc22412..d414680b51 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -233,7 +233,7 @@ class ObjectSizeOffsetEvaluator
bool RoundToAlign;
SizeOffsetEvalType unknown() {
- return std::make_pair((Value*)0, (Value*)0);
+ return std::make_pair(nullptr, nullptr);
}
SizeOffsetEvalType compute_(Value *V);
diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h
index 123d435a6e..1c4441bea6 100644
--- a/include/llvm/Analysis/MemoryDependenceAnalysis.h
+++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -97,7 +97,7 @@ namespace llvm {
PairTy Value;
explicit MemDepResult(PairTy V) : Value(V) {}
public:
- MemDepResult() : Value(0, Invalid) {}
+ MemDepResult() : Value(nullptr, Invalid) {}
/// get methods: These are static ctor methods for creating various
/// MemDepResult kinds.
@@ -155,7 +155,7 @@ namespace llvm {
/// getInst() - If this is a normal dependency, return the instruction that
/// is depended on. Otherwise, return null.
Instruction *getInst() const {
- if (Value.getInt() == Other) return NULL;
+ if (Value.getInt() == Other) return nullptr;
return Value.getPointer();
}
@@ -285,7 +285,8 @@ namespace llvm {
/// pointer. May be null if there are no tags or conflicting tags.
const MDNode *TBAATag;
- NonLocalPointerInfo() : Size(AliasAnalysis::UnknownSize), TBAATag(0) {}
+ NonLocalPointerInfo()
+ : Size(AliasAnalysis::UnknownSize), TBAATag(nullptr) {}
};
/// CachedNonLocalPointerInfo - This map stores the cached results of doing
@@ -401,7 +402,7 @@ namespace llvm {
bool isLoad,
BasicBlock::iterator ScanIt,
BasicBlock *BB,
- Instruction *QueryInst = 0);
+ Instruction *QueryInst = nullptr);
/// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
diff --git a/include/llvm/Analysis/PHITransAddr.h b/include/llvm/Analysis/PHITransAddr.h
index 6d70edd667..69f59071f9 100644
--- a/include/llvm/Analysis/PHITransAddr.h
+++ b/include/llvm/Analysis/PHITransAddr.h
@@ -45,7 +45,8 @@ class PHITransAddr {
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
- PHITransAddr(Value *addr, const DataLayout *DL) : Addr(addr), DL(DL), TLI(0) {
+ PHITransAddr(Value *addr, const DataLayout *DL)
+ : Addr(addr), DL(DL), TLI(nullptr) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
diff --git a/include/llvm/Analysis/PtrUseVisitor.h b/include/llvm/Analysis/PtrUseVisitor.h
index 572d5d7587..6e61fc3be3 100644
--- a/include/llvm/Analysis/PtrUseVisitor.h
+++ b/include/llvm/Analysis/PtrUseVisitor.h
@@ -48,13 +48,13 @@ public:
/// analysis and whether the visit completed or aborted early.
class PtrInfo {
public:
- PtrInfo() : AbortedInfo(0, false), EscapedInfo(0, false) {}
+ PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
/// \brief Reset the pointer info, clearing all state.
void reset() {
- AbortedInfo.setPointer(0);
+ AbortedInfo.setPointer(nullptr);
AbortedInfo.setInt(false);
- EscapedInfo.setPointer(0);
+ EscapedInfo.setPointer(nullptr);
EscapedInfo.setInt(false);
}
@@ -76,14 +76,14 @@ public:
/// \brief Mark the visit as aborted. Intended for use in a void return.
/// \param I The instruction which caused the visit to abort, if available.
- void setAborted(Instruction *I = 0) {
+ void setAborted(Instruction *I = nullptr) {
AbortedInfo.setInt(true);
AbortedInfo.setPointer(I);
}
/// \brief Mark the pointer as escaped. Intended for use in a void return.
/// \param I The instruction which escapes the pointer, if available.
- void setEscaped(Instruction *I = 0) {
+ void setEscaped(Instruction *I = nullptr) {
EscapedInfo.setInt(true);
EscapedInfo.setPointer(I);
}
@@ -92,7 +92,7 @@ public:
/// for use in a void return.
/// \param I The instruction which both escapes the pointer and aborts the
/// visit, if available.
- void setEscapedAndAborted(Instruction *I = 0) {
+ void setEscapedAndAborted(Instruction *I = nullptr) {
setEscaped(I);
setAborted(I);
}
diff --git a/include/llvm/Analysis/RegionInfo.h b/include/llvm/Analysis/RegionInfo.h
index 4d55408a47..6b43ae3124 100644
--- a/include/llvm/Analysis/RegionInfo.h
+++ b/include/llvm/Analysis/RegionInfo.h
@@ -246,7 +246,7 @@ public:
/// @param Parent The surrounding region or NULL if this is a top level
/// region.
Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RI,
- DominatorTree *DT, Region *Parent = 0);
+ DominatorTree *DT, Region *Parent = nullptr);
/// Delete the Region and all its subregions.
~Region();
@@ -311,7 +311,7 @@ public:
/// @brief Check if a Region is the TopLevel region.
///
/// The toplevel region represents the whole function.
- bool isTopLevelRegion() const { return exit == NULL; }
+ bool isTopLevelRegion() const { return exit == nullptr; }
/// @brief Return a new (non-canonical) region, that is obtained by joining
/// this region with its predecessors.
@@ -515,7 +515,7 @@ public:
}
// Construct the end iterator.
- block_iterator_wrapper() : super(df_end<pointer>((BasicBlock *)0)) {}
+ block_iterator_wrapper() : super(df_end<pointer>((BasicBlock *)nullptr)) {}
/*implicit*/ block_iterator_wrapper(super I) : super(I) {}
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 06489d8d68..1e086fbd70 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -210,7 +210,7 @@ namespace llvm {
void deleted() override;
void allUsesReplacedWith(Value *New) override;
public:
- SCEVCallbackVH(Value *V, ScalarEvolution *SE = 0);
+ SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
};
friend class SCEVCallbackVH;
@@ -291,7 +291,7 @@ namespace llvm {
const SCEV *ExactNotTaken;
PointerIntPair<ExitNotTakenInfo*, 1> NextExit;
- ExitNotTakenInfo() : ExitingBlock(0), ExactNotTaken(0) {}
+ ExitNotTakenInfo() : ExitingBlock(nullptr), ExactNotTaken(nullptr) {}
/// isCompleteList - Return true if all loop exits are computable.
bool isCompleteList() const {
@@ -321,7 +321,7 @@ namespace llvm {
const SCEV *Max;
public:
- BackedgeTakenInfo() : Max(0) {}
+ BackedgeTakenInfo() : Max(nullptr) {}
/// Initialize BackedgeTakenInfo from a list of exact exit counts.
BackedgeTakenInfo(
@@ -897,7 +897,7 @@ namespace llvm {
bool runOnFunction(Function &F) override;
void releaseMemory() override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
- void print(raw_ostream &OS, const Module* = 0) const override;
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
void verifyAnalysis() const override;
private:
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 9162735dd8..b9bef970b5 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -92,7 +92,7 @@ namespace llvm {
public:
/// SCEVExpander - Construct a SCEVExpander in "canonical" mode.
explicit SCEVExpander(ScalarEvolution &se, const char *name)
- : SE(se), IVName(name), IVIncInsertLoop(0), IVIncInsertPos(0),
+ : SE(se), IVName(name), IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr),
CanonicalMode(true), LSRMode(false),
Builder(se.getContext(), TargetFolder(se.DL)) {
#ifndef NDEBUG
@@ -131,7 +131,7 @@ namespace llvm {
/// representative. Return the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
SmallVectorImpl<WeakVH> &DeadInsts,
- const TargetTransformInfo *TTI = NULL);
+ const TargetTransformInfo *TTI = nullptr);
/// expandCodeFor - Insert code to directly compute the specified SCEV
/// expression into the program. The inserted code is inserted into the
@@ -219,7 +219,7 @@ namespace llvm {
/// expression into the program. The inserted code is inserted into the
/// SCEVExpander's current insertion point. If a type is specified, the
/// result will be expanded to have that type, with a cast if necessary.
- Value *expandCodeFor(const SCEV *SH, Type *Ty = 0);
+ Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
/// getRelevantLoop - Determine the most "relevant" loop for the given SCEV.
const Loop *getRelevantLoop(const SCEV *);
diff --git a/include/llvm/Analysis/SparsePropagation.h b/include/llvm/Analysis/SparsePropagation.h
index 76c8ccf59c..65ff2f6431 100644
--- a/include/llvm/Analysis/SparsePropagation.h
+++ b/include/llvm/Analysis/SparsePropagation.h
@@ -82,7 +82,7 @@ public:
/// constant value, return it. Otherwise return null. The returned value
/// must be in the same LLVM type as Val.
virtual Constant *GetConstant(LatticeVal LV, Value *Val, SparseSolver &SS) {
- return 0;
+ return nullptr;
}
/// ComputeArgument - Given a formal argument value, compute and return a
diff --git a/include/llvm/IR/PredIteratorCache.h b/include/llvm/IR/PredIteratorCache.h
index bf18dfeb20..02bc583a25 100644
--- a/include/llvm/IR/PredIteratorCache.h
+++ b/include/llvm/IR/PredIteratorCache.h
@@ -44,7 +44,7 @@ namespace llvm {
if (Entry) return Entry;
SmallVector<BasicBlock*, 32> PredCache(pred_begin(BB), pred_end(BB));
- PredCache.push_back(0); // null terminator.
+ PredCache.push_back(nullptr); // null terminator.
BlockToPredCountMap[BB] = PredCache.size()-1;
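
The nullptr pushed here is load-bearing: the cache hands out the predecessor array as a null-terminated sequence, and the terminator is what stops a caller's walk. A minimal sketch of a hypothetical consumer:

    // Walk a null-terminated BasicBlock* array as built by the cache above.
    void visitPreds(BasicBlock **Preds) {
      for (BasicBlock **PI = Preds; *PI != nullptr; ++PI) {
        BasicBlock *Pred = *PI;
        (void)Pred; // ... process the predecessor ...
      }
    }
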
diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp
index 9583bbe5e3..57237e59e8 100644
--- a/lib/Analysis/AliasAnalysis.cpp
+++ b/lib/Analysis/AliasAnalysis.cpp
@@ -473,7 +473,7 @@ AliasAnalysis::~AliasAnalysis() {}
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
diff --git a/lib/Analysis/AliasAnalysisCounter.cpp b/lib/Analysis/AliasAnalysisCounter.cpp
index 2e3bc553af..b8609142fa 100644
--- a/lib/Analysis/AliasAnalysisCounter.cpp
+++ b/lib/Analysis/AliasAnalysisCounter.cpp
@@ -126,7 +126,7 @@ AliasAnalysis::AliasResult
AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
- const char *AliasString = 0;
+ const char *AliasString = nullptr;
switch (R) {
case NoAlias: No++; AliasString = "No alias"; break;
case MayAlias: May++; AliasString = "May alias"; break;
@@ -152,7 +152,7 @@ AliasAnalysisCounter::getModRefInfo(ImmutableCallSite CS,
const Location &Loc) {
ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
- const char *MRString = 0;
+ const char *MRString = nullptr;
switch (R) {
case NoModRef: NoMR++; MRString = "NoModRef"; break;
case Ref: JustRef++; MRString = "JustRef"; break;
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp
index ab1005e83c..a45fe2389e 100644
--- a/lib/Analysis/AliasSetTracker.cpp
+++ b/lib/Analysis/AliasSetTracker.cpp
@@ -72,16 +72,16 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
AS.PtrList->setPrevInList(PtrListEnd);
PtrListEnd = AS.PtrListEnd;
- AS.PtrList = 0;
+ AS.PtrList = nullptr;
AS.PtrListEnd = &AS.PtrList;
- assert(*AS.PtrListEnd == 0 && "End of list is not null?");
+ assert(*AS.PtrListEnd == nullptr && "End of list is not null?");
}
}
void AliasSetTracker::removeAliasSet(AliasSet *AS) {
if (AliasSet *Fwd = AS->Forward) {
Fwd->dropRef(*this);
- AS->Forward = 0;
+ AS->Forward = nullptr;
}
AliasSets.erase(AS);
}
@@ -115,10 +115,10 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
// Add it to the end of the list...
- assert(*PtrListEnd == 0 && "End of list is not null?");
+ assert(*PtrListEnd == nullptr && "End of list is not null?");
*PtrListEnd = &Entry;
PtrListEnd = Entry.setPrevInList(PtrListEnd);
- assert(*PtrListEnd == 0 && "End of list is not null?");
+ assert(*PtrListEnd == nullptr && "End of list is not null?");
addRef(); // Entry points to alias set.
}
@@ -217,11 +217,11 @@ void AliasSetTracker::clear() {
AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
uint64_t Size,
const MDNode *TBAAInfo) {
- AliasSet *FoundSet = 0;
+ AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward || !I->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;
- if (FoundSet == 0) { // If this is the first alias set ptr can go into.
+ if (!FoundSet) { // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
} else { // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*I, *this); // Merge in contents.
@@ -245,12 +245,12 @@ bool AliasSetTracker::containsPointer(Value *Ptr, uint64_t Size,
AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
- AliasSet *FoundSet = 0;
+ AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward || !I->aliasesUnknownInst(Inst, AA))
continue;
- if (FoundSet == 0) // If this is the first alias set ptr can go into.
+ if (!FoundSet) // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
else if (!I->Forward) // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*I, *this); // Merge in contents.
diff --git a/lib/Analysis/Analysis.cpp b/lib/Analysis/Analysis.cpp
index c960123d08..01c1c7e572 100644
--- a/lib/Analysis/Analysis.cpp
+++ b/lib/Analysis/Analysis.cpp
@@ -73,7 +73,7 @@ void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
char **OutMessages) {
- raw_ostream *DebugOS = Action != LLVMReturnStatusAction ? &errs() : 0;
+ raw_ostream *DebugOS = Action != LLVMReturnStatusAction ? &errs() : nullptr;
std::string Messages;
raw_string_ostream MsgsOS(Messages);
@@ -94,7 +94,8 @@ LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action) {
LLVMBool Result = verifyFunction(
- *unwrap<Function>(Fn), Action != LLVMReturnStatusAction ? &errs() : 0);
+ *unwrap<Function>(Fn), Action != LLVMReturnStatusAction ? &errs()
+ : nullptr);
if (Action == LLVMAbortProcessAction && Result)
report_fatal_error("Broken function found, compilation aborted!");
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index e267374834..c4ff3eef4d 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -298,7 +298,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
do {
// See if this is a bitcast or GEP.
const Operator *Op = dyn_cast<Operator>(V);
- if (Op == 0) {
+ if (!Op) {
// The only non-operator case we can handle are GlobalAliases.
if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
if (!GA->mayBeOverridden()) {
@@ -315,7 +315,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
}
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
- if (GEPOp == 0) {
+ if (!GEPOp) {
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
if (const Instruction *I = dyn_cast<Instruction>(V))
@@ -336,7 +336,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// If we are lacking DataLayout information, we can't compute the offsets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
- if (DL == 0) {
+ if (!DL) {
if (!GEPOp->hasAllZeroIndices())
return V;
V = GEPOp->getOperand(0);
@@ -433,7 +433,7 @@ static const Function *getParent(const Value *V) {
if (const Argument *arg = dyn_cast<Argument>(V))
return arg->getParent();
- return NULL;
+ return nullptr;
}
static bool notDifferentParent(const Value *O1, const Value *O2) {
@@ -753,7 +753,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// Finally, handle specific knowledge of intrinsics.
const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
- if (II != 0)
+ if (II != nullptr)
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::memcpy:
@@ -904,8 +904,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// derived pointer.
if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
// Do the base pointers alias?
- AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
- UnderlyingV2, UnknownSize, 0);
+ AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, nullptr,
+ UnderlyingV2, UnknownSize, nullptr);
// Check for geps of non-aliasing underlying pointers where the offsets are
// identical.
@@ -929,8 +929,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
- assert(DL == 0 &&
- "DecomposeGEPExpression and GetUnderlyingObject disagree!");
+ assert(!DL &&
+ "DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
// If the max search depth is reached the result is undefined
@@ -966,7 +966,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
- assert(DL == 0 &&
+ assert(!DL &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -988,7 +988,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
if (V1Size == UnknownSize && V2Size == UnknownSize)
return MayAlias;
- AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
+ AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, nullptr,
V2, V2Size, V2TBAAInfo);
if (R != MustAlias)
// If V2 may alias GEP base pointer, conservatively returns MayAlias.
@@ -1005,7 +1005,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
- assert(DL == 0 &&
+ assert(!DL &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -1371,7 +1371,7 @@ bool BasicAliasAnalysis::isValueEqualInPotentialCycles(const Value *V,
// Use dominance or loop info if available.
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- DominatorTree *DT = DTWP ? &DTWP->getDomTree() : 0;
+ DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
LoopInfo *LI = getAnalysisIfAvailable<LoopInfo>();
// Make sure that the visited phis cannot reach the Value. This ensures that
diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp
index 301c3ee6ad..0209784359 100644
--- a/lib/Analysis/BranchProbabilityInfo.cpp
+++ b/lib/Analysis/BranchProbabilityInfo.cpp
@@ -560,7 +560,7 @@ isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
uint32_t Sum = 0;
uint32_t MaxWeight = 0;
- BasicBlock *MaxSucc = 0;
+ BasicBlock *MaxSucc = nullptr;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
@@ -580,7 +580,7 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
if (BranchProbability(MaxWeight, Sum) > BranchProbability(4, 5))
return MaxSucc;
- return 0;
+ return nullptr;
}
/// Get the raw edge weight for the edge. If can't find it, return
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 6963760632..8ef5302717 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -123,7 +123,7 @@ static bool loopContainsBoth(const LoopInfo *LI,
const BasicBlock *BB1, const BasicBlock *BB2) {
const Loop *L1 = getOutermostLoop(LI, BB1);
const Loop *L2 = getOutermostLoop(LI, BB2);
- return L1 != NULL && L1 == L2;
+ return L1 != nullptr && L1 == L2;
}
static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
@@ -133,7 +133,7 @@ static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
// When the stop block is unreachable, it's dominated from everywhere,
// regardless of whether there's a path between the two blocks.
if (DT && !DT->isReachableFromEntry(StopBB))
- DT = 0;
+ DT = nullptr;
// Limit the number of blocks we visit. The goal is to avoid run-away compile
// times on large CFGs without hampering sensible code. Arbitrarily chosen.
@@ -156,7 +156,7 @@ static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
return true;
}
- if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : 0) {
+ if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : nullptr) {
// All blocks in a single loop are reachable from all other blocks. From
// any of these blocks, we can skip directly to the exits of the loop,
// ignoring any other blocks inside the loop body.
@@ -200,7 +200,7 @@ bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
// If the block is in a loop then we can reach any instruction in the block
// from any other instruction in the block by going around a backedge.
- if (LI && LI->getLoopFor(BB) != 0)
+ if (LI && LI->getLoopFor(BB) != nullptr)
return true;
// Linear scan, start at 'A', see whether we hit 'B' or the end first.
diff --git a/lib/Analysis/CFGPrinter.cpp b/lib/Analysis/CFGPrinter.cpp
index 537d6d10f8..0fcc24e409 100644
--- a/lib/Analysis/CFGPrinter.cpp
+++ b/lib/Analysis/CFGPrinter.cpp
@@ -33,7 +33,7 @@ namespace {
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const override {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
@@ -56,7 +56,7 @@ namespace {
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const override {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
@@ -90,7 +90,7 @@ namespace {
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const override {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
@@ -123,7 +123,7 @@ namespace {
errs() << "\n";
return false;
}
- void print(raw_ostream &OS, const Module* = 0) const override {}
+ void print(raw_ostream &OS, const Module* = nullptr) const override {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 782acfa051..5f733302d2 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -56,7 +56,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// Handle a vector->integer cast.
if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
VectorType *VTy = dyn_cast<VectorType>(C->getType());
- if (VTy == 0)
+ if (!VTy)
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = VTy->getNumElements();
@@ -73,7 +73,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
- if (CDV == 0)
+ if (!CDV)
return ConstantExpr::getBitCast(C, DestTy);
// Now that we know that the input value is a vector of integers, just shift
@@ -93,7 +93,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
- if (DestVTy == 0)
+ if (!DestVTy)
return ConstantExpr::getBitCast(C, DestTy);
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
@@ -411,32 +411,32 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
TD.getTypeAllocSizeInBits(LoadTy),
AS);
} else
- return 0;
+ return nullptr;
C = FoldBitCast(C, MapTy, TD);
if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
return FoldBitCast(Res, LoadTy, TD);
- return 0;
+ return nullptr;
}
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0)
- return 0;
+ return nullptr;
GlobalValue *GVal;
APInt Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
- return 0;
+ return nullptr;
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
- return 0;
+ return nullptr;
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset.isNegative())
- return 0;
+ return nullptr;
// If we're not accessing anything in this constant, the result is undefined.
if (Offset.getZExtValue() >=
@@ -446,7 +446,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
BytesLoaded, TD))
- return 0;
+ return nullptr;
APInt ResultVal = APInt(IntType->getBitWidth(), 0);
if (TD.isLittleEndian()) {
@@ -479,7 +479,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE)
- return 0;
+ return nullptr;
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
@@ -542,16 +542,16 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// Try hard to fold loads from bitcasted strange and non-type-safe things.
if (TD)
return FoldReinterpretLoadFromConstPtr(CE, *TD);
- return 0;
+ return nullptr;
}
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
- if (LI->isVolatile()) return 0;
+ if (LI->isVolatile()) return nullptr;
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
- return 0;
+ return nullptr;
}
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
@@ -608,7 +608,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
}
}
- return 0;
+ return nullptr;
}
/// CastGEPIndices - If array indices are not pointer-sized integers,
@@ -618,7 +618,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD)
- return 0;
+ return nullptr;
Type *IntPtrTy = TD->getIntPtrType(ResultTy);
@@ -641,7 +641,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
}
if (!Any)
- return 0;
+ return nullptr;
Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
@@ -676,7 +676,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Constant *Ptr = Ops[0];
if (!TD || !Ptr->getType()->getPointerElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
- return 0;
+ return nullptr;
Type *IntPtrTy = TD->getIntPtrType(Ptr->getType());
Type *ResultElementTy = ResultTy->getPointerElementType();
@@ -690,7 +690,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
- assert((CE == 0 || CE->getType() == IntPtrTy) &&
+ assert((!CE || CE->getType() == IntPtrTy) &&
"CastGEPIndices didn't canonicalize index types!");
if (CE && CE->getOpcode() == Instruction::Sub &&
CE->getOperand(0)->isNullValue()) {
@@ -702,7 +702,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
return Res;
}
}
- return 0;
+ return nullptr;
}
unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
@@ -765,7 +765,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
- return 0;
+ return nullptr;
}
// Determine which element of the array the offset points into.
@@ -810,7 +810,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// type, then the offset is pointing into the middle of an indivisible
// member, so we can't simplify it.
if (Offset != 0)
- return 0;
+ return nullptr;
// Create a GEP.
Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
@@ -841,7 +841,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) {
- Constant *CommonValue = 0;
+ Constant *CommonValue = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PN->getIncomingValue(i);
@@ -854,14 +854,14 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
// If the incoming value is not a constant, then give up.
Constant *C = dyn_cast<Constant>(Incoming);
if (!C)
- return 0;
+ return nullptr;
// Fold the PHI's operands.
if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
C = ConstantFoldConstantExpression(NewC, TD, TLI);
// If the incoming value is a different constant to
// the one we saw previously, then give up.
if (CommonValue && C != CommonValue)
- return 0;
+ return nullptr;
CommonValue = C;
}
@@ -876,7 +876,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
Constant *Op = dyn_cast<Constant>(*i);
if (!Op)
- return 0; // All operands not constant!
+ return nullptr; // All operands not constant!
// Fold the Instruction's operands.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
@@ -966,14 +966,14 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
}
switch (Opcode) {
- default: return 0;
+ default: return nullptr;
case Instruction::ICmp:
case Instruction::FCmp: llvm_unreachable("Invalid for compares");
case Instruction::Call:
if (Function *F = dyn_cast<Function>(Ops.back()))
if (canConstantFoldCallTo(F))
return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
- return 0;
+ return nullptr;
case Instruction::PtrToInt:
// If the input is a inttoptr, eliminate the pair. This requires knowing
// the width of a pointer, so it can't be done in ConstantExpr::getCast.
@@ -1142,14 +1142,14 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
- return 0; // Do not allow stepping over the value!
+ return nullptr; // Do not allow stepping over the value!
// Loop over all of the operands, tracking down which value we are
// addressing.
for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
C = C->getAggregateElement(CE->getOperand(i));
- if (C == 0)
- return 0;
+ if (!C)
+ return nullptr;
}
return C;
}
@@ -1164,8 +1164,8 @@ Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
// addressing.
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
C = C->getAggregateElement(Indices[i]);
- if (C == 0)
- return 0;
+ if (!C)
+ return nullptr;
}
return C;
}
@@ -1270,7 +1270,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
V = NativeFP(V);
if (sys::llvm_fenv_testexcept()) {
sys::llvm_fenv_clearexcept();
- return 0;
+ return nullptr;
}
return GetConstantFoldFPValue(V, Ty);
@@ -1282,7 +1282,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
V = NativeFP(V, W);
if (sys::llvm_fenv_testexcept()) {
sys::llvm_fenv_clearexcept();
- return 0;
+ return nullptr;
}
return GetConstantFoldFPValue(V, Ty);
@@ -1311,7 +1311,7 @@ static Constant *ConstantFoldConvertToInt(const APFloat &Val,
/*isSigned=*/true, mode,
&isExact);
if (status != APFloat::opOK && status != APFloat::opInexact)
- return 0;
+ return nullptr;
return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}
@@ -1345,7 +1345,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
}
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
- return 0;
+ return nullptr;
if (IntrinsicID == Intrinsic::round) {
APFloat V = Op->getValueAPF();
@@ -1357,7 +1357,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
/// likely to be aborted with an exception anyway, and some host libms
/// have known errors raising exceptions.
if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
- return 0;
+ return nullptr;
/// Currently APFloat versions of these functions do not exist, so we use
/// the host native double versions. Float versions are not called
@@ -1396,7 +1396,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
}
if (!TLI)
- return 0;
+ return nullptr;
switch (Name[0]) {
case 'a':
@@ -1467,7 +1467,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
default:
break;
}
- return 0;
+ return nullptr;
}
if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
@@ -1491,7 +1491,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
return ConstantFP::get(Ty->getContext(), Val);
}
default:
- return 0;
+ return nullptr;
}
}
@@ -1523,21 +1523,21 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
if (isa<UndefValue>(Operands[0])) {
if (IntrinsicID == Intrinsic::bswap)
return Operands[0];
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
if (Operands.size() == 2) {
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
- return 0;
+ return nullptr;
double Op1V = getValueAsDouble(Op1);
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
- return 0;
+ return nullptr;
double Op2V = getValueAsDouble(Op2);
if (IntrinsicID == Intrinsic::pow) {
@@ -1550,7 +1550,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
return ConstantFP::get(Ty->getContext(), V1);
}
if (!TLI)
- return 0;
+ return nullptr;
if (Name == "pow" && TLI->has(LibFunc::pow))
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
if (Name == "fmod" && TLI->has(LibFunc::fmod))
@@ -1571,7 +1571,7 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
APFloat((double)std::pow((double)Op1V,
(int)Op2C->getZExtValue())));
}
- return 0;
+ return nullptr;
}
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
@@ -1624,13 +1624,13 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
}
}
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
if (Operands.size() != 3)
- return 0;
+ return nullptr;
if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
@@ -1646,14 +1646,14 @@ static Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID,
if (s != APFloat::opInvalidOp)
return ConstantFP::get(Ty->getContext(), V);
- return 0;
+ return nullptr;
}
}
}
}
}
- return 0;
+ return nullptr;
}
static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
@@ -1690,7 +1690,7 @@ Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI) {
if (!F->hasName())
- return 0;
+ return nullptr;
StringRef Name = F->getName();
Type *Ty = F->getReturnType();
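
All of the folding helpers in this file share one convention, which the rewrite above makes explicit: nullptr means "no fold found", and a non-null Constant* is the folded result. A hedged sketch of a caller honoring that contract (the helper name tryReplaceWithConstant is illustrative):

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    bool tryReplaceWithConstant(Instruction *I, const DataLayout *TD,
                                const TargetLibraryInfo *TLI) {
      Constant *C = ConstantFoldInstruction(I, TD, TLI);
      if (!C)                    // nullptr: the instruction did not fold
        return false;
      I->replaceAllUsesWith(C);  // fold succeeded; rewrite the uses
      I->eraseFromParent();
      return true;
    }
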
diff --git a/lib/Analysis/CostModel.cpp b/lib/Analysis/CostModel.cpp
index b49211d486..9fe0bfa267 100644
--- a/lib/Analysis/CostModel.cpp
+++ b/lib/Analysis/CostModel.cpp
@@ -41,7 +41,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- CostModelAnalysis() : FunctionPass(ID), F(0), TTI(0) {
+ CostModelAnalysis() : FunctionPass(ID), F(nullptr), TTI(nullptr) {
initializeCostModelAnalysisPass(
*PassRegistry::getPassRegistry());
}
@@ -101,7 +101,7 @@ static TargetTransformInfo::OperandValueKind getOperandInfo(Value *V) {
// Check for a splat of a constant or for a non uniform vector of constants.
if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
OpInfo = TargetTransformInfo::OK_NonUniformConstantValue;
- if (cast<Constant>(V)->getSplatValue() != NULL)
+ if (cast<Constant>(V)->getSplatValue() != nullptr)
OpInfo = TargetTransformInfo::OK_UniformConstantValue;
}
@@ -150,7 +150,7 @@ static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
// %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
- if (BinOp == 0)
+ if (BinOp == nullptr)
return false;
assert(BinOp->getType()->isVectorTy() && "Expecting a vector type");
@@ -171,9 +171,9 @@ static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
return false;
// Shuffle inputs must match.
- Value *NextLevelOpL = LS ? LS->getOperand(0) : 0;
- Value *NextLevelOpR = RS ? RS->getOperand(0) : 0;
- Value *NextLevelOp = 0;
+ Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
+ Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
+ Value *NextLevelOp = nullptr;
if (NextLevelOpR && NextLevelOpL) {
// If we have two shuffles their operands must match.
if (NextLevelOpL != NextLevelOpR)
@@ -198,7 +198,7 @@ static bool matchPairwiseReductionAtLevel(const BinaryOperator *BinOp,
// Check that the next levels binary operation exists and matches with the
// current one.
- BinaryOperator *NextLevelBinOp = 0;
+ BinaryOperator *NextLevelBinOp = nullptr;
if (Level + 1 != NumLevels) {
if (!(NextLevelBinOp = dyn_cast<BinaryOperator>(NextLevelOp)))
return false;
@@ -277,7 +277,7 @@ getShuffleAndOtherOprd(BinaryOperator *B) {
Value *L = B->getOperand(0);
Value *R = B->getOperand(1);
- ShuffleVectorInst *S = 0;
+ ShuffleVectorInst *S = nullptr;
if ((S = dyn_cast<ShuffleVectorInst>(L)))
return std::make_pair(R, S);
@@ -337,7 +337,7 @@ static bool matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
std::tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);
// Check the current reduction operation and the shuffle use the same value.
- if (Shuffle == 0)
+ if (Shuffle == nullptr)
return false;
if (Shuffle->getOperand(0) != NextRdxOp)
return false;
@@ -478,7 +478,7 @@ unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
if (NumVecElems == Mask.size() && isReverseVectorMask(Mask))
return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0, 0,
- 0);
+ nullptr);
return -1;
}
case Instruction::Call:
diff --git a/lib/Analysis/Delinearization.cpp b/lib/Analysis/Delinearization.cpp
index 02b8f92fc2..b4c0523cd5 100644
--- a/lib/Analysis/Delinearization.cpp
+++ b/lib/Analysis/Delinearization.cpp
@@ -51,7 +51,7 @@ public:
}
bool runOnFunction(Function &F) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
- void print(raw_ostream &O, const Module *M = 0) const override;
+ void print(raw_ostream &O, const Module *M = nullptr) const override;
};
} // end anonymous namespace
@@ -76,7 +76,7 @@ static Value *getPointerOperand(Instruction &Inst) {
return Store->getPointerOperand();
else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
return Gep->getPointerOperand();
- return NULL;
+ return nullptr;
}
void Delinearization::print(raw_ostream &O, const Module *) const {
@@ -92,7 +92,7 @@ void Delinearization::print(raw_ostream &O, const Module *) const {
const BasicBlock *BB = Inst->getParent();
// Delinearize the memory access as analyzed in all the surrounding loops.
// Do not analyze memory accesses outside loops.
- for (Loop *L = LI->getLoopFor(BB); L != NULL; L = L->getParentLoop()) {
+ for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(AccessFn);
diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp
index ff98611bae..6c987943bb 100644
--- a/lib/Analysis/DependenceAnalysis.cpp
+++ b/lib/Analysis/DependenceAnalysis.cpp
@@ -234,7 +234,7 @@ FullDependence::FullDependence(Instruction *Source,
Levels(CommonLevels),
LoopIndependent(PossiblyLoopIndependent) {
Consistent = true;
- DV = CommonLevels ? new DVEntry[CommonLevels] : NULL;
+ DV = CommonLevels ? new DVEntry[CommonLevels] : nullptr;
}
// The rest are simple getters that hide the implementation.
@@ -658,7 +658,7 @@ Value *getPointerOperand(Instruction *I) {
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
llvm_unreachable("Value is not load or store instruction");
- return 0;
+ return nullptr;
}
@@ -932,7 +932,7 @@ const SCEV *DependenceAnalysis::collectUpperBound(const Loop *L,
const SCEV *UB = SE->getBackedgeTakenCount(L);
return SE->getNoopOrZeroExtend(UB, T);
}
- return NULL;
+ return nullptr;
}
@@ -943,7 +943,7 @@ const SCEVConstant *DependenceAnalysis::collectConstantUpperBound(const Loop *L,
) const {
if (const SCEV *UB = collectUpperBound(L, T))
return dyn_cast<SCEVConstant>(UB);
- return NULL;
+ return nullptr;
}
@@ -2194,7 +2194,7 @@ const SCEVConstant *getConstantPart(const SCEVMulExpr *Product) {
if (const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Product->getOperand(Op)))
return Constant;
}
- return NULL;
+ return nullptr;
}
@@ -2646,8 +2646,8 @@ void DependenceAnalysis::findBoundsALL(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::ALL] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::ALL] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::ALL] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::ALL] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
Bound[K].Lower[Dependence::DVEntry::ALL] =
SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart),
@@ -2687,8 +2687,8 @@ void DependenceAnalysis::findBoundsEQ(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::EQ] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::EQ] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::EQ] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::EQ] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
const SCEV *NegativePart = getNegativePart(Delta);
@@ -2729,8 +2729,8 @@ void DependenceAnalysis::findBoundsLT(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::LT] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::LT] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::LT] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::LT] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Iter_1 =
SE->getMinusSCEV(Bound[K].Iterations,
@@ -2776,8 +2776,8 @@ void DependenceAnalysis::findBoundsGT(CoefficientInfo *A,
CoefficientInfo *B,
BoundInfo *Bound,
unsigned K) const {
- Bound[K].Lower[Dependence::DVEntry::GT] = NULL; // Default value = -infinity.
- Bound[K].Upper[Dependence::DVEntry::GT] = NULL; // Default value = +infinity.
+ Bound[K].Lower[Dependence::DVEntry::GT] = nullptr; // Default value = -infinity.
+ Bound[K].Upper[Dependence::DVEntry::GT] = nullptr; // Default value = +infinity.
if (Bound[K].Iterations) {
const SCEV *Iter_1 =
SE->getMinusSCEV(Bound[K].Iterations,
@@ -2829,7 +2829,7 @@ DependenceAnalysis::collectCoeffInfo(const SCEV *Subscript,
CI[K].Coeff = Zero;
CI[K].PosPart = Zero;
CI[K].NegPart = Zero;
- CI[K].Iterations = NULL;
+ CI[K].Iterations = nullptr;
}
while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
const Loop *L = AddRec->getLoop();
@@ -2872,7 +2872,7 @@ const SCEV *DependenceAnalysis::getLowerBound(BoundInfo *Bound) const {
if (Bound[K].Lower[Bound[K].Direction])
Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]);
else
- Sum = NULL;
+ Sum = nullptr;
}
return Sum;
}
@@ -2888,7 +2888,7 @@ const SCEV *DependenceAnalysis::getUpperBound(BoundInfo *Bound) const {
if (Bound[K].Upper[Bound[K].Direction])
Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]);
else
- Sum = NULL;
+ Sum = nullptr;
}
return Sum;
}
@@ -3148,12 +3148,12 @@ void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
}
else if (CurConstraint.isLine()) {
Level.Scalar = false;
- Level.Distance = NULL;
+ Level.Distance = nullptr;
// direction should be accurate
}
else if (CurConstraint.isPoint()) {
Level.Scalar = false;
- Level.Distance = NULL;
+ Level.Distance = nullptr;
unsigned NewDirection = Dependence::DVEntry::NONE;
if (!isKnownPredicate(CmpInst::ICMP_NE,
CurConstraint.getY(),
@@ -3290,7 +3290,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) ||
(!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory()))
// If either instruction doesn't reference memory, there's no dependence
- return NULL;
+ return nullptr;
if (!isLoadOrStore(Src) || !isLoadOrStore(Dst)) {
// can only analyze simple loads and stores, i.e., no calls, invokes, etc.
@@ -3310,7 +3310,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
case AliasAnalysis::NoAlias:
// If the objects noalias, they are distinct, accesses are independent.
DEBUG(dbgs() << "no alias\n");
- return NULL;
+ return nullptr;
case AliasAnalysis::MustAlias:
break; // The underlying objects alias; test accesses for dependence.
}
@@ -3505,26 +3505,26 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
case Subscript::ZIV:
DEBUG(dbgs() << ", ZIV\n");
if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result))
- return NULL;
+ return nullptr;
break;
case Subscript::SIV: {
DEBUG(dbgs() << ", SIV\n");
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
Result, NewConstraint, SplitIter))
- return NULL;
+ return nullptr;
break;
}
case Subscript::RDIV:
DEBUG(dbgs() << ", RDIV\n");
if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result))
- return NULL;
+ return nullptr;
break;
case Subscript::MIV:
DEBUG(dbgs() << ", MIV\n");
if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result))
- return NULL;
+ return nullptr;
break;
default:
llvm_unreachable("subscript has unexpected classification");
@@ -3558,16 +3558,16 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
// SJ is an SIV subscript that's part of the current coupled group
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
DEBUG(dbgs() << "SIV\n");
if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
Result, NewConstraint, SplitIter))
- return NULL;
+ return nullptr;
ConstrainedLevels.set(Level);
if (intersectConstraints(&Constraints[Level], &NewConstraint)) {
if (Constraints[Level].isEmpty()) {
++DeltaIndependence;
- return NULL;
+ return nullptr;
}
Changed = true;
}
@@ -3593,7 +3593,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
case Subscript::ZIV:
DEBUG(dbgs() << "ZIV\n");
if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
- return NULL;
+ return nullptr;
Mivs.reset(SJ);
break;
case Subscript::SIV:
@@ -3616,7 +3616,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
if (Pair[SJ].Classification == Subscript::RDIV) {
DEBUG(dbgs() << "RDIV test\n");
if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
- return NULL;
+ return nullptr;
// I don't yet understand how to propagate RDIV results
Mivs.reset(SJ);
}
@@ -3629,7 +3629,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
if (Pair[SJ].Classification == Subscript::MIV) {
DEBUG(dbgs() << "MIV test\n");
if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
- return NULL;
+ return nullptr;
}
else
llvm_unreachable("expected only MIV subscripts at this point");
@@ -3641,7 +3641,7 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) {
updateDirection(Result.DV[SJ - 1], Constraints[SJ]);
if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE)
- return NULL;
+ return nullptr;
}
}
}
@@ -3676,11 +3676,11 @@ Dependence *DependenceAnalysis::depends(Instruction *Src,
}
}
if (AllEqual)
- return NULL;
+ return nullptr;
}
FullDependence *Final = new FullDependence(Result);
- Result.DV = NULL;
+ Result.DV = nullptr;
return Final;
}
@@ -3853,11 +3853,11 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
switch (Pair[SI].Classification) {
case Subscript::SIV: {
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
(void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
Result, NewConstraint, SplitIter);
if (Level == SplitLevel) {
- assert(SplitIter != NULL);
+ assert(SplitIter != nullptr);
return SplitIter;
}
break;
@@ -3892,7 +3892,7 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
// SJ is an SIV subscript that's part of the current coupled group
unsigned Level;
- const SCEV *SplitIter = NULL;
+ const SCEV *SplitIter = nullptr;
(void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
Result, NewConstraint, SplitIter);
if (Level == SplitLevel && SplitIter)
@@ -3933,5 +3933,5 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep,
}
}
llvm_unreachable("somehow reached end of routine");
- return NULL;
+ return nullptr;
}
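Beyond readability, nullptr carries its own type, std::nullptr_t, through template argument deduction, while the 0 and NULL spellings being replaced throughout DependenceAnalysis arrive as integers. A compile-time sketch (keepsNullness is illustrative only):

#include <cstddef>
#include <type_traits>
// nullptr stays a null pointer through deduction; the literal 0 deduces
// as int and stops being a null pointer constant inside the template.
template <typename T> constexpr bool keepsNullness(T) {
  return std::is_same<T, std::nullptr_t>::value || std::is_pointer<T>::value;
}
static_assert(keepsNullness(nullptr), "nullptr deduces as std::nullptr_t");
static_assert(!keepsNullness(0), "the literal 0 deduces as plain int");
int main() { return 0; }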
diff --git a/lib/Analysis/DominanceFrontier.cpp b/lib/Analysis/DominanceFrontier.cpp
index f0787f1140..74594f8b5f 100644
--- a/lib/Analysis/DominanceFrontier.cpp
+++ b/lib/Analysis/DominanceFrontier.cpp
@@ -40,12 +40,12 @@ const DominanceFrontier::DomSetType &
DominanceFrontier::calculate(const DominatorTree &DT,
const DomTreeNode *Node) {
BasicBlock *BB = Node->getBlock();
- DomSetType *Result = NULL;
+ DomSetType *Result = nullptr;
std::vector<DFCalculateWorkObject> workList;
SmallPtrSet<BasicBlock *, 32> visited;
- workList.push_back(DFCalculateWorkObject(BB, NULL, Node, NULL));
+ workList.push_back(DFCalculateWorkObject(BB, nullptr, Node, nullptr));
do {
DFCalculateWorkObject *currentW = &workList.back();
assert (currentW && "Missing work object.");
diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp
index 5317a47959..9927f15c2d 100644
--- a/lib/Analysis/IVUsers.cpp
+++ b/lib/Analysis/IVUsers.cpp
@@ -84,7 +84,7 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
const LoopInfo *LI,
SmallPtrSet<Loop*,16> &SimpleLoopNests) {
- Loop *NearestLoop = 0;
+ Loop *NearestLoop = nullptr;
for (DomTreeNode *Rung = DT->getNode(BB);
Rung; Rung = Rung->getIDom()) {
BasicBlock *DomBB = Rung->getBlock();
@@ -253,7 +253,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -329,16 +329,16 @@ static const SCEVAddRecExpr *findAddRecForLoop(const SCEV *S, const Loop *L) {
I != E; ++I)
if (const SCEVAddRecExpr *AR = findAddRecForLoop(*I, L))
return AR;
- return 0;
+ return nullptr;
}
- return 0;
+ return nullptr;
}
const SCEV *IVUsers::getStride(const IVStrideUse &IU, const Loop *L) const {
if (const SCEVAddRecExpr *AR = findAddRecForLoop(getExpr(IU), L))
return AR->getStepRecurrence(*SE);
- return 0;
+ return nullptr;
}
void IVStrideUse::transformToPostInc(const Loop *L) {
diff --git a/lib/Analysis/InstCount.cpp b/lib/Analysis/InstCount.cpp
index 3d05556363..6d81bbb5bd 100644
--- a/lib/Analysis/InstCount.cpp
+++ b/lib/Analysis/InstCount.cpp
@@ -47,7 +47,7 @@ namespace {
void visitInstruction(Instruction &I) {
errs() << "Instruction Count does not know about " << I;
- llvm_unreachable(0);
+ llvm_unreachable(nullptr);
}
public:
static char ID; // Pass identification, replacement for typeid
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index d8d8a09804..ef28c5476e 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -131,7 +131,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Check whether the expression has the form "(A op' B) op C".
if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
@@ -179,7 +179,7 @@ static Value *ExpandBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
}
- return 0;
+ return nullptr;
}
/// FactorizeBinOp - Simplify "LHS Opcode RHS" by factorizing out a common term
@@ -192,14 +192,14 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
if (!Op0 || Op0->getOpcode() != OpcodeToExtract ||
!Op1 || Op1->getOpcode() != OpcodeToExtract)
- return 0;
+ return nullptr;
// The expression has the form "(A op' B) op (C op' D)".
Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
@@ -251,7 +251,7 @@ static Value *FactorizeBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
}
- return 0;
+ return nullptr;
}
/// SimplifyAssociativeBinOp - Generic simplifications for associative binary
@@ -263,7 +263,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
@@ -308,7 +308,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
// The remaining transforms require commutativity as well as associativity.
if (!Instruction::isCommutative(Opcode))
- return 0;
+ return nullptr;
// Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
if (Op0 && Op0->getOpcode() == Opcode) {
@@ -348,7 +348,7 @@ static Value *SimplifyAssociativeBinOp(unsigned Opc, Value *LHS, Value *RHS,
}
}
- return 0;
+ return nullptr;
}
/// ThreadBinOpOverSelect - In the case of a binary operation with a select
@@ -359,7 +359,7 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
SelectInst *SI;
if (isa<SelectInst>(LHS)) {
@@ -420,7 +420,7 @@ static Value *ThreadBinOpOverSelect(unsigned Opcode, Value *LHS, Value *RHS,
}
}
- return 0;
+ return nullptr;
}
/// ThreadCmpOverSelect - In the case of a comparison with a select instruction,
@@ -432,7 +432,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Make sure the select is on the LHS.
if (!isa<SelectInst>(LHS)) {
@@ -456,7 +456,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// It didn't simplify. However if "cmp TV, RHS" is equal to the select
// condition then we can replace it with 'true'. Otherwise give up.
if (!isSameCompare(Cond, Pred, TV, RHS))
- return 0;
+ return nullptr;
TCmp = getTrue(Cond->getType());
}
@@ -470,7 +470,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// It didn't simplify. However if "cmp FV, RHS" is equal to the select
// condition then we can replace it with 'false'. Otherwise give up.
if (!isSameCompare(Cond, Pred, FV, RHS))
- return 0;
+ return nullptr;
FCmp = getFalse(Cond->getType());
}
@@ -482,7 +482,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
// The remaining cases only make sense if the select condition has the same
// type as the result of the comparison, so bail out if this is not so.
if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
- return 0;
+ return nullptr;
// If the false value simplified to false, then the result of the compare
// is equal to "Cond && TCmp". This also catches the case when the false
// value simplified to false and the true value to true, returning "Cond".
@@ -502,7 +502,7 @@ static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// ThreadBinOpOverPHI - In the case of a binary operation with an operand that
@@ -513,24 +513,24 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
PHINode *PI;
if (isa<PHINode>(LHS)) {
PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
- return 0;
+ return nullptr;
} else {
assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
PI = cast<PHINode>(RHS);
// Bail out if LHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(LHS, PI, Q.DT))
- return 0;
+ return nullptr;
}
// Evaluate the BinOp on the incoming phi values.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
@@ -541,7 +541,7 @@ static Value *ThreadBinOpOverPHI(unsigned Opcode, Value *LHS, Value *RHS,
// If the operation failed to simplify, or simplified to a different value
// from the previous one, then give up.
if (!V || (CommonValue && V != CommonValue))
- return 0;
+ return nullptr;
CommonValue = V;
}
@@ -556,7 +556,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Make sure the phi is on the LHS.
if (!isa<PHINode>(LHS)) {
@@ -568,10 +568,10 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
- return 0;
+ return nullptr;
// Evaluate the BinOp on the incoming phi values.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
@@ -580,7 +580,7 @@ static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
// If the operation failed to simplify, or simplified to a different value
// from the previous one, then give up.
if (!V || (CommonValue && V != CommonValue))
- return 0;
+ return nullptr;
CommonValue = V;
}
@@ -613,7 +613,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// X + (Y - X) -> Y
// (Y - X) + X -> Y
// Eg: X + -X -> 0
- Value *Y = 0;
+ Value *Y = nullptr;
if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
return Y;
@@ -647,7 +647,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
@@ -720,7 +720,7 @@ static Constant *computePointerDifference(const DataLayout *DL,
// If LHS and RHS are not related via constant offsets to the same base
// value, there is nothing we can do here.
if (LHS != RHS)
- return 0;
+ return nullptr;
// Otherwise, the difference of LHS - RHS can be computed as:
// LHS - RHS
@@ -755,14 +755,14 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// (X*2) - X -> X
// (X<<1) - X -> X
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op0, m_Mul(m_Specific(Op1), m_ConstantInt<2>())) ||
match(Op0, m_Shl(m_Specific(Op1), m_One())))
return Op1;
// (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
// For example, (X + Y) - Y -> X; (Y + X) - Y -> X
- Value *Y = 0, *Z = Op1;
+ Value *Y = nullptr, *Z = Op1;
if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
// See if "V === Y - Z" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
@@ -853,7 +853,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
// "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
@@ -890,7 +890,7 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
// fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
// where nnan and ninf have to occur at least once somewhere in this
// expression
- Value *SubOp = 0;
+ Value *SubOp = nullptr;
if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
SubOp = Op1;
else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
@@ -902,7 +902,7 @@ static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
return Constant::getNullValue(Op0->getType());
}
- return 0;
+ return nullptr;
}
/// Given operands for an FSub, see if we can fold the result. If not, this
@@ -939,7 +939,7 @@ static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
- return 0;
+ return nullptr;
}
/// Given the operands for an FMul, see if we can fold the result
@@ -966,7 +966,7 @@ static Value *SimplifyFMulInst(Value *Op0, Value *Op1,
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
return Op1;
- return 0;
+ return nullptr;
}
/// SimplifyMulInst - Given operands for a Mul, see if we can
@@ -997,7 +997,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
return Op0;
// (X / Y) * Y -> X if the division is exact.
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
return X;
@@ -1031,7 +1031,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
@@ -1098,7 +1098,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
return ConstantInt::get(Op0->getType(), 1);
// (X * Y) / Y -> X if the multiplication does not overflow.
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
@@ -1129,7 +1129,7 @@ static Value *SimplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifySDivInst - Given operands for an SDiv, see if we can
@@ -1139,7 +1139,7 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1155,7 +1155,7 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1174,7 +1174,7 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
if (match(Op1, m_Undef()))
return Op1;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1234,7 +1234,7 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifySRemInst - Given operands for an SRem, see if we can
@@ -1244,7 +1244,7 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1260,7 +1260,7 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1279,7 +1279,7 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
if (match(Op1, m_Undef()))
return Op1;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1350,7 +1350,7 @@ static Value *SimplifyShift(unsigned Opcode, Value *Op0, Value *Op1,
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifyShlInst - Given operands for an Shl, see if we can
@@ -1368,7 +1368,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
Value *X;
if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
@@ -1399,7 +1399,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap())
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
@@ -1435,7 +1435,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
@@ -1483,7 +1483,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
return Constant::getNullValue(Op0->getType());
// (A | ?) & A = A
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
@@ -1536,7 +1536,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1582,7 +1582,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
return Constant::getAllOnesValue(Op0->getType());
// (A & ?) | A = A
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
@@ -1630,7 +1630,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1690,7 +1690,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
// "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
@@ -1710,17 +1710,17 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
if (!SI)
- return 0;
+ return nullptr;
CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
if (!Cmp)
- return 0;
+ return nullptr;
Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
return Cmp;
if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
LHS == CmpRHS && RHS == CmpLHS)
return Cmp;
- return 0;
+ return nullptr;
}
// A significant optimization not implemented here is assuming that alloca
@@ -1768,7 +1768,7 @@ static Constant *computePointerICmp(const DataLayout *DL,
// We can only fold certain predicates on pointer comparisons.
switch (Pred) {
default:
- return 0;
+ return nullptr;
// Equality comparisons are easy to fold.
case CmpInst::ICMP_EQ:
@@ -1874,7 +1874,7 @@ static Constant *computePointerICmp(const DataLayout *DL,
}
// Otherwise, fail.
- return 0;
+ return nullptr;
}
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
@@ -2221,7 +2221,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
if (MaxRecurse && (LBO || RBO)) {
// Analyze the case when either LHS or RHS is an add instruction.
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
// LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
if (LBO && LBO->getOpcode() == Instruction::Add) {
@@ -2605,7 +2605,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
@@ -2702,7 +2702,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
@@ -2741,7 +2741,7 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
return TrueVal;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
@@ -2786,7 +2786,7 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// Check to see if this is constant foldable.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (!isa<Constant>(Ops[i]))
- return 0;
+ return nullptr;
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
@@ -2823,7 +2823,7 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
return Agg;
}
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
@@ -2839,7 +2839,7 @@ Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
bool HasUndefInput = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PN->getIncomingValue(i);
@@ -2851,7 +2851,7 @@ static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
continue;
}
if (CommonValue && Incoming != CommonValue)
- return 0; // Not the same, bail out.
+ return nullptr; // Not the same, bail out.
CommonValue = Incoming;
}
@@ -2864,7 +2864,7 @@ static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
if (HasUndefInput)
- return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : 0;
+ return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
return CommonValue;
}
@@ -2873,7 +2873,7 @@ static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
if (Constant *C = dyn_cast<Constant>(Op))
return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
@@ -2945,7 +2945,7 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
}
@@ -2992,7 +2992,7 @@ static Value *SimplifyIntrinsic(Intrinsic::ID IID, IterTy ArgBegin, IterTy ArgEn
const Query &Q, unsigned MaxRecurse) {
// Perform idempotent optimizations
if (!IsIdempotent(IID))
- return 0;
+ return nullptr;
// Unary Ops
if (std::distance(ArgBegin, ArgEnd) == 1)
@@ -3000,7 +3000,7 @@ static Value *SimplifyIntrinsic(Intrinsic::ID IID, IterTy ArgBegin, IterTy ArgEn
if (II->getIntrinsicID() == IID)
return II;
- return 0;
+ return nullptr;
}
template <typename IterTy>
@@ -3017,7 +3017,7 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
Function *F = dyn_cast<Function>(V);
if (!F)
- return 0;
+ return nullptr;
if (unsigned IID = F->getIntrinsicID())
if (Value *Ret =
@@ -3025,14 +3025,14 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
return Ret;
if (!canConstantFoldCallTo(F))
- return 0;
+ return nullptr;
SmallVector<Constant *, 4> ConstantArgs;
ConstantArgs.reserve(ArgEnd - ArgBegin);
for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
Constant *C = dyn_cast<Constant>(*I);
if (!C)
- return 0;
+ return nullptr;
ConstantArgs.push_back(C);
}
@@ -3247,7 +3247,7 @@ bool llvm::recursivelySimplifyInstruction(Instruction *I,
const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return replaceAndRecursivelySimplifyImpl(I, 0, DL, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
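ThreadBinOpOverPHI, ThreadCmpOverPHI, and SimplifyPHINode above all use the same accumulate-or-bail idiom these hunks convert: start the common value at nullptr and return nullptr as soon as two candidates disagree. A reduced sketch of the pattern (commonValue is a generic stand-in, not LLVM's helper):

#include <vector>
template <typename T>
T *commonValue(const std::vector<T *> &Candidates) {
  T *Common = nullptr;                // was: T *Common = 0;
  for (T *V : Candidates) {
    if (!V || (Common && V != Common))
      return nullptr;                 // missing or conflicting value: bail
    Common = V;
  }
  return Common;                      // every candidate agreed (or list empty)
}
int main() {
  int X = 1;
  std::vector<int *> Vs{&X, &X};
  return commonValue(Vs) == &X ? 0 : 1;
}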
diff --git a/lib/Analysis/IntervalPartition.cpp b/lib/Analysis/IntervalPartition.cpp
index 2e259b147b..a0583e86d1 100644
--- a/lib/Analysis/IntervalPartition.cpp
+++ b/lib/Analysis/IntervalPartition.cpp
@@ -29,7 +29,7 @@ void IntervalPartition::releaseMemory() {
delete Intervals[i];
IntervalMap.clear();
Intervals.clear();
- RootInterval = 0;
+ RootInterval = nullptr;
}
void IntervalPartition::print(raw_ostream &O, const Module*) const {
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index 3d6c58396a..0c38dc6fe0 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -82,7 +82,7 @@ class LVILatticeVal {
ConstantRange Range;
public:
- LVILatticeVal() : Tag(undefined), Val(0), Range(1, true) {}
+ LVILatticeVal() : Tag(undefined), Val(nullptr), Range(1, true) {}
static LVILatticeVal get(Constant *C) {
LVILatticeVal Res;
@@ -516,7 +516,7 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
BBLV.markOverdefined();
Instruction *BBI = dyn_cast<Instruction>(Val);
- if (BBI == 0 || BBI->getParent() != BB) {
+ if (!BBI || BBI->getParent() != BB) {
return ODCacheUpdater.markResult(solveBlockValueNonLocal(BBLV, Val, BB));
}
@@ -595,7 +595,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
Value *UnderlyingVal = GetUnderlyingObject(Val);
// If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
- if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) {
+ if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, nullptr, 1)) {
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
BI != BE; ++BI) {
if (InstructionDereferencesPointer(BI, UnderlyingVal)) {
@@ -813,7 +813,7 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
// Recognize the range checking idiom that InstCombine produces.
// (X-C1) u< C2 --> [C1, C1+C2)
- ConstantInt *NegOffset = 0;
+ ConstantInt *NegOffset = nullptr;
if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
match(ICI->getOperand(0), m_Add(m_Specific(Val),
m_ConstantInt(NegOffset)));
@@ -1014,7 +1014,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
getCache(PImpl).clear();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy.
@@ -1030,7 +1030,7 @@ void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
delete &getCache(PImpl);
- PImpl = 0;
+ PImpl = nullptr;
}
}
@@ -1044,7 +1044,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB) {
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
- return 0;
+ return nullptr;
}
/// getConstantOnEdge - Determine whether the specified value is known to be a
@@ -1060,7 +1060,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
- return 0;
+ return nullptr;
}
/// getPredicateOnEdge - Determine whether the specified value comparison
@@ -1072,7 +1072,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
LVILatticeVal Result = getCache(PImpl).getValueOnEdge(V, FromBB, ToBB);
// If we know the value is a constant, evaluate the conditional.
- Constant *Res = 0;
+ Constant *Res = nullptr;
if (Result.isConstant()) {
Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
TLI);
diff --git a/lib/Analysis/LibCallAliasAnalysis.cpp b/lib/Analysis/LibCallAliasAnalysis.cpp
index fefa51660f..016f8c5cc7 100644
--- a/lib/Analysis/LibCallAliasAnalysis.cpp
+++ b/lib/Analysis/LibCallAliasAnalysis.cpp
@@ -54,7 +54,7 @@ LibCallAliasAnalysis::AnalyzeLibCallDetails(const LibCallFunctionInfo *FI,
// if we have detailed info and if 'P' is any of the locations we know
// about.
const LibCallFunctionInfo::LocationMRInfo *Details = FI->LocationDetails;
- if (Details == 0)
+ if (Details == nullptr)
return MRInfo;
// If the details array is of the 'DoesNot' kind, we only know something if
diff --git a/lib/Analysis/LibCallSemantics.cpp b/lib/Analysis/LibCallSemantics.cpp
index 0592ccb26c..7d4e254a11 100644
--- a/lib/Analysis/LibCallSemantics.cpp
+++ b/lib/Analysis/LibCallSemantics.cpp
@@ -46,11 +46,11 @@ LibCallInfo::getFunctionInfo(const Function *F) const {
/// If this is the first time we are querying for this info, lazily construct
/// the StringMap to index it.
- if (Map == 0) {
+ if (!Map) {
Impl = Map = new StringMap<const LibCallFunctionInfo*>();
const LibCallFunctionInfo *Array = getFunctionInfoArray();
- if (Array == 0) return 0;
+ if (!Array) return nullptr;
// We now have the array of entries. Populate the StringMap.
for (unsigned i = 0; Array[i].Name; ++i)
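Several hunks here and earlier (Map == 0, Array == 0, BBI == 0) become boolean tests rather than nullptr comparisons. The two forms are equivalent: a pointer in a boolean context converts to false exactly when it is null, as this small illustration shows:

int main() {
  int *P = nullptr;
  bool ViaCompare = (P == nullptr);  // explicit comparison
  bool ViaBool    = !P;              // idiomatic test the patch prefers
  return (ViaCompare == ViaBool) ? 0 : 1;  // always returns 0
}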
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index b2182b146d..5365fe5a3e 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -137,8 +137,8 @@ namespace {
// that failed. This provides a nice place to put a breakpoint if you want
// to see why something is not correct.
void CheckFailed(const Twine &Message,
- const Value *V1 = 0, const Value *V2 = 0,
- const Value *V3 = 0, const Value *V4 = 0) {
+ const Value *V1 = nullptr, const Value *V2 = nullptr,
+ const Value *V3 = nullptr, const Value *V4 = nullptr) {
MessagesStr << Message.str() << "\n";
WriteValue(V1);
WriteValue(V2);
@@ -177,7 +177,7 @@ bool Lint::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
visit(F);
dbgs() << MessagesStr.str();
@@ -199,7 +199,7 @@ void Lint::visitCallSite(CallSite CS) {
Value *Callee = CS.getCalledValue();
visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Callee);
+ 0, nullptr, MemRef::Callee);
if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
Assert1(CS.getCallingConv() == F->getCallingConv(),
@@ -275,10 +275,10 @@ void Lint::visitCallSite(CallSite CS) {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), 0,
+ MCI->getAlignment(), nullptr,
MemRef::Write);
visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
- MCI->getAlignment(), 0,
+ MCI->getAlignment(), nullptr,
MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
@@ -299,10 +299,10 @@ void Lint::visitCallSite(CallSite CS) {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), 0,
+ MMI->getAlignment(), nullptr,
MemRef::Write);
visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
- MMI->getAlignment(), 0,
+ MMI->getAlignment(), nullptr,
MemRef::Read);
break;
}
@@ -310,7 +310,7 @@ void Lint::visitCallSite(CallSite CS) {
MemSetInst *MSI = cast<MemSetInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
- MSI->getAlignment(), 0,
+ MSI->getAlignment(), nullptr,
MemRef::Write);
break;
}
@@ -321,17 +321,17 @@ void Lint::visitCallSite(CallSite CS) {
&I);
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read | MemRef::Write);
+ 0, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::vacopy:
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Write);
+ 0, nullptr, MemRef::Write);
visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read);
+ 0, nullptr, MemRef::Read);
break;
case Intrinsic::vaend:
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read | MemRef::Write);
+ 0, nullptr, MemRef::Read | MemRef::Write);
break;
case Intrinsic::stackrestore:
@@ -339,7 +339,7 @@ void Lint::visitCallSite(CallSite CS) {
// stack pointer, which the compiler may read from or write to
// at any time, so check it for both readability and writeability.
visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
- 0, 0, MemRef::Read | MemRef::Write);
+ 0, nullptr, MemRef::Read | MemRef::Write);
break;
}
}
@@ -572,13 +572,13 @@ void Lint::visitAllocaInst(AllocaInst &I) {
}
void Lint::visitVAArgInst(VAArgInst &I) {
- visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
- MemRef::Read | MemRef::Write);
+ visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+ nullptr, MemRef::Read | MemRef::Write);
}
void Lint::visitIndirectBrInst(IndirectBrInst &I) {
- visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
- MemRef::Branchee);
+ visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+ nullptr, MemRef::Branchee);
Assert1(I.getNumDestinations() != 0,
"Undefined behavior: indirectbr with no destinations", &I);
diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index 0902a39a9f..005d309894 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -62,7 +62,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
if (ByteOffset < 0) // out of bounds
return false;
- Type *BaseType = 0;
+ Type *BaseType = nullptr;
unsigned BaseAlign = 0;
if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
// An alloca is safe to load from as long as it is suitably aligned.
@@ -161,7 +161,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
ScanFrom++;
// Don't scan huge blocks.
- if (MaxInstsToScan-- == 0) return 0;
+ if (MaxInstsToScan-- == 0) return nullptr;
--ScanFrom;
// If this is a load of Ptr, the loaded value is available.
@@ -198,7 +198,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// Otherwise the store may or may not alias the pointer; bail out.
++ScanFrom;
- return 0;
+ return nullptr;
}
// If this is some other instruction that may clobber Ptr, bail out.
@@ -211,11 +211,11 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// May modify the pointer, bail out.
++ScanFrom;
- return 0;
+ return nullptr;
}
}
// Got to the start of the block; we didn't find it, but are done for this
// block.
- return 0;
+ return nullptr;
}
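FindAvailableLoadedValue combines two of the null-return conventions this patch touches: a scan budget that gives up with nullptr and a fall-through nullptr at the start of the block. A self-contained sketch of the same shape over a plain array (scanBackForValue is illustrative only):

const int *scanBackForValue(const int *Begin, const int *End, int Key,
                            unsigned MaxToScan) {
  for (const int *I = End; I != Begin;) {
    if (MaxToScan-- == 0)
      return nullptr;          // budget exhausted, as with MaxInstsToScan
    --I;
    if (*I == Key)
      return I;                // found an available value
  }
  return nullptr;              // reached the start without finding it
}
int main() {
  int Data[] = {1, 2, 3};
  return scanBackForValue(Data, Data + 3, 2, 8) == Data + 1 ? 0 : 1;
}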
diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp
index b38672ec39..46c0eaabe1 100644
--- a/lib/Analysis/LoopInfo.cpp
+++ b/lib/Analysis/LoopInfo.cpp
@@ -141,21 +141,21 @@ bool Loop::makeLoopInvariant(Instruction *I, bool &Changed,
PHINode *Loop::getCanonicalInductionVariable() const {
BasicBlock *H = getHeader();
- BasicBlock *Incoming = 0, *Backedge = 0;
+ BasicBlock *Incoming = nullptr, *Backedge = nullptr;
pred_iterator PI = pred_begin(H);
assert(PI != pred_end(H) &&
"Loop must have at least one backedge!");
Backedge = *PI++;
- if (PI == pred_end(H)) return 0; // dead loop
+ if (PI == pred_end(H)) return nullptr; // dead loop
Incoming = *PI++;
- if (PI != pred_end(H)) return 0; // multiple backedges?
+ if (PI != pred_end(H)) return nullptr; // multiple backedges?
if (contains(Incoming)) {
if (contains(Backedge))
- return 0;
+ return nullptr;
std::swap(Incoming, Backedge);
} else if (!contains(Backedge))
- return 0;
+ return nullptr;
// Loop over all of the PHI nodes, looking for a canonical indvar.
for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) {
@@ -171,7 +171,7 @@ PHINode *Loop::getCanonicalInductionVariable() const {
if (CI->equalsInt(1))
return PN;
}
- return 0;
+ return nullptr;
}
/// isLCSSAForm - Return true if the Loop is in LCSSA form
@@ -232,7 +232,7 @@ bool Loop::isSafeToClone() const {
}
MDNode *Loop::getLoopID() const {
- MDNode *LoopID = 0;
+ MDNode *LoopID = nullptr;
if (isLoopSimplifyForm()) {
LoopID = getLoopLatch()->getTerminator()->getMetadata(LoopMDName);
} else {
@@ -241,7 +241,7 @@ MDNode *Loop::getLoopID() const {
BasicBlock *H = getHeader();
for (block_iterator I = block_begin(), IE = block_end(); I != IE; ++I) {
TerminatorInst *TI = (*I)->getTerminator();
- MDNode *MD = 0;
+ MDNode *MD = nullptr;
// Check if this terminator branches to the loop header.
for (unsigned i = 0, ie = TI->getNumSuccessors(); i != ie; ++i) {
@@ -251,17 +251,17 @@ MDNode *Loop::getLoopID() const {
}
}
if (!MD)
- return 0;
+ return nullptr;
if (!LoopID)
LoopID = MD;
else if (MD != LoopID)
- return 0;
+ return nullptr;
}
}
if (!LoopID || LoopID->getNumOperands() == 0 ||
LoopID->getOperand(0) != LoopID)
- return 0;
+ return nullptr;
return LoopID;
}
@@ -402,7 +402,7 @@ BasicBlock *Loop::getUniqueExitBlock() const {
getUniqueExitBlocks(UniqueExitBlocks);
if (UniqueExitBlocks.size() == 1)
return UniqueExitBlocks[0];
- return 0;
+ return nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -548,7 +548,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
// is considered uninitialized.
Loop *NearLoop = BBLoop;
- Loop *Subloop = 0;
+ Loop *Subloop = nullptr;
if (NearLoop != Unloop && Unloop->contains(NearLoop)) {
Subloop = NearLoop;
// Find the subloop ancestor that is directly contained within Unloop.
@@ -564,7 +564,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
succ_iterator I = succ_begin(BB), E = succ_end(BB);
if (I == E) {
assert(!Subloop && "subloop blocks must have a successor");
- NearLoop = 0; // unloop blocks may now exit the function.
+ NearLoop = nullptr; // unloop blocks may now exit the function.
}
for (; I != E; ++I) {
if (*I == BB)
@@ -637,7 +637,7 @@ void LoopInfo::updateUnloop(Loop *Unloop) {
// Blocks no longer have a parent but are still referenced by Unloop until
// the Unloop object is deleted.
- LI.changeLoopFor(*I, 0);
+ LI.changeLoopFor(*I, nullptr);
}
// Remove the loop from the top-level LoopInfo object.
diff --git a/lib/Analysis/LoopPass.cpp b/lib/Analysis/LoopPass.cpp
index 38e753f129..addff3e64c 100644
--- a/lib/Analysis/LoopPass.cpp
+++ b/lib/Analysis/LoopPass.cpp
@@ -61,8 +61,8 @@ LPPassManager::LPPassManager()
: FunctionPass(ID), PMDataManager() {
skipThisLoop = false;
redoThisLoop = false;
- LI = NULL;
- CurrentLoop = NULL;
+ LI = nullptr;
+ CurrentLoop = nullptr;
}
/// Delete loop from the loop queue and loop hierarchy (LoopInfo).
diff --git a/lib/Analysis/MemDepPrinter.cpp b/lib/Analysis/MemDepPrinter.cpp
index bc1dc69137..10da3d5d61 100644
--- a/lib/Analysis/MemDepPrinter.cpp
+++ b/lib/Analysis/MemDepPrinter.cpp
@@ -46,7 +46,7 @@ namespace {
bool runOnFunction(Function &F) override;
- void print(raw_ostream &OS, const Module * = 0) const override;
+ void print(raw_ostream &OS, const Module * = nullptr) const override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequiredTransitive<AliasAnalysis>();
@@ -56,7 +56,7 @@ namespace {
void releaseMemory() override {
Deps.clear();
- F = 0;
+ F = nullptr;
}
private:
@@ -106,7 +106,7 @@ bool MemDepPrinter::runOnFunction(Function &F) {
MemDepResult Res = MDA.getDependency(Inst);
if (!Res.isNonLocal()) {
Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
- static_cast<BasicBlock *>(0)));
+ static_cast<BasicBlock *>(nullptr)));
} else if (CallSite CS = cast<Value>(Inst)) {
const MemoryDependenceAnalysis::NonLocalDepInfo &NLDI =
MDA.getNonLocalCallDependency(CS);
@@ -122,8 +122,8 @@ bool MemDepPrinter::runOnFunction(Function &F) {
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
if (!LI->isUnordered()) {
// FIXME: Handle atomic/volatile loads.
- Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
- static_cast<BasicBlock *>(0)));
+ Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
+ static_cast<BasicBlock *>(nullptr)));
continue;
}
AliasAnalysis::Location Loc = AA.getLocation(LI);
@@ -131,8 +131,8 @@ bool MemDepPrinter::runOnFunction(Function &F) {
} else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (!SI->isUnordered()) {
// FIXME: Handle atomic/volatile stores.
- Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
- static_cast<BasicBlock *>(0)));
+ Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
+ static_cast<BasicBlock *>(nullptr)));
continue;
}
AliasAnalysis::Location Loc = AA.getLocation(SI);
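The static_cast<BasicBlock *>(nullptr) in the MemDepPrinter hunks is there for std::make_pair: a bare nullptr deduces the pair's second type as std::nullptr_t (and a bare 0 as int), while the cast pins the deduction to the pointer type the container stores. A sketch with a stand-in type:

#include <cstddef>
#include <type_traits>
#include <utility>
struct Block {};  // stand-in for BasicBlock, illustration only
int main() {
  auto P1 = std::make_pair(1, nullptr);
  auto P2 = std::make_pair(1, static_cast<Block *>(nullptr));
  static_assert(std::is_same<decltype(P1.second), std::nullptr_t>::value,
                "bare nullptr deduces as std::nullptr_t");
  static_assert(std::is_same<decltype(P2.second), Block *>::value,
                "the cast pins the deduced type to Block *");
  (void)P1; (void)P2;
  return 0;
}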
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 1dba32356a..6997dfc948 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -76,14 +76,14 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
CallSite CS(const_cast<Value*>(V));
if (!CS.getInstruction())
- return 0;
+ return nullptr;
if (CS.isNoBuiltin())
- return 0;
+ return nullptr;
Function *Callee = CS.getCalledFunction();
if (!Callee || !Callee->isDeclaration())
- return 0;
+ return nullptr;
return Callee;
}
@@ -94,17 +94,17 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
bool LookThroughBitCast = false) {
// Skip intrinsics
if (isa<IntrinsicInst>(V))
- return 0;
+ return nullptr;
Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee)
- return 0;
+ return nullptr;
// Make sure that the function is available.
StringRef FnName = Callee->getName();
LibFunc::Func TLIFn;
if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
- return 0;
+ return nullptr;
unsigned i = 0;
bool found = false;
@@ -115,11 +115,11 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
}
}
if (!found)
- return 0;
+ return nullptr;
const AllocFnsTy *FnData = &AllocationFnData[i];
if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
- return 0;
+ return nullptr;
// Check function prototype.
int FstParam = FnData->FstParam;
@@ -135,7 +135,7 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
FTy->getParamType(SndParam)->isIntegerTy(32) ||
FTy->getParamType(SndParam)->isIntegerTy(64)))
return FnData;
- return 0;
+ return nullptr;
}
static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
@@ -202,19 +202,19 @@ bool llvm::isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
/// ignore InvokeInst here.
const CallInst *llvm::extractMallocCall(const Value *I,
const TargetLibraryInfo *TLI) {
- return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
+ return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : nullptr;
}
static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
- return 0;
+ return nullptr;
// The size of the malloc's result type must be known to determine array size.
Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !DL)
- return 0;
+ return nullptr;
unsigned ElementSize = DL->getTypeAllocSize(T);
if (StructType *ST = dyn_cast<StructType>(T))
@@ -223,12 +223,12 @@ static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
// If the malloc call's arg can be determined to be a multiple of ElementSize,
// return the multiple. Otherwise, return nullptr.
Value *MallocArg = CI->getArgOperand(0);
- Value *Multiple = 0;
+ Value *Multiple = nullptr;
if (ComputeMultiple(MallocArg, ElementSize, Multiple,
LookThroughSExt))
return Multiple;
- return 0;
+ return nullptr;
}
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
@@ -245,7 +245,7 @@ const CallInst *llvm::isArrayMalloc(const Value *I,
return CI;
// CI is a non-array malloc or we can't figure out that it is an array malloc.
- return 0;
+ return nullptr;
}
/// getMallocType - Returns the PointerType resulting from the malloc call.
@@ -257,7 +257,7 @@ PointerType *llvm::getMallocType(const CallInst *CI,
const TargetLibraryInfo *TLI) {
assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
- PointerType *MallocType = 0;
+ PointerType *MallocType = nullptr;
unsigned NumOfBitCastUses = 0;
// Determine if CallInst has a bitcast use.
@@ -277,7 +277,7 @@ PointerType *llvm::getMallocType(const CallInst *CI,
return cast<PointerType>(CI->getType());
// Type could not be determined.
- return 0;
+ return nullptr;
}
/// getMallocAllocatedType - Returns the Type allocated by malloc call.
@@ -288,7 +288,7 @@ PointerType *llvm::getMallocType(const CallInst *CI,
Type *llvm::getMallocAllocatedType(const CallInst *CI,
const TargetLibraryInfo *TLI) {
PointerType *PT = getMallocType(CI, TLI);
- return PT ? PT->getElementType() : 0;
+ return PT ? PT->getElementType() : nullptr;
}
/// getMallocArraySize - Returns the array size of a malloc call. If the
@@ -308,7 +308,7 @@ Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *DL,
/// is a calloc call.
const CallInst *llvm::extractCallocCall(const Value *I,
const TargetLibraryInfo *TLI) {
- return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
+ return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : nullptr;
}
@@ -316,15 +316,15 @@ const CallInst *llvm::extractCallocCall(const Value *I,
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI || isa<IntrinsicInst>(CI))
- return 0;
+ return nullptr;
Function *Callee = CI->getCalledFunction();
- if (Callee == 0 || !Callee->isDeclaration())
- return 0;
+ if (Callee == nullptr || !Callee->isDeclaration())
+ return nullptr;
StringRef FnName = Callee->getName();
LibFunc::Func TLIFn;
if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
- return 0;
+ return nullptr;
unsigned ExpectedNumParams;
if (TLIFn == LibFunc::free ||
@@ -335,18 +335,18 @@ const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
TLIFn == LibFunc::ZdaPvRKSt9nothrow_t) // delete[](void*, nothrow)
ExpectedNumParams = 2;
else
- return 0;
+ return nullptr;
// Check free prototype.
// FIXME: workaround for PR5130; this will be obsolete when a nobuiltin
// attribute exists.
FunctionType *FTy = Callee->getFunctionType();
if (!FTy->getReturnType()->isVoidTy())
- return 0;
+ return nullptr;
if (FTy->getNumParams() != ExpectedNumParams)
- return 0;
+ return nullptr;
if (FTy->getParamType(0) != Type::getInt8PtrTy(Callee->getContext()))
- return 0;
+ return nullptr;
return CI;
}
diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp
index 015ded18d9..0b33d48725 100644
--- a/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -88,10 +88,10 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- DT = DTWP ? &DTWP->getDomTree() : 0;
+ DT = DTWP ? &DTWP->getDomTree() : nullptr;
if (!PredCache)
PredCache.reset(new PredIteratorCache());
return false;
@@ -261,10 +261,10 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const LoadInst *LI,
const DataLayout *DL) {
// If we have no target data, we can't do this.
- if (DL == 0) return false;
+ if (!DL) return false;
// If we haven't already computed the base/offset of MemLoc, do so now.
- if (MemLocBase == 0)
+ if (!MemLocBase)
MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
unsigned Size = MemoryDependenceAnalysis::
@@ -362,13 +362,13 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
BasicBlock::iterator ScanIt, BasicBlock *BB,
Instruction *QueryInst) {
- const Value *MemLocBase = 0;
+ const Value *MemLocBase = nullptr;
int64_t MemLocOffset = 0;
unsigned Limit = BlockScanLimit;
bool isInvariantLoad = false;
if (isLoad && QueryInst) {
LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
- if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != 0)
+ if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
isInvariantLoad = true;
}
@@ -696,7 +696,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
--Entry;
- NonLocalDepEntry *ExistingResult = 0;
+ NonLocalDepEntry *ExistingResult = nullptr;
if (Entry != Cache.begin()+NumSortedEntries &&
Entry->getBB() == DirtyBB) {
// If we already have an entry, and if it isn't already dirty, the block
@@ -807,7 +807,7 @@ GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
--Entry;
- NonLocalDepEntry *ExistingResult = 0;
+ NonLocalDepEntry *ExistingResult = nullptr;
if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
ExistingResult = &*Entry;
@@ -960,7 +960,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
if (CacheInfo->TBAATag != Loc.TBAATag) {
if (CacheInfo->TBAATag) {
CacheInfo->Pair = BBSkipFirstBlockPair();
- CacheInfo->TBAATag = 0;
+ CacheInfo->TBAATag = nullptr;
for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
if (Instruction *Inst = DI->getResult().getInst())
@@ -1116,7 +1116,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
NumSortedEntries = Cache->size();
}
- Cache = 0;
+ Cache = nullptr;
PredList.clear();
for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
@@ -1126,7 +1126,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// Get the PHI translated pointer in this predecessor. This can fail if
// not translatable, in which case the getAddr() returns null.
PHITransAddr &PredPointer = PredList.back().second;
- PredPointer.PHITranslateValue(BB, Pred, 0);
+ PredPointer.PHITranslateValue(BB, Pred, nullptr);
Value *PredPtrVal = PredPointer.getAddr();
@@ -1175,7 +1175,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// predecessor, then we have to assume that the pointer is clobbered in
// that predecessor. We can still do PRE of the load, which would insert
// a computation of the pointer in this predecessor.
- if (PredPtrVal == 0)
+ if (!PredPtrVal)
CanTranslate = false;
// FIXME: it is entirely possible that PHI translating will end up with
@@ -1224,7 +1224,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// for the given block. It assumes that we haven't modified any of
// our data structures while processing the current block.
- if (Cache == 0) {
+ if (!Cache) {
// Refresh the CacheInfo/Cache pointer if it got invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
Cache = &CacheInfo->NonLocalDeps;
@@ -1279,7 +1279,7 @@ RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
Instruction *Target = PInfo[i].getResult().getInst();
- if (Target == 0) continue; // Ignore non-local dep results.
+ if (!Target) continue; // Ignore non-local dep results.
assert(Target->getParent() == PInfo[i].getBB());
// Eliminating the dirty entry from 'Cache', so update the reverse info.
diff --git a/lib/Analysis/NoAliasAnalysis.cpp b/lib/Analysis/NoAliasAnalysis.cpp
index 0c119d6459..4e11e50e28 100644
--- a/lib/Analysis/NoAliasAnalysis.cpp
+++ b/lib/Analysis/NoAliasAnalysis.cpp
@@ -36,7 +36,7 @@ namespace {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
}
AliasResult alias(const Location &LocA, const Location &LocB) override {
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index ad3685a445..bfe8642511 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -43,7 +43,7 @@ static bool CanPHITrans(Instruction *Inst) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void PHITransAddr::dump() const {
- if (Addr == 0) {
+ if (!Addr) {
dbgs() << "PHITransAddr: null\n";
return;
}
@@ -58,7 +58,7 @@ static bool VerifySubExpr(Value *Expr,
SmallVectorImpl<Instruction*> &InstInputs) {
// If this is a non-instruction value, there is nothing to do.
Instruction *I = dyn_cast<Instruction>(Expr);
- if (I == 0) return true;
+ if (!I) return true;
// If it's an instruction, it is either in Tmp or its operands recursively
// are.
@@ -90,7 +90,7 @@ static bool VerifySubExpr(Value *Expr,
/// structure is valid, it returns true. If invalid, it prints errors and
/// returns false.
bool PHITransAddr::Verify() const {
- if (Addr == 0) return true;
+ if (!Addr) return true;
SmallVector<Instruction*, 8> Tmp(InstInputs.begin(), InstInputs.end());
@@ -116,14 +116,14 @@ bool PHITransAddr::IsPotentiallyPHITranslatable() const {
// If the input value is not an instruction, or if it is not defined in CurBB,
// then we don't need to phi translate it.
Instruction *Inst = dyn_cast<Instruction>(Addr);
- return Inst == 0 || CanPHITrans(Inst);
+ return !Inst || CanPHITrans(Inst);
}
static void RemoveInstInputs(Value *V,
SmallVectorImpl<Instruction*> &InstInputs) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0) return;
+ if (!I) return;
// If the instruction is in the InstInputs list, remove it.
SmallVectorImpl<Instruction*>::iterator Entry =
@@ -147,7 +147,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
const DominatorTree *DT) {
// If this is a non-instruction value, it can't require PHI translation.
Instruction *Inst = dyn_cast<Instruction>(V);
- if (Inst == 0) return V;
+ if (!Inst) return V;
// Determine whether 'Inst' is an input to our PHI translatable expression.
bool isInput = std::count(InstInputs.begin(), InstInputs.end(), Inst);
@@ -173,7 +173,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// If this is a non-phi value, and it is analyzable, we can incorporate it
// into the expression by making all instruction operands be inputs.
if (!CanPHITrans(Inst))
- return 0;
+ return nullptr;
// All instruction operands are now inputs (and of course, they may also be
// defined in this block, so they may need to be phi translated themselves).
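// (For example, translating "%a = add i32 %p, 4" across an edge simply
// substitutes %p with its incoming value in that predecessor.)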
@@ -187,9 +187,9 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// operands need to be phi translated, and if so, reconstruct it.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!isSafeToSpeculativelyExecute(Cast)) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
- if (PHIIn == 0) return 0;
+ if (!PHIIn) return nullptr;
if (PHIIn == Cast->getOperand(0))
return Cast;
@@ -209,7 +209,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
(!DT || DT->dominates(CastI->getParent(), PredBB)))
return CastI;
}
- return 0;
+ return nullptr;
}
// Handle getelementptr with at least one PHI translatable operand.
@@ -218,7 +218,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
bool AnyChanged = false;
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *GEPOp = PHITranslateSubExpr(GEP->getOperand(i), CurBB, PredBB, DT);
- if (GEPOp == 0) return 0;
+ if (!GEPOp) return nullptr;
AnyChanged |= GEPOp != GEP->getOperand(i);
GEPOps.push_back(GEPOp);
@@ -253,7 +253,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEPI;
}
}
- return 0;
+ return nullptr;
}
// Handle add with a constant RHS.
@@ -265,7 +265,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();
Value *LHS = PHITranslateSubExpr(Inst->getOperand(0), CurBB, PredBB, DT);
- if (LHS == 0) return 0;
+ if (!LHS) return nullptr;
// If the PHI translated LHS is an add of a constant, fold the immediates.
if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
@@ -304,11 +304,11 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return BO;
}
- return 0;
+ return nullptr;
}
// Otherwise, we failed.
- return 0;
+ return nullptr;
}
@@ -326,10 +326,10 @@ bool PHITransAddr::PHITranslateValue(BasicBlock *CurBB, BasicBlock *PredBB,
// Make sure the value is live in the predecessor.
if (Instruction *Inst = dyn_cast_or_null<Instruction>(Addr))
if (!DT->dominates(Inst->getParent(), PredBB))
- Addr = 0;
+ Addr = nullptr;
}
- return Addr == 0;
+ return Addr == nullptr;
}
/// PHITranslateWithInsertion - PHI translate this value into the specified
@@ -354,7 +354,7 @@ PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
// If not, destroy any intermediate instructions inserted.
while (NewInsts.size() != NISize)
NewInsts.pop_back_val()->eraseFromParent();
- return 0;
+ return nullptr;
}
@@ -379,10 +379,10 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
// Handle cast of PHI translatable value.
if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
- if (!isSafeToSpeculativelyExecute(Cast)) return 0;
+ if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
CurBB, PredBB, DT, NewInsts);
- if (OpVal == 0) return 0;
+ if (!OpVal) return nullptr;
// Otherwise insert a cast at the end of PredBB.
CastInst *New = CastInst::Create(Cast->getOpcode(),
@@ -400,7 +400,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *OpVal = InsertPHITranslatedSubExpr(GEP->getOperand(i),
CurBB, PredBB, DT, NewInsts);
- if (OpVal == 0) return 0;
+ if (!OpVal) return nullptr;
GEPOps.push_back(OpVal);
}
@@ -436,5 +436,5 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
}
#endif
- return 0;
+ return nullptr;
}
diff --git a/lib/Analysis/RegionInfo.cpp b/lib/Analysis/RegionInfo.cpp
index f4da598d84..c9a07ab60d 100644
--- a/lib/Analysis/RegionInfo.cpp
+++ b/lib/Analysis/RegionInfo.cpp
@@ -128,8 +128,8 @@ bool Region::contains(const Loop *L) const {
// BBs that are not part of any loop are element of the Loop
// described by the NULL pointer. This loop is not part of any region,
// except if the region describes the whole function.
- if (L == 0)
- return getExit() == 0;
+ if (!L)
+ return getExit() == nullptr;
if (!contains(L->getHeader()))
return false;
@@ -147,7 +147,7 @@ bool Region::contains(const Loop *L) const {
Loop *Region::outermostLoopInRegion(Loop *L) const {
if (!contains(L))
- return 0;
+ return nullptr;
while (L && contains(L->getParentLoop())) {
L = L->getParentLoop();
@@ -165,14 +165,14 @@ Loop *Region::outermostLoopInRegion(LoopInfo *LI, BasicBlock* BB) const {
BasicBlock *Region::getEnteringBlock() const {
BasicBlock *entry = getEntry();
BasicBlock *Pred;
- BasicBlock *enteringBlock = 0;
+ BasicBlock *enteringBlock = nullptr;
for (pred_iterator PI = pred_begin(entry), PE = pred_end(entry); PI != PE;
++PI) {
Pred = *PI;
if (DT->getNode(Pred) && !contains(Pred)) {
if (enteringBlock)
- return 0;
+ return nullptr;
enteringBlock = Pred;
}
@@ -184,17 +184,17 @@ BasicBlock *Region::getEnteringBlock() const {
BasicBlock *Region::getExitingBlock() const {
BasicBlock *exit = getExit();
BasicBlock *Pred;
- BasicBlock *exitingBlock = 0;
+ BasicBlock *exitingBlock = nullptr;
if (!exit)
- return 0;
+ return nullptr;
for (pred_iterator PI = pred_begin(exit), PE = pred_end(exit); PI != PE;
++PI) {
Pred = *PI;
if (contains(Pred)) {
if (exitingBlock)
- return 0;
+ return nullptr;
exitingBlock = Pred;
}
@@ -295,7 +295,7 @@ Region* Region::getSubRegionNode(BasicBlock *BB) const {
Region *R = RI->getRegionFor(BB);
if (!R || R == this)
- return 0;
+ return nullptr;
// If we pass the BB out of this region, that means our code is broken.
assert(contains(R) && "BB not in current region!");
@@ -304,7 +304,7 @@ Region* Region::getSubRegionNode(BasicBlock *BB) const {
R = R->getParent();
if (R->getEntry() != BB)
- return 0;
+ return nullptr;
return R;
}
@@ -339,7 +339,7 @@ void Region::transferChildrenTo(Region *To) {
}
void Region::addSubRegion(Region *SubRegion, bool moveChildren) {
- assert(SubRegion->parent == 0 && "SubRegion already has a parent!");
+ assert(!SubRegion->parent && "SubRegion already has a parent!");
assert(std::find(begin(), end(), SubRegion) == children.end()
&& "Subregion already exists!");
@@ -375,7 +375,7 @@ void Region::addSubRegion(Region *SubRegion, bool moveChildren) {
Region *Region::removeSubRegion(Region *Child) {
assert(Child->parent == this && "Child is not a child of this region!");
- Child->parent = 0;
+ Child->parent = nullptr;
RegionSet::iterator I = std::find(children.begin(), children.end(), Child);
assert(I != children.end() && "Region does not exist. Unable to remove.");
children.erase(children.begin()+(I-begin()));
@@ -385,7 +385,7 @@ Region *Region::removeSubRegion(Region *Child) {
unsigned Region::getDepth() const {
unsigned Depth = 0;
- for (Region *R = parent; R != 0; R = R->parent)
+ for (Region *R = parent; R != nullptr; R = R->parent)
++Depth;
return Depth;
@@ -395,12 +395,12 @@ Region *Region::getExpandedRegion() const {
unsigned NumSuccessors = exit->getTerminator()->getNumSuccessors();
if (NumSuccessors == 0)
- return NULL;
+ return nullptr;
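// Every predecessor of the exit must be dominated by the entry; otherwise
// the expanded region would acquire a second entry edge.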
for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
PI != PE; ++PI)
if (!DT->dominates(getEntry(), *PI))
- return NULL;
+ return nullptr;
Region *R = RI->getRegionFor(exit);
@@ -408,7 +408,7 @@ Region *Region::getExpandedRegion() const {
if (exit->getTerminator()->getNumSuccessors() == 1)
return new Region(getEntry(), *succ_begin(exit), RI, DT);
else
- return NULL;
+ return nullptr;
}
while (R->getParent() && R->getParent()->getEntry() == exit)
@@ -418,7 +418,7 @@ Region *Region::getExpandedRegion() const {
for (pred_iterator PI = pred_begin(getExit()), PE = pred_end(getExit());
PI != PE; ++PI)
if (!DT->dominates(R->getExit(), *PI))
- return NULL;
+ return nullptr;
return new Region(getEntry(), R->getExit(), RI, DT);
}
@@ -577,7 +577,7 @@ Region *RegionInfo::createRegion(BasicBlock *entry, BasicBlock *exit) {
assert(entry && exit && "entry and exit must not be null!");
if (isTrivialRegion(entry, exit))
- return 0;
+ return nullptr;
Region *region = new Region(entry, exit, this, DT);
BBtoRegion.insert(std::make_pair(entry, region));
@@ -600,7 +600,7 @@ void RegionInfo::findRegionsWithEntry(BasicBlock *entry, BBtoBBMap *ShortCut) {
if (!N)
return;
- Region *lastRegion= 0;
+ Region *lastRegion = nullptr;
BasicBlock *lastExit = entry;
// As only a BasicBlock that postdominates entry can finish a region, walk the
@@ -680,12 +680,12 @@ void RegionInfo::releaseMemory() {
BBtoRegion.clear();
if (TopLevelRegion)
delete TopLevelRegion;
- TopLevelRegion = 0;
+ TopLevelRegion = nullptr;
}
RegionInfo::RegionInfo() : FunctionPass(ID) {
initializeRegionInfoPass(*PassRegistry::getPassRegistry());
- TopLevelRegion = 0;
+ TopLevelRegion = nullptr;
}
RegionInfo::~RegionInfo() {
@@ -710,7 +710,7 @@ bool RegionInfo::runOnFunction(Function &F) {
PDT = &getAnalysis<PostDominatorTree>();
DF = &getAnalysis<DominanceFrontier>();
- TopLevelRegion = new Region(&F.getEntryBlock(), 0, this, DT, 0);
+ TopLevelRegion = new Region(&F.getEntryBlock(), nullptr, this, DT, nullptr);
updateStatistics(TopLevelRegion);
Calculate(F);
@@ -744,7 +744,7 @@ void RegionInfo::verifyAnalysis() const {
Region *RegionInfo::getRegionFor(BasicBlock *BB) const {
BBtoRegionMap::const_iterator I = BBtoRegion.find(BB);
- return I != BBtoRegion.end() ? I->second : 0;
+ return I != BBtoRegion.end() ? I->second : nullptr;
}
void RegionInfo::setRegionFor(BasicBlock *BB, Region *R) {
@@ -756,7 +756,7 @@ Region *RegionInfo::operator[](BasicBlock *BB) const {
}
BasicBlock *RegionInfo::getMaxRegionExit(BasicBlock *BB) const {
- BasicBlock *Exit = NULL;
+ BasicBlock *Exit = nullptr;
while (true) {
// Get largest region that starts at BB.
diff --git a/lib/Analysis/RegionPass.cpp b/lib/Analysis/RegionPass.cpp
index 12d7ca3ee8..b29070a4fb 100644
--- a/lib/Analysis/RegionPass.cpp
+++ b/lib/Analysis/RegionPass.cpp
@@ -31,8 +31,8 @@ RGPassManager::RGPassManager()
: FunctionPass(ID), PMDataManager() {
skipThisRegion = false;
redoThisRegion = false;
- RI = NULL;
- CurrentRegion = NULL;
+ RI = nullptr;
+ CurrentRegion = nullptr;
}
// Recurse through all subregions and all regions into RQ.
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index fdb15629f4..089ca42ef6 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -182,7 +182,7 @@ void SCEV::print(raw_ostream &OS) const {
case scUMaxExpr:
case scSMaxExpr: {
const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
- const char *OpStr = 0;
+ const char *OpStr = nullptr;
switch (NAry->getSCEVType()) {
case scAddExpr: OpStr = " + "; break;
case scMulExpr: OpStr = " * "; break;
@@ -312,7 +312,7 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
FoldingSetNodeID ID;
ID.AddInteger(scConstant);
ID.AddPointer(V);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
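// The FoldingSet either returned an existing SCEVConstant above or filled
// in IP with the position where the newly allocated node must be inserted.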
SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
@@ -365,7 +365,7 @@ void SCEVUnknown::deleted() {
SE->UniqueSCEVs.RemoveNode(this);
// Release the value.
- setValPtr(0);
+ setValPtr(nullptr);
}
void SCEVUnknown::allUsesReplacedWith(Value *New) {
@@ -829,7 +829,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
ID.AddInteger(scTruncate);
ID.AddPointer(Op);
ID.AddPointer(Ty);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// Fold if the operand is constant.
@@ -919,7 +919,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
ID.AddInteger(scZeroExtend);
ID.AddPointer(Op);
ID.AddPointer(Ty);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// zext(trunc(x)) --> zext(x) or x or trunc(x)
@@ -1072,7 +1072,7 @@ static const SCEV *getOverflowLimitForStep(const SCEV *Step,
return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
SE->getSignedRange(Step).getSignedMin());
}
- return 0;
+ return nullptr;
}
// The recurrence AR has been shown to have no signed wrap. Typically, if we can
@@ -1091,7 +1091,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
// Check for a simple looking step prior to loop entry.
const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
if (!SA)
- return 0;
+ return nullptr;
// Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
// subtraction is expensive. For this purpose, perform a quick and dirty
@@ -1103,7 +1103,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
DiffOps.push_back(*I);
}
if (DiffOps.size() == SA->getNumOperands())
- return 0;
+ return nullptr;
// This is a postinc AR. Check for overflow on the preinc recurrence using the
// same three conditions that getSignExtendedExpr checks.
@@ -1139,7 +1139,7 @@ static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
return PreStart;
}
- return 0;
+ return nullptr;
}
// Get the normalized sign-extended expression for this AddRec's Start.
@@ -1181,7 +1181,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
ID.AddInteger(scSignExtend);
ID.AddPointer(Op);
ID.AddPointer(Ty);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
// If the input value is provably positive, build a zext instead.
@@ -1811,7 +1811,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
ID.AddInteger(scAddExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
SCEVAddExpr *S =
static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
@@ -2105,7 +2105,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
ID.AddInteger(scMulExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
SCEVMulExpr *S =
static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
@@ -2230,7 +2230,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
ID.AddInteger(scUDivExpr);
ID.AddPointer(LHS);
ID.AddPointer(RHS);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
LHS, RHS);
@@ -2425,7 +2425,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
ID.AddPointer(Operands[i]);
ID.AddPointer(L);
- void *IP = 0;
+ void *IP = nullptr;
SCEVAddRecExpr *S =
static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
if (!S) {
@@ -2533,7 +2533,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddInteger(scSMaxExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
@@ -2637,7 +2637,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
ID.AddInteger(scUMaxExpr);
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
- void *IP = 0;
+ void *IP = nullptr;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
std::uninitialized_copy(Ops.begin(), Ops.end(), O);
@@ -2704,7 +2704,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
FoldingSetNodeID ID;
ID.AddInteger(scUnknown);
ID.AddPointer(V);
- void *IP = 0;
+ void *IP = nullptr;
if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
assert(cast<SCEVUnknown>(S)->getValue() == V &&
"Stale SCEVUnknown in uniquing map!");
@@ -3010,7 +3010,7 @@ const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
return getPointerBase(Cast->getOperand());
}
else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
- const SCEV *PtrOp = 0;
+ const SCEV *PtrOp = nullptr;
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
I != E; ++I) {
if ((*I)->getType()->isPointerTy()) {
@@ -3090,20 +3090,20 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// The loop may have multiple entrances or multiple exits; we can analyze
// this phi as an addrec if it has a unique entry value and a unique
// backedge value.
- Value *BEValueV = 0, *StartValueV = 0;
+ Value *BEValueV = nullptr, *StartValueV = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = PN->getIncomingValue(i);
if (L->contains(PN->getIncomingBlock(i))) {
if (!BEValueV) {
BEValueV = V;
} else if (BEValueV != V) {
- BEValueV = 0;
+ BEValueV = nullptr;
break;
}
} else if (!StartValueV) {
StartValueV = V;
} else if (StartValueV != V) {
- StartValueV = 0;
+ StartValueV = nullptr;
break;
}
}
@@ -4316,9 +4316,9 @@ ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
- const SCEV *BECount = 0;
+ const SCEV *BECount = nullptr;
for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
- ENT != 0; ENT = ENT->getNextExit()) {
+ ENT != nullptr; ENT = ENT->getNextExit()) {
assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
@@ -4336,7 +4336,7 @@ const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
ScalarEvolution *SE) const {
for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
- ENT != 0; ENT = ENT->getNextExit()) {
+ ENT != nullptr; ENT = ENT->getNextExit()) {
if (ENT->ExitingBlock == ExitingBlock)
return ENT->ExactNotTaken;
@@ -4359,7 +4359,7 @@ bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
return false;
for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
- ENT != 0; ENT = ENT->getNextExit()) {
+ ENT != nullptr; ENT = ENT->getNextExit()) {
if (ENT->ExactNotTaken != SE->getCouldNotCompute()
&& SE->hasOperand(ENT->ExactNotTaken, S)) {
@@ -4398,8 +4398,8 @@ ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
/// clear - Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
- ExitNotTaken.ExitingBlock = 0;
- ExitNotTaken.ExactNotTaken = 0;
+ ExitNotTaken.ExitingBlock = nullptr;
+ ExitNotTaken.ExactNotTaken = nullptr;
delete[] ExitNotTaken.getNextExit();
}
@@ -4414,7 +4414,7 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
const SCEV *MaxBECount = getCouldNotCompute();
bool CouldComputeBECount = true;
BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
- const SCEV *LatchMaxCount = 0;
+ const SCEV *LatchMaxCount = nullptr;
SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
@@ -4454,7 +4454,7 @@ ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
// exit at this block and remember the exit block and whether all other targets
// lead to the loop header.
bool MustExecuteLoopHeader = true;
- BasicBlock *Exit = 0;
+ BasicBlock *Exit = nullptr;
for (succ_iterator SI = succ_begin(ExitingBlock), SE = succ_end(ExitingBlock);
SI != SE; ++SI)
if (!L->contains(*SI)) {
@@ -4800,7 +4800,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
return getCouldNotCompute();
// Okay, we allow one non-constant index into the GEP instruction.
- Value *VarIdx = 0;
+ Value *VarIdx = nullptr;
std::vector<Constant*> Indexes;
unsigned VarIdxNum = 0;
for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
@@ -4810,7 +4810,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
VarIdx = GEP->getOperand(i);
VarIdxNum = i-2;
- Indexes.push_back(0);
+ Indexes.push_back(nullptr);
}
// Loop-invariant loads may be a byproduct of loop optimization. Skip them.
@@ -4841,7 +4841,7 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
Indexes);
- if (Result == 0) break; // Cannot compute!
+ if (!Result) break; // Cannot compute!
// Evaluate the condition for this iteration.
Result = ConstantExpr::getICmp(predicate, Result, RHS);
@@ -4902,14 +4902,14 @@ getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
// Otherwise, we can evaluate this instruction if all of its operands are
// constant or derived from a PHI node themselves.
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
for (Instruction::op_iterator OpI = UseInst->op_begin(),
OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
if (isa<Constant>(*OpI)) continue;
Instruction *OpInst = dyn_cast<Instruction>(*OpI);
- if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;
+ if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
PHINode *P = dyn_cast<PHINode>(OpInst);
if (!P)
@@ -4923,8 +4923,10 @@ getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
PHIMap[OpInst] = P;
}
- if (P == 0) return 0; // Not evolving from PHI
- if (PHI && PHI != P) return 0; // Evolving from multiple different PHIs.
+ if (!P)
+ return nullptr; // Not evolving from PHI
+ if (PHI && PHI != P)
+ return nullptr; // Evolving from multiple different PHIs.
PHI = P;
}
// This is an expression evolving from a constant PHI!
@@ -4938,7 +4940,7 @@ getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0 || !canConstantEvolve(I, L)) return 0;
+ if (!I || !canConstantEvolve(I, L)) return nullptr;
if (PHINode *PN = dyn_cast<PHINode>(I)) {
return PN;
@@ -4960,18 +4962,18 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
Instruction *I = dyn_cast<Instruction>(V);
- if (!I) return 0;
+ if (!I) return nullptr;
if (Constant *C = Vals.lookup(I)) return C;
// An instruction inside the loop depends on a value outside the loop that we
// weren't given a mapping for, or a value such as a call inside the loop.
- if (!canConstantEvolve(I, L)) return 0;
+ if (!canConstantEvolve(I, L)) return nullptr;
// An unmapped PHI can be due to a branch or another loop inside this loop,
// or due to this not being the initial iteration through a loop where we
// couldn't compute the evolution of this particular PHI last time.
- if (isa<PHINode>(I)) return 0;
+ if (isa<PHINode>(I)) return nullptr;
std::vector<Constant*> Operands(I->getNumOperands());
@@ -4979,12 +4981,12 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
if (!Operand) {
Operands[i] = dyn_cast<Constant>(I->getOperand(i));
- if (!Operands[i]) return 0;
+ if (!Operands[i]) return nullptr;
continue;
}
Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
Vals[Operand] = C;
- if (!C) return 0;
+ if (!C) return nullptr;
Operands[i] = C;
}
@@ -5013,7 +5015,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
return I->second;
if (BEs.ugt(MaxBruteForceIterations))
- return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
+ // Not going to evaluate it.
+ return ConstantEvolutionLoopExitValue[PN] = nullptr;
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
@@ -5025,22 +5027,22 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
for (BasicBlock::iterator I = Header->begin();
(PHI = dyn_cast<PHINode>(I)); ++I) {
Constant *StartCST =
dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0) continue;
+ if (!StartCST) continue;
CurrentIterVals[PHI] = StartCST;
}
if (!CurrentIterVals.count(PN))
- return RetVal = 0;
+ return RetVal = nullptr;
Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
// Execute the loop symbolically to determine the exit value.
if (BEs.getActiveBits() >= 32)
- return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
+ return RetVal = nullptr; // More than 2^32-1 iterations?? Not doing it!
unsigned NumIterations = BEs.getZExtValue(); // must be in range
unsigned IterationNum = 0;
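// Each symbolic iteration below recomputes every PHI's next value from the
// current constant map, stopping early once no PHI changes between trips.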
@@ -5053,8 +5055,8 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
DenseMap<Instruction *, Constant *> NextIterVals;
Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
TLI);
- if (NextPHI == 0)
- return 0; // Couldn't evaluate!
+ if (!NextPHI)
+ return nullptr; // Couldn't evaluate!
NextIterVals[PN] = NextPHI;
bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
@@ -5101,7 +5103,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
Value *Cond,
bool ExitWhen) {
PHINode *PN = getConstantEvolvingPHI(Cond, L);
- if (PN == 0) return getCouldNotCompute();
+ if (!PN) return getCouldNotCompute();
// If the loop is canonicalized, the PHI will have exactly two entries.
// That's the only form we support here.
@@ -5114,12 +5116,12 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
// One entry must be a constant (coming in from outside of the loop), and the
// second must be derived from the same PHI.
bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
- PHINode *PHI = 0;
+ PHINode *PHI = nullptr;
for (BasicBlock::iterator I = Header->begin();
(PHI = dyn_cast<PHINode>(I)); ++I) {
Constant *StartCST =
dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
- if (StartCST == 0) continue;
+ if (!StartCST) continue;
CurrentIterVals[PHI] = StartCST;
}
if (!CurrentIterVals.count(PN))
@@ -5189,7 +5191,7 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
if (Values[u].first == L)
return Values[u].second ? Values[u].second : V;
}
- Values.push_back(std::make_pair(L, static_cast<const SCEV *>(0)));
+ Values.push_back(std::make_pair(L, static_cast<const SCEV *>(nullptr)));
// Otherwise compute it.
const SCEV *C = computeSCEVAtScope(V, L);
SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values2 = ValuesAtScopes[V];
@@ -5243,7 +5245,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
}
for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
- if (!C2) return 0;
+ if (!C2) return nullptr;
// First pointer!
if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
@@ -5258,7 +5260,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
// Don't bother trying to sum two pointers. We probably can't
// statically compute a load that results from it anyway.
if (C2->getType()->isPointerTy())
- return 0;
+ return nullptr;
if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
if (PTy->getElementType()->isStructTy())
@@ -5276,10 +5278,10 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
// Don't bother with pointers at all.
- if (C->getType()->isPointerTy()) return 0;
+ if (C->getType()->isPointerTy()) return nullptr;
for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
- if (!C2 || C2->getType()->isPointerTy()) return 0;
+ if (!C2 || C2->getType()->isPointerTy()) return nullptr;
C = ConstantExpr::getMul(C, C2);
}
return C;
@@ -5298,7 +5300,7 @@ static Constant *BuildConstantFromSCEV(const SCEV *V) {
case scUMaxExpr:
break; // TODO: smax, umax.
}
- return 0;
+ return nullptr;
}
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
@@ -5365,7 +5367,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
// Check to see if getSCEVAtScope actually made an improvement.
if (MadeImprovement) {
- Constant *C = 0;
+ Constant *C = nullptr;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
Operands[0], Operands[1], DL,
@@ -5697,7 +5699,7 @@ ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr) {
// to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
// We have not yet seen any such cases.
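// For example, an IV of {12,+,-4} reaches zero after 12 / 4 = 3 steps.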
const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
- if (StepC == 0 || StepC->getValue()->equalsInt(0))
+ if (!StepC || StepC->getValue()->equalsInt(0))
return getCouldNotCompute();
// For positive steps (counting up until unsigned overflow):
@@ -7375,7 +7377,8 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
//===----------------------------------------------------------------------===//
ScalarEvolution::ScalarEvolution()
- : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64), BlockDispositions(64), FirstUnknown(0) {
+ : FunctionPass(ID), ValuesAtScopes(64), LoopDispositions(64),
+ BlockDispositions(64), FirstUnknown(nullptr) {
initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}
@@ -7383,7 +7386,7 @@ bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
@@ -7394,7 +7397,7 @@ void ScalarEvolution::releaseMemory() {
// destructors, so that they release their references to their values.
for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
U->~SCEVUnknown();
- FirstUnknown = 0;
+ FirstUnknown = nullptr;
ValueExprMap.clear();
diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
index 7be6aca768..6933f74150 100644
--- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
+++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp
@@ -34,7 +34,7 @@ namespace {
public:
static char ID; // Class identification, replacement for typeinfo
- ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(0) {
+ ScalarEvolutionAliasAnalysis() : FunctionPass(ID), SE(nullptr) {
initializeScalarEvolutionAliasAnalysisPass(
*PassRegistry::getPassRegistry());
}
@@ -102,7 +102,7 @@ ScalarEvolutionAliasAnalysis::GetBaseValue(const SCEV *S) {
return U->getValue();
}
// No Identified object found.
- return 0;
+ return nullptr;
}
AliasAnalysis::AliasResult
@@ -162,10 +162,10 @@ ScalarEvolutionAliasAnalysis::alias(const Location &LocA,
if ((AO && AO != LocA.Ptr) || (BO && BO != LocB.Ptr))
if (alias(Location(AO ? AO : LocA.Ptr,
AO ? +UnknownSize : LocA.Size,
- AO ? 0 : LocA.TBAATag),
+ AO ? nullptr : LocA.TBAATag),
Location(BO ? BO : LocB.Ptr,
BO ? +UnknownSize : LocB.Size,
- BO ? 0 : LocB.TBAATag)) == NoAlias)
+ BO ? nullptr : LocB.TBAATag)) == NoAlias)
return NoAlias;
// Forward the query to the next analysis.
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index fb3d595b21..b5070434d1 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -44,7 +44,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
// not allowed to move it.
BasicBlock::iterator BIP = Builder.GetInsertPoint();
- Instruction *Ret = NULL;
+ Instruction *Ret = nullptr;
// Check to see if there is already a cast!
for (User *U : V->users())
@@ -627,21 +627,21 @@ static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
// Test whether we've already computed the most relevant loop for this SCEV.
std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
- RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
+ RelevantLoops.insert(std::make_pair(S, nullptr));
if (!Pair.second)
return Pair.first->second;
if (isa<SCEVConstant>(S))
// A constant has no relevant loops.
- return 0;
+ return nullptr;
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
return Pair.first->second = SE.LI->getLoopFor(I->getParent());
// A non-instruction has no relevant loops.
- return 0;
+ return nullptr;
}
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
- const Loop *L = 0;
+ const Loop *L = nullptr;
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
L = AR->getLoop();
for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
@@ -716,7 +716,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// Emit instructions to add all the operands. Hoist as much as possible
// out of loops, and form meaningful getelementptrs where possible.
- Value *Sum = 0;
+ Value *Sum = nullptr;
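// Sum stays null until the first operand has been expanded; the first loop
// iteration initializes it, and later ones fold into it as adds or GEPs.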
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const Loop *CurLoop = I->first;
@@ -784,7 +784,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
// Emit instructions to mul all the operands. Hoist as much as possible
// out of loops.
- Value *Prod = 0;
+ Value *Prod = nullptr;
for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
const SCEV *Op = I->second;
@@ -892,18 +892,18 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
Instruction *InsertPos,
bool allowScale) {
if (IncV == InsertPos)
- return NULL;
+ return nullptr;
switch (IncV->getOpcode()) {
default:
- return NULL;
+ return nullptr;
// Check for a simple Add/Sub or GEP of a loop invariant step.
case Instruction::Add:
case Instruction::Sub: {
Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
if (!OInst || SE.DT->dominates(OInst, InsertPos))
return dyn_cast<Instruction>(IncV->getOperand(0));
- return NULL;
+ return nullptr;
}
case Instruction::BitCast:
return dyn_cast<Instruction>(IncV->getOperand(0));
@@ -914,7 +914,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
continue;
if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
if (!SE.DT->dominates(OInst, InsertPos))
- return NULL;
+ return nullptr;
}
if (allowScale) {
// allow any kind of GEP as long as it can be hoisted.
@@ -925,11 +925,11 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
// have 2 operands. i1* is used by the expander to represent an
// address-size element.
if (IncV->getNumOperands() != 2)
- return NULL;
+ return nullptr;
unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
&& IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
- return NULL;
+ return nullptr;
break;
}
return dyn_cast<Instruction>(IncV->getOperand(0));
@@ -1077,9 +1077,9 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Reuse a previously-inserted PHI, if present.
BasicBlock *LatchBlock = L->getLoopLatch();
if (LatchBlock) {
- PHINode *AddRecPhiMatch = 0;
- Instruction *IncV = 0;
- TruncTy = 0;
+ PHINode *AddRecPhiMatch = nullptr;
+ Instruction *IncV = nullptr;
+ TruncTy = nullptr;
InvertStep = false;
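// AddRecPhiMatch, IncV and TruncTy record the best reusable PHI found so
// far, its increment instruction, and the truncation (if any) required to
// reuse it for this expression.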
// Only try partially matching scevs that need truncation and/or
@@ -1120,7 +1120,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Stop if we have found an exact match SCEV.
if (IsMatchingSCEV) {
IncV = TempIncV;
- TruncTy = 0;
+ TruncTy = nullptr;
InvertStep = false;
AddRecPhiMatch = PN;
break;
@@ -1243,13 +1243,13 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
PostIncLoopSet Loops;
Loops.insert(L);
Normalized =
- cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
- Loops, SE, *SE.DT));
+ cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, nullptr,
+ nullptr, Loops, SE, *SE.DT));
}
// Strip off any non-loop-dominating component from the addrec start.
const SCEV *Start = Normalized->getStart();
- const SCEV *PostLoopOffset = 0;
+ const SCEV *PostLoopOffset = nullptr;
if (!SE.properlyDominates(Start, L->getHeader())) {
PostLoopOffset = Start;
Start = SE.getConstant(Normalized->getType(), 0);
@@ -1261,7 +1261,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
// Strip off any non-loop-dominating component from the addrec step.
const SCEV *Step = Normalized->getStepRecurrence(SE);
- const SCEV *PostLoopScale = 0;
+ const SCEV *PostLoopScale = nullptr;
if (!SE.dominates(Step, L->getHeader())) {
PostLoopScale = Step;
Step = SE.getConstant(Normalized->getType(), 1);
@@ -1276,7 +1276,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
Type *ExpandTy = PostLoopScale ? IntTy : STy;
// In some cases, we decide to reuse an existing phi node but need to truncate
// it and/or invert the step.
- Type *TruncTy = 0;
+ Type *TruncTy = nullptr;
bool InvertStep = false;
PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
TruncTy, InvertStep);
@@ -1372,7 +1372,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
const Loop *L = S->getLoop();
// First check for an existing canonical IV in a suitable type.
- PHINode *CanonicalIV = 0;
+ PHINode *CanonicalIV = nullptr;
if (PHINode *PN = L->getCanonicalInductionVariable())
if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
CanonicalIV = PN;
@@ -1393,7 +1393,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
isa<LandingPadInst>(NewInsertPt))
++NewInsertPt;
- V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
+ V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
NewInsertPt);
return V;
}
@@ -1666,7 +1666,8 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
// Emit code for it.
BuilderType::InsertPointGuard Guard(Builder);
- PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
+ PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
+ L->getHeader()->begin()));
return V;
}
diff --git a/lib/Analysis/ScalarEvolutionNormalization.cpp b/lib/Analysis/ScalarEvolutionNormalization.cpp
index 1e4c0bdc7b..e9db295a4a 100644
--- a/lib/Analysis/ScalarEvolutionNormalization.cpp
+++ b/lib/Analysis/ScalarEvolutionNormalization.cpp
@@ -113,7 +113,7 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
// Transform each operand.
for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
I != E; ++I) {
- Operands.push_back(TransformSubExpr(*I, LUser, 0));
+ Operands.push_back(TransformSubExpr(*I, LUser, nullptr));
}
// Conservatively use AnyWrap until/unless we need FlagNW.
const SCEV *Result = SE.getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
diff --git a/lib/Analysis/SparsePropagation.cpp b/lib/Analysis/SparsePropagation.cpp
index 87a4fa4e0c..90de884341 100644
--- a/lib/Analysis/SparsePropagation.cpp
+++ b/lib/Analysis/SparsePropagation.cpp
@@ -147,7 +147,7 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
return;
Constant *C = LatticeFunc->GetConstant(BCValue, BI->getCondition(), *this);
- if (C == 0 || !isa<ConstantInt>(C)) {
+ if (!C || !isa<ConstantInt>(C)) {
// Non-constant values can go either way.
Succs[0] = Succs[1] = true;
return;
@@ -189,7 +189,7 @@ void SparseSolver::getFeasibleSuccessors(TerminatorInst &TI,
return;
Constant *C = LatticeFunc->GetConstant(SCValue, SI.getCondition(), *this);
- if (C == 0 || !isa<ConstantInt>(C)) {
+ if (!C || !isa<ConstantInt>(C)) {
// All destinations are executable!
Succs.assign(TI.getNumSuccessors(), true);
return;
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 04d09f1372..8df5305a05 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -234,7 +234,7 @@ namespace {
struct NoTTI final : ImmutablePass, TargetTransformInfo {
const DataLayout *DL;
- NoTTI() : ImmutablePass(ID), DL(0) {
+ NoTTI() : ImmutablePass(ID), DL(nullptr) {
initializeNoTTIPass(*PassRegistry::getPassRegistry());
}
@@ -242,9 +242,9 @@ struct NoTTI final : ImmutablePass, TargetTransformInfo {
// Note that this subclass is special, and must *not* call initializeTTI as
// it does not chain.
TopTTI = this;
- PrevTTI = 0;
+ PrevTTI = nullptr;
DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : 0;
+ DL = DLP ? &DLP->getDataLayout() : nullptr;
}
virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
@@ -443,7 +443,7 @@ struct NoTTI final : ImmutablePass, TargetTransformInfo {
// Otherwise delegate to the fully generic implementations.
return getOperationCost(Operator::getOpcode(U), U->getType(),
U->getNumOperands() == 1 ?
- U->getOperand(0)->getType() : 0);
+ U->getOperand(0)->getType() : nullptr);
}
bool hasBranchDivergence() const override { return false; }
@@ -567,7 +567,7 @@ struct NoTTI final : ImmutablePass, TargetTransformInfo {
}
unsigned getShuffleCost(ShuffleKind Kind, Type *Ty,
- int Index = 0, Type *SubTp = 0) const override {
+ int Index = 0, Type *SubTp = nullptr) const override {
return 1;
}
@@ -581,7 +581,7 @@ struct NoTTI final : ImmutablePass, TargetTransformInfo {
}
unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
- Type *CondTy = 0) const override {
+ Type *CondTy = nullptr) const override {
return 1;
}
diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp
index 05daf18aa9..620d1799a5 100644
--- a/lib/Analysis/TypeBasedAliasAnalysis.cpp
+++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp
@@ -144,7 +144,7 @@ namespace {
const MDNode *Node;
public:
- TBAANode() : Node(0) {}
+ TBAANode() : Node(nullptr) {}
explicit TBAANode(const MDNode *N) : Node(N) {}
/// getNode - Get the MDNode for this TBAANode.
@@ -182,7 +182,7 @@ namespace {
const MDNode *Node;
public:
- TBAAStructTagNode() : Node(0) {}
+ TBAAStructTagNode() : Node(nullptr) {}
explicit TBAAStructTagNode(const MDNode *N) : Node(N) {}
/// Get the MDNode for this TBAAStructTagNode.
@@ -218,7 +218,7 @@ namespace {
const MDNode *Node;
public:
- TBAAStructTypeNode() : Node(0) {}
+ TBAAStructTypeNode() : Node(nullptr) {}
explicit TBAAStructTypeNode(const MDNode *N) : Node(N) {}
/// Get the MDNode for this TBAAStructTypeNode.
@@ -555,7 +555,7 @@ bool MDNode::isTBAAVtableAccess() const {
MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) {
if (!A || !B)
- return NULL;
+ return nullptr;
if (A == B)
return A;
@@ -564,29 +564,31 @@ MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) {
bool StructPath = isStructPathTBAA(A);
if (StructPath) {
A = cast_or_null<MDNode>(A->getOperand(1));
- if (!A) return 0;
+ if (!A) return nullptr;
B = cast_or_null<MDNode>(B->getOperand(1));
- if (!B) return 0;
+ if (!B) return nullptr;
}
SmallVector<MDNode *, 4> PathA;
MDNode *T = A;
while (T) {
PathA.push_back(T);
- T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
+ T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1))
+ : nullptr;
}
SmallVector<MDNode *, 4> PathB;
T = B;
while (T) {
PathB.push_back(T);
- T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1)) : 0;
+ T = T->getNumOperands() >= 2 ? cast_or_null<MDNode>(T->getOperand(1))
+ : nullptr;
}
int IA = PathA.size() - 1;
int IB = PathB.size() - 1;
- MDNode *Ret = 0;
+ MDNode *Ret = nullptr;
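// Both paths run leaf-to-root, so walking from the back compares the roots
// first; Ret remembers the deepest node on which the two paths still agree.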
while (IA >= 0 && IB >= 0) {
if (PathA[IA] == PathB[IB])
Ret = PathA[IA];
@@ -599,7 +601,7 @@ MDNode *MDNode::getMostGenericTBAA(MDNode *A, MDNode *B) {
return Ret;
if (!Ret)
- return 0;
+ return nullptr;
// We need to convert from a type node to a tag node.
Type *Int64 = IntegerType::get(A->getContext(), 64);
Value *Ops[3] = { Ret, Ret, ConstantInt::get(Int64, 0) };
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 72617a0aad..07720d78bf 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -842,7 +842,7 @@ bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
if (Depth++ == MaxDepth)
return false;
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
// A shift of a power of two is a power of two or zero.
if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
match(V, m_Shr(m_Value(X), m_Value()))))
@@ -882,10 +882,10 @@ bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth) {
unsigned BitWidth = V->getType()->getScalarSizeInBits();
APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
- ComputeMaskedBits(X, LHSZeroBits, LHSOneBits, 0, Depth);
+ ComputeMaskedBits(X, LHSZeroBits, LHSOneBits, nullptr, Depth);
APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
- ComputeMaskedBits(Y, RHSZeroBits, RHSOneBits, 0, Depth);
+ ComputeMaskedBits(Y, RHSZeroBits, RHSOneBits, nullptr, Depth);
// If i8 V is a power of two or zero:
// ZeroBits: 1 1 1 0 1 1 1 1
// ~ZeroBits: 0 0 0 1 0 0 0 0
@@ -1005,7 +1005,7 @@ bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), TD);
// X | Y != 0 if X != 0 or Y != 0.
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (match(V, m_Or(m_Value(X), m_Value(Y))))
return isKnownNonZero(X, TD, Depth) || isKnownNonZero(Y, TD, Depth);
@@ -1364,7 +1364,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
Op1 = ConstantInt::get(V->getContext(), API);
}
- Value *Mul0 = NULL;
+ Value *Mul0 = nullptr;
if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
if (Constant *Op1C = dyn_cast<Constant>(Op1))
if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
@@ -1388,7 +1388,7 @@ bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
}
}
- Value *Mul1 = NULL;
+ Value *Mul1 = nullptr;
if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
if (Constant *Op0C = dyn_cast<Constant>(Op0))
if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
@@ -1432,7 +1432,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
return 1; // Limit search depth.
const Operator *I = dyn_cast<Operator>(V);
- if (I == 0) return false;
+ if (!I) return false;
// Check if the nsz fast-math flag is set
if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
@@ -1513,7 +1513,7 @@ Value *llvm::isBytewiseValue(Value *V) {
// If the top/bottom halves aren't the same, reject it.
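// E.g. i16 0xABAB has equal 0xAB halves and folds to the byte 0xAB, while
// i16 0xABCD is rejected.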
if (Val != Val2)
- return 0;
+ return nullptr;
}
return ConstantInt::get(V->getContext(), Val);
}
@@ -1525,11 +1525,11 @@ Value *llvm::isBytewiseValue(Value *V) {
Value *Elt = CA->getElementAsConstant(0);
Value *Val = isBytewiseValue(Elt);
if (!Val)
- return 0;
+ return nullptr;
for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
if (CA->getElementAsConstant(I) != Elt)
- return 0;
+ return nullptr;
return Val;
}
@@ -1540,7 +1540,7 @@ Value *llvm::isBytewiseValue(Value *V) {
// %c = or i16 %a, %b
// but until there is an example that actually needs this, it doesn't seem
// worth worrying about.
- return 0;
+ return nullptr;
}
@@ -1590,7 +1590,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
Value *V = FindInsertedValue(From, Idxs);
if (!V)
- return NULL;
+ return nullptr;
// Insert the value in the new (sub) aggregate
return llvm::InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
@@ -1641,7 +1641,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
if (Constant *C = dyn_cast<Constant>(V)) {
C = C->getAggregateElement(idx_range[0]);
- if (C == 0) return 0;
+ if (!C) return nullptr;
return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
}
@@ -1654,7 +1654,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
if (req_idx == idx_range.end()) {
// We can't handle this without inserting insertvalues
if (!InsertBefore)
- return 0;
+ return nullptr;
// The requested index identifies a part of a nested aggregate. Handle
// this specially. For example,
@@ -1708,7 +1708,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
}
// Otherwise, we don't know (such as extracting from a function return value
// or load instruction)
- return 0;
+ return nullptr;
}
/// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
@@ -1769,13 +1769,13 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
// Make sure the index-ee is a pointer to array of i8.
PointerType *PT = cast<PointerType>(GEP->getOperand(0)->getType());
ArrayType *AT = dyn_cast<ArrayType>(PT->getElementType());
- if (AT == 0 || !AT->getElementType()->isIntegerTy(8))
+ if (!AT || !AT->getElementType()->isIntegerTy(8))
return false;
// Check to make sure that the first operand of the GEP is an integer and
// has value 0 so that we are sure we're indexing into the initializer.
const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
- if (FirstIdx == 0 || !FirstIdx->isZero())
+ if (!FirstIdx || !FirstIdx->isZero())
return false;
// If the second index isn't a ConstantInt, then this is a variable index
@@ -1807,7 +1807,7 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
// Must be a Constant Array
const ConstantDataArray *Array =
dyn_cast<ConstantDataArray>(GV->getInitializer());
- if (Array == 0 || !Array->isString())
+ if (!Array || !Array->isString())
return false;
// Get the number of elements in the array
@@ -1913,7 +1913,7 @@ llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
// See if InstructionSimplify knows any relevant tricks.
if (Instruction *I = dyn_cast<Instruction>(V))
// TODO: Acquire a DominatorTree and use it.
- if (Value *Simplified = SimplifyInstruction(I, TD, 0)) {
+ if (Value *Simplified = SimplifyInstruction(I, TD, nullptr)) {
V = Simplified;
continue;
}