author     Craig Topper <craig.topper@gmail.com>   2013-07-14 04:42:23 +0000
committer  Craig Topper <craig.topper@gmail.com>   2013-07-14 04:42:23 +0000
commit     a0ec3f9b7b826b9b40b80199923b664bad808cce (patch)
tree       5ef8274354070e15800245d5310f860bb6e78727
parent     6f71dd765ae9e1d1d0ba01d98a05627ffe3bfc8a (diff)
Use SmallVectorImpl& instead of SmallVector to avoid repeating small vector size.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@186274 91177308-0d34-0410-b5e6-96231b3b80d8
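To illustrate the idiom this commit applies throughout the tree, here is a minimal standalone sketch (the functions collectEvens and caller below are hypothetical examples, not code from the patch): a callee that accepts SmallVectorImpl<T>& binds to a SmallVector<T, N> of any inline size N, so interfaces no longer have to repeat the size parameter.

#include "llvm/ADT/SmallVector.h"
using namespace llvm;

// The callee only names the element type; the caller's inline size N is erased.
static void collectEvens(SmallVectorImpl<int> &Out) {
  for (int i = 0; i < 10; ++i)
    if ((i & 1) == 0)
      Out.push_back(i);
}

void caller() {
  SmallVector<int, 4> Small;   // inline size chosen at the call site
  SmallVector<int, 32> Large;  // a different size binds to the same interface
  collectEvens(Small);
  collectEvens(Large);
}

Because SmallVector<T, N> derives from SmallVectorImpl<T>, callers keep whatever inline buffer size suits their hot path while headers stop hard-coding it.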
-rw-r--r--  include/llvm/Analysis/DependenceAnalysis.h  2
-rw-r--r--  include/llvm/CodeGen/CallingConvLower.h  4
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h  2
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp  4
-rw-r--r--  lib/Analysis/DependenceAnalysis.cpp  2
-rw-r--r--  lib/Analysis/ProfileDataLoader.cpp  4
-rw-r--r--  lib/CodeGen/CallingConvLower.cpp  2
-rw-r--r--  lib/CodeGen/MachineCSE.cpp  8
-rw-r--r--  lib/CodeGen/MachineSSAUpdater.cpp  2
-rw-r--r--  lib/CodeGen/MachineSink.cpp  2
-rw-r--r--  lib/CodeGen/PrologEpilogInserter.h  8
-rw-r--r--  lib/CodeGen/RegAllocFast.cpp  2
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp  8
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypes.h  8
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp  12
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp  12
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp  22
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp  19
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp  7
-rw-r--r--  lib/CodeGen/ShrinkWrapping.cpp  8
-rw-r--r--  lib/CodeGen/StackSlotColoring.cpp  4
-rw-r--r--  lib/CodeGen/TailDuplication.cpp  26
-rw-r--r--  lib/CodeGen/TwoAddressInstructionPass.cpp  2
-rw-r--r--  lib/IR/Metadata.cpp  4
-rw-r--r--  lib/Target/AArch64/AArch64ISelLowering.cpp  6
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp  10
-rw-r--r--  lib/Target/ARM/ARMISelLowering.h  2
-rw-r--r--  lib/Target/ARM/ARMLoadStoreOptimizer.cpp  14
-rw-r--r--  lib/Target/Hexagon/HexagonCallingConvLower.cpp  2
-rw-r--r--  lib/Target/Hexagon/HexagonCallingConvLower.h  4
-rw-r--r--  lib/Target/Hexagon/HexagonHardwareLoops.cpp  8
-rw-r--r--  lib/Target/Hexagon/HexagonISelLowering.cpp  8
-rw-r--r--  lib/Target/MBlaze/MBlazeFrameLowering.cpp  4
-rw-r--r--  lib/Target/MBlaze/MBlazeISelLowering.cpp  6
-rw-r--r--  lib/Target/MBlaze/MBlazeMachineFunction.h  2
-rw-r--r--  lib/Target/MSP430/MSP430ISelLowering.cpp  6
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp  8
-rw-r--r--  lib/Target/Mips/MipsISelLowering.h  2
-rw-r--r--  lib/Target/NVPTX/NVPTXISelLowering.cpp  6
-rw-r--r--  lib/Target/PowerPC/PPCFrameLowering.cpp  4
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp  24
-rw-r--r--  lib/Target/PowerPC/PPCMachineFunctionInfo.h  2
-rw-r--r--  lib/Target/R600/AMDILCFGStructurizer.cpp  8
-rw-r--r--  lib/Target/R600/R600EmitClauseMarkers.cpp  2
-rw-r--r--  lib/Target/R600/R600InstrInfo.cpp  2
-rw-r--r--  lib/Target/Sparc/SparcISelLowering.cpp  6
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp  6
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp  8
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp  8
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp  2
-rw-r--r--  lib/Transforms/ObjCARC/ObjCARCOpts.cpp  12
-rw-r--r--  lib/Transforms/Scalar/LoopDeletion.cpp  8
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp  4
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp  4
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp  39
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp  19
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp  17
-rw-r--r--  lib/Transforms/Vectorize/BBVectorize.cpp  4
-rw-r--r--  utils/TableGen/CodeGenRegisters.cpp  2
-rw-r--r--  utils/TableGen/CodeGenRegisters.h  4
-rw-r--r--  utils/TableGen/CodeGenSchedule.cpp  2
61 files changed, 227 insertions, 222 deletions
diff --git a/include/llvm/Analysis/DependenceAnalysis.h b/include/llvm/Analysis/DependenceAnalysis.h
index 95728a0e03..8fce2f6a9f 100644
--- a/include/llvm/Analysis/DependenceAnalysis.h
+++ b/include/llvm/Analysis/DependenceAnalysis.h
@@ -849,7 +849,7 @@ namespace llvm {
bool propagate(const SCEV *&Src,
const SCEV *&Dst,
SmallBitVector &Loops,
- SmallVector<Constraint, 4> &Constraints,
+ SmallVectorImpl<Constraint> &Constraints,
bool &Consistent);
/// propagateDistance - Attempt to propagate a distance
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index fa9d60f0d4..a18f433bda 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -158,7 +158,7 @@ private:
MachineFunction &MF;
const TargetMachine &TM;
const TargetRegisterInfo &TRI;
- SmallVector<CCValAssign, 16> &Locs;
+ SmallVectorImpl<CCValAssign> &Locs;
LLVMContext &Context;
unsigned StackOffset;
@@ -219,7 +219,7 @@ protected:
public:
CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
- const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
+ const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C);
void addLoc(const CCValAssign &V) {
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 0f45dc8607..2f2137da04 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -539,7 +539,7 @@ public:
/// NOTE: This is still very expensive. Use carefully.
bool hasPredecessorHelper(const SDNode *N,
SmallPtrSet<const SDNode *, 32> &Visited,
- SmallVector<const SDNode *, 16> &Worklist) const;
+ SmallVectorImpl<const SDNode *> &Worklist) const;
/// getNumOperands - Return the number of values used by this operation.
///
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index f20e83e911..9fe136212c 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -857,8 +857,8 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}
-static bool areVarIndicesEqual(SmallVector<VariableGEPIndex, 4> &Indices1,
- SmallVector<VariableGEPIndex, 4> &Indices2) {
+static bool areVarIndicesEqual(SmallVectorImpl<VariableGEPIndex> &Indices1,
+ SmallVectorImpl<VariableGEPIndex> &Indices2) {
unsigned Size1 = Indices1.size();
unsigned Size2 = Indices2.size();
diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp
index 450dcbce75..20576565a7 100644
--- a/lib/Analysis/DependenceAnalysis.cpp
+++ b/lib/Analysis/DependenceAnalysis.cpp
@@ -2977,7 +2977,7 @@ const SCEV *DependenceAnalysis::addToCoefficient(const SCEV *Expr,
bool DependenceAnalysis::propagate(const SCEV *&Src,
const SCEV *&Dst,
SmallBitVector &Loops,
- SmallVector<Constraint, 4> &Constraints,
+ SmallVectorImpl<Constraint> &Constraints,
bool &Consistent) {
bool Result = false;
for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) {
diff --git a/lib/Analysis/ProfileDataLoader.cpp b/lib/Analysis/ProfileDataLoader.cpp
index d7f444b4b6..3d0a1e2eac 100644
--- a/lib/Analysis/ProfileDataLoader.cpp
+++ b/lib/Analysis/ProfileDataLoader.cpp
@@ -76,7 +76,7 @@ static unsigned ReadProfilingNumEntries(const char *ToolName, FILE *F,
/// packet and then accumulate the entries into 'Data'.
static void ReadProfilingBlock(const char *ToolName, FILE *F,
bool ShouldByteSwap,
- SmallVector<unsigned, 32> &Data) {
+ SmallVectorImpl<unsigned> &Data) {
// Read the number of entries...
unsigned NumEntries = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);
@@ -99,7 +99,7 @@ static void ReadProfilingBlock(const char *ToolName, FILE *F,
/// run with when the current profiling data packet(s) were generated.
static void ReadProfilingArgBlock(const char *ToolName, FILE *F,
bool ShouldByteSwap,
- SmallVector<std::string, 1> &CommandLines) {
+ SmallVectorImpl<std::string> &CommandLines) {
// Read the number of bytes ...
unsigned ArgLength = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap);
diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 75f4b96e3b..fcfc9dcbf7 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -24,7 +24,7 @@
using namespace llvm;
CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
- const TargetMachine &tm, SmallVector<CCValAssign, 16> &locs,
+ const TargetMachine &tm, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C)
: CallingConv(CC), IsVarArg(isVarArg), MF(mf), TM(tm),
TRI(*TM.getRegisterInfo()), Locs(locs), Context(C),
diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp
index 61d8d384cd..d228286d9d 100644
--- a/lib/CodeGen/MachineCSE.cpp
+++ b/lib/CodeGen/MachineCSE.cpp
@@ -84,11 +84,11 @@ namespace {
bool hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs,
+ SmallVectorImpl<unsigned> &PhysDefs,
bool &PhysUseDef) const;
bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs,
+ SmallVectorImpl<unsigned> &PhysDefs,
bool &NonLocal) const;
bool isCSECandidate(MachineInstr *MI);
bool isProfitableToCSE(unsigned CSReg, unsigned Reg,
@@ -193,7 +193,7 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg,
bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
const MachineBasicBlock *MBB,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs,
+ SmallVectorImpl<unsigned> &PhysDefs,
bool &PhysUseDef) const{
// First, add all uses to PhysRefs.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@@ -244,7 +244,7 @@ bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI,
bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI,
SmallSet<unsigned,8> &PhysRefs,
- SmallVector<unsigned,2> &PhysDefs,
+ SmallVectorImpl<unsigned> &PhysDefs,
bool &NonLocal) const {
// For now conservatively returns false if the common subexpression is
// not in the same basic block as the given instruction. The only exception
diff --git a/lib/CodeGen/MachineSSAUpdater.cpp b/lib/CodeGen/MachineSSAUpdater.cpp
index bb6aad7f94..17f0af84dd 100644
--- a/lib/CodeGen/MachineSSAUpdater.cpp
+++ b/lib/CodeGen/MachineSSAUpdater.cpp
@@ -77,7 +77,7 @@ unsigned MachineSSAUpdater::GetValueAtEndOfBlock(MachineBasicBlock *BB) {
static
unsigned LookForIdenticalPHI(MachineBasicBlock *BB,
- SmallVector<std::pair<MachineBasicBlock*, unsigned>, 8> &PredValues) {
+ SmallVectorImpl<std::pair<MachineBasicBlock*, unsigned> > &PredValues) {
if (BB->empty())
return 0;
diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp
index 757f60b19c..dacdbddfa2 100644
--- a/lib/CodeGen/MachineSink.cpp
+++ b/lib/CodeGen/MachineSink.cpp
@@ -394,7 +394,7 @@ static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
/// collectDebgValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
static void collectDebugValues(MachineInstr *MI,
- SmallVector<MachineInstr *, 2> & DbgValues) {
+ SmallVectorImpl<MachineInstr *> &DbgValues) {
DbgValues.clear();
if (!MI->getOperand(0).isReg())
return;
diff --git a/lib/CodeGen/PrologEpilogInserter.h b/lib/CodeGen/PrologEpilogInserter.h
index 1497345f31..125b9b4ad7 100644
--- a/lib/CodeGen/PrologEpilogInserter.h
+++ b/lib/CodeGen/PrologEpilogInserter.h
@@ -112,13 +112,13 @@ namespace llvm {
bool calcAvailInOut(MachineBasicBlock* MBB);
void calculateAnticAvail(MachineFunction &Fn);
bool addUsesForMEMERegion(MachineBasicBlock* MBB,
- SmallVector<MachineBasicBlock*, 4>& blks);
- bool addUsesForTopLevelLoops(SmallVector<MachineBasicBlock*, 4>& blks);
+ SmallVectorImpl<MachineBasicBlock *> &blks);
+ bool addUsesForTopLevelLoops(SmallVectorImpl<MachineBasicBlock *> &blks);
bool calcSpillPlacements(MachineBasicBlock* MBB,
- SmallVector<MachineBasicBlock*, 4> &blks,
+ SmallVectorImpl<MachineBasicBlock *> &blks,
CSRegBlockMap &prevSpills);
bool calcRestorePlacements(MachineBasicBlock* MBB,
- SmallVector<MachineBasicBlock*, 4> &blks,
+ SmallVectorImpl<MachineBasicBlock *> &blks,
CSRegBlockMap &prevRestores);
void placeSpillsAndRestores(MachineFunction &Fn);
void placeCSRSpillsAndRestores(MachineFunction &Fn);
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index 58d00da014..6617e50f7f 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -293,7 +293,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
// If this register is used by DBG_VALUE then insert new DBG_VALUE to
// identify spilled location as the place to find corresponding variable's
// value.
- SmallVector<MachineInstr *, 4> &LRIDbgValues =
+ SmallVectorImpl<MachineInstr *> &LRIDbgValues =
LiveDbgValueMap[LRI->VirtReg];
for (unsigned li = 0, le = LRIDbgValues.size(); li != le; ++li) {
MachineInstr *DBG = LRIDbgValues[li];
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index dfb897082a..111803fd5b 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -279,7 +279,7 @@ namespace {
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void GatherAllAliases(SDNode *N, SDValue OriginalChain,
- SmallVector<SDValue, 8> &Aliases);
+ SmallVectorImpl<SDValue> &Aliases);
/// isAlias - Return true if there is any possibility that the two addresses
/// overlap.
@@ -2950,7 +2950,7 @@ SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
/// isBSwapHWordElement - Return true if the specified node is an element
/// that makes up a 32-bit packed halfword byteswap. i.e.
/// ((x&0xff)<<8)|((x&0xff00)>>8)|((x&0x00ff0000)<<8)|((x&0xff000000)>>8)
-static bool isBSwapHWordElement(SDValue N, SmallVector<SDNode*,4> &Parts) {
+static bool isBSwapHWordElement(SDValue N, SmallVectorImpl<SDNode *> &Parts) {
if (!N.getNode()->hasOneUse())
return false;
@@ -4309,7 +4309,7 @@ SDValue DAGCombiner::visitSETCC(SDNode *N) {
// mentioned transformation is profitable.
static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0,
unsigned ExtOpc,
- SmallVector<SDNode*, 4> &ExtendNodes,
+ SmallVectorImpl<SDNode *> &ExtendNodes,
const TargetLowering &TLI) {
bool HasCopyToRegUses = false;
bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType());
@@ -10240,7 +10240,7 @@ bool DAGCombiner::FindAliasInfo(SDNode *N,
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
- SmallVector<SDValue, 8> &Aliases) {
+ SmallVectorImpl<SDValue> &Aliases) {
SmallVector<SDValue, 8> Chains; // List of chains to visit.
SmallPtrSet<SDNode *, 16> Visited; // Visited node set.
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index e2597d6673..63e9af3f59 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -653,7 +653,7 @@ private:
/// loads to load a vector with a resulting wider type. It takes
/// LdChain: list of chains for the load to be generated.
/// Ld: load to widen
- SDValue GenWidenVectorLoads(SmallVector<SDValue, 16>& LdChain,
+ SDValue GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD);
/// GenWidenVectorExtLoads - Helper function to generate a set of extension
@@ -661,20 +661,20 @@ private:
/// LdChain: list of chains for the load to be generated.
/// Ld: load to widen
/// ExtType: extension element type
- SDValue GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
+ SDValue GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD, ISD::LoadExtType ExtType);
/// Helper genWidenVectorStores - Helper function to generate a set of
/// stores to store a widen vector into non widen memory
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
- void GenWidenVectorStores(SmallVector<SDValue, 16>& StChain, StoreSDNode *ST);
+ void GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain, StoreSDNode *ST);
/// Helper genWidenVectorTruncStores - Helper function to generate a set of
/// stores to store a truncate widen vector into non widen memory
/// StChain: list of chains for the stores we have generated
/// ST: store of a widen value
- void GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
+ void GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST);
/// Modifies a vector input (widen or narrows) to a vector of NVT. The
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index 2cd59ac081..75bb6094f5 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2468,7 +2468,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
// LDOps: Load operators to build a vector type
// [Start,End) the list of loads to use.
static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
- SmallVector<SDValue, 16>& LdOps,
+ SmallVectorImpl<SDValue> &LdOps,
unsigned Start, unsigned End) {
SDLoc dl(LdOps[Start]);
EVT LdTy = LdOps[Start].getValueType();
@@ -2495,7 +2495,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
}
-SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
+SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
LoadSDNode *LD) {
// The strategy assumes that we can efficiently load powers of two widths.
// The routines chops the vector into the largest vector loads with the same
@@ -2649,8 +2649,8 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
}
SDValue
-DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
- LoadSDNode * LD,
+DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVectorImpl<SDValue> &LdChain,
+ LoadSDNode *LD,
ISD::LoadExtType ExtType) {
// For extension loads, it may not be more efficient to chop up the vector
// and then extended it. Instead, we unroll the load and build a new vector.
@@ -2697,7 +2697,7 @@ DAGTypeLegalizer::GenWidenVectorExtLoads(SmallVector<SDValue, 16>& LdChain,
}
-void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
+void DAGTypeLegalizer::GenWidenVectorStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST) {
// The strategy assumes that we can efficiently store powers of two widths.
// The routines chops the vector into the largest vector stores with the same
@@ -2766,7 +2766,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
}
void
-DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
+DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVectorImpl<SDValue> &StChain,
StoreSDNode *ST) {
// For extension loads, it may not be more efficient to truncate the vector
// and then store it. Instead, we extract each element and then store it.
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
index d1f36cb647..6c5e0ab8b2 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp
@@ -102,8 +102,8 @@ private:
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
- SmallVector<SUnit*, 2>&);
- bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
+ SmallVectorImpl<SUnit*>&);
+ bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
void ListScheduleBottomUp();
/// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
@@ -387,7 +387,7 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC,
- SmallVector<SUnit*, 2> &Copies) {
+ SmallVectorImpl<SUnit*> &Copies) {
SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
@@ -448,7 +448,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
- SmallVector<unsigned, 4> &LRegs,
+ SmallVectorImpl<unsigned> &LRegs,
const TargetRegisterInfo *TRI) {
bool Added = false;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
@@ -467,7 +467,7 @@ static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
- SmallVector<unsigned, 4> &LRegs){
+ SmallVectorImpl<unsigned> &LRegs){
if (NumLiveRegs == 0)
return false;
@@ -567,7 +567,7 @@ void ScheduleDAGFast::ListScheduleBottomUp() {
// "expensive to copy" values to break the dependency. In case even
// that doesn't work, insert cross class copies.
SUnit *TrySU = NotReady[0];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
index a7daf8777c..900f140152 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp
@@ -229,8 +229,8 @@ private:
void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
const TargetRegisterClass*,
const TargetRegisterClass*,
- SmallVector<SUnit*, 2>&);
- bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
+ SmallVectorImpl<SUnit*>&);
+ bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
void releaseInterferences(unsigned Reg = 0);
@@ -1133,9 +1133,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
- SmallVector<SUnit*, 2> &Copies) {
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC,
+ SmallVectorImpl<SUnit*> &Copies) {
SUnit *CopyFromSU = CreateNewSUnit(NULL);
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
@@ -1205,7 +1205,7 @@ static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
- SmallVector<unsigned, 4> &LRegs,
+ SmallVectorImpl<unsigned> &LRegs,
const TargetRegisterInfo *TRI) {
for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {
@@ -1227,7 +1227,7 @@ static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
std::vector<SUnit*> &LiveRegDefs,
SmallSet<unsigned, 4> &RegAdded,
- SmallVector<unsigned, 4> &LRegs) {
+ SmallVectorImpl<unsigned> &LRegs) {
// Look at all live registers. Skip Reg0 and the special CallResource.
for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
if (!LiveRegDefs[i]) continue;
@@ -1252,7 +1252,7 @@ static const uint32_t *getNodeRegMask(const SDNode *N) {
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
-DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
+DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
if (NumLiveRegs == 0)
return false;
@@ -1331,7 +1331,7 @@ void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
SUnit *SU = Interferences[i-1];
LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
if (Reg) {
- SmallVector<unsigned, 4> &LRegs = LRegsPos->second;
+ SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
continue;
}
@@ -1385,7 +1385,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// to resolve it.
for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
SUnit *TrySU = Interferences[i];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
// Try unscheduling up to the point where it's safe to schedule
// this node.
@@ -1433,7 +1433,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
// insert cross class copies.
// If it's not too expensive, i.e. cost != -1, issue copies.
SUnit *TrySU = Interferences[0];
- SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
+ SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
index ad0647367f..982dcc92b2 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp
@@ -700,11 +700,10 @@ namespace {
}
/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
-static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
- InstrEmitter &Emitter,
- SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- unsigned Order) {
+static void
+ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
+ SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
+ DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
if (!N->getHasDebugValue())
return;
@@ -731,11 +730,11 @@ static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
-static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
- InstrEmitter &Emitter,
- DenseMap<SDValue, unsigned> &VRBaseMap,
- SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
- SmallSet<unsigned, 8> &Seen) {
+static void
+ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
+ DenseMap<SDValue, unsigned> &VRBaseMap,
+ SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
+ SmallSet<unsigned, 8> &Seen) {
unsigned Order = N->getIROrder();
if (!Order || !Seen.insert(Order)) {
// Process any valid SDDbgValues even if node does not have any order
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 20b9892708..81d133a9eb 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6077,9 +6077,10 @@ bool SDNode::hasPredecessor(const SDNode *N) const {
return hasPredecessorHelper(N, Visited, Worklist);
}
-bool SDNode::hasPredecessorHelper(const SDNode *N,
- SmallPtrSet<const SDNode *, 32> &Visited,
- SmallVector<const SDNode *, 16> &Worklist) const {
+bool
+SDNode::hasPredecessorHelper(const SDNode *N,
+ SmallPtrSet<const SDNode *, 32> &Visited,
+ SmallVectorImpl<const SDNode *> &Worklist) const {
if (Visited.empty()) {
Worklist.push_back(this);
} else {
diff --git a/lib/CodeGen/ShrinkWrapping.cpp b/lib/CodeGen/ShrinkWrapping.cpp
index 2feea59c03..6c826deaf0 100644
--- a/lib/CodeGen/ShrinkWrapping.cpp
+++ b/lib/CodeGen/ShrinkWrapping.cpp
@@ -554,7 +554,7 @@ bool PEI::calculateSets(MachineFunction &Fn) {
/// _outside_ the computed minimal placement regions have been covered.
///
bool PEI::addUsesForMEMERegion(MachineBasicBlock* MBB,
- SmallVector<MachineBasicBlock*, 4>& blks) {
+ SmallVectorImpl<MachineBasicBlock *> &blks) {
if (MBB->succ_size() < 2 && MBB->pred_size() < 2) {
bool processThisBlock = false;
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
@@ -629,7 +629,7 @@ bool PEI::addUsesForMEMERegion(MachineBasicBlock* MBB,
/// addUsesForTopLevelLoops - add uses for CSRs used inside top
/// level loops to the exit blocks of those loops.
///
-bool PEI::addUsesForTopLevelLoops(SmallVector<MachineBasicBlock*, 4>& blks) {
+bool PEI::addUsesForTopLevelLoops(SmallVectorImpl<MachineBasicBlock *> &blks) {
bool addedUses = false;
// Place restores for top level loops where needed.
@@ -674,7 +674,7 @@ bool PEI::addUsesForTopLevelLoops(SmallVector<MachineBasicBlock*, 4>& blks) {
/// multi-entry/exit regions.
///
bool PEI::calcSpillPlacements(MachineBasicBlock* MBB,
- SmallVector<MachineBasicBlock*, 4> &blks,
+ SmallVectorImpl<MachineBasicBlock *> &blks,
CSRegBlockMap &prevSpills) {
bool placedSpills = false;
// Intersect (CSRegs - AnticIn[P]) for P in Predecessors(MBB)
@@ -736,7 +736,7 @@ bool PEI::calcSpillPlacements(MachineBasicBlock* MBB,
/// multi-entry/exit regions.
///
bool PEI::calcRestorePlacements(MachineBasicBlock* MBB,
- SmallVector<MachineBasicBlock*, 4> &blks,
+ SmallVectorImpl<MachineBasicBlock *> &blks,
CSRegBlockMap &prevRestores) {
bool placedRestores = false;
// Intersect (CSRegs - AvailOut[S]) for S in Successors(MBB)
diff --git a/lib/CodeGen/StackSlotColoring.cpp b/lib/CodeGen/StackSlotColoring.cpp
index d0c9e1d677..9f44df8f43 100644
--- a/lib/CodeGen/StackSlotColoring.cpp
+++ b/lib/CodeGen/StackSlotColoring.cpp
@@ -106,7 +106,7 @@ namespace {
bool OverlapWithAssignments(LiveInterval *li, int Color) const;
int ColorSlot(LiveInterval *li);
bool ColorSlots(MachineFunction &MF);
- void RewriteInstruction(MachineInstr *MI, SmallVector<int, 16> &SlotMapping,
+ void RewriteInstruction(MachineInstr *MI, SmallVectorImpl<int> &SlotMapping,
MachineFunction &MF);
bool RemoveDeadStores(MachineBasicBlock* MBB);
};
@@ -340,7 +340,7 @@ bool StackSlotColoring::ColorSlots(MachineFunction &MF) {
/// RewriteInstruction - Rewrite specified instruction by replacing references
/// to old frame index with new one.
void StackSlotColoring::RewriteInstruction(MachineInstr *MI,
- SmallVector<int, 16> &SlotMapping,
+ SmallVectorImpl<int> &SlotMapping,
MachineFunction &MF) {
// Update the operands.
for (unsigned i = 0, ee = MI->getNumOperands(); i != ee; ++i) {
diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp
index 1ec88172a0..82b78e2f56 100644
--- a/lib/CodeGen/TailDuplication.cpp
+++ b/lib/CodeGen/TailDuplication.cpp
@@ -86,7 +86,7 @@ namespace {
void ProcessPHI(MachineInstr *MI, MachineBasicBlock *TailBB,
MachineBasicBlock *PredBB,
DenseMap<unsigned, unsigned> &LocalVRMap,
- SmallVector<std::pair<unsigned,unsigned>, 4> &Copies,
+ SmallVectorImpl<std::pair<unsigned,unsigned> > &Copies,
const DenseSet<unsigned> &UsedByPhi,
bool Remove);
void DuplicateInstruction(MachineInstr *MI,
@@ -96,7 +96,7 @@ namespace {
DenseMap<unsigned, unsigned> &LocalVRMap,
const DenseSet<unsigned> &UsedByPhi);
void UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
- SmallVector<MachineBasicBlock*, 8> &TDBBs,
+ SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallSetVector<MachineBasicBlock*, 8> &Succs);
bool TailDuplicateBlocks(MachineFunction &MF);
bool shouldTailDuplicate(const MachineFunction &MF,
@@ -104,14 +104,14 @@ namespace {
bool isSimpleBB(MachineBasicBlock *TailBB);
bool canCompletelyDuplicateBB(MachineBasicBlock &BB);
bool duplicateSimpleBB(MachineBasicBlock *TailBB,
- SmallVector<MachineBasicBlock*, 8> &TDBBs,
+ SmallVectorImpl<MachineBasicBlock *> &TDBBs,
const DenseSet<unsigned> &RegsUsedByPhi,
- SmallVector<MachineInstr*, 16> &Copies);
+ SmallVectorImpl<MachineInstr *> &Copies);
bool TailDuplicate(MachineBasicBlock *TailBB,
bool IsSimple,
MachineFunction &MF,
- SmallVector<MachineBasicBlock*, 8> &TDBBs,
- SmallVector<MachineInstr*, 16> &Copies);
+ SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+ SmallVectorImpl<MachineInstr *> &Copies);
bool TailDuplicateAndUpdate(MachineBasicBlock *MBB,
bool IsSimple,
MachineFunction &MF);
@@ -386,7 +386,7 @@ void TailDuplicatePass::ProcessPHI(MachineInstr *MI,
MachineBasicBlock *TailBB,
MachineBasicBlock *PredBB,
DenseMap<unsigned, unsigned> &LocalVRMap,
- SmallVector<std::pair<unsigned,unsigned>, 4> &Copies,
- SmallVectorImpl<std::pair<unsigned,unsigned> > &Copies,
const DenseSet<unsigned> &RegsUsedByPhi,
bool Remove) {
unsigned DefReg = MI->getOperand(0).getReg();
@@ -452,7 +452,7 @@ void TailDuplicatePass::DuplicateInstruction(MachineInstr *MI,
/// instructions in them accordingly.
void
TailDuplicatePass::UpdateSuccessorsPHIs(MachineBasicBlock *FromBB, bool isDead,
- SmallVector<MachineBasicBlock*, 8> &TDBBs,
+ SmallVectorImpl<MachineBasicBlock *> &TDBBs,
SmallSetVector<MachineBasicBlock*,8> &Succs) {
for (SmallSetVector<MachineBasicBlock*, 8>::iterator SI = Succs.begin(),
SE = Succs.end(); SI != SE; ++SI) {
@@ -662,9 +662,9 @@ TailDuplicatePass::canCompletelyDuplicateBB(MachineBasicBlock &BB) {
bool
TailDuplicatePass::duplicateSimpleBB(MachineBasicBlock *TailBB,
- SmallVector<MachineBasicBlock*, 8> &TDBBs,
- const DenseSet<unsigned> &UsedByPhi,
- SmallVector<MachineInstr*, 16> &Copies) {
+ SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+ const DenseSet<unsigned> &UsedByPhi,
+ SmallVectorImpl<MachineInstr *> &Copies) {
SmallPtrSet<MachineBasicBlock*, 8> Succs(TailBB->succ_begin(),
TailBB->succ_end());
SmallVector<MachineBasicBlock*, 8> Preds(TailBB->pred_begin(),
@@ -742,8 +742,8 @@ bool
TailDuplicatePass::TailDuplicate(MachineBasicBlock *TailBB,
bool IsSimple,
MachineFunction &MF,
- SmallVector<MachineBasicBlock*, 8> &TDBBs,
- SmallVector<MachineInstr*, 16> &Copies) {
+ SmallVectorImpl<MachineBasicBlock *> &TDBBs,
+ SmallVectorImpl<MachineInstr *> &Copies) {
DEBUG(dbgs() << "\n*** Tail-duplicating BB#" << TailBB->getNumber() << '\n');
DenseSet<unsigned> UsedByPhi;
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index 7ca2beef65..c52e6756c7 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1539,7 +1539,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) {
// transformations that may either eliminate the tied operands or
// improve the opportunities for coalescing away the register copy.
if (TiedOperands.size() == 1) {
- SmallVector<std::pair<unsigned, unsigned>, 4> &TiedPairs
+ SmallVectorImpl<std::pair<unsigned, unsigned> > &TiedPairs
= TiedOperands.begin()->second;
if (TiedPairs.size() == 1) {
unsigned SrcIdx = TiedPairs[0].first;
diff --git a/lib/IR/Metadata.cpp b/lib/IR/Metadata.cpp
index 6a6b7af5cb..bd4d9c0a07 100644
--- a/lib/IR/Metadata.cpp
+++ b/lib/IR/Metadata.cpp
@@ -422,7 +422,7 @@ static bool canBeMerged(const ConstantRange &A, const ConstantRange &B) {
return !A.intersectWith(B).isEmptySet() || isContiguous(A, B);
}
-static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+static bool tryMergeRange(SmallVectorImpl<Value *> &EndPoints, ConstantInt *Low,
ConstantInt *High) {
ConstantRange NewRange(Low->getValue(), High->getValue());
unsigned Size = EndPoints.size();
@@ -439,7 +439,7 @@ static bool tryMergeRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
return false;
}
-static void addRange(SmallVector<Value*, 4> &EndPoints, ConstantInt *Low,
+static void addRange(SmallVectorImpl<Value *> &EndPoints, ConstantInt *Low,
ConstantInt *High) {
if (!EndPoints.empty())
if (tryMergeRange(EndPoints, Low, High))
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1fa1edba19..777f4dd984 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1079,9 +1079,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8c4a3f13d1..cc323f684f 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -74,7 +74,7 @@ namespace {
class ARMCCState : public CCState {
public:
ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
- const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
+ const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C, ParmContext PC)
: CCState(CC, isVarArg, MF, TM, locs, C) {
assert(((PC == Call) || (PC == Prologue)) &&
@@ -1330,7 +1330,7 @@ void ARMTargetLowering::PassF64ArgInRegs(SDLoc dl, SelectionDAG &DAG,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
+ SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const {
SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
@@ -1358,9 +1358,9 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 659377772d..ed6c4057b6 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -417,7 +417,7 @@ namespace llvm {
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
- SmallVector<SDValue, 8> &MemOpChains,
+ SmallVectorImpl<SDValue> &MemOpChains,
ISD::ArgFlagsTy Flags) const;
SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index 0f83cdc4ec..1803a8a588 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -109,12 +109,12 @@ namespace {
unsigned PredReg,
unsigned Scratch,
DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges);
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges);
void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
int Opcode, unsigned Size,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned Scratch, MemOpQueue &MemOps,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges);
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges);
void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
@@ -371,7 +371,7 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
ARMCC::CondCodes Pred, unsigned PredReg,
unsigned Scratch,
DebugLoc dl,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges) {
// First calculate which of the registers should be killed by the merged
// instruction.
const unsigned insertPos = memOps[insertAfter].Position;
@@ -444,10 +444,10 @@ void ARMLoadStoreOpt::MergeOpsUpdate(MachineBasicBlock &MBB,
/// load / store multiple instructions.
void
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
- unsigned Base, int Opcode, unsigned Size,
- ARMCC::CondCodes Pred, unsigned PredReg,
- unsigned Scratch, MemOpQueue &MemOps,
- SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
+ unsigned Base, int Opcode, unsigned Size,
+ ARMCC::CondCodes Pred, unsigned PredReg,
+ unsigned Scratch, MemOpQueue &MemOps,
+ SmallVectorImpl<MachineBasicBlock::iterator> &Merges) {
bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
int Offset = MemOps[SIndex].Offset;
int SOffset = Offset;
diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/lib/Target/Hexagon/HexagonCallingConvLower.cpp
index fc5503aae2..f5f958c101 100644
--- a/lib/Target/Hexagon/HexagonCallingConvLower.cpp
+++ b/lib/Target/Hexagon/HexagonCallingConvLower.cpp
@@ -25,7 +25,7 @@ using namespace llvm;
Hexagon_CCState::Hexagon_CCState(CallingConv::ID CC, bool isVarArg,
const TargetMachine &tm,
- SmallVector<CCValAssign, 16> &locs,
+ SmallVectorImpl<CCValAssign> &locs,
LLVMContext &c)
: CallingConv(CC), IsVarArg(isVarArg), TM(tm), Locs(locs), Context(c) {
// No stack is used.
diff --git a/lib/Target/Hexagon/HexagonCallingConvLower.h b/lib/Target/Hexagon/HexagonCallingConvLower.h
index eed99f42c2..33c83064f9 100644
--- a/lib/Target/Hexagon/HexagonCallingConvLower.h
+++ b/lib/Target/Hexagon/HexagonCallingConvLower.h
@@ -48,14 +48,14 @@ class Hexagon_CCState {
CallingConv::ID CallingConv;
bool IsVarArg;
const TargetMachine &TM;
- SmallVector<CCValAssign, 16> &Locs;
+ SmallVectorImpl<CCValAssign> &Locs;
LLVMContext &Context;
unsigned StackOffset;
SmallVector<uint32_t, 16> UsedRegs;
public:
Hexagon_CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
- SmallVector<CCValAssign, 16> &locs, LLVMContext &c);
+ SmallVectorImpl<CCValAssign> &locs, LLVMContext &c);
void addLoc(const CCValAssign &V) {
Locs.push_back(V);
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index d00278811a..3c4ca0fc81 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -134,7 +134,7 @@ namespace {
/// has a computable trip count and, if so, return a value that represents
/// the trip count expression.
CountValue *getLoopTripCount(MachineLoop *L,
- SmallVector<MachineInstr*, 2> &OldInsts);
+ SmallVectorImpl<MachineInstr *> &OldInsts);
/// \brief Return the expression that represents the number of times
/// a loop iterates. The function takes the operands that represent the
@@ -164,7 +164,7 @@ namespace {
/// \brief Return true if the instruction is now dead.
bool isDead(const MachineInstr *MI,
- SmallVector<MachineInstr*, 1> &DeadPhis) const;
+ SmallVectorImpl<MachineInstr *> &DeadPhis) const;
/// \brief Remove the instruction if it is now dead.
void removeIfDead(MachineInstr *MI);
@@ -428,7 +428,7 @@ bool HexagonHardwareLoops::findInductionRegister(MachineLoop *L,
/// induction variable patterns that are used in the calculation for
/// the number of time the loop is executed.
CountValue *HexagonHardwareLoops::getLoopTripCount(MachineLoop *L,
- SmallVector<MachineInstr*, 2> &OldInsts) {
+ SmallVectorImpl<MachineInstr *> &OldInsts) {
MachineBasicBlock *TopMBB = L->getTopBlock();
MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin();
assert(PI != TopMBB->pred_end() &&
@@ -890,7 +890,7 @@ bool HexagonHardwareLoops::containsInvalidInstruction(MachineLoop *L) const {
/// for inline asm, physical registers and instructions with side effects
/// removed.
bool HexagonHardwareLoops::isDead(const MachineInstr *MI,
- SmallVector<MachineInstr*, 1> &DeadPhis) const {
+ SmallVectorImpl<MachineInstr *> &DeadPhis) const {
// Examine each operand.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 85e1045fdf..6cb126f68b 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -382,10 +382,10 @@ SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/lib/Target/MBlaze/MBlazeFrameLowering.cpp
index 786eeeee8c..f7a88316bc 100644
--- a/lib/Target/MBlaze/MBlazeFrameLowering.cpp
+++ b/lib/Target/MBlaze/MBlazeFrameLowering.cpp
@@ -38,8 +38,8 @@ static cl::opt<bool> MBDisableStackAdjust(
cl::desc("Disable MBlaze stack layout adjustment."),
cl::Hidden);
-static void replaceFrameIndexes(MachineFunction &MF,
- SmallVector<std::pair<int,int64_t>, 16> &FR) {
+static void replaceFrameIndexes(MachineFunction &MF,
+ SmallVectorImpl<std::pair<int,int64_t> > &FR) {
MachineFrameInfo *MFI = MF.getFrameInfo();
MBlazeFunctionInfo *MBlazeFI = MF.getInfo<MBlazeFunctionInfo>();
const SmallVectorImpl<std::pair<int,int64_t> >::iterator FRB = FR.begin();
diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp
index 8046da2728..54df96565a 100644
--- a/lib/Target/MBlaze/MBlazeISelLowering.cpp
+++ b/lib/Target/MBlaze/MBlazeISelLowering.cpp
@@ -687,9 +687,9 @@ LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/MBlaze/MBlazeMachineFunction.h b/lib/Target/MBlaze/MBlazeMachineFunction.h
index 10d507f37b..43240eb507 100644
--- a/lib/Target/MBlaze/MBlazeMachineFunction.h
+++ b/lib/Target/MBlaze/MBlazeMachineFunction.h
@@ -118,7 +118,7 @@ public:
return false;
}
- const SmallVector<int, 16>& getLiveIn() const { return LiveInFI; }
+ const SmallVectorImpl<int> &getLiveIn() const { return LiveInFI; }
void recordReplacement(int OFI, int NFI) {
FIReplacements.insert(std::make_pair(OFI,NFI));
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 168e3f1af5..b144164ad6 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -279,9 +279,9 @@ MSP430TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 5fd50fda31..ffa077fe5d 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -2323,9 +2323,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc DL = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &IsTailCall = CLI.IsTailCall;
@@ -3383,7 +3383,7 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
void MipsTargetLowering::
passByValArg(SDValue Chain, SDLoc DL,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const {
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 6103db596a..123a2a69e5 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -386,7 +386,7 @@ namespace llvm {
/// passByValArg - Pass a byval argument in registers or on stack.
void passByValArg(SDValue Chain, SDLoc DL,
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
- SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
+ SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
const MipsCC &CC, const ByValArgInfo &ByVal,
const ISD::ArgFlagsTy &Flags, bool isLittle) const;
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index d4cc31b1a2..c89c2fc9e6 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -493,9 +493,9 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index a19ce23937..8e33830da1 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -369,7 +369,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
// Check if the link register (LR) must be saved.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
bool MustSaveLR = FI->mustSaveLR();
- const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs();
+ const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs();
// Do we have a frame pointer for this function?
bool HasFP = hasFP(MF);
@@ -642,7 +642,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
// Check if the link register (LR) has been saved.
PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
bool MustSaveLR = FI->mustSaveLR();
- const SmallVector<unsigned, 3> &MustSaveCRs = FI->getMustSaveCRs();
+ const SmallVectorImpl<unsigned> &MustSaveCRs = FI->getMustSaveCRs();
// Do we have a frame pointer for this function?
bool HasFP = hasFP(MF);
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index b39f0d5367..a38201abd5 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2957,8 +2957,8 @@ struct TailCallArgumentInfo {
static void
StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG,
SDValue Chain,
- const SmallVector<TailCallArgumentInfo, 8> &TailCallArgs,
- SmallVector<SDValue, 8> &MemOpChains,
+ const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
+ SmallVectorImpl<SDValue> &MemOpChains,
SDLoc dl) {
for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
SDValue Arg = TailCallArgs[i].Arg;
@@ -3016,7 +3016,7 @@ static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG,
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
SDValue Arg, int SPDiff, unsigned ArgOffset,
- SmallVector<TailCallArgumentInfo, 8>& TailCallArguments) {
+ SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
int Offset = ArgOffset + SPDiff;
uint32_t OpSize = (Arg.getValueType().getSizeInBits()+7)/8;
int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
@@ -3081,8 +3081,8 @@ static void
LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain,
SDValue Arg, SDValue PtrOff, int SPDiff,
unsigned ArgOffset, bool isPPC64, bool isTailCall,
- bool isVector, SmallVector<SDValue, 8> &MemOpChains,
- SmallVector<TailCallArgumentInfo, 8> &TailCallArguments,
+ bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
+ SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments,
SDLoc dl) {
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
if (!isTailCall) {
@@ -3106,7 +3106,7 @@ static
void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes,
SDValue LROp, SDValue FPOp, bool isDarwinABI,
- SmallVector<TailCallArgumentInfo, 8> &TailCallArguments) {
+ SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
MachineFunction &MF = DAG.getMachineFunction();
// Emit a sequence of copyto/copyfrom virtual registers for arguments that
@@ -3133,8 +3133,8 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
static
unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
SDValue &Chain, SDLoc dl, int SPDiff, bool isTailCall,
- SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
- SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
+ SmallVectorImpl<std::pair<unsigned, SDValue> > &RegsToPass,
+ SmallVectorImpl<SDValue> &Ops, std::vector<EVT> &NodeTys,
const PPCSubtarget &PPCSubTarget) {
bool isPPC64 = PPCSubTarget.isPPC64();
@@ -3460,10 +3460,10 @@ SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/PowerPC/PPCMachineFunctionInfo.h b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
index 40d1f3a5fc..3b2ac3bd13 100644
--- a/lib/Target/PowerPC/PPCMachineFunctionInfo.h
+++ b/lib/Target/PowerPC/PPCMachineFunctionInfo.h
@@ -160,7 +160,7 @@ public:
int getCRSpillFrameIndex() const { return CRSpillFrameIndex; }
void setCRSpillFrameIndex(int idx) { CRSpillFrameIndex = idx; }
- const SmallVector<unsigned, 3> &
+ const SmallVectorImpl<unsigned> &
getMustSaveCRs() const { return MustSaveCRs; }
void addMustSaveCR(unsigned Reg) { MustSaveCRs.push_back(Reg); }
};
diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp
index 437480c39e..20a94f1fa6 100644
--- a/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/lib/Target/R600/AMDILCFGStructurizer.cpp
@@ -92,7 +92,7 @@ void PrintLoopinfo(const LoopinfoT &LoopInfo, llvm::raw_ostream &OS) {
}
template<class NodeT>
-void ReverseVector(SmallVector<NodeT *, DEFAULT_VEC_SLOTS> &Src) {
+void ReverseVector(SmallVectorImpl<NodeT *> &Src) {
size_t sz = Src.size();
for (size_t i = 0; i < sz/2; ++i) {
NodeT *t = Src[i];
@@ -258,7 +258,7 @@ private:
BlockT *normalizeInfiniteLoopExit(LoopT *LoopRep);
void removeUnconditionalBranch(BlockT *SrcBlock);
void removeRedundantConditionalBranch(BlockT *SrcBlock);
- void addDummyExitBlock(SmallVector<BlockT *, DEFAULT_VEC_SLOTS> &RetBlocks);
+ void addDummyExitBlock(SmallVectorImpl<BlockT *> &RetBlocks);
void removeSuccessor(BlockT *SrcBlock);
BlockT *cloneBlockForPredecessor(BlockT *CurBlock, BlockT *PredBlock);
@@ -2076,8 +2076,8 @@ void CFGStructurizer<PassT>::removeRedundantConditionalBranch(BlockT *srcBlk) {
} //removeRedundantConditionalBranch
template<class PassT>
-void CFGStructurizer<PassT>::addDummyExitBlock(SmallVector<BlockT*,
- DEFAULT_VEC_SLOTS> &retBlks) {
+void CFGStructurizer<PassT>::addDummyExitBlock(SmallVectorImpl<BlockT *>
+ &retBlks) {
BlockT *dummyExitBlk = funcRep->CreateMachineBasicBlock();
funcRep->push_back(dummyExitBlk); //insert to function
CFGTraits::insertInstrEnd(dummyExitBlk, AMDGPU::RETURN, passRep);
diff --git a/lib/Target/R600/R600EmitClauseMarkers.cpp b/lib/Target/R600/R600EmitClauseMarkers.cpp
index c1da64cac4..fac2b470a8 100644
--- a/lib/Target/R600/R600EmitClauseMarkers.cpp
+++ b/lib/Target/R600/R600EmitClauseMarkers.cpp
@@ -107,7 +107,7 @@ private:
bool SubstituteKCacheBank(MachineInstr *MI,
std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const {
std::vector<std::pair<unsigned, unsigned> > UsedKCache;
- const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Consts =
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts =
TII->getSrcs(MI);
assert((TII->isALUInstr(MI->getOpcode()) ||
MI->getOpcode() == AMDGPU::DOT_4) && "Can't assign Const");
diff --git a/lib/Target/R600/R600InstrInfo.cpp b/lib/Target/R600/R600InstrInfo.cpp
index 1a07b05c4b..0c059aa3c4 100644
--- a/lib/Target/R600/R600InstrInfo.cpp
+++ b/lib/Target/R600/R600InstrInfo.cpp
@@ -519,7 +519,7 @@ R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
if (!isALUInstr(MI->getOpcode()))
continue;
- const SmallVector<std::pair<MachineOperand *, int64_t>, 3> &Srcs =
+ const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Srcs =
getSrcs(MI);
for (unsigned j = 0, e = Srcs.size(); j < e; j++) {
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index 414087afcd..6ddfa8cc29 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -654,9 +654,9 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index a317f0c3f6..e6e6d0266c 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -679,9 +679,9 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &DL = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 95ca6c315d..3407a983a0 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -2280,10 +2280,10 @@ SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
CallingConv::ID CallConv = CLI.CallConv;
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 5af2c9c0cc..5f3d93531f 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -847,10 +847,10 @@ SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- SDLoc &dl = CLI.DL;
- SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
bool &isTailCall = CLI.IsTailCall;
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index b631b28565..b474bd84ca 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1299,7 +1299,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
/// always in the local (OverallLeftShift) coordinate space.
///
static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
- SmallVector<Value*, 8> &ByteValues) {
+ SmallVectorImpl<Value *> &ByteValues) {
if (Instruction *I = dyn_cast<Instruction>(V)) {
// If this is an or instruction, it may be an inner node of the bswap.
if (I->getOpcode() == Instruction::Or) {
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index cef049591c..6f94a7cc15 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -1198,9 +1198,9 @@ namespace {
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
Module *M,
- SmallVector<Instruction *, 4> &NewRetains,
- SmallVector<Instruction *, 4> &NewReleases,
- SmallVector<Instruction *, 8> &DeadInsts,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
RRInfo &RetainsToMove,
RRInfo &ReleasesToMove,
Value *Arg,
@@ -2477,9 +2477,9 @@ ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
Module *M,
- SmallVector<Instruction *, 4> &NewRetains,
- SmallVector<Instruction *, 4> &NewReleases,
- SmallVector<Instruction *, 8> &DeadInsts,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
RRInfo &RetainsToMove,
RRInfo &ReleasesToMove,
Value *Arg,
diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp
index 0a406005af..9e39d2ee84 100644
--- a/lib/Transforms/Scalar/LoopDeletion.cpp
+++ b/lib/Transforms/Scalar/LoopDeletion.cpp
@@ -51,8 +51,8 @@ namespace {
}
private:
- bool isLoopDead(Loop *L, SmallVector<BasicBlock*, 4> &exitingBlocks,
- SmallVector<BasicBlock*, 4> &exitBlocks,
+ bool isLoopDead(Loop *L, SmallVectorImpl<BasicBlock *> &exitingBlocks,
+ SmallVectorImpl<BasicBlock *> &exitBlocks,
bool &Changed, BasicBlock *Preheader);
};
@@ -77,8 +77,8 @@ Pass *llvm::createLoopDeletionPass() {
/// checked for unique exit and exiting blocks, and that the code is in LCSSA
/// form.
bool LoopDeletion::isLoopDead(Loop *L,
- SmallVector<BasicBlock*, 4> &exitingBlocks,
- SmallVector<BasicBlock*, 4> &exitBlocks,
+ SmallVectorImpl<BasicBlock *> &exitingBlocks,
+ SmallVectorImpl<BasicBlock *> &exitBlocks,
bool &Changed, BasicBlock *Preheader) {
BasicBlock *exitBlock = exitBlocks[0];
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 0e8199f2fd..e5a9c62cca 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -196,7 +196,7 @@ namespace {
/// Split all of the edges from inside the loop to their exit blocks.
/// Update the appropriate Phi nodes as we do so.
- void SplitExitEdges(Loop *L, const SmallVector<BasicBlock *, 8> &ExitBlocks);
+ void SplitExitEdges(Loop *L, const SmallVectorImpl<BasicBlock *> &ExitBlocks);
bool UnswitchIfProfitable(Value *LoopCond, Constant *Val);
void UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val,
@@ -752,7 +752,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond,
/// SplitExitEdges - Split all of the edges from inside the loop to their exit
/// blocks. Update the appropriate Phi nodes as we do so.
void LoopUnswitch::SplitExitEdges(Loop *L,
- const SmallVector<BasicBlock *, 8> &ExitBlocks){
+ const SmallVectorImpl<BasicBlock *> &ExitBlocks){
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = ExitBlocks[i];
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 76c510e63e..43647207c2 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -439,7 +439,7 @@ private:
// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
//
- void getFeasibleSuccessors(TerminatorInst &TI, SmallVector<bool, 16> &Succs);
+ void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs);
// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
// block to the 'To' basic block is currently feasible.
@@ -513,7 +513,7 @@ private:
// successors are reachable from a given terminator instruction.
//
void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
- SmallVector<bool, 16> &Succs) {
+ SmallVectorImpl<bool> &Succs) {
Succs.resize(TI.getNumSuccessors());
if (BranchInst *BI = dyn_cast<BranchInst>(&TI)) {
if (BI->isUnconditional()) {
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 8d3d53cd70..33bbe15163 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -166,21 +166,21 @@ namespace {
void DeleteDeadInstructions();
void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
AllocaInst *AI,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
- SmallVector<AllocaInst*, 32> &NewElts);
+ SmallVectorImpl<AllocaInst *> &NewElts);
bool ShouldAttemptScalarRepl(AllocaInst *AI);
};
@@ -1865,7 +1865,7 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
/// Offset indicates the position within AI that is referenced by this
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts) {
+ SmallVectorImpl<AllocaInst *> &NewElts) {
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
Use &TheUse = UI.getUse();
Instruction *User = cast<Instruction>(*UI++);
@@ -1979,7 +1979,7 @@ void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
/// RewriteBitCast - Update a bitcast reference to the alloca being replaced
/// and recursively continue updating all of its uses.
void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts) {
+ SmallVectorImpl<AllocaInst *> &NewElts) {
RewriteForScalarRepl(BC, AI, Offset, NewElts);
if (BC->getOperand(0) != AI)
return;
@@ -2037,7 +2037,7 @@ uint64_t SROA::FindElementAndOffset(Type *&T, uint64_t &Offset,
/// elements of the alloca that are being split apart, and if so, rewrite
/// the GEP to be relative to the new element.
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts) {
+ SmallVectorImpl<AllocaInst *> &NewElts) {
uint64_t OldOffset = Offset;
SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
// If the GEP was dynamic then it must have been a dynamic vector lookup.
@@ -2099,7 +2099,7 @@ void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
/// to mark the lifetime of the scalarized memory.
void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
uint64_t Offset,
- SmallVector<AllocaInst*, 32> &NewElts) {
+ SmallVectorImpl<AllocaInst *> &NewElts) {
ConstantInt *OldSize = cast<ConstantInt>(II->getArgOperand(0));
// Put matching lifetime markers on everything from Offset up to
// Offset+OldSize.
@@ -2153,9 +2153,10 @@ void SROA::RewriteLifetimeIntrinsic(IntrinsicInst *II, AllocaInst *AI,
/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
-void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
- AllocaInst *AI,
- SmallVector<AllocaInst*, 32> &NewElts) {
+void
+SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
+ AllocaInst *AI,
+ SmallVectorImpl<AllocaInst *> &NewElts) {
// If this is a memcpy/memmove, construct the other pointer as the
// appropriate type. The "Other" pointer is the pointer that goes to memory
// that doesn't have anything to do with the alloca that we are promoting. For
@@ -2326,8 +2327,9 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation. Extract out the pieces of the stored
/// integer and store them individually.
-void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
- SmallVector<AllocaInst*, 32> &NewElts){
+void
+SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
+ SmallVectorImpl<AllocaInst *> &NewElts) {
// Extract each element out of the integer according to its structure offset
// and store the element value to the individual alloca.
Value *SrcVal = SI->getOperand(0);
@@ -2440,8 +2442,9 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer. Load the individual pieces to form the aggregate value.
-void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
- SmallVector<AllocaInst*, 32> &NewElts) {
+void
+SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
+ SmallVectorImpl<AllocaInst *> &NewElts) {
// Extract each element out of the NewElts according to its structure offset
// and form the result value.
Type *AllocaEltTy = AI->getAllocatedType();
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 1da6a070d9..9de1388921 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -99,16 +99,16 @@ namespace {
bool EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail);
bool FoldReturnAndProcessPred(BasicBlock *BB,
ReturnInst *Ret, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail);
bool ProcessReturningBlock(ReturnInst *RI, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail);
bool CanMoveAboveCall(Instruction *I, CallInst *CI);
Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI);
@@ -445,7 +445,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail) {
// If we are introducing accumulator recursion to eliminate operations after
// the call instruction that are both associative and commutative, the initial
@@ -621,7 +621,7 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB,
ReturnInst *Ret, BasicBlock *&OldEntry,
bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs,
bool CannotTailCallElimCallsMarkedTail) {
bool Change = false;
@@ -655,10 +655,11 @@ bool TailCallElim::FoldReturnAndProcessPred(BasicBlock *BB,
return Change;
}
-bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
- bool &TailCallsAreMarkedTail,
- SmallVector<PHINode*, 8> &ArgumentPHIs,
- bool CannotTailCallElimCallsMarkedTail) {
+bool
+TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
+ bool &TailCallsAreMarkedTail,
+ SmallVectorImpl<PHINode *> &ArgumentPHIs,
+ bool CannotTailCallElimCallsMarkedTail) {
CallInst *CI = FindTRECandidate(Ret, CannotTailCallElimCallsMarkedTail);
if (!CI)
return false;
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index d8878f2e51..eaeb19fad9 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -3353,7 +3353,7 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) {
for (ForwardingNodesMap::iterator I = ForwardingNodes.begin(),
E = ForwardingNodes.end(); I != E; ++I) {
PHINode *Phi = I->first;
- SmallVector<int,4> &Indexes = I->second;
+ SmallVectorImpl<int> &Indexes = I->second;
if (Indexes.size() < 2) continue;
@@ -3438,11 +3438,12 @@ static Constant *ConstantFold(Instruction *I,
/// at the common destination basic block, *CommonDest, for one of the case
/// destinations CaseDest corresponding to value CaseVal (0 for the default
/// case), of a switch instruction SI.
-static bool GetCaseResults(SwitchInst *SI,
- ConstantInt *CaseVal,
- BasicBlock *CaseDest,
- BasicBlock **CommonDest,
- SmallVector<std::pair<PHINode*,Constant*>, 4> &Res) {
+static bool
+GetCaseResults(SwitchInst *SI,
+ ConstantInt *CaseVal,
+ BasicBlock *CaseDest,
+ BasicBlock **CommonDest,
+ SmallVectorImpl<std::pair<PHINode*,Constant*> > &Res) {
// The block from which we enter the common destination.
BasicBlock *Pred = SI->getParent();
@@ -3515,7 +3516,7 @@ namespace {
SwitchLookupTable(Module &M,
uint64_t TableSize,
ConstantInt *Offset,
- const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
+ const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
const DataLayout *TD);
@@ -3562,7 +3563,7 @@ namespace {
SwitchLookupTable::SwitchLookupTable(Module &M,
uint64_t TableSize,
ConstantInt *Offset,
- const SmallVector<std::pair<ConstantInt*, Constant*>, 4>& Values,
+ const SmallVectorImpl<std::pair<ConstantInt*, Constant*> >& Values,
Constant *DefaultValue,
const DataLayout *TD)
: SingleValue(0), BitMap(0), BitMapElementTy(0), Array(0) {
diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp
index bd4a3b90b1..cbc1d630d5 100644
--- a/lib/Transforms/Vectorize/BBVectorize.cpp
+++ b/lib/Transforms/Vectorize/BBVectorize.cpp
@@ -356,7 +356,7 @@ namespace {
Instruction *J, unsigned o, bool IBeforeJ);
void getReplacementInputsForPair(LLVMContext& Context, Instruction *I,
- Instruction *J, SmallVector<Value *, 3> &ReplacedOperands,
+ Instruction *J, SmallVectorImpl<Value *> &ReplacedOperands,
bool IBeforeJ);
void replaceOutputsOfPair(LLVMContext& Context, Instruction *I,
@@ -2687,7 +2687,7 @@ namespace {
// to the vector instruction that fuses I with J.
void BBVectorize::getReplacementInputsForPair(LLVMContext& Context,
Instruction *I, Instruction *J,
- SmallVector<Value *, 3> &ReplacedOperands,
+ SmallVectorImpl<Value *> &ReplacedOperands,
bool IBeforeJ) {
unsigned NumOperands = I->getNumOperands();
diff --git a/utils/TableGen/CodeGenRegisters.cpp b/utils/TableGen/CodeGenRegisters.cpp
index 6134225492..d9f230ef1c 100644
--- a/utils/TableGen/CodeGenRegisters.cpp
+++ b/utils/TableGen/CodeGenRegisters.cpp
@@ -1088,7 +1088,7 @@ CodeGenRegBank::getCompositeSubRegIndex(CodeGenSubRegIndex *A,
}
CodeGenSubRegIndex *CodeGenRegBank::
-getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8> &Parts) {
+getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts) {
assert(Parts.size() > 1 && "Need two parts to concatenate");
// Look for an existing entry.
diff --git a/utils/TableGen/CodeGenRegisters.h b/utils/TableGen/CodeGenRegisters.h
index f9edc6553a..33f32b0812 100644
--- a/utils/TableGen/CodeGenRegisters.h
+++ b/utils/TableGen/CodeGenRegisters.h
@@ -534,10 +534,10 @@ namespace llvm {
// Find or create a sub-register index representing the concatenation of
// non-overlapping sibling indices.
CodeGenSubRegIndex *
- getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8>&);
+ getConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8>&);
void
- addConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex*, 8> &Parts,
+ addConcatSubRegIndex(const SmallVector<CodeGenSubRegIndex *, 8> &Parts,
CodeGenSubRegIndex *Idx) {
ConcatIdx.insert(std::make_pair(Parts, Idx));
}
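
Unlike the other hunks, getConcatSubRegIndex and addConcatSubRegIndex above keep the concrete SmallVector<CodeGenSubRegIndex *, 8> type and only gain a space, presumably because the vector is used directly as the key of ConcatIdx (ConcatIdx.insert(std::make_pair(Parts, Idx))), so the full type, inline capacity included, is baked into the map's key type. A hypothetical sketch of that constraint with made-up names, assuming LLVM's ADT headers:

#include "llvm/ADT/SmallVector.h"
#include <map>
using namespace llvm;

// When the vector itself is the map key, the concrete SmallVector<T, N>
// (including N) is part of the key type, so the helper's parameter cannot
// be loosened to SmallVectorImpl<T>& without changing the map as well.
typedef SmallVector<unsigned, 8> PartsVec;
static std::map<PartsVec, int> Table;

static void remember(const PartsVec &Parts, int Idx) {
  Table.insert(std::make_pair(Parts, Idx));
}

int main() {
  PartsVec P;
  P.push_back(1);
  P.push_back(2);
  remember(P, 42);
  return Table.count(P) ? 0 : 1;
}
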
diff --git a/utils/TableGen/CodeGenSchedule.cpp b/utils/TableGen/CodeGenSchedule.cpp
index 85eee5fde9..001e97d1e1 100644
--- a/utils/TableGen/CodeGenSchedule.cpp
+++ b/utils/TableGen/CodeGenSchedule.cpp
@@ -1102,7 +1102,7 @@ void PredTransitions::getIntersectingVariants(
TransVariant &Variant = Variants[VIdx];
// Don't expand variants if the processor models don't intersect.
// A zero processor index means any processor.
- SmallVector<unsigned, 4> &ProcIndices = TransVec[TransIdx].ProcIndices;
+ SmallVectorImpl<unsigned> &ProcIndices = TransVec[TransIdx].ProcIndices;
if (ProcIndices[0] && Variants[VIdx].ProcIdx) {
unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(),
Variant.ProcIdx);