Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/ExprTypeConvert.cpp | 5
-rw-r--r--  lib/Transforms/IPO/ArgumentPromotion.cpp | 2
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp | 2
-rw-r--r--  lib/Transforms/IPO/IndMemRemoval.cpp | 2
-rw-r--r--  lib/Transforms/IPO/InlineSimple.cpp | 2
-rw-r--r--  lib/Transforms/IPO/Inliner.cpp | 2
-rw-r--r--  lib/Transforms/IPO/LowerSetJmp.cpp | 1
-rw-r--r--  lib/Transforms/IPO/SimplifyLibCalls.cpp | 4
-rw-r--r--  lib/Transforms/Instrumentation/RSProfiling.cpp | 1
-rw-r--r--  lib/Transforms/Instrumentation/TraceBasicBlocks.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/CorrelatedExprs.cpp | 1
-rw-r--r--  lib/Transforms/Scalar/InstructionCombining.cpp | 16
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/LoopUnroll.cpp | 3
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp | 7
-rw-r--r--  lib/Transforms/Scalar/SCCP.cpp | 4
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/TailRecursionElimination.cpp | 1
-rw-r--r--  lib/Transforms/Utils/CloneFunction.cpp | 3
-rw-r--r--  lib/Transforms/Utils/LoopSimplify.cpp | 1
-rw-r--r--  lib/Transforms/Utils/LowerInvoke.cpp | 1
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp | 5
22 files changed, 22 insertions(+), 47 deletions(-)
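Nearly every hunk below has one of two shapes: a local variable that was assigned but never read is deleted outright, or a dyn_cast<T> whose result was only ever used as a truth test is narrowed to an isa<T> check, silencing unused-variable warnings without changing behavior. Below is a rough, self-contained sketch of the isa/dyn_cast idiom; the two-class hierarchy is hypothetical, standing in for LLVM's real Value types:

#include <iostream>

struct Value {
  enum TypeID { GenericID, StructID };
  TypeID ID;
  explicit Value(TypeID id) : ID(id) {}
};

struct StructType : Value {
  StructType() : Value(StructID) {}
  // LLVM-style RTTI hook: true if V is really a StructType.
  static bool classof(const Value *V) { return V->ID == StructID; }
};

// isa<T>: pure type test, no pointer produced.
template <typename T> bool isa(const Value *V) { return T::classof(V); }

// dyn_cast<T>: checked downcast, null on mismatch.
template <typename T> T *dyn_cast(Value *V) {
  return isa<T>(V) ? static_cast<T *>(V) : nullptr;
}

int main() {
  StructType ST;
  Value *V = &ST;
  // Before: if (StructType *SElTy = dyn_cast<StructType>(V)) { ... }
  // binds SElTy, which the body never reads -- an unused variable.
  // After: when only the type test matters, say exactly that.
  if (isa<StructType>(V))
    std::cout << "V is a StructType\n";
}

Where the downcast result really is needed (as in the GetElementPtrInst and BinaryOperator cases this commit leaves alone), dyn_cast remains the right tool.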
diff --git a/lib/Transforms/ExprTypeConvert.cpp b/lib/Transforms/ExprTypeConvert.cpp
index 3cbbc76824..91450218c0 100644
--- a/lib/Transforms/ExprTypeConvert.cpp
+++ b/lib/Transforms/ExprTypeConvert.cpp
@@ -537,7 +537,7 @@ static bool OperandConvertibleToType(User *U, Value *V, const Type *Ty,
// a whole structure at a time), so the level raiser must be trying to
// store into the first field. Check for this and allow it now:
//
- if (const StructType *SElTy = dyn_cast<StructType>(ElTy)) {
+ if (isa<StructType>(ElTy)) {
unsigned Offset = 0;
std::vector<Value*> Indices;
ElTy = getStructOffsetType(ElTy, Offset, Indices, TD, false);
@@ -799,9 +799,6 @@ static void ConvertOperandToType(User *U, Value *OldVal, Value *NewVal,
Value *SrcPtr = VMCI->second;
if (ElTy != NewTy) {
- // We check that this is a struct in the initial scan...
- const StructType *SElTy = cast<StructType>(ElTy);
-
std::vector<Value*> Indices;
Indices.push_back(Constant::getNullValue(Type::UIntTy));
diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp
index 79f21b5c02..646f5b8e08 100644
--- a/lib/Transforms/IPO/ArgumentPromotion.cpp
+++ b/lib/Transforms/IPO/ArgumentPromotion.cpp
@@ -259,7 +259,6 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg) const {
// it is safe to unconditionally load the pointer. Use alias analysis to
// check to see if the pointer is guaranteed to not be modified from entry of
// the function to each of the load instructions.
- Function &F = *Arg->getParent();
// Because there could be several/many load instructions, remember which
// blocks we know to be transparent to the load.
@@ -508,7 +507,6 @@ Function *ArgPromotion::DoPromotion(Function *F,
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
std::vector<Value*> Operands(GEP->op_begin()+1, GEP->op_end());
- unsigned ArgNo = 0;
Function::arg_iterator TheArg = I2;
for (ScalarizeTable::iterator It = ArgIndices.begin();
*It != Operands; ++It, ++TheArg) {
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index bc5aecd76a..6cdd530a11 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1770,7 +1770,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
return false;
InstResult = RetVal;
}
- } else if (TerminatorInst *TI = dyn_cast<TerminatorInst>(CurInst)) {
+ } else if (isa<TerminatorInst>(CurInst)) {
BasicBlock *NewBB = 0;
if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
if (BI->isUnconditional()) {
diff --git a/lib/Transforms/IPO/IndMemRemoval.cpp b/lib/Transforms/IPO/IndMemRemoval.cpp
index 78b6ea53e3..ef27cd61a9 100644
--- a/lib/Transforms/IPO/IndMemRemoval.cpp
+++ b/lib/Transforms/IPO/IndMemRemoval.cpp
@@ -77,7 +77,7 @@ bool IndMemRemPass::runOnModule(Module &M) {
BasicBlock* bb = new BasicBlock("entry",FN);
Instruction* c = new CastInst(FN->arg_begin(), Type::UIntTy, "c", bb);
Instruction* a = new MallocInst(Type::SByteTy, c, "m", bb);
- Instruction* R = new ReturnInst(a, bb);
+ new ReturnInst(a, bb);
++NumBounce;
NumBounceSites += F->getNumUses();
F->replaceAllUsesWith(FN);
diff --git a/lib/Transforms/IPO/InlineSimple.cpp b/lib/Transforms/IPO/InlineSimple.cpp
index c2aa4cb380..c4b033c8c1 100644
--- a/lib/Transforms/IPO/InlineSimple.cpp
+++ b/lib/Transforms/IPO/InlineSimple.cpp
@@ -245,7 +245,7 @@ int SimpleInliner::getInlineCost(CallSite CS) {
// significant future optimization possibilities (like scalar promotion, and
// scalarization), so encourage the inlining of the function.
//
- else if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
+ else if (isa<AllocaInst>(I)) {
if (ArgNo < CalleeFI.ArgumentWeights.size())
InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;
diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp
index ae4032bbfb..edb34be203 100644
--- a/lib/Transforms/IPO/Inliner.cpp
+++ b/lib/Transforms/IPO/Inliner.cpp
@@ -129,8 +129,6 @@ bool Inliner::runOnSCC(const std::vector<CallGraphNode*> &SCC) {
DEBUG(std::cerr << " Inlining: cost=" << InlineCost
<< ", Call: " << *CS.getInstruction());
- Function *Caller = CS.getInstruction()->getParent()->getParent();
-
// Attempt to inline the function...
if (InlineCallIfPossible(CS, CG, SCCFunctions)) {
// Remove this call site from the list.
diff --git a/lib/Transforms/IPO/LowerSetJmp.cpp b/lib/Transforms/IPO/LowerSetJmp.cpp
index cd46718ba6..e6ff5c977a 100644
--- a/lib/Transforms/IPO/LowerSetJmp.cpp
+++ b/lib/Transforms/IPO/LowerSetJmp.cpp
@@ -496,7 +496,6 @@ void LowerSetJmp::visitInvokeInst(InvokeInst& II)
// If not reachable from a setjmp call, don't transform.
if (!DFSBlocks.count(BB)) return;
- BasicBlock* NormalBB = II.getNormalDest();
BasicBlock* ExceptBB = II.getUnwindDest();
Function* Func = BB->getParent();
diff --git a/lib/Transforms/IPO/SimplifyLibCalls.cpp b/lib/Transforms/IPO/SimplifyLibCalls.cpp
index 157ea384b7..af117094d6 100644
--- a/lib/Transforms/IPO/SimplifyLibCalls.cpp
+++ b/lib/Transforms/IPO/SimplifyLibCalls.cpp
@@ -1106,7 +1106,7 @@ struct LLVMMemCpyMoveOptzn : public LibCallOptimization {
CastInst* DestCast =
new CastInst(dest,PointerType::get(castType),dest->getName()+".cast",ci);
LoadInst* LI = new LoadInst(SrcCast,SrcCast->getName()+".val",ci);
- StoreInst* SI = new StoreInst(LI, DestCast, ci);
+ new StoreInst(LI, DestCast, ci);
ci->eraseFromParent();
return true;
}
@@ -2063,7 +2063,7 @@ bool getConstantStringLength(Value *V, uint64_t &len, ConstantArray **CA) {
Constant* INTLZR = GV->getInitializer();
// Handle the ConstantAggregateZero case
- if (ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(INTLZR)) {
+ if (isa<ConstantAggregateZero>(INTLZR)) {
// This is a degenerate case. The initializer is constant zero so the
// length of the string must be zero.
len = 0;
diff --git a/lib/Transforms/Instrumentation/RSProfiling.cpp b/lib/Transforms/Instrumentation/RSProfiling.cpp
index 5d6654d504..671b3bcf22 100644
--- a/lib/Transforms/Instrumentation/RSProfiling.cpp
+++ b/lib/Transforms/Instrumentation/RSProfiling.cpp
@@ -580,7 +580,6 @@ static void CollapsePhi(BasicBlock* btarget, BasicBlock* bsrc) {
for(BasicBlock::iterator ib = btarget->begin(), ie = btarget->end();
ib != ie; ++ib)
if (PHINode* phi = dyn_cast<PHINode>(&*ib)) {
- unsigned total = phi->getNumIncomingValues();
std::map<BasicBlock*, Value*> counter;
for(unsigned i = 0; i < phi->getNumIncomingValues(); ) {
if (counter[phi->getIncomingBlock(i)]) {
diff --git a/lib/Transforms/Instrumentation/TraceBasicBlocks.cpp b/lib/Transforms/Instrumentation/TraceBasicBlocks.cpp
index 8bf6001053..003ea0d26a 100644
--- a/lib/Transforms/Instrumentation/TraceBasicBlocks.cpp
+++ b/lib/Transforms/Instrumentation/TraceBasicBlocks.cpp
@@ -56,7 +56,7 @@ static void InsertInstrumentationCall (BasicBlock *BB,
while (isa<AllocaInst>(InsertPos) || isa<PHINode>(InsertPos))
++InsertPos;
- Instruction *InstrCall = new CallInst (InstrFn, Args, "", InsertPos);
+ new CallInst (InstrFn, Args, "", InsertPos);
}
bool TraceBasicBlocks::runOnModule(Module &M) {
diff --git a/lib/Transforms/Scalar/CorrelatedExprs.cpp b/lib/Transforms/Scalar/CorrelatedExprs.cpp
index 35e6a971f1..3ea67955fd 100644
--- a/lib/Transforms/Scalar/CorrelatedExprs.cpp
+++ b/lib/Transforms/Scalar/CorrelatedExprs.cpp
@@ -726,7 +726,6 @@ void CEE::InsertRegionExitMerges(PHINode *BBVal, Instruction *OldVal,
const std::vector<BasicBlock*> &RegionExitBlocks) {
assert(BBVal->getType() == OldVal->getType() && "Should be derived values!");
BasicBlock *BB = BBVal->getParent();
- BasicBlock *OldSucc = OldVal->getParent();
// Loop over all of the blocks we have to place PHIs in, doing it.
for (unsigned i = 0, e = RegionExitBlocks.size(); i != e; ++i) {
diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index d3a625b8bc..98c35ddb0c 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -2579,8 +2579,6 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
}
Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
- Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
-
return commonRemTransforms(I);
}
@@ -3109,7 +3107,6 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
{
Value *A = 0, *B = 0;
- ConstantInt *C1 = 0, *C2 = 0;
if (match(Op0, m_Or(m_Value(A), m_Value(B))))
if (A == Op1 || B == Op1) // (A | ?) & A --> A
return ReplaceInstUsesWith(I, Op1);
@@ -5510,7 +5507,7 @@ static bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
// If the first operand is itself a cast, and is eliminable, do not count
// this as an eliminable cast. We would prefer to eliminate those two
// casts first.
- if (CastInst *OpCast = dyn_cast<CastInst>(I->getOperand(0)))
+ if (isa<CastInst>(I->getOperand(0)))
return true;
++NumCastsRemoved;
@@ -6192,7 +6189,6 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
if (TI->hasOneUse() && FI->hasOneUse()) {
- bool isInverse = false;
Instruction *AddOp = 0, *SubOp = 0;
// Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
@@ -6971,7 +6967,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// Insert and return the new operation.
if (isa<CastInst>(FirstInst))
return new CastInst(PhiVal, PN.getType());
- else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst))
+ else if (isa<LoadInst>(FirstInst))
return new LoadInst(PhiVal, "", isVolatile);
else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
return BinaryOperator::create(BinOp->getOpcode(), PhiVal, ConstantOp);
@@ -7327,7 +7323,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the index will be to exactly the right offset with the scale taken
// out, perform the transformation.
if (Scale && Scale->getZExtValue() % ArrayEltSize == 0) {
- if (ConstantInt *C = dyn_cast<ConstantInt>(Scale))
+ if (isa<ConstantInt>(Scale))
Scale = ConstantInt::get(Scale->getType(),
Scale->getZExtValue() / ArrayEltSize);
if (Scale->getZExtValue() != 1) {
@@ -7501,7 +7497,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Value *Op = LI.getOperand(0);
// load (cast X) --> cast (load X) iff safe
- if (CastInst *CI = dyn_cast<CastInst>(Op))
+ if (isa<CastInst>(Op))
if (Instruction *Res = InstCombineLoadCast(*this, LI))
return Res;
@@ -7728,7 +7724,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// If the pointer destination is a cast, see if we can fold the cast into the
// source instead.
- if (CastInst *CI = dyn_cast<CastInst>(Ptr))
+ if (isa<CastInst>(Ptr))
if (Instruction *Res = InstCombineStoreToCast(*this, SI))
return Res;
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
@@ -8015,7 +8011,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
InsertNewInstBefore(newEI1, EI);
return BinaryOperator::create(BO->getOpcode(), newEI0, newEI1);
}
- } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+ } else if (isa<LoadInst>(I)) {
Value *Ptr = InsertCastBefore(I->getOperand(0),
PointerType::get(EI.getType()), EI);
GetElementPtrInst *GEP =
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 5b64e54f1f..dc6b986a5c 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -307,7 +307,7 @@ static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
Start = SCEVAddExpr::get(Start, AE->getOperand(i));
}
- } else if (SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SH)) {
+ } else if (isa<SCEVAddRecExpr>(SH)) {
TheAddRec = SH;
} else {
return false; // not analyzable.
diff --git a/lib/Transforms/Scalar/LoopUnroll.cpp b/lib/Transforms/Scalar/LoopUnroll.cpp
index 9c59dd3de0..2fa3fcd7f3 100644
--- a/lib/Transforms/Scalar/LoopUnroll.cpp
+++ b/lib/Transforms/Scalar/LoopUnroll.cpp
@@ -92,7 +92,7 @@ static unsigned ApproximateLoopSize(const Loop *L) {
// Ignore PHI nodes in the header.
} else if (I->hasOneUse() && I->use_back() == Term) {
// Ignore instructions only used by the loop terminator.
- } else if (DbgInfoIntrinsic *DbgI = dyn_cast<DbgInfoIntrinsic>(I)) {
+ } else if (isa<DbgInfoIntrinsic>(I)) {
// Ignore debug instructions
} else {
++Size;
@@ -135,7 +135,6 @@ BasicBlock* LoopUnroll::FoldBlockIntoPredecessor(BasicBlock* BB) {
return 0;
DEBUG(std::cerr << "Merging: " << *BB << "into: " << *OnlyPred);
- TerminatorInst *Term = OnlyPred->getTerminator();
// Resolve any PHI nodes at the start of the block. They are all
// guaranteed to have exactly one entry if they exist, unless there are
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 95217254bd..a91d295540 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -576,8 +576,6 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
// Split all of the edges from inside the loop to their exit blocks. Update
// the appropriate Phi nodes as we do so.
- unsigned NumBlocks = L->getBlocks().size();
-
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
BasicBlock *ExitBlock = ExitBlocks[i];
std::vector<BasicBlock*> Preds(pred_begin(ExitBlock), pred_end(ExitBlock));
@@ -966,9 +964,8 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
BasicBlock* Split = SplitBlock(Old, SI);
Instruction* OldTerm = Old->getTerminator();
- BranchInst* Branch = new BranchInst(Split, SI->getSuccessor(i),
- ConstantBool::getTrue(),
- OldTerm);
+ new BranchInst(Split, SI->getSuccessor(i),
+ ConstantBool::getTrue(), OldTerm);
Old->getTerminator()->eraseFromParent();
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index 44634cd7c3..8e585c2ab1 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -377,7 +377,7 @@ void SCCPSolver::getFeasibleSuccessors(TerminatorInst &TI,
Succs[BCValue.getConstant() == ConstantBool::getFalse()] = true;
}
}
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(&TI)) {
+ } else if (isa<InvokeInst>(&TI)) {
// Invoke instructions successors are always executable.
Succs[0] = Succs[1] = true;
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(&TI)) {
@@ -436,7 +436,7 @@ bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
}
return false;
}
- } else if (InvokeInst *II = dyn_cast<InvokeInst>(TI)) {
+ } else if (isa<InvokeInst>(TI)) {
// Invoke instructions successors are always executable.
return true;
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index ffb095a048..3eba528296 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -302,7 +302,7 @@ int SROA::isSafeUseOfAllocation(Instruction *User) {
if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
uint64_t NumElements = AT->getNumElements();
- if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
+ if (isa<ConstantInt>(I.getOperand())) {
// Check to make sure that index falls within the array. If not,
// something funny is going on, so we won't do the optimization.
//
diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 14c4d8bdc7..e732392f40 100644
--- a/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -161,7 +161,6 @@ bool TailCallElim::runOnFunction(Function &F) {
// occurs when a function passes an argument straight through to its tail
// call.
if (!ArgumentPHIs.empty()) {
- unsigned NumIncoming = ArgumentPHIs[0]->getNumIncomingValues();
for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) {
PHINode *PN = ArgumentPHIs[i];
diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp
index f80b1530f8..30a7add79b 100644
--- a/lib/Transforms/Utils/CloneFunction.cpp
+++ b/lib/Transforms/Utils/CloneFunction.cpp
@@ -361,10 +361,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
// requires the CFG to be up-to-date.
for (unsigned phino = 0, e = PHIToResolve.size(); phino != e; ) {
const PHINode *OPN = PHIToResolve[phino];
-
unsigned NumPreds = OPN->getNumIncomingValues();
-
- unsigned BBPHIStart = phino;
const BasicBlock *OldBB = OPN->getParent();
BasicBlock *NewBB = cast<BasicBlock>(ValueMap[OldBB]);
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index 620a581c8a..19293f47fb 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -763,7 +763,6 @@ void LoopSimplify::UpdateDomInfoForRevectoredPreds(BasicBlock *NewBB,
// If NewBB dominates some blocks, then it will dominate all blocks that
// NewBBSucc does.
if (NewBBDominatesNewBBSucc) {
- BasicBlock *PredBlock = PredBlocks[0];
Function *F = NewBB->getParent();
for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I)
if (DS.dominates(NewBBSucc, I))
diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp
index 1816144fff..3385ba1c64 100644
--- a/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/lib/Transforms/Utils/LowerInvoke.cpp
@@ -370,7 +370,6 @@ splitLiveRangesLiveAcrossInvokes(std::vector<InvokeInst*> &Invokes) {
// Scan all of the uses and see if the live range is live across an unwind
// edge. If we find a use live across an invoke edge, create an alloca
// and spill the value.
- AllocaInst *SpillLoc = 0;
std::set<InvokeInst*> InvokesWithStoreInserted;
// Find all of the blocks that this value is live in.
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 9794ed6fbb..de9f8ecfa1 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1340,7 +1340,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
}
}
}
- } else if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->begin())) {
+ } else if (isa<UnwindInst>(BB->begin())) {
// Check to see if the first instruction in this block is just an unwind.
// If so, replace any invoke instructions which use this as an exception
// destination with call instructions, and any unconditional branch
@@ -1409,7 +1409,7 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
return 1;
} else { // Conditional branch
- if (Value *CompVal = isValueEqualityComparison(BI)) {
+ if (isValueEqualityComparison(BI)) {
// If we only have one predecessor, and if it is a branch on this value,
// see if that predecessor totally determines the outcome of this
// switch.
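The conditional-branch hunk above is the same dead-binding cleanup applied to C++'s declaration-in-condition form: the if bound a name purely so the call's result could be tested, and the body never read that name. A tiny sketch, with hasInterestingValue as a hypothetical stand-in for isValueEqualityComparison:

#include <iostream>

// Stand-in for isValueEqualityComparison: non-null result means "yes".
int *hasInterestingValue() {
  static int X = 42;
  return &X;
}

int main() {
  // Before: if (int *CompVal = hasInterestingValue()) { ... }
  // names a pointer the body never reads.
  // After: test the call's result directly.
  if (hasInterestingValue())
    std::cout << "comparison found\n";
}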
@@ -1764,7 +1764,6 @@ bool llvm::SimplifyCFG(BasicBlock *BB) {
if (OnlySucc) {
DEBUG(std::cerr << "Merging: " << *BB << "into: " << *OnlyPred);
- TerminatorInst *Term = OnlyPred->getTerminator();
// Resolve any PHI nodes at the start of the block. They are all
// guaranteed to have exactly one entry if they exist, unless there are