author    Chris Lattner <sabre@nondot.org>    2011-07-18 04:54:35 +0000
committer Chris Lattner <sabre@nondot.org>    2011-07-18 04:54:35 +0000
commit    db125cfaf57cc83e7dd7453de2d509bc8efd0e5e (patch)
tree      a163ac0f83da7be3f9675a122a6144b12418be09 /lib/Transforms/InstCombine
parent    4b3d5469fb7c25504fa20dc65640f02d79675d48 (diff)
land David Blaikie's patch to de-constify Type, with a few tweaks.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@135375 91177308-0d34-0410-b5e6-96231b3b80d8
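
The change is almost entirely mechanical: const-qualified IR type pointers (const Type*, const StructType*, and friends) become plain Type* throughout. A minimal before/after sketch, taken from the InstCombine.h hunk below — the rationale, as I understand it, is that the LLVM 3.0 type-system rewrite made types mutable (a struct body can be filled in after creation), so the const qualifier no longer expressed a real guarantee:

    // Before this commit: types were handed around const-qualified.
    const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
                                    SmallVectorImpl<Value*> &NewIndices);

    // After: the const is dropped; the function bodies are unchanged.
    Type *FindElementAtOffset(Type *Ty, int64_t Offset,
                              SmallVectorImpl<Value*> &NewIndices);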
Diffstat (limited to 'lib/Transforms/InstCombine')
 lib/Transforms/InstCombine/InstCombine.h                   | 10
 lib/Transforms/InstCombine/InstCombineAddSub.cpp           |  8
 lib/Transforms/InstCombine/InstCombineAndOrXor.cpp         |  6
 lib/Transforms/InstCombine/InstCombineCalls.cpp            | 68
 lib/Transforms/InstCombine/InstCombineCasts.cpp            | 78
 lib/Transforms/InstCombine/InstCombineCompares.cpp         | 34
 lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  | 28
 lib/Transforms/InstCombine/InstCombineMulDivRem.cpp        |  2
 lib/Transforms/InstCombine/InstCombinePHI.cpp              | 10
 lib/Transforms/InstCombine/InstCombineSelect.cpp           |  4
 lib/Transforms/InstCombine/InstCombineShifts.cpp           |  2
 lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp | 12
 lib/Transforms/InstCombine/InstCombineVectorOps.cpp        |  8
 lib/Transforms/InstCombine/InstructionCombining.cpp        | 34
 14 files changed, 152 insertions(+), 152 deletions(-)
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index 8257d6b89d..c6bdb08998 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -103,7 +103,7 @@ public:
//
Instruction *visitAdd(BinaryOperator &I);
Instruction *visitFAdd(BinaryOperator &I);
- Value *OptimizePointerDifference(Value *LHS, Value *RHS, const Type *Ty);
+ Value *OptimizePointerDifference(Value *LHS, Value *RHS, Type *Ty);
Instruction *visitSub(BinaryOperator &I);
Instruction *visitFSub(BinaryOperator &I);
Instruction *visitMul(BinaryOperator &I);
@@ -197,10 +197,10 @@ public:
Instruction *visitInstruction(Instruction &I) { return 0; }
private:
- bool ShouldChangeType(const Type *From, const Type *To) const;
+ bool ShouldChangeType(Type *From, Type *To) const;
Value *dyn_castNegVal(Value *V) const;
Value *dyn_castFNegVal(Value *V) const;
- const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
+ Type *FindElementAtOffset(Type *Ty, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices);
Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
@@ -209,7 +209,7 @@ private:
/// the cast can be eliminated by some other simple transformation, we prefer
/// to do the simplification first.
bool ShouldOptimizeCast(Instruction::CastOps opcode,const Value *V,
- const Type *Ty);
+ Type *Ty);
Instruction *visitCallSite(CallSite CS);
Instruction *tryOptimizeCall(CallInst *CI, const TargetData *TD);
@@ -357,7 +357,7 @@ private:
Instruction *SimplifyMemSet(MemSetInst *MI);
- Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
+ Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
};
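
For context on why the const had to go (background, not part of this diff): under the type system that landed shortly before this commit, a struct type can be created opaque and have its body installed later, which only works through a mutable pointer. A rough sketch against the LLVM 3.0-release API — the creation entry points were renamed around this period, so treat the exact spelling as approximate:

    StructType *Node = StructType::create(Ctx, "Node");   // body not set yet
    Type *Fields[] = { Type::getInt32Ty(Ctx), PointerType::getUnqual(Node) };
    Node->setBody(Fields);   // mutation; impossible through a const Type*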
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c36a9552e7..d10046c10b 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -188,7 +188,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
return BinaryOperator::CreateMul(LHS, AddOne(C2));
// A+B --> A|B iff A and B have no bits set in common.
- if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
+ if (IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
APInt LHSKnownOne(IT->getBitWidth(), 0);
APInt LHSKnownZero(IT->getBitWidth(), 0);
@@ -401,7 +401,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
Value *InstCombiner::EmitGEPOffset(User *GEP) {
TargetData &TD = *getTargetData();
gep_type_iterator GTI = gep_type_begin(GEP);
- const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
Value *Result = Constant::getNullValue(IntPtrTy);
// If the GEP is inbounds, we know that none of the addressing operations will
@@ -420,7 +420,7 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
if (OpC->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
if (Size)
@@ -460,7 +460,7 @@ Value *InstCombiner::EmitGEPOffset(User *GEP) {
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
- const Type *Ty) {
+ Type *Ty) {
assert(TD && "Must have target data info for this");
// If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
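
The visitAdd hunk above carries the "A+B --> A|B iff A and B have no bits set in common" fold. Pulled out as a standalone sketch (hypothetical helper, not in the patch): the add can become an or exactly when no bit position can generate a carry, i.e. every bit is known zero on at least one side.

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    // LHSKnownZero/RHSKnownZero come from ComputeMaskedBits in the real code.
    static bool addFoldsToOr(const APInt &LHSKnownZero,
                             const APInt &RHSKnownZero) {
      return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
    }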
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 64ea36fb1e..32920fabc3 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1224,7 +1224,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// fold (and (cast A), (cast B)) -> (cast (and A, B))
if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
- const Type *SrcTy = Op0C->getOperand(0)->getType();
+ Type *SrcTy = Op0C->getOperand(0)->getType();
if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
SrcTy == Op1C->getOperand(0)->getType() &&
SrcTy->isIntOrIntVectorTy()) {
@@ -2008,7 +2008,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
CastInst *Op1C = dyn_cast<CastInst>(Op1);
if (Op1C && Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
+ Type *SrcTy = Op0C->getOperand(0)->getType();
if (SrcTy == Op1C->getOperand(0)->getType() &&
SrcTy->isIntOrIntVectorTy()) {
Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
@@ -2288,7 +2288,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
- const Type *SrcTy = Op0C->getOperand(0)->getType();
+ Type *SrcTy = Op0C->getOperand(0)->getType();
if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
// Only do this if the casts both really cause code to be generated.
ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
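
All three AndOrXor hunks guard the same shape of fold: (op (cast A), (cast B)) -> (cast (op A, B)) when both casts have the same opcode and source type. A hypothetical standalone version of the and-case, omitting the profitability checks (ShouldOptimizeCast) the real code performs first:

    #include "llvm/Support/IRBuilder.h"   // 2011-era header location
    using namespace llvm;

    static Value *foldAndOfCasts(IRBuilder<> &B, CastInst *C0, CastInst *C1) {
      // Assumes C0->getOpcode() == C1->getOpcode() and matching source types.
      Value *Narrow = B.CreateAnd(C0->getOperand(0), C1->getOperand(0));
      return B.CreateCast(C0->getOpcode(), Narrow, C0->getType());
    }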
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 537f2b318a..12096476ff 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -22,8 +22,8 @@ using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
-static const Type *getPromotedType(const Type *Ty) {
- if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
+static Type *getPromotedType(Type *Ty) {
+ if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
if (ITy->getBitWidth() < 32)
return Type::getInt32Ty(Ty->getContext());
}
@@ -64,7 +64,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
unsigned DstAddrSp =
cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
- const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
+ IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
@@ -76,18 +76,18 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// integer datatype.
Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
if (StrippedDest != MI->getArgOperand(0)) {
- const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
+ Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
while (!SrcETy->isSingleValueType()) {
- if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
+ if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
if (STy->getNumElements() == 1)
SrcETy = STy->getElementType(0);
else
break;
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
if (ATy->getNumElements() == 1)
SrcETy = ATy->getElementType();
else
@@ -142,7 +142,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
- const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
+ Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
Value *Dest = MI->getDest();
unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
@@ -250,7 +250,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// We need target data for just about everything so depend on it.
if (!TD) break;
- const Type *ReturnTy = CI.getType();
+ Type *ReturnTy = CI.getType();
uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
// Get to the real allocated thing and offset as fast as possible.
@@ -300,7 +300,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
}
} else if (CallInst *MI = extractMallocCall(Op1)) {
// Get allocation size.
- const Type* MallocType = getMallocAllocatedType(MI);
+ Type* MallocType = getMallocAllocatedType(MI);
if (MallocType && MallocType->isSized())
if (Value *NElems = getMallocArraySize(MI, TD, true))
if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
@@ -355,7 +355,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::cttz: {
// If all bits below the first known one are known zero,
// this value is constant.
- const IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
+ IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
// FIXME: Try to simplify vectors of integers.
if (!IT) break;
uint32_t BitWidth = IT->getBitWidth();
@@ -374,7 +374,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ctlz: {
// If all bits above the first known one are known zero,
// this value is constant.
- const IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
+ IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
// FIXME: Try to simplify vectors of integers.
if (!IT) break;
uint32_t BitWidth = IT->getBitWidth();
@@ -392,7 +392,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
break;
case Intrinsic::uadd_with_overflow: {
Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
- const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
+ IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
uint32_t BitWidth = IT->getBitWidth();
APInt Mask = APInt::getSignBit(BitWidth);
APInt LHSKnownZero(BitWidth, 0);
@@ -416,7 +416,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
UndefValue::get(LHS->getType()),
ConstantInt::getTrue(II->getContext())
};
- const StructType *ST = cast<StructType>(II->getType());
+ StructType *ST = cast<StructType>(II->getType());
Constant *Struct = ConstantStruct::get(ST, V);
return InsertValueInst::Create(Struct, Add, 0);
}
@@ -430,7 +430,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
UndefValue::get(LHS->getType()),
ConstantInt::getFalse(II->getContext())
};
- const StructType *ST = cast<StructType>(II->getType());
+ StructType *ST = cast<StructType>(II->getType());
Constant *Struct = ConstantStruct::get(ST, V);
return InsertValueInst::Create(Struct, Add, 0);
}
@@ -559,7 +559,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
- const Type *OpPtrTy =
+ Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr);
@@ -570,7 +570,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
- const Type *OpPtrTy =
+ Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
return new StoreInst(II->getArgOperand(1), Ptr);
@@ -765,9 +765,9 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
if (!CS.paramHasAttr(ix, Attribute::ByVal))
return true;
- const Type* SrcTy =
+ Type* SrcTy =
cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
- const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
+ Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
@@ -884,8 +884,8 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (In->getIntrinsicID() == Intrinsic::init_trampoline)
return transformCallThroughTrampoline(CS);
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
if (FTy->isVarArg()) {
int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
// See if we can optimize any arguments passed through the varargs area of
@@ -934,9 +934,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// would cause a type conversion of one of our arguments, change this call to
// be a direct call with arguments casted to the appropriate types.
//
- const FunctionType *FT = Callee->getFunctionType();
- const Type *OldRetTy = Caller->getType();
- const Type *NewRetTy = FT->getReturnType();
+ FunctionType *FT = Callee->getFunctionType();
+ Type *OldRetTy = Caller->getType();
+ Type *NewRetTy = FT->getReturnType();
if (NewRetTy->isStructTy())
return false; // TODO: Handle multiple return values.
@@ -982,8 +982,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallSite::arg_iterator AI = CS.arg_begin();
for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
- const Type *ParamTy = FT->getParamType(i);
- const Type *ActTy = (*AI)->getType();
+ Type *ParamTy = FT->getParamType(i);
+ Type *ActTy = (*AI)->getType();
if (!CastInst::isCastable(ActTy, ParamTy))
return false; // Cannot transform this parameter value.
@@ -995,11 +995,11 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
- const PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
+ PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
return false;
- const Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
+ Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
if (TD->getTypeAllocSize(CurElTy) !=
TD->getTypeAllocSize(ParamPTy->getElementType()))
return false;
@@ -1023,7 +1023,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't
// already exist.
- const PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
+ PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
return false;
}
@@ -1062,7 +1062,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
AI = CS.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
- const Type *ParamTy = FT->getParamType(i);
+ Type *ParamTy = FT->getParamType(i);
if ((*AI)->getType() == ParamTy) {
Args.push_back(*AI);
} else {
@@ -1089,7 +1089,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
} else {
// Add all of the arguments in their promoted form to the arg list.
for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
- const Type *PTy = getPromotedType((*AI)->getType());
+ Type *PTy = getPromotedType((*AI)->getType());
if (PTy != (*AI)->getType()) {
// Must promote to pass through va_arg area!
Instruction::CastOps opcode =
@@ -1168,8 +1168,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
Value *Callee = CS.getCalledValue();
- const PointerType *PTy = cast<PointerType>(Callee->getType());
- const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
const AttrListPtr &Attrs = CS.getAttributes();
// If the call already has the 'nest' attribute somewhere then give up -
@@ -1181,8 +1181,8 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
- const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
- const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
+ PointerType *NestFPTy = cast<PointerType>(NestF->getType());
+ FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
const AttrListPtr &NestAttrs = NestF->getAttributes();
if (!NestAttrs.isEmpty()) {
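
Among the Calls hunks, SimplifyMemSet's "memset(s,c,n) -> store s, c (for n=1,2,4,8)" is the most self-contained transformation: the fill byte is splatted across an integer of n*8 bits and stored once. A hypothetical standalone model of the splat arithmetic (plain C++, not the LLVM API):

    #include <cstdint>

    // Replicate the fill byte across Len bytes, e.g. splatFillByte(0xAB, 4)
    // == 0xABABABAB; the real code then emits one i(Len*8) store of it.
    static uint64_t splatFillByte(uint8_t C, unsigned Len) {
      uint64_t Fill = 0;
      for (unsigned i = 0; i != Len; ++i)
        Fill = (Fill << 8) | C;
      return Fill;
    }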
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 82c734e0b8..f99e457482 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -79,14 +79,14 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// This requires TargetData to get the alloca alignment and size information.
if (!TD) return 0;
- const PointerType *PTy = cast<PointerType>(CI.getType());
+ PointerType *PTy = cast<PointerType>(CI.getType());
BuilderTy AllocaBuilder(*Builder);
AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
// Get the type really allocated and the type casted to.
- const Type *AllocElTy = AI.getAllocatedType();
- const Type *CastElTy = PTy->getElementType();
+ Type *AllocElTy = AI.getAllocatedType();
+ Type *CastElTy = PTy->getElementType();
if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
@@ -151,7 +151,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
/// EvaluateInDifferentType - Given an expression that
/// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
/// insert the code to evaluate the expression.
-Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
+Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
bool isSigned) {
if (Constant *C = dyn_cast<Constant>(V)) {
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
@@ -229,12 +229,12 @@ static Instruction::CastOps
isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
- const Type *DstTy, ///< The target type for the second cast instruction
+ Type *DstTy, ///< The target type for the second cast instruction
TargetData *TD ///< The target data for pointer size
) {
- const Type *SrcTy = CI->getOperand(0)->getType(); // A from above
- const Type *MidTy = CI->getType(); // B from above
+ Type *SrcTy = CI->getOperand(0)->getType(); // A from above
+ Type *MidTy = CI->getType(); // B from above
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
@@ -260,7 +260,7 @@ isEliminableCastPair(
/// the cast can be eliminated by some other simple transformation, we prefer
/// to do the simplification first.
bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
- const Type *Ty) {
+ Type *Ty) {
// Noop casts and casts of constants should be eliminated trivially.
if (V->getType() == Ty || isa<Constant>(V)) return false;
@@ -324,7 +324,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
///
/// This function works on both vectors and scalars.
///
-static bool CanEvaluateTruncated(Value *V, const Type *Ty) {
+static bool CanEvaluateTruncated(Value *V, Type *Ty) {
// We can always evaluate constants in another type.
if (isa<Constant>(V))
return true;
@@ -332,7 +332,7 @@ static bool CanEvaluateTruncated(Value *V, const Type *Ty) {
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
- const Type *OrigTy = V->getType();
+ Type *OrigTy = V->getType();
// If this is an extension from the dest type, we can eliminate it, even if it
// has multiple uses.
@@ -435,7 +435,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
return &CI;
Value *Src = CI.getOperand(0);
- const Type *DestTy = CI.getType(), *SrcTy = Src->getType();
+ Type *DestTy = CI.getType(), *SrcTy = Src->getType();
// Attempt to truncate the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
@@ -586,7 +586,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// It is also profitable to transform icmp eq into not(xor(A, B)) because that
// may lead to additional simplifications.
if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
- if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
+ if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
uint32_t BitWidth = ITy->getBitWidth();
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
@@ -644,7 +644,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
-static bool CanEvaluateZExtd(Value *V, const Type *Ty, unsigned &BitsToClear) {
+static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
BitsToClear = 0;
if (isa<Constant>(V))
return true;
@@ -758,7 +758,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
return &CI;
Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
+ Type *SrcTy = Src->getType(), *DestTy = CI.getType();
// Attempt to extend the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
@@ -965,10 +965,10 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
}
// vector (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed.
- if (const VectorType *VTy = dyn_cast<VectorType>(CI.getType())) {
+ if (VectorType *VTy = dyn_cast<VectorType>(CI.getType())) {
if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_Zero()) &&
Op0->getType() == CI.getType()) {
- const Type *EltTy = VTy->getElementType();
+ Type *EltTy = VTy->getElementType();
// splat the shift constant to a constant vector.
Constant *VSh = ConstantInt::get(VTy, EltTy->getScalarSizeInBits()-1);
@@ -988,7 +988,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
///
/// This function works on both vectors and scalars.
///
-static bool CanEvaluateSExtd(Value *V, const Type *Ty) {
+static bool CanEvaluateSExtd(Value *V, Type *Ty) {
assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
"Can't sign extend type to a smaller type");
// If this is a constant, it can be trivially promoted.
@@ -1063,7 +1063,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
return &CI;
Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType(), *DestTy = CI.getType();
+ Type *SrcTy = Src->getType(), *DestTy = CI.getType();
// Attempt to extend the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
@@ -1192,7 +1192,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
case Instruction::FMul:
case Instruction::FDiv:
case Instruction::FRem:
- const Type *SrcTy = OpI->getType();
+ Type *SrcTy = OpI->getType();
Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
if (LHSTrunc->getType() != SrcTy &&
@@ -1351,7 +1351,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
// Get the base pointer input of the bitcast, and the type it points to.
Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
- const Type *GEPIdxTy =
+ Type *GEPIdxTy =
cast<PointerType>(OrigBase->getType())->getElementType();
SmallVector<Value*, 8> NewIndices;
if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
@@ -1402,12 +1402,12 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
/// replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
-static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
+static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
InstCombiner &IC) {
// We can only do this optimization if the output is a multiple of the input
// element size, or the input is a multiple of the output element size.
// Convert the input type to have the same element type as the output.
- const VectorType *SrcTy = cast<VectorType>(InVal->getType());
+ VectorType *SrcTy = cast<VectorType>(InVal->getType());
if (SrcTy->getElementType() != DestTy->getElementType()) {
// The input types don't need to be identical, but for now they must be the
@@ -1427,7 +1427,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
// size of the input.
SmallVector<Constant*, 16> ShuffleMask;
Value *V2;
- const IntegerType *Int32Ty = Type::getInt32Ty(SrcTy->getContext());
+ IntegerType *Int32Ty = Type::getInt32Ty(SrcTy->getContext());
if (SrcTy->getNumElements() > DestTy->getNumElements()) {
// If we're shrinking the number of elements, just shuffle in the low
@@ -1453,11 +1453,11 @@ static Instruction *OptimizeVectorResize(Value *InVal, const VectorType *DestTy,
return new ShuffleVectorInst(InVal, V2, ConstantVector::get(ShuffleMask));
}
-static bool isMultipleOfTypeSize(unsigned Value, const Type *Ty) {
+static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
return Value % Ty->getPrimitiveSizeInBits() == 0;
}
-static unsigned getTypeSizeIndex(unsigned Value, const Type *Ty) {
+static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
return Value / Ty->getPrimitiveSizeInBits();
}
@@ -1471,7 +1471,7 @@ static unsigned getTypeSizeIndex(unsigned Value, const Type *Ty) {
/// filling in Elements with the elements found here.
static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
SmallVectorImpl<Value*> &Elements,
- const Type *VecEltTy) {
+ Type *VecEltTy) {
// Undef values never contribute useful bits to the result.
if (isa<UndefValue>(V)) return true;
@@ -1508,7 +1508,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
C->getType()->getPrimitiveSizeInBits()));
unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
- const Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
+ Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
@@ -1572,7 +1572,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
InstCombiner &IC) {
- const VectorType *DestVecTy = cast<VectorType>(CI.getType());
+ VectorType *DestVecTy = cast<VectorType>(CI.getType());
Value *IntInput = CI.getOperand(0);
SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
@@ -1599,7 +1599,7 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
/// bitcast. The various long double bitcasts can't get in here.
static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
Value *Src = CI.getOperand(0);
- const Type *DestTy = CI.getType();
+ Type *DestTy = CI.getType();
// If this is a bitcast from int to float, check to see if the int is an
// extraction from a vector.
@@ -1607,7 +1607,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
// bitcast(trunc(bitcast(somevector)))
if (match(Src, m_Trunc(m_BitCast(m_Value(VecInput)))) &&
isa<VectorType>(VecInput->getType())) {
- const VectorType *VecTy = cast<VectorType>(VecInput->getType());
+ VectorType *VecTy = cast<VectorType>(VecInput->getType());
unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
if (VecTy->getPrimitiveSizeInBits() % DestWidth == 0) {
@@ -1628,7 +1628,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
m_ConstantInt(ShAmt)))) &&
isa<VectorType>(VecInput->getType())) {
- const VectorType *VecTy = cast<VectorType>(VecInput->getType());
+ VectorType *VecTy = cast<VectorType>(VecInput->getType());
unsigned DestWidth = DestTy->getPrimitiveSizeInBits();
if (VecTy->getPrimitiveSizeInBits() % DestWidth == 0 &&
ShAmt->getZExtValue() % DestWidth == 0) {
@@ -1651,18 +1651,18 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If the operands are integer typed then apply the integer transforms,
// otherwise just apply the common ones.
Value *Src = CI.getOperand(0);
- const Type *SrcTy = Src->getType();
- const Type *DestTy = CI.getType();
+ Type *SrcTy = Src->getType();
+ Type *DestTy = CI.getType();
// Get rid of casts from one type to the same type. These are useless and can
// be replaced by the operand.
if (DestTy == Src->getType())
return ReplaceInstUsesWith(CI, Src);
- if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
- const PointerType *SrcPTy = cast<PointerType>(SrcTy);
- const Type *DstElTy = DstPTy->getElementType();
- const Type *SrcElTy = SrcPTy->getElementType();
+ if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
+ PointerType *SrcPTy = cast<PointerType>(SrcTy);
+ Type *DstElTy = DstPTy->getElementType();
+ Type *SrcElTy = SrcPTy->getElementType();
// If the address spaces don't match, don't eliminate the bitcast, which is
// required for changing types.
@@ -1702,7 +1702,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
return I;
- if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
+ if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
@@ -1731,7 +1731,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
}
- if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
+ if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
Value *Elem =
Builder->CreateExtractElement(Src,
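
A large share of the Casts hunks feed EvaluateInDifferentType: once CanEvaluateTruncated (or CanEvaluateZExtd/CanEvaluateSExtd) proves an expression tree is safe to evaluate in another type, the tree is re-emitted there and the cast disappears. At the IR level (illustrative values), trunc (add (zext i8 %x to i32), 5) to i8 becomes add i8 %x, 5. The constant leaf case from the EvaluateInDifferentType hunk, shown standalone:

    #include "llvm/Constants.h"   // 2011-era header location
    using namespace llvm;

    // Constants are simply re-cast into the target type (sext or zext/trunc
    // as appropriate); only instructions need their operands rebuilt.
    static Constant *evalConstInType(Constant *C, Type *Ty, bool isSigned) {
      return ConstantExpr::getIntegerCast(C, Ty, isSigned);
    }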
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index c78760b206..b8ce4b7eb9 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -56,7 +56,7 @@ static bool AddWithOverflow(Constant *&Result, Constant *In1,
Constant *In2, bool IsSigned = false) {
Result = ConstantExpr::getAdd(In1, In2);
- if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
+ if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
if (HasAddOverflow(ExtractElement(Result, Idx),
@@ -91,7 +91,7 @@ static bool SubWithOverflow(Constant *&Result, Constant *In1,
Constant *In2, bool IsSigned = false) {
Result = ConstantExpr::getSub(In1, In2);
- if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
+ if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
if (HasSubOverflow(ExtractElement(Result, Idx),
@@ -220,7 +220,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// structs.
SmallVector<unsigned, 4> LaterIndices;
- const Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
+ Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
if (Idx == 0) return 0; // Variable index.
@@ -228,9 +228,9 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
uint64_t IdxVal = Idx->getZExtValue();
if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
- if (const StructType *STy = dyn_cast<StructType>(EltTy))
+ if (StructType *STy = dyn_cast<StructType>(EltTy))
EltTy = STy->getElementType(IdxVal);
- else if (const ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
+ else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
if (IdxVal >= ATy->getNumElements()) return 0;
EltTy = ATy->getElementType();
} else {
@@ -441,7 +441,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// ((magic_cst >> i) & 1) != 0
if (Init->getNumOperands() <= 32 ||
(TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
- const Type *Ty;
+ Type *Ty;
if (Init->getNumOperands() <= 32)
Ty = Type::getInt32Ty(Init->getContext());
else
@@ -483,7 +483,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -513,7 +513,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
@@ -530,7 +530,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// we don't need to bother extending: the extension won't affect where the
// computation crosses zero.
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
- const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
}
return VariableIdx;
@@ -552,7 +552,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
return 0;
// Okay, we can do this evaluation. Start by converting the index to intptr.
- const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
if (VariableIdx->getType() != IntPtrTy)
VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
true /*Signed*/);
@@ -1098,7 +1098,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
// If the LHS is an AND of a zext, and we have an equality compare, we can
// shrink the and/compare to the smaller type, eliminating the cast.
if (ZExtInst *Cast = dyn_cast<ZExtInst>(LHSI->getOperand(0))) {
- const IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
+ IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
// Make sure we don't compare the upper bits, SimplifyDemandedBits
// should fold the icmp to true/false in that case.
if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
@@ -1121,8 +1121,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
ConstantInt *ShAmt;
ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
- const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
- const Type *AndTy = AndCST->getType(); // Type of the and.
+ Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
+ Type *AndTy = AndCST->getType(); // Type of the and.
// We can fold this as long as we can't shift unknown bits
// into the mask. This can only happen with signed shift
@@ -1517,8 +1517,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
Value *LHSCIOp = LHSCI->getOperand(0);
- const Type *SrcTy = LHSCIOp->getType();
- const Type *DestTy = LHSCI->getType();
+ Type *SrcTy = LHSCIOp->getType();
+ Type *DestTy = LHSCI->getType();
Value *RHSCIOp;
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
@@ -1786,7 +1786,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
return ReplaceInstUsesWith(I, V);
- const Type *Ty = Op0->getType();
+ Type *Ty = Op0->getType();
// icmp's with boolean values can always be turned into bitwise operations
if (Ty->isIntegerTy(1)) {
@@ -2637,7 +2637,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
}
- const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
+ IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
// Now we know that the APFloat is a normal number, zero or inf.
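
The most involved Compares change sits in FoldCmpLoadFromIndexedGlobal, whose "((magic_cst >> i) & 1) != 0" trick turns a compare of a load from a small constant array into a single bit test: the compare's result is precomputed per index into one 32- or 64-bit "magic" constant. A hypothetical standalone model (plain C++, not the LLVM API):

    #include <cstdint>

    // Init models a constant global initializer with N <= 64 elements; the
    // loop folds away at compile time, leaving only the shift-and-mask.
    static bool cmpViaMagicBitvector(const int *Init, unsigned N,
                                     unsigned Idx, int CmpVal) {
      uint64_t Magic = 0;
      for (unsigned i = 0; i != N; ++i)
        if (Init[i] == CmpVal)
          Magic |= uint64_t(1) << i;
      return (Magic >> Idx) & 1;
    }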
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index f499290fe8..bdd2edb991 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -26,7 +26,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
if (TD) {
- const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
@@ -38,7 +38,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
- const Type *NewTy =
+ Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
@@ -92,22 +92,22 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
User *CI = cast<User>(LI.getOperand(0));
Value *CastOp = CI->getOperand(0);
- const PointerType *DestTy = cast<PointerType>(CI->getType());
- const Type *DestPTy = DestTy->getElementType();
- if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
+ PointerType *DestTy = cast<PointerType>(CI->getType());
+ Type *DestPTy = DestTy->getElementType();
+ if (PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
// If the address spaces don't match, don't eliminate the cast.
if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
return 0;
- const Type *SrcPTy = SrcTy->getElementType();
+ Type *SrcPTy = SrcTy->getElementType();
if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
DestPTy->isVectorTy()) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
- if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
+ if (ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
if (Constant *CSrc = dyn_cast<Constant>(CastOp))
if (ASrcTy->getNumElements() != 0) {
Value *Idxs[2];
@@ -256,11 +256,11 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
User *CI = cast<User>(SI.getOperand(1));
Value *CastOp = CI->getOperand(0);
- const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
- const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
+ Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
+ PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
if (SrcTy == 0) return 0;
- const Type *SrcPTy = SrcTy->getElementType();
+ Type *SrcPTy = SrcTy->getElementType();
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
return 0;
@@ -280,12 +280,12 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
NewGEPIndices.push_back(Zero);
while (1) {
- if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
+ if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
if (!STy->getNumElements()) /* Struct can be empty {} */
break;
NewGEPIndices.push_back(Zero);
SrcPTy = STy->getElementType(0);
- } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
NewGEPIndices.push_back(Zero);
SrcPTy = ATy->getElementType();
} else {
@@ -314,8 +314,8 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
Value *NewCast;
Value *SIOp0 = SI.getOperand(0);
Instruction::CastOps opcode = Instruction::BitCast;
- const Type* CastSrcTy = SIOp0->getType();
- const Type* CastDstTy = SrcPTy;
+ Type* CastSrcTy = SIOp0->getType();
+ Type* CastDstTy = SrcPTy;
if (CastDstTy->isPointerTy()) {
if (CastSrcTy->isIntegerTy())
opcode = Instruction::IntToPtr;
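
The first LoadStoreAlloca hunk canonicalizes sized array allocations: alloca i32, i32 4 becomes alloca [4 x i32], exposing the aggregate to later passes. A standalone rendering of just that rewrite, mirroring the Builder calls visible in the hunk (a sketch; the real code also rewrites the alloca's uses):

    #include "llvm/DerivedTypes.h"        // 2011-era header locations
    #include "llvm/Instructions.h"
    #include "llvm/Support/IRBuilder.h"
    using namespace llvm;

    static AllocaInst *canonicalizeArrayAlloca(IRBuilder<> &B, AllocaInst &AI,
                                               ConstantInt *C) {
      // alloca Ty, C  ==>  alloca [C x Ty]
      Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      return B.CreateAlloca(NewTy, 0, AI.getName());
    }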
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 630a6fee39..53341ccbfc 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -421,7 +421,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
/// dyn_castZExtVal - Checks if V is a zext or constant that can
/// be truncated to Ty without losing bits.
-static Value *dyn_castZExtVal(Value *V, const Type *Ty) {
+static Value *dyn_castZExtVal(Value *V, Type *Ty) {
if (ZExtInst *Z = dyn_cast<ZExtInst>(V)) {
if (Z->getSrcTy() == Ty)
return Z->getOperand(0);
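
dyn_castZExtVal's one-line signature change above supports narrow-division folds: when both udiv operands are zero-extensions of the same narrow type (or constants that fit in it), the division can happen in the narrow type and be widened once. An IR-level illustration with a trivial arithmetic model (plain C++, hypothetical helper):

    #include <cstdint>

    //   %x = zext i8 %a to i32
    //   %y = zext i8 %b to i32
    //   %d = udiv i32 %x, %y
    // ==>
    //   %d = zext (udiv i8 %a, %b) to i32
    // Sound because zext preserves the unsigned value exactly:
    static uint32_t narrowThenWiden(uint8_t A, uint8_t B) {
      return uint32_t(uint8_t(A / B));   // equals uint32_t(A) / uint32_t(B)
    }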
diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 3777340349..bf1049d152 100644
--- a/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -28,8 +28,8 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
Value *LHSVal = FirstInst->getOperand(0);
Value *RHSVal = FirstInst->getOperand(1);
- const Type *LHSType = LHSVal->getType();
- const Type *RHSType = RHSVal->getType();
+ Type *LHSType = LHSVal->getType();
+ Type *RHSType = RHSVal->getType();
bool isNUW = false, isNSW = false, isExact = false;
if (OverflowingBinaryOperator *BO =
@@ -397,7 +397,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
// the same type or "+42") we can pull the operation through the PHI, reducing
// code size and simplifying code.
Constant *ConstantOp = 0;
- const Type *CastSrcTy = 0;
+ Type *CastSrcTy = 0;
bool isNUW = false, isNSW = false, isExact = false;
if (isa<CastInst>(FirstInst)) {
@@ -572,7 +572,7 @@ struct LoweredPHIRecord {
unsigned Shift; // The amount shifted.
unsigned Width; // The width extracted.
- LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
+ LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
: PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
// Ctor form used by DenseMap.
@@ -701,7 +701,7 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
unsigned PHIId = PHIUsers[UserI].PHIId;
PHINode *PN = PHIsToSlice[PHIId];
unsigned Offset = PHIUsers[UserI].Shift;
- const Type *Ty = PHIUsers[UserI].Inst->getType();
+ Type *Ty = PHIUsers[UserI].Inst->getType();
PHINode *EltPHI;
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 5733c20828..eb463902d6 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -363,7 +363,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_SGT: {
// These transformations only work for selects over integers.
- const IntegerType *SelectTy = dyn_cast<IntegerType>(SI.getType());
+ IntegerType *SelectTy = dyn_cast<IntegerType>(SI.getType());
if (!SelectTy)
break;
@@ -443,7 +443,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
// FIXME: Type and constness constraints could be lifted, but we have to
// watch code size carefully. We should consider xor instead of
// sub/add when we decide to do that.
- if (const IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
+ if (IntegerType *Ty = dyn_cast<IntegerType>(CmpLHS->getType())) {
if (TrueVal->getType() == Ty) {
if (ConstantInt *Cmp = dyn_cast<ConstantInt>(CmpRHS)) {
ConstantInt *C1 = NULL, *C2 = NULL;
diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 811f94976f..65d1a66f71 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -528,7 +528,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
- const IntegerType *Ty = cast<IntegerType>(I.getType());
+ IntegerType *Ty = cast<IntegerType>(I.getType());
// Check for (X << c1) << c2 and (X >> c1) >> c2
if (I.getOpcode() == ShiftOp->getOpcode()) {
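
The Shifts hunk sits inside FoldShiftByConstant's same-direction case: two constant shifts merge into one of amount AmtSum = c1 + c2, and once the sum reaches the bit width every bit has been shifted out. A standalone model for shl (assuming C1 and C2 are each below 32, as IR requires of shift amounts):

    #include <cstdint>

    // (X << C1) << C2 == X << (C1 + C2) while C1 + C2 < 32; past the bit
    // width the result is zero (the real code emits a null constant there).
    static uint32_t mergedShl(uint32_t X, uint32_t C1, uint32_t C2) {
      uint32_t AmtSum = C1 + C2;
      return AmtSum < 32 ? X << AmtSum : 0;
    }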
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 8fea8eb7ef..66f39be17b 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -103,7 +103,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert(V != 0 && "Null pointer of Value???");
assert(Depth <= 6 && "Limit Search Depth");
uint32_t BitWidth = DemandedMask.getBitWidth();
- const Type *VTy = V->getType();
+ Type *VTy = V->getType();
assert((TD || !VTy->isPointerTy()) &&
"SimplifyDemandedBits needs to know bit widths!");
assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
@@ -404,8 +404,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
return 0; // vector->int or fp->int?
- if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
- if (const VectorType *SrcVTy =
+ if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
+ if (VectorType *SrcVTy =
dyn_cast<VectorType>(I->getOperand(0)->getType())) {
if (DstVTy->getNumElements() != SrcVTy->getNumElements())
// Don't touch a bitcast between vectors of different element counts.
@@ -826,7 +826,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
UndefElts = 0;
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
- const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
+ Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Undef = UndefValue::get(EltTy);
std::vector<Constant*> Elts;
@@ -855,7 +855,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (DemandedElts.isAllOnesValue())
return 0;
- const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
+ Type *EltTy = cast<VectorType>(V->getType())->getElementType();
Constant *Zero = Constant::getNullValue(EltTy);
Constant *Undef = UndefValue::get(EltTy);
std::vector<Constant*> Elts;
@@ -992,7 +992,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
}
case Instruction::BitCast: {
// Vector->vector casts only.
- const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
+ VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
if (!VTy) break;
unsigned InVWidth = VTy->getNumElements();
APInt InputDemandedElts(InVWidth, 0);
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index ad6a8d054e..154267c034 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -77,7 +77,7 @@ static std::vector<int> getShuffleMask(const ShuffleVectorInst *SVI) {
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo) {
assert(V->getType()->isVectorTy() && "Not looking at a vector?");
- const VectorType *PTy = cast<VectorType>(V->getType());
+ VectorType *PTy = cast<VectorType>(V->getType());
unsigned Width = PTy->getNumElements();
if (EltNo >= Width) // Out of range access.
return UndefValue::get(PTy->getElementType());
@@ -175,7 +175,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// the same number of elements, see if we can find the source element from
// it. In this case, we will end up needing to bitcast the scalars.
if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
- if (const VectorType *VT =
+ if (VectorType *VT =
dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
if (VT->getNumElements() == VectorWidth)
if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
@@ -225,7 +225,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
SrcIdx -= LHSWidth;
Src = SVI->getOperand(1);
}
- const Type *Int32Ty = Type::getInt32Ty(EI.getContext());
+ Type *Int32Ty = Type::getInt32Ty(EI.getContext());
return ExtractElementInst::Create(Src,
ConstantInt::get(Int32Ty,
SrcIdx, false));
@@ -555,7 +555,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// shuffle mask, do the replacement.
if (isSplat || NewMask == LHSMask || NewMask == Mask) {
std::vector<Constant*> Elts;
- const Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
+ Type *Int32Ty = Type::getInt32Ty(SVI.getContext());
for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
if (NewMask[i] < 0) {
Elts.push_back(UndefValue::get(Int32Ty));
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index ab98ef9fcc..5828ec2ee9 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -83,7 +83,7 @@ void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'. We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
-bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
+bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
assert(From->isIntegerTy() && To->isIntegerTy());
// If we don't have TD, we don't know if the source/dest are legal.
@@ -516,8 +516,8 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
// If it's a bitcast involving vectors, make sure it has the same number of
// elements on both sides.
if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
- const VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
- const VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
+ VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
+ VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());
// Verify that either both or neither are vectors.
if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
@@ -654,7 +654,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
}
} else {
CastInst *CI = cast<CastInst>(&I);
- const Type *RetTy = CI->getType();
+ Type *RetTy = CI->getType();
for (unsigned i = 0; i != NumPHIValues; ++i) {
Value *InV;
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
@@ -680,7 +680,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset. If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
-const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
+Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices) {
if (!TD) return 0;
if (!Ty->isSized()) return 0;
@@ -688,7 +688,7 @@ const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
int64_t FirstIdx = 0;
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
@@ -711,7 +711,7 @@ const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
return 0;
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TD->getStructLayout(STy);
assert(Offset < (int64_t)SL->getSizeInBytes() &&
"Offset must stay within the indexed type");
@@ -722,7 +722,7 @@ const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
Offset -= SL->getElementOffset(Elt);
Ty = STy->getElementType(Elt);
- } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+ } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
assert(EltSize && "Cannot index into a zero-sized array");
NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
@@ -751,13 +751,13 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// by multiples of a zero size type with zero.
if (TD) {
bool MadeChange = false;
- const Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
I != E; ++I, ++GTI) {
// Skip indices into struct types.
- const SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
+ SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
if (!SeqTy) continue;
// If the element type has zero size then any index over it is equivalent
@@ -859,7 +859,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
Value *StrippedPtr = PtrOp->stripPointerCasts();
- const PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
+ PointerType *StrippedPtrTy =cast<PointerType>(StrippedPtr->getType());
if (StrippedPtr != PtrOp &&
StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {
@@ -875,8 +875,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
//
// This occurs when the program declares an array extern like "int X[];"
if (HasZeroPointerIndex) {
- const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
- if (const ArrayType *CATy =
+ PointerType *CPTy = cast<PointerType>(PtrOp->getType());
+ if (ArrayType *CATy =
dyn_cast<ArrayType>(CPTy->getElementType())) {
// GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
@@ -889,7 +889,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
return Res;
}
- if (const ArrayType *XATy =
+ if (ArrayType *XATy =
dyn_cast<ArrayType>(StrippedPtrTy->getElementType())){
// GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
if (CATy->getElementType() == XATy->getElementType()) {
@@ -907,8 +907,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Transform things like:
// %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
// into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
- const Type *SrcElTy = StrippedPtrTy->getElementType();
- const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
+ Type *SrcElTy = StrippedPtrTy->getElementType();
+ Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
if (TD && SrcElTy->isArrayTy() &&
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
TD->getTypeAllocSize(ResElTy)) {
@@ -1023,7 +1023,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// field at Offset in 'A's type. If so, we can pull the cast through the
// GEP.
SmallVector<Value*, 8> NewIndices;
- const Type *InTy =
+ Type *InTy =
cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
if (FindElementAtOffset(InTy, Offset, NewIndices)) {
Value *NGEP = GEP.isInBounds() ?