author    Duncan Sands <baldrick@free.fr>  2010-02-15 16:12:20 +0000
committer Duncan Sands <baldrick@free.fr>  2010-02-15 16:12:20 +0000
commit    b0bc6c361da9009e8414efde317d9bbff755f6c0 (patch)
tree      0e5eb5ae8ac1b20e3979719c3d670a4318e039bd /lib/Transforms
parent    f6814754e8d3944b6fab7326a6f1f696fd9122f9 (diff)
Uniformize the names of type predicates: rather than having isFloatTy and
isInteger, we now have isFloatTy and isIntegerTy. Requested by Chris!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@96223 91177308-0d34-0410-b5e6-96231b3b80d8
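For illustration only (not part of this commit): a minimal sketch of how call
sites read under the new spellings, assuming a const Type* obtained from the
LLVM C++ API of this era. The helper names are hypothetical; the diff below
applies exactly this mechanical rename across lib/Transforms.

    // Hypothetical helpers; only the predicate spellings come from the patch.
    #include "llvm/Type.h"
    using namespace llvm;

    static bool isI32OrFP(const Type *Ty) {
      // Before this commit: Ty->isInteger(32) || Ty->isFloatingPoint()
      // After this commit:
      return Ty->isIntegerTy(32) || Ty->isFloatingPointTy();
    }

    static bool isScalarOrVectorInt(const Type *Ty) {
      // Before: Ty->isIntOrIntVector();  after:
      return Ty->isIntOrIntVectorTy();
    }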
Diffstat (limited to 'lib/Transforms')
-rw-r--r--  lib/Transforms/IPO/DeadTypeElimination.cpp                |  4
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp                          |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAddSub.cpp          | 12
-rw-r--r--  lib/Transforms/InstCombine/InstCombineAndOrXor.cpp        |  8
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp           |  2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp           |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp        |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 10
-rw-r--r--  lib/Transforms/InstCombine/InstCombineMulDivRem.cpp       | 12
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSelect.cpp          |  4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp|  4
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp       |  8
-rw-r--r--  lib/Transforms/Instrumentation/ProfilingUtils.cpp         |  2
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp                   |  2
-rw-r--r--  lib/Transforms/Scalar/LoopUnswitch.cpp                    | 10
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp                 |  2
-rw-r--r--  lib/Transforms/Scalar/Reassociate.cpp                     |  6
-rw-r--r--  lib/Transforms/Scalar/ScalarReplAggregates.cpp            | 13
-rw-r--r--  lib/Transforms/Scalar/SimplifyLibCalls.cpp                | 22
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp                      |  2
20 files changed, 68 insertions(+), 67 deletions(-)
diff --git a/lib/Transforms/IPO/DeadTypeElimination.cpp b/lib/Transforms/IPO/DeadTypeElimination.cpp
index 025d77e0c8..662fbb5cd4 100644
--- a/lib/Transforms/IPO/DeadTypeElimination.cpp
+++ b/lib/Transforms/IPO/DeadTypeElimination.cpp
@@ -57,13 +57,13 @@ ModulePass *llvm::createDeadTypeEliminationPass() {
//
static inline bool ShouldNukeSymtabEntry(const Type *Ty){
// Nuke all names for primitive types!
- if (Ty->isPrimitiveType() || Ty->isInteger())
+ if (Ty->isPrimitiveType() || Ty->isIntegerTy())
return true;
// Nuke all pointers to primitive types as well...
if (const PointerType *PT = dyn_cast<PointerType>(Ty))
if (PT->getElementType()->isPrimitiveType() ||
- PT->getElementType()->isInteger())
+ PT->getElementType()->isIntegerTy())
return true;
return false;
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index ac91631f56..df060eb234 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1590,7 +1590,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
// simplification. In these cases, we typically end up with "cond ? v1 : v2"
// where v1 and v2 both require constant pool loads, a big loss.
if (GVElType == Type::getInt1Ty(GV->getContext()) ||
- GVElType->isFloatingPoint() ||
+ GVElType->isFloatingPointTy() ||
isa<PointerType>(GVElType) || isa<VectorType>(GVElType))
return false;
@@ -1925,7 +1925,7 @@ GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
if (!ATy) return 0;
const StructType *STy = dyn_cast<StructType>(ATy->getElementType());
if (!STy || STy->getNumElements() != 2 ||
- !STy->getElementType(0)->isInteger(32)) return 0;
+ !STy->getElementType(0)->isIntegerTy(32)) return 0;
const PointerType *PFTy = dyn_cast<PointerType>(STy->getElementType(1));
if (!PFTy) return 0;
const FunctionType *FTy = dyn_cast<FunctionType>(PFTy->getElementType());
diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c2924abba1..2da17f1dd5 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -35,7 +35,7 @@ static Constant *SubOne(ConstantInt *C) {
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
- if (!V->hasOneUse() || !V->getType()->isInteger())
+ if (!V->hasOneUse() || !V->getType()->isIntegerTy())
return 0;
Instruction *I = dyn_cast<Instruction>(V);
@@ -145,10 +145,10 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
}
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateXor(LHS, RHS);
- if (I.getType()->isInteger()) {
+ if (I.getType()->isIntegerTy()) {
// X + X --> X << 1
if (LHS == RHS)
return BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
@@ -168,7 +168,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// -A + B --> B - A
// -A + -B --> -(A + B)
if (Value *LHSV = dyn_castNegVal(LHS)) {
- if (LHS->getType()->isIntOrIntVector()) {
+ if (LHS->getType()->isIntOrIntVectorTy()) {
if (Value *RHSV = dyn_castNegVal(RHS)) {
Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
return BinaryOperator::CreateNeg(NewAdd);
@@ -222,7 +222,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
}
// W*X + Y*Z --> W * (X+Z) iff W == Y
- if (I.getType()->isIntOrIntVector()) {
+ if (I.getType()->isIntOrIntVectorTy()) {
Value *W, *X, *Y, *Z;
if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
@@ -560,7 +560,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
if (isa<UndefValue>(Op1))
return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateXor(Op0, Op1);
if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 515753fae4..5e47953d1e 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -946,7 +946,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
const Type *SrcTy = Op0C->getOperand(0)->getType();
if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVector()) {
+ SrcTy->isIntOrIntVectorTy()) {
Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
// Only do this if the casts both really cause code to be generated.
@@ -1161,7 +1161,7 @@ static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
// If A is not a select of -1/0, this cannot match.
Value *Cond = 0;
if (!match(A, m_SExt(m_Value(Cond))) ||
- !Cond->getType()->isInteger(1))
+ !Cond->getType()->isIntegerTy(1))
return 0;
// ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
@@ -1699,7 +1699,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
const Type *SrcTy = Op0C->getOperand(0)->getType();
if (SrcTy == Op1C->getOperand(0)->getType() &&
- SrcTy->isIntOrIntVector()) {
+ SrcTy->isIntOrIntVectorTy()) {
Value *Op0COp = Op0C->getOperand(0), *Op1COp = Op1C->getOperand(0);
if ((!isa<ICmpInst>(Op0COp) || !isa<ICmpInst>(Op1COp)) &&
@@ -2016,7 +2016,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
const Type *SrcTy = Op0C->getOperand(0)->getType();
- if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
+ if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isIntegerTy() &&
// Only do this if the casts both really cause code to be generated.
ShouldOptimizeCast(Op0C->getOpcode(), Op0C->getOperand(0),
I.getType()) &&
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index e501ddc4b8..d7efdcfa3b 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -199,7 +199,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
// Extract the length and alignment and fill if they are constant.
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
- if (!LenC || !FillC || !FillC->getType()->isInteger(8))
+ if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
return 0;
uint64_t Len = LenC->getZExtValue();
Alignment = MI->getAlignment();
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 68e17e59f6..bb4a0e9496 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -23,7 +23,7 @@ using namespace PatternMatch;
///
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
int &Offset) {
- assert(Val->getType()->isInteger(32) && "Unexpected allocation size type!");
+ assert(Val->getType()->isIntegerTy(32) && "Unexpected allocation size type!");
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
Offset = CI->getZExtValue();
Scale = 0;
@@ -837,7 +837,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// zext (xor i1 X, true) to i32 --> xor (zext i1 X to i32), 1
Value *X;
- if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isInteger(1) &&
+ if (SrcI && SrcI->hasOneUse() && SrcI->getType()->isIntegerTy(1) &&
match(SrcI, m_Not(m_Value(X))) &&
(!X->hasOneUse() || !isa<CmpInst>(X))) {
Value *New = Builder->CreateZExt(X, CI.getType());
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 7c00c2c61c..72af80fbe1 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1606,7 +1606,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
const Type *Ty = Op0->getType();
// icmp's with boolean values can always be turned into bitwise operations
- if (Ty->isInteger(1)) {
+ if (Ty->isIntegerTy(1)) {
switch (I.getPredicate()) {
default: llvm_unreachable("Invalid icmp instruction!");
case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
@@ -1650,7 +1650,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
unsigned BitWidth = 0;
if (TD)
BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
- else if (Ty->isIntOrIntVector())
+ else if (Ty->isIntOrIntVectorTy())
BitWidth = Ty->getScalarSizeInBits();
bool isSignBit = false;
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 2d13298300..e6c59c7d38 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -87,7 +87,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
const Type *SrcPTy = SrcTy->getElementType();
- if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
+ if (DestPTy->isIntegerTy() || isa<PointerType>(DestPTy) ||
isa<VectorType>(DestPTy)) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
@@ -104,7 +104,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
}
if (IC.getTargetData() &&
- (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
+ (SrcPTy->isIntegerTy() || isa<PointerType>(SrcPTy) ||
isa<VectorType>(SrcPTy)) &&
// Do not allow turning this into a load of an integer, which is then
// casted to a pointer, this pessimizes pointer analysis a lot.
@@ -243,7 +243,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
const Type *SrcPTy = SrcTy->getElementType();
- if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
+ if (!DestPTy->isIntegerTy() && !isa<PointerType>(DestPTy))
return 0;
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
@@ -277,7 +277,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
}
- if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
+ if (!SrcPTy->isIntegerTy() && !isa<PointerType>(SrcPTy))
return 0;
// If the pointers point into different address spaces or if they point to
@@ -298,7 +298,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
const Type* CastSrcTy = SIOp0->getType();
const Type* CastDstTy = SrcPTy;
if (isa<PointerType>(CastDstTy)) {
- if (CastSrcTy->isInteger())
+ if (CastSrcTy->isIntegerTy())
opcode = Instruction::IntToPtr;
} else if (isa<IntegerType>(CastDstTy)) {
if (isa<PointerType>(SIOp0->getType()))
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 2e26a75b2a..668c34fc06 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -157,7 +157,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
}
/// i1 mul -> i1 and.
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return BinaryOperator::CreateAnd(Op0, Op1);
// X*(1 << Y) --> X << Y
@@ -314,7 +314,7 @@ Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
// undef / X -> 0 for integer.
// undef / X -> undef for FP (the undef could be a snan).
if (isa<UndefValue>(Op0)) {
- if (Op0->getType()->isFPOrFPVector())
+ if (Op0->getType()->isFPOrFPVectorTy())
return ReplaceInstUsesWith(I, Op0);
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
}
@@ -386,7 +386,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
// It can't be division by zero, hence it must be division by one.
- if (I.getType()->isInteger(1))
+ if (I.getType()->isIntegerTy(1))
return ReplaceInstUsesWith(I, Op0);
if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
@@ -493,7 +493,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a udiv.
- if (I.getType()->isInteger()) {
+ if (I.getType()->isIntegerTy()) {
APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
if (MaskedValueIsZero(Op0, Mask)) {
if (MaskedValueIsZero(Op1, Mask)) {
@@ -527,7 +527,7 @@ Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (isa<UndefValue>(Op0)) { // undef % X -> 0
- if (I.getType()->isFPOrFPVector())
+ if (I.getType()->isFPOrFPVectorTy())
return ReplaceInstUsesWith(I, Op0); // X % undef -> undef (could be SNaN)
return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
}
@@ -648,7 +648,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
// If the sign bits of both operands are zero (i.e. we can prove they are
// unsigned inputs), turn this into a urem.
- if (I.getType()->isInteger()) {
+ if (I.getType()->isIntegerTy()) {
APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
// X srem Y -> X urem Y, iff X and Y don't have sign bit set
diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 9a02b33b7b..7807d9a633 100644
--- a/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -441,7 +441,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return ReplaceInstUsesWith(SI, FalseVal);
}
- if (SI.getType()->isInteger(1)) {
+ if (SI.getType()->isIntegerTy(1)) {
if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
if (C->getZExtValue()) {
// Change: A = select B, true, C --> A = or B, C
@@ -629,7 +629,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
}
// See if we can fold the select into one of our operands.
- if (SI.getType()->isInteger()) {
+ if (SI.getType()->isIntegerTy()) {
if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal))
return FoldI;
diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 53a568466e..5e9a52f77d 100644
--- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -107,7 +107,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
assert((TD || !isa<PointerType>(VTy)) &&
"SimplifyDemandedBits needs to know bit widths!");
assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
- (!VTy->isIntOrIntVector() ||
+ (!VTy->isIntOrIntVectorTy() ||
VTy->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
@@ -401,7 +401,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
break;
}
case Instruction::BitCast:
- if (!I->getOperand(0)->getType()->isIntOrIntVector())
+ if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
return 0; // vector->int or fp->int?
if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 93b196126b..96c03428bd 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -158,7 +158,7 @@ Value *InstCombiner::dyn_castNegVal(Value *V) const {
return ConstantExpr::getNeg(C);
if (ConstantVector *C = dyn_cast<ConstantVector>(V))
- if (C->getType()->getElementType()->isInteger())
+ if (C->getType()->getElementType()->isIntegerTy())
return ConstantExpr::getNeg(C);
return 0;
@@ -177,7 +177,7 @@ Value *InstCombiner::dyn_castFNegVal(Value *V) const {
return ConstantExpr::getFNeg(C);
if (ConstantVector *C = dyn_cast<ConstantVector>(V))
- if (C->getType()->getElementType()->isFloatingPoint())
+ if (C->getType()->getElementType()->isFloatingPointTy())
return ConstantExpr::getFNeg(C);
return 0;
@@ -226,7 +226,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
if (isa<Constant>(TV) || isa<Constant>(FV)) {
// Bool selects with constant operands can be folded to logical ops.
- if (SI->getType()->isInteger(1)) return 0;
+ if (SI->getType()->isIntegerTy(1)) return 0;
Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
@@ -596,7 +596,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// (where tmp = 8*tmp2) into:
// getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
- if (TD && isa<ArrayType>(SrcElTy) && ResElTy->isInteger(8)) {
+ if (TD && isa<ArrayType>(SrcElTy) && ResElTy->isIntegerTy(8)) {
uint64_t ArrayEltSize =
TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
diff --git a/lib/Transforms/Instrumentation/ProfilingUtils.cpp b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
index 3214c8c459..8662a82e8e 100644
--- a/lib/Transforms/Instrumentation/ProfilingUtils.cpp
+++ b/lib/Transforms/Instrumentation/ProfilingUtils.cpp
@@ -84,7 +84,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
AI = MainFn->arg_begin();
// If the program looked at argc, have it look at the return value of the
// init call instead.
- if (!AI->getType()->isInteger(32)) {
+ if (!AI->getType()->isIntegerTy(32)) {
Instruction::CastOps opcode;
if (!AI->use_empty()) {
opcode = CastInst::getCastOpcode(InitCall, true, AI->getType(), true);
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 02346a135f..8f21aacbdd 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -405,7 +405,7 @@ ComputeValueKnownInPredecessors(Value *V, BasicBlock *BB,PredValueInfo &Result){
// If comparing a live-in value against a constant, see if we know the
// live-in value on any predecessors.
if (LVI && isa<Constant>(Cmp->getOperand(1)) &&
- Cmp->getType()->isInteger() && // Not vector compare.
+ Cmp->getType()->isIntegerTy() && // Not vector compare.
(!isa<Instruction>(Cmp->getOperand(0)) ||
cast<Instruction>(Cmp->getOperand(0))->getParent() != BB)) {
Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index e5fba28374..990e0c4e1b 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -871,7 +871,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC
// in the loop with the appropriate one directly.
if (IsEqual || (isa<ConstantInt>(Val) &&
- Val->getType()->isInteger(1))) {
+ Val->getType()->isIntegerTy(1))) {
Value *Replacement;
if (IsEqual)
Replacement = Val;
@@ -997,10 +997,10 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
case Instruction::And:
if (isa<ConstantInt>(I->getOperand(0)) &&
// constant -> RHS
- I->getOperand(0)->getType()->isInteger(1))
+ I->getOperand(0)->getType()->isIntegerTy(1))
cast<BinaryOperator>(I)->swapOperands();
if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType()->isInteger(1)) {
+ if (CB->getType()->isIntegerTy(1)) {
if (CB->isOne()) // X & 1 -> X
ReplaceUsesOfWith(I, I->getOperand(0), Worklist, L, LPM);
else // X & 0 -> 0
@@ -1011,10 +1011,10 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
case Instruction::Or:
if (isa<ConstantInt>(I->getOperand(0)) &&
// constant -> RHS
- I->getOperand(0)->getType()->isInteger(1))
+ I->getOperand(0)->getType()->isIntegerTy(1))
cast<BinaryOperator>(I)->swapOperands();
if (ConstantInt *CB = dyn_cast<ConstantInt>(I->getOperand(1)))
- if (CB->getType()->isInteger(1)) {
+ if (CB->getType()->isIntegerTy(1)) {
if (CB->isOne()) // X | 1 -> 1
ReplaceUsesOfWith(I, I->getOperand(1), Worklist, L, LPM);
else // X | 0 -> X
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index e0aa49154f..62e2977058 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -42,7 +42,7 @@ static Value *isBytewiseValue(Value *V) {
LLVMContext &Context = V->getContext();
// All byte-wide stores are splatable, even of arbitrary variables.
- if (V->getType()->isInteger(8)) return V;
+ if (V->getType()->isIntegerTy(8)) return V;
// Constant float and double values can be handled as integer values if the
// corresponding integer value is "byteable". An important case is 0.0.
diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp
index bbd4b451fb..187216a989 100644
--- a/lib/Transforms/Scalar/Reassociate.cpp
+++ b/lib/Transforms/Scalar/Reassociate.cpp
@@ -182,7 +182,7 @@ unsigned Reassociate::getRank(Value *V) {
// If this is a not or neg instruction, do not count it for rank. This
// assures us that X and ~X will have the same rank.
- if (!I->getType()->isInteger() ||
+ if (!I->getType()->isIntegerTy() ||
(!BinaryOperator::isNot(I) && !BinaryOperator::isNeg(I)))
++Rank;
@@ -929,7 +929,7 @@ void Reassociate::ReassociateBB(BasicBlock *BB) {
}
// Reject cases where it is pointless to do this.
- if (!isa<BinaryOperator>(BI) || BI->getType()->isFloatingPoint() ||
+ if (!isa<BinaryOperator>(BI) || BI->getType()->isFloatingPointTy() ||
isa<VectorType>(BI->getType()))
continue; // Floating point ops are not associative.
@@ -939,7 +939,7 @@ void Reassociate::ReassociateBB(BasicBlock *BB) {
// is not further optimized, it is likely to be transformed back to a
// short-circuited form for code gen, and the source order may have been
// optimized for the most likely conditions.
- if (BI->getType()->isInteger(1))
+ if (BI->getType()->isIntegerTy(1))
continue;
// If this is a subtract instruction which is not already in negate form,
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 900d119cb4..822712e028 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -835,7 +835,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
StoreVal = ConstantInt::get(Context, TotalVal);
if (isa<PointerType>(ValTy))
StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
- else if (ValTy->isFloatingPoint())
+ else if (ValTy->isFloatingPointTy())
StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
assert(StoreVal->getType() == ValTy && "Type mismatch!");
@@ -939,7 +939,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
Value *DestField = NewElts[i];
if (EltVal->getType() == FieldTy) {
// Storing to an integer field of this size, just do it.
- } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
+ } else if (FieldTy->isFloatingPointTy() || isa<VectorType>(FieldTy)) {
// Bitcast to the right element type (for fp/vector values).
EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
} else {
@@ -983,7 +983,8 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
Value *DestField = NewElts[i];
if (EltVal->getType() == ArrayEltTy) {
// Storing to an integer field of this size, just do it.
- } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
+ } else if (ArrayEltTy->isFloatingPointTy() ||
+ isa<VectorType>(ArrayEltTy)) {
// Bitcast to the right element type (for fp/vector values).
EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
} else {
@@ -1043,7 +1044,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
FieldSizeBits);
- if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
+ if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPointTy() &&
!isa<VectorType>(FieldTy))
SrcField = new BitCastInst(SrcField,
PointerType::getUnqual(FieldIntTy),
@@ -1522,7 +1523,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
// If the result is an integer, this is a trunc or bitcast.
if (isa<IntegerType>(ToType)) {
// Should be done.
- } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
+ } else if (ToType->isFloatingPointTy() || isa<VectorType>(ToType)) {
// Just do a bitcast, we know the sizes match up.
FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
} else {
@@ -1600,7 +1601,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
- if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
+ if (SV->getType()->isFloatingPointTy() || isa<VectorType>(SV->getType()))
SV = Builder.CreateBitCast(SV,
IntegerType::get(SV->getContext(),SrcWidth), "tmp");
else if (isa<PointerType>(SV->getType()))
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index 4216e8f995..54b4380ce3 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -525,7 +525,7 @@ static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
// Must be a Constant Array
ConstantArray *Array = dyn_cast<ConstantArray>(GlobalInit);
- if (!Array || !Array->getType()->getElementType()->isInteger(8))
+ if (!Array || !Array->getType()->getElementType()->isIntegerTy(8))
return false;
// Get the number of elements in the array
@@ -697,7 +697,7 @@ struct StrChrOpt : public LibCallOptimization {
if (!TD) return 0;
uint64_t Len = GetStringLength(SrcStr);
- if (Len == 0 || !FT->getParamType(1)->isInteger(32)) // memchr needs i32.
+ if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
return EmitMemChr(SrcStr, CI->getOperand(2), // include nul.
@@ -739,7 +739,7 @@ struct StrCmpOpt : public LibCallOptimization {
// Verify the "strcmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 2 ||
- !FT->getReturnType()->isInteger(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context))
return 0;
@@ -787,7 +787,7 @@ struct StrNCmpOpt : public LibCallOptimization {
// Verify the "strncmp" function prototype.
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 ||
- !FT->getReturnType()->isInteger(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(*Context) ||
!isa<IntegerType>(FT->getParamType(2)))
@@ -1008,7 +1008,7 @@ struct MemCmpOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
if (FT->getNumParams() != 3 || !isa<PointerType>(FT->getParamType(0)) ||
!isa<PointerType>(FT->getParamType(1)) ||
- !FT->getReturnType()->isInteger(32))
+ !FT->getReturnType()->isIntegerTy(32))
return 0;
Value *LHS = CI->getOperand(1), *RHS = CI->getOperand(2);
@@ -1241,7 +1241,7 @@ struct PowOpt : public LibCallOptimization {
// result type.
if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
- !FT->getParamType(0)->isFloatingPoint())
+ !FT->getParamType(0)->isFloatingPointTy())
return 0;
Value *Op1 = CI->getOperand(1), *Op2 = CI->getOperand(2);
@@ -1295,7 +1295,7 @@ struct Exp2Opt : public LibCallOptimization {
// Just make sure this has 1 argument of FP type, which matches the
// result type.
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isFloatingPoint())
+ !FT->getParamType(0)->isFloatingPointTy())
return 0;
Value *Op = CI->getOperand(1);
@@ -1375,7 +1375,7 @@ struct FFSOpt : public LibCallOptimization {
// Just make sure this has 2 arguments of the same FP type, which match the
// result type.
if (FT->getNumParams() != 1 ||
- !FT->getReturnType()->isInteger(32) ||
+ !FT->getReturnType()->isIntegerTy(32) ||
!isa<IntegerType>(FT->getParamType(0)))
return 0;
@@ -1411,7 +1411,7 @@ struct IsDigitOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !isa<IntegerType>(FT->getReturnType()) ||
- !FT->getParamType(0)->isInteger(32))
+ !FT->getParamType(0)->isIntegerTy(32))
return 0;
// isdigit(c) -> (c-'0') <u 10
@@ -1432,7 +1432,7 @@ struct IsAsciiOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require integer(i32)
if (FT->getNumParams() != 1 || !isa<IntegerType>(FT->getReturnType()) ||
- !FT->getParamType(0)->isInteger(32))
+ !FT->getParamType(0)->isIntegerTy(32))
return 0;
// isascii(c) -> c <u 128
@@ -1473,7 +1473,7 @@ struct ToAsciiOpt : public LibCallOptimization {
const FunctionType *FT = Callee->getFunctionType();
// We require i32(i32)
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) ||
- !FT->getParamType(0)->isInteger(32))
+ !FT->getParamType(0)->isIntegerTy(32))
return 0;
// isascii(c) -> c & 0x7f
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index 795b6bfd6e..2215059a5f 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1077,7 +1077,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantInt *CB;
if ((CB = dyn_cast<ConstantInt>(PN->getIncomingValue(i))) &&
- CB->getType()->isInteger(1)) {
+ CB->getType()->isIntegerTy(1)) {
// Okay, we now know that all edges from PredBB should be revectored to
// branch to RealDest.
BasicBlock *PredBB = PN->getIncomingBlock(i);