author    Micah Villmow <villmow@gmail.com>  2012-10-24 15:52:52 +0000
committer Micah Villmow <villmow@gmail.com>  2012-10-24 15:52:52 +0000
commit    aa76e9e2cf50af190de90bc778b7f7e42ef9ceff (patch)
tree      5206b0fb0ac695e3ab1c9cf434b5a85195abf336 /lib/Analysis
parent    3575222175b4982f380ff291bb17be67aadc0966 (diff)
Add support for getIntPtrType to return the pointer-sized integer type based on the address space.

This checkin also adds tests that exercise these paths and updates some of the clients.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@166578 91177308-0d34-0410-b5e6-96231b3b80d8
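In short, clients stop asking for a single module-wide intptr type and instead derive it from the pointer they are working with. A minimal before/after sketch of the two call patterns this patch migrates between; the overloads are taken from the hunks below, but the header paths and helper names are assumptions from the 2012-era tree:

    #include "llvm/DataLayout.h"     // assumed pre-llvm/IR header location
    #include "llvm/DerivedTypes.h"

    using namespace llvm;

    // Before this patch: one pointer-sized integer type per module,
    // regardless of which address space the pointer lives in.
    Type *intPtrTyOld(const DataLayout &TD, LLVMContext &Ctx) {
      return TD.getIntPtrType(Ctx);
    }

    // After this patch: the width follows the pointer's address space,
    // queried either from the pointer type or from an explicit AS number.
    Type *intPtrTyNew(const DataLayout &TD, PointerType *PTy) {
      IntegerType *ITy = TD.getIntPtrType(PTy->getContext(),
                                          PTy->getAddressSpace());
      (void)ITy; // same width as the type-based query below
      return TD.getIntPtrType(PTy);
    }

On a target whose address space 1 uses 32-bit pointers, intPtrTyNew returns i32 for an i8 addrspace(1)* argument, where the old context-only query would always have returned the default width (e.g. i64).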
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/ConstantFolding.cpp          | 238
-rw-r--r--  lib/Analysis/InlineCost.cpp               |   2
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp      |   2
-rw-r--r--  lib/Analysis/Lint.cpp                     |   5
-rw-r--r--  lib/Analysis/MemoryBuiltins.cpp           |  10
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp          |  15
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp  |   4
7 files changed, 143 insertions(+), 133 deletions(-)
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 146897ad67..c0e9020d91 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -41,7 +41,7 @@ using namespace llvm;
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
-/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
@@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = CDV->getType()->getNumElements();
-
+
Type *SrcEltTy = CDV->getType()->getElementType();
-
+
// If the vector is a vector of floating point, convert it to vector of int
// to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
C = ConstantExpr::getBitCast(C, SrcIVTy);
CDV = cast<ConstantDataVector>(C);
}
-
+
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
else
Result |= CDV->getElementAsInteger(i);
}
-
+
return ConstantInt::get(IT, Result);
}
-
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
}
-
+
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
-
+
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
-
- // Otherwise, we're changing the number of elements in a vector, which
+
+ // Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-
+
// First thing is first. We only want to think about integer here, so if
// we have something in FP form, recast it as integer.
if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
-
+
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
-
+
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
!isa<ConstantDataVector>(C))
return C;
}
-
+
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = TD.isLittleEndian();
-
+
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
-
+
// Shift it to the right place, depending on endianness.
- Src = ConstantExpr::getShl(Src,
+ Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
-
+
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
@@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
return ConstantVector::get(Result);
}
-
+
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
-
+
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
+ Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
+
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
-
+
return ConstantVector::get(Result);
}
@@ -224,28 +224,28 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
Offset = 0;
return true;
}
-
+
// Otherwise, if this isn't a constant expr, bail out.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
-
+
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
-
- // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
+
+ // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
if (CE->getOpcode() == Instruction::GetElementPtr) {
// Cannot compute this if the element type of the pointer is missing size
// info.
if (!cast<PointerType>(CE->getOperand(0)->getType())
->getElementType()->isSized())
return false;
-
+
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
return false;
-
+
// Otherwise, add any offset that our operands provide.
gep_type_iterator GTI = gep_type_begin(CE);
for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
-
+
if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
}
return true;
}
-
+
return false;
}
@@ -277,27 +277,27 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
"Out of range access");
-
+
// If this element is zero or undefined, we can just return since *CurPtr is
// zero initialized.
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
-
+
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
-
+
uint64_t Val = CI->getZExtValue();
unsigned IntBytes = unsigned(CI->getBitWidth()/8);
-
+
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
++ByteOffset;
}
return true;
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
@@ -309,13 +309,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
-
+
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
@@ -325,9 +325,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, TD))
return false;
-
+
++Index;
-
+
// Check to see if we read from the last struct element, if so we're done.
if (Index == CS->getType()->getNumElements())
return true;
@@ -375,11 +375,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return true;
}
-
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
- CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType()))
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, TD);
}
@@ -391,7 +391,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const DataLayout &TD) {
Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
-
+
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
// If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +415,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
return FoldBitCast(Res, LoadTy, TD);
return 0;
}
-
+
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
-
+
GlobalValue *GVal;
int64_t Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
return 0;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
@@ -432,11 +432,11 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset < 0) return 0;
-
+
// If we're not accessing anything in this constant, the result is undefined.
if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
-
+
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
BytesLoaded, TD))
@@ -464,15 +464,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return 0;
-
+
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (Constant *V =
+ if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return V;
}
-
+
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
@@ -500,14 +500,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
-
+
Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
if (Ty->isFloatingPointTy())
Res = ConstantExpr::getBitCast(Res, Ty);
return Res;
}
}
-
+
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
@@ -520,7 +520,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
return UndefValue::get(ResTy);
}
}
-
+
// Try hard to fold loads from bitcasted strange and non-type-safe things. We
// currently don't do any of this for big endian systems. It can be
// generalized in the future if someone is interested.
@@ -531,7 +531,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
if (LI->isVolatile()) return 0;
-
+
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
@@ -540,23 +540,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
+/// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const DataLayout *TD){
// SROA
-
+
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
-
-
+
+
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub && TD) {
GlobalValue *GV1, *GV2;
int64_t Offs1, Offs2;
-
+
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
GV1 == GV2) {
@@ -564,7 +564,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
return ConstantInt::get(Op0->getType(), Offs1-Offs2);
}
}
-
+
return 0;
}
@@ -575,7 +575,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD) return 0;
- Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(ResultTy);
bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
@@ -628,14 +628,15 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return 0;
-
- Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
+
+ unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
+ Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext(), AS);
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
if (!isa<ConstantInt>(Ops[i])) {
-
+
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 &&
@@ -702,6 +703,8 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// Also, this helps GlobalOpt do SROA on GlobalVariables.
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
+ assert(Ty->getPointerAddressSpace() == AS
+ && "Operand and result of GEP should be in the same address space.");
SmallVector<Constant*, 32> NewIdxs;
do {
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
@@ -709,15 +712,15 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
break;
-
+
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
return 0;
}
-
+
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
- IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
+ IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext(), AS);
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
@@ -837,7 +840,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
-
+
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -887,19 +890,19 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout *TD,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;
-
+
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
-
+
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
@@ -918,7 +921,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
unsigned InWidth = Input->getType()->getScalarSizeInBits();
unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
if (TD->getPointerSizeInBits(AS) < InWidth) {
- Constant *Mask =
+ Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
TD->getPointerSizeInBits(AS)));
Input = ConstantExpr::getAnd(Input, Mask);
@@ -967,7 +970,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
-
+
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
}
}
@@ -977,7 +980,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
- Constant *Ops0, Constant *Ops1,
+ Constant *Ops0, Constant *Ops1,
const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
@@ -988,9 +991,10 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// ConstantExpr::getCompare cannot do this, because it doesn't have TD
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
+ Type *IntPtrTy = NULL;
if (TD && Ops1->isNullValue()) {
- Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
+ IntPtrTy = TD->getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
@@ -998,22 +1002,24 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
-
+
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
- if (CE0->getOpcode() == Instruction::PtrToInt &&
- CE0->getType() == IntPtrTy) {
- Constant *C = CE0->getOperand(0);
- Constant *Null = Constant::getNullValue(C->getType());
- return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
+ if (CE0->getOpcode() == Instruction::PtrToInt) {
+ IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
+ if (CE0->getType() == IntPtrTy) {
+ Constant *C = CE0->getOperand(0);
+ Constant *Null = Constant::getNullValue(C->getType());
+ return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
+ }
}
}
-
+
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
- Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
+ Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
@@ -1022,34 +1028,36 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
IntPtrTy, false);
return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
}
+ }
- // Only do this transformation if the int is intptrty in size, otherwise
- // there is a truncation or extension that we aren't modeling.
- if ((CE0->getOpcode() == Instruction::PtrToInt &&
- CE0->getType() == IntPtrTy &&
- CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
+ // Only do this transformation if the int is intptrty in size, otherwise
+ // there is a truncation or extension that we aren't modeling.
+ if (CE0->getOpcode() == Instruction::PtrToInt) {
+ IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
+ if (CE0->getType() == IntPtrTy &&
+ CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
- CE1->getOperand(0), TD, TLI);
+ CE1->getOperand(0), TD, TLI);
}
}
-
+
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
- Constant *LHS =
+ Constant *LHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
TD, TLI);
- Constant *RHS =
+ Constant *RHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
TD, TLI);
- unsigned OpC =
+ unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
-
+
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
@@ -1057,7 +1065,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
-Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
+Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
@@ -1127,14 +1135,14 @@ llvm::canConstantFoldCallTo(const Function *F) {
if (!F->hasName()) return false;
StringRef Name = F->getName();
-
+
// In these cases, the check of the length is required. We don't want to
// return true for a name like "cos\0blah" which strcmp would return equal to
// "cos", but has length 8.
switch (Name[0]) {
default: return false;
case 'a':
- return Name == "acos" || Name == "asin" ||
+ return Name == "acos" || Name == "asin" ||
Name == "atan" || Name == "atan2";
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
@@ -1154,7 +1162,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
}
-static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
+static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
@@ -1162,7 +1170,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1178,7 +1186,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1272,7 +1280,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case 'e':
if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
-
+
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
@@ -1348,7 +1356,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
// Support ConstantVector in case we have an Undef in the top.
- if (isa<ConstantVector>(Operands[0]) ||
+ if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
@@ -1367,11 +1375,11 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
- return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1385,14 +1393,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
- double Op1V = Ty->isFloatTy() ?
+ double Op1V = Ty->isFloatTy() ?
(double)Op1->getValueAPF().convertToFloat() :
Op1->getValueAPF().convertToDouble();
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
- double Op2V = Ty->isFloatTy() ?
+ double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
@@ -1419,7 +1427,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
return 0;
}
-
+
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1469,7 +1477,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
-
+
return 0;
}
return 0;
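The SymbolicallyEvaluateGEP changes above show the recurring pattern in this patch: read the address space off the pointer operand first, then ask DataLayout for the matching integer type. A hedged sketch of that pattern in isolation (helper name and header paths are illustrative, not from the patch):

    #include "llvm/DataLayout.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Value.h"

    using namespace llvm;

    // Pointer-sized integer type for indexing off Ptr, sized for Ptr's
    // own address space rather than the default one.
    static Type *gepIndexType(const DataLayout &TD, Value *Ptr) {
      PointerType *PTy = cast<PointerType>(Ptr->getType());
      return TD.getIntPtrType(PTy->getContext(), PTy->getAddressSpace());
    }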
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 95e58022ca..ede1bf30db 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -788,7 +788,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD->getIntPtrType(V->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(V->getType());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 8e326122fa..016c328e9e 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -728,7 +728,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD.getIntPtrType(V->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(V->getContext(), AS);
return ConstantInt::get(IntPtrTy, Offset);
}
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 6d6d580ed1..d62808e9cd 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -626,8 +626,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
- if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
- Type::getInt64Ty(V->getContext())))
+ if (CI->isNoopCast(*TD))
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
@@ -640,7 +639,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
CE->getOperand(0)->getType(),
CE->getType(),
- TD ? TD->getIntPtrType(V->getContext()) :
+ TD ? TD->getIntPtrType(CE->getType()) :
Type::getInt64Ty(V->getContext())))
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
} else if (CE->getOpcode() == Instruction::ExtractValue) {
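The Lint change above moves to an isNoopCast overload that consults DataLayout itself, rather than having the caller guess an intptr type up front. Roughly, a cast look-through loop would now read as below (a sketch only; the null-TD guard is an assumption, since the hunk above dereferences TD unconditionally):

    #include "llvm/DataLayout.h"
    #include "llvm/InstrTypes.h"   // CastInst (assumed 2012-era path)

    using namespace llvm;

    // Peel casts that change no bits, e.g. a ptrtoint whose integer width
    // matches the pointer width in that pointer's address space.
    static Value *lookThroughNoopCasts(Value *V, const DataLayout *TD) {
      while (CastInst *CI = dyn_cast<CastInst>(V)) {
        if (!TD || !CI->isNoopCast(*TD))
          break;
        V = CI->getOperand(0);
      }
      return V;
    }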
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 0a539fe758..8d903c63af 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -376,9 +376,10 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
const TargetLibraryInfo *TLI,
LLVMContext &Context,
- bool RoundToAlign)
+ bool RoundToAlign,
+ unsigned AS)
: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
- IntegerType *IntTy = TD->getIntPtrType(Context);
+ IntegerType *IntTy = TD->getIntPtrType(Context, AS);
IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits);
}
@@ -561,9 +562,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
const TargetLibraryInfo *TLI,
- LLVMContext &Context)
+ LLVMContext &Context,
+ unsigned AS)
: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
- IntTy = TD->getIntPtrType(Context);
+ IntTy = TD->getIntPtrType(Context, AS);
Zero = ConstantInt::get(IntTy, 0);
}
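Both MemoryBuiltins helpers above now take the address space explicitly, so their internal offset arithmetic runs at the right bit width. An illustrative call site follows; the wrapper function and the AS = 0 default are assumptions, not part of this hunk:

    #include "llvm/Analysis/MemoryBuiltins.h"

    using namespace llvm;

    // Compute (size, offset) for V with APInts sized for address space AS.
    SizeOffsetType objectSizeIn(Value *V, const DataLayout *TD,
                                const TargetLibraryInfo *TLI,
                                LLVMContext &Ctx, unsigned AS = 0) {
      ObjectSizeOffsetVisitor Visitor(TD, TLI, Ctx,
                                      /*RoundToAlign=*/false, AS);
      return Visitor.compute(V);
    }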
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 5400646be1..3e06298882 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -2581,13 +2581,12 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
-const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
+const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy, Type *IntPtrTy) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
- return getConstant(TD->getIntPtrType(getContext()),
- TD->getTypeAllocSize(AllocTy));
+ return getConstant(IntPtrTy, TD->getTypeAllocSize(AllocTy));
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
@@ -2606,13 +2605,13 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
unsigned FieldNo) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
- return getConstant(TD->getIntPtrType(getContext()),
+ return getConstant(IntPtrTy,
TD->getStructLayout(STy)->getElementOffset(FieldNo));
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
@@ -2699,7 +2698,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
- if (TD) return TD->getIntPtrType(getContext());
+ if (TD) return TD->getIntPtrType(Ty);
// Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
@@ -3152,13 +3151,13 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+ const SCEV *FieldOffset = getOffsetOfExpr(STy, IntPtrTy, FieldNo);
// Add the field offset to the running total offset.
TotalOffset = getAddExpr(TotalOffset, FieldOffset);
} else {
// For an array, add the element offset, explicitly scaled.
- const SCEV *ElementSize = getSizeOfExpr(*GTI);
+ const SCEV *ElementSize = getSizeOfExpr(*GTI, IntPtrTy);
const SCEV *IndexS = getSCEV(Index);
// Getelementptr indices are signed.
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 111bfb4a6a..0295da5e4a 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -417,7 +417,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// array indexing.
SmallVector<const SCEV *, 8> ScaledOps;
if (ElTy->isSized()) {
- const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
+ Type *IntPtrTy = SE.TD ? SE.TD->getIntPtrType(PTy) :
+ IntegerType::getInt64Ty(PTy->getContext());
+ const SCEV *ElSize = SE.getSizeOfExpr(ElTy, IntPtrTy);
if (!ElSize->isZero()) {
SmallVector<const SCEV *, 8> NewOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
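The expander hunk above doubles as the template for callers of the reworked ScalarEvolution API: choose the integer type from the pointer being indexed, fall back to i64 when no DataLayout is available, and pass it to getSizeOfExpr explicitly. A sketch under those assumptions:

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/DataLayout.h"

    using namespace llvm;

    // SCEV for the allocation size of PTy's pointee, using an intptr type
    // sized for PTy's address space (i64 if no DataLayout is present).
    const SCEV *elementSizeExpr(ScalarEvolution &SE, const DataLayout *TD,
                                PointerType *PTy) {
      Type *IntPtrTy = TD ? TD->getIntPtrType(PTy)
                          : IntegerType::getInt64Ty(PTy->getContext());
      return SE.getSizeOfExpr(PTy->getElementType(), IntPtrTy);
    }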