 include/llvm/Analysis/MemoryBuiltins.h                    |   5
 include/llvm/Analysis/ScalarEvolution.h                   |   5
 include/llvm/DataLayout.h                                 |  12
 include/llvm/InstrTypes.h                                 |   6
 include/llvm/Transforms/Utils/Local.h                     |   4
 lib/Analysis/ConstantFolding.cpp                          | 238
 lib/Analysis/InlineCost.cpp                               |   2
 lib/Analysis/InstructionSimplify.cpp                      |   2
 lib/Analysis/Lint.cpp                                     |   5
 lib/Analysis/MemoryBuiltins.cpp                           |  10
 lib/Analysis/ScalarEvolution.cpp                          |  15
 lib/Analysis/ScalarEvolutionExpander.cpp                  |   4
 lib/CodeGen/AsmPrinter/AsmPrinter.cpp                     |   2
 lib/CodeGen/IntrinsicLowering.cpp                         |  12
 lib/CodeGen/SelectionDAG/FastISel.cpp                     |   5
 lib/CodeGen/SelectionDAG/SelectionDAG.cpp                 |   9
 lib/Target/ARM/ARMSelectionDAGInfo.cpp                    |   3
 lib/Target/NVPTX/NVPTXAsmPrinter.cpp                      |   2
 lib/Target/PowerPC/PPCISelLowering.cpp                    |   3
 lib/Target/Target.cpp                                     |   2
 lib/Target/X86/X86FastISel.cpp                            |  10
 lib/Target/X86/X86SelectionDAGInfo.cpp                    |   3
 lib/Target/XCore/XCoreISelLowering.cpp                    |   6
 lib/Transforms/IPO/GlobalOpt.cpp                          |   4
 lib/Transforms/IPO/MergeFunctions.cpp                     |   5
 lib/Transforms/InstCombine/InstCombine.h                  |   2
 lib/Transforms/InstCombine/InstCombineCalls.cpp           |  10
 lib/Transforms/InstCombine/InstCombineCasts.cpp           | 290
 lib/Transforms/InstCombine/InstCombineCompares.cpp        |   8
 lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp |  79
 lib/Transforms/InstCombine/InstructionCombining.cpp       |  12
 lib/Transforms/Instrumentation/BoundsChecking.cpp         |   2
 lib/Transforms/Scalar/CodeGenPrepare.cpp                  |   2
 lib/Transforms/Scalar/GVN.cpp                             |   9
 lib/Transforms/Scalar/IndVarSimplify.cpp                  |  10
 lib/Transforms/Scalar/LoopIdiomRecognize.cpp              |   8
 lib/Transforms/Scalar/ScalarReplAggregates.cpp            |   2
 lib/Transforms/Scalar/SimplifyLibCalls.cpp                |  26
 lib/Transforms/Utils/BuildLibCalls.cpp                    |  62
 lib/Transforms/Utils/SimplifyCFG.cpp                      |  14
 lib/Transforms/Utils/SimplifyLibCalls.cpp                 |  34
 lib/VMCore/DataLayout.cpp                                 |  23
 lib/VMCore/Instructions.cpp                               |  11
 lib/VMCore/Type.cpp                                       |   7
 test/Other/multi-pointer-size.ll                          |  43
 test/Transforms/InstCombine/constant-fold-gep-as-0.ll     | 235
 46 files changed, 813 insertions(+), 450 deletions(-)
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index a842898e41..9e5d97dd7f 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -168,7 +168,8 @@ class ObjectSizeOffsetVisitor
public:
ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
- LLVMContext &Context, bool RoundToAlign = false);
+ LLVMContext &Context, bool RoundToAlign = false,
+ unsigned AS = 0);
SizeOffsetType compute(Value *V);
@@ -229,7 +230,7 @@ class ObjectSizeOffsetEvaluator
public:
ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
- LLVMContext &Context);
+ LLVMContext &Context, unsigned AS = 0);
SizeOffsetEvalType compute(Value *V);
bool knownSize(SizeOffsetEvalType SizeOffset) {
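[Note on the hunk above] The new trailing AS parameter defaults to 0, so existing callers keep compiling; an analysis working on pointers in another address space passes it explicitly. A minimal sketch of an updated call site (Ptr, TD, and TLI are assumed names, not from this patch):

    // Size the visitor's intptr arithmetic to Ptr's own address space.
    unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
    ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(),
                                    /*RoundToAlign=*/false, AS);
    SizeOffsetType SizeOffset = Visitor.compute(Ptr);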
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 67c9a4d14f..d2df67080c 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -628,7 +628,7 @@ namespace llvm {
/// getSizeOfExpr - Return an expression for sizeof on the given type.
///
- const SCEV *getSizeOfExpr(Type *AllocTy);
+ const SCEV *getSizeOfExpr(Type *AllocTy, Type *IntPtrTy);
/// getAlignOfExpr - Return an expression for alignof on the given type.
///
@@ -636,7 +636,8 @@ namespace llvm {
/// getOffsetOfExpr - Return an expression for offsetof on the given field.
///
- const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
+ const SCEV *getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
+ unsigned FieldNo);
/// getOffsetOfExpr - Return an expression for offsetof on the given field.
///
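[Note on the hunk above] Callers now supply the integer type themselves, typically derived from the pointer type under analysis, so sizeof/offsetof expressions are built at the width of that address space. A hedged sketch (GEP, ElemTy, STy, and FieldNo are illustrative names):

    Type *IntPtrTy = TD->getIntPtrType(GEP->getType());
    const SCEV *ElementSize = SE.getSizeOfExpr(ElemTy, IntPtrTy);
    const SCEV *FieldOffset = SE.getOffsetOfExpr(STy, IntPtrTy, FieldNo);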
diff --git a/include/llvm/DataLayout.h b/include/llvm/DataLayout.h
index c9ac0b7fea..0a37353da5 100644
--- a/include/llvm/DataLayout.h
+++ b/include/llvm/DataLayout.h
@@ -337,11 +337,13 @@ public:
///
unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
- /// getIntPtrType - Return an unsigned integer type that is the same size or
- /// greater to the host pointer size.
- /// FIXME: Need to remove the default argument when the rest of the LLVM code
- /// base has been updated.
- IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
+ /// getIntPtrType - Return an integer type that is the same size or
+ /// greater to the pointer size based on the address space.
+ IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace) const;
+
+ /// getIntPtrType - Return an integer type that is the same size or
+ /// greater to the pointer size based on the Type.
+ IntegerType *getIntPtrType(Type *) const;
/// getIndexedOffset - return the offset from the beginning of the type for
/// the specified indices. This is used to implement getelementptr.
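[Note on the hunk above] The two overloads should agree for a given pointer; the Type overload simply derives the address space from the pointer type. A quick sketch (Ptr is an assumed Value* of pointer type):

    unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
    IntegerType *ByAS = TD.getIntPtrType(Ptr->getContext(), AS);
    IntegerType *ByTy = TD.getIntPtrType(Ptr->getType());
    assert(ByAS == ByTy && "Type overload derives the same address space");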
diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h
index cfc79394b2..b661372f53 100644
--- a/include/llvm/InstrTypes.h
+++ b/include/llvm/InstrTypes.h
@@ -17,6 +17,7 @@
#define LLVM_INSTRUCTION_TYPES_H
#include "llvm/Instruction.h"
+#include "llvm/DataLayout.h"
#include "llvm/OperandTraits.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/Twine.h"
@@ -576,6 +577,11 @@ public:
Type *IntPtrTy ///< Integer type corresponding to pointer
) const;
+ /// @brief Determine if this cast is a no-op cast.
+ bool isNoopCast(
+ const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
+ ) const;
+
/// Determine how a pair of casts can be eliminated, if they can be at all.
/// This is a helper function for both CastInst and ConstantExpr.
/// @returns 0 if the CastInst pair can't be eliminated, otherwise
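[Note on the hunk above] The DataLayout overload lets the cast look up the right intptr type for its own address space, instead of the caller guessing one type for all spaces. A sketch mirroring the Lint and FastISel changes later in this patch (V and DL are assumed names):

    if (const CastInst *CI = dyn_cast<CastInst>(V))
      if (CI->isNoopCast(DL))       // was: CI->isNoopCast(IntPtrTy)
        V = CI->getOperand(0);      // strip the no-op cast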
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index fd1b5556ef..49eeb57622 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -177,8 +177,9 @@ static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
template<typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
bool NoAssumptions = false) {
+ unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
gep_type_iterator GTI = gep_type_begin(GEP);
- Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(GEP->getContext(), AS);
Value *Result = Constant::getNullValue(IntPtrTy);
// If the GEP is inbounds, we know that none of the addressing operations will
@@ -186,7 +187,6 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
bool isInBounds = cast<GEPOperator>(GEP)->isInBounds() && !NoAssumptions;
// Build a mask for high order bits.
- unsigned AS = cast<GEPOperator>(GEP)->getPointerAddressSpace();
unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
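[Worked example for the hunk above] In an address space with 32-bit pointers, IntPtrWidth is 32 and PtrSizeMask becomes ~0ULL >> 32 = 0xFFFFFFFF, so each partial offset is masked to the pointer width of the GEP's own address space rather than always to the default space's width.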
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index 146897ad67..c0e9020d91 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -41,7 +41,7 @@ using namespace llvm;
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
-/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
+/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy,
@@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
return ConstantExpr::getBitCast(C, DestTy);
unsigned NumSrcElts = CDV->getType()->getNumElements();
-
+
Type *SrcEltTy = CDV->getType()->getElementType();
-
+
// If the vector is a vector of floating point, convert it to vector of int
// to simplify things.
if (SrcEltTy->isFloatingPointTy()) {
@@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
C = ConstantExpr::getBitCast(C, SrcIVTy);
CDV = cast<ConstantDataVector>(C);
}
-
+
// Now that we know that the input value is a vector of integers, just shift
// and insert them into our result.
unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
@@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
else
Result |= CDV->getElementAsInteger(i);
}
-
+
return ConstantInt::get(IT, Result);
}
-
+
// The code below only handles casts to vectors currently.
VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
if (DestVTy == 0)
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If this is a scalar -> vector cast, convert the input into a <1 x scalar>
// vector so the code below can handle it uniformly.
if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
Constant *Ops = C; // don't take the address of C!
return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
}
-
+
// If this is a bitcast from constant vector -> vector, fold it.
if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
return ConstantExpr::getBitCast(C, DestTy);
-
+
// If the element types match, VMCore can fold it.
unsigned NumDstElt = DestVTy->getNumElements();
unsigned NumSrcElt = C->getType()->getVectorNumElements();
if (NumDstElt == NumSrcElt)
return ConstantExpr::getBitCast(C, DestTy);
-
+
Type *SrcEltTy = C->getType()->getVectorElementType();
Type *DstEltTy = DestVTy->getElementType();
-
- // Otherwise, we're changing the number of elements in a vector, which
+
+ // Otherwise, we're changing the number of elements in a vector, which
// requires endianness information to do the right thing. For example,
// bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
// folds to (little endian):
// <4 x i32> <i32 0, i32 0, i32 1, i32 0>
// and to (big endian):
// <4 x i32> <i32 0, i32 0, i32 0, i32 1>
-
+
// First thing is first. We only want to think about integer here, so if
// we have something in FP form, recast it as integer.
if (DstEltTy->isFloatingPointTy()) {
@@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
// Recursively handle this integer conversion, if possible.
C = FoldBitCast(C, DestIVTy, TD);
-
+
// Finally, VMCore can handle this now that #elts line up.
return ConstantExpr::getBitCast(C, DestTy);
}
-
+
// Okay, we know the destination is integer, if the input is FP, convert
// it to integer first.
if (SrcEltTy->isFloatingPointTy()) {
@@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
!isa<ConstantDataVector>(C))
return C;
}
-
+
// Now we know that the input and output vectors are both integer vectors
// of the same size, and that their #elements is not the same. Do the
// conversion here, which depends on whether the input or output has
// more elements.
bool isLittleEndian = TD.isLittleEndian();
-
+
SmallVector<Constant*, 32> Result;
if (NumDstElt < NumSrcElt) {
// Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
@@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
Constant *Src =dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
// Zero extend the element to the right size.
Src = ConstantExpr::getZExt(Src, Elt->getType());
-
+
// Shift it to the right place, depending on endianness.
- Src = ConstantExpr::getShl(Src,
+ Src = ConstantExpr::getShl(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
-
+
// Mix it in.
Elt = ConstantExpr::getOr(Elt, Src);
}
@@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
}
return ConstantVector::get(Result);
}
-
+
// Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
unsigned Ratio = NumDstElt/NumSrcElt;
unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
-
+
// Loop over each source value, expanding into multiple results.
for (unsigned i = 0; i != NumSrcElt; ++i) {
Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
if (!Src) // Reject constantexpr elements.
return ConstantExpr::getBitCast(C, DestTy);
-
+
unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
for (unsigned j = 0; j != Ratio; ++j) {
// Shift the piece of the value into the right place, depending on
// endianness.
- Constant *Elt = ConstantExpr::getLShr(Src,
+ Constant *Elt = ConstantExpr::getLShr(Src,
ConstantInt::get(Src->getType(), ShiftAmt));
ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
-
+
// Truncate and remember this piece.
Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
}
}
-
+
return ConstantVector::get(Result);
}
@@ -224,28 +224,28 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
Offset = 0;
return true;
}
-
+
// Otherwise, if this isn't a constant expr, bail out.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return false;
-
+
// Look through ptr->int and ptr->ptr casts.
if (CE->getOpcode() == Instruction::PtrToInt ||
CE->getOpcode() == Instruction::BitCast)
return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
-
- // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
+
+ // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
if (CE->getOpcode() == Instruction::GetElementPtr) {
// Cannot compute this if the element type of the pointer is missing size
// info.
if (!cast<PointerType>(CE->getOperand(0)->getType())
->getElementType()->isSized())
return false;
-
+
// If the base isn't a global+constant, we aren't either.
if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
return false;
-
+
// Otherwise, add any offset that our operands provide.
gep_type_iterator GTI = gep_type_begin(CE);
for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
@@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
ConstantInt *CI = dyn_cast<ConstantInt>(*i);
if (!CI) return false; // Index isn't a simple constant?
if (CI->isZero()) continue; // Not adding anything.
-
+
if (StructType *ST = dyn_cast<StructType>(*GTI)) {
// N = N + Offset
Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
@@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
}
return true;
}
-
+
return false;
}
@@ -277,27 +277,27 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
"Out of range access");
-
+
// If this element is zero or undefined, we can just return since *CurPtr is
// zero initialized.
if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
return true;
-
+
if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
if (CI->getBitWidth() > 64 ||
(CI->getBitWidth() & 7) != 0)
return false;
-
+
uint64_t Val = CI->getZExtValue();
unsigned IntBytes = unsigned(CI->getBitWidth()/8);
-
+
for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
++ByteOffset;
}
return true;
}
-
+
if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
if (CFP->getType()->isDoubleTy()) {
C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
@@ -309,13 +309,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return false;
}
-
+
if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
const StructLayout *SL = TD.getStructLayout(CS->getType());
unsigned Index = SL->getElementContainingOffset(ByteOffset);
uint64_t CurEltOffset = SL->getElementOffset(Index);
ByteOffset -= CurEltOffset;
-
+
while (1) {
// If the element access is to the element itself and not to tail padding,
// read the bytes from the element.
@@ -325,9 +325,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
!ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
BytesLeft, TD))
return false;
-
+
++Index;
-
+
// Check to see if we read from the last struct element, if so we're done.
if (Index == CS->getType()->getNumElements())
return true;
@@ -375,11 +375,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
}
return true;
}
-
+
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
if (CE->getOpcode() == Instruction::IntToPtr &&
- CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
- return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
+ CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType()))
+ return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
BytesLeft, TD);
}
@@ -391,7 +391,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const DataLayout &TD) {
Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
-
+
// If this isn't an integer load we can't fold it directly.
if (!IntType) {
// If this is a float/double load, we can try folding it as an int32/64 load
@@ -415,15 +415,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
return FoldBitCast(Res, LoadTy, TD);
return 0;
}
-
+
unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
-
+
GlobalValue *GVal;
int64_t Offset;
if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
return 0;
-
+
GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
!GV->getInitializer()->getType()->isSized())
@@ -432,11 +432,11 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
// If we're loading off the beginning of the global, some bytes may be valid,
// but we don't try to handle this.
if (Offset < 0) return 0;
-
+
// If we're not accessing anything in this constant, the result is undefined.
if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
return UndefValue::get(IntType);
-
+
unsigned char RawBytes[32] = {0};
if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
BytesLoaded, TD))
@@ -464,15 +464,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
// If the loaded value isn't a constant expr, we can't handle it.
ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
if (!CE) return 0;
-
+
if (CE->getOpcode() == Instruction::GetElementPtr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer())
- if (Constant *V =
+ if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
return V;
}
-
+
// Instead of loading constant c string, use corresponding integer value
// directly if string length is small enough.
StringRef Str;
@@ -500,14 +500,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
SingleChar = 0;
StrVal = (StrVal << 8) | SingleChar;
}
-
+
Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
if (Ty->isFloatingPointTy())
Res = ConstantExpr::getBitCast(Res, Ty);
return Res;
}
}
-
+
// If this load comes from anywhere in a constant global, and if the global
// is all undef or zero, we know what it loads.
if (GlobalVariable *GV =
@@ -520,7 +520,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
return UndefValue::get(ResTy);
}
}
-
+
// Try hard to fold loads from bitcasted strange and non-type-safe things. We
// currently don't do any of this for big endian systems. It can be
// generalized in the future if someone is interested.
@@ -531,7 +531,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
if (LI->isVolatile()) return 0;
-
+
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
return ConstantFoldLoadFromConstPtr(C, TD);
@@ -540,23 +540,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
-/// these together. If target data info is available, it is provided as TD,
+/// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const DataLayout *TD){
// SROA
-
+
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
// Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
// bits.
-
-
+
+
// If the constant expr is something like &A[123] - &A[4].f, fold this into a
// constant. This happens frequently when iterating over a global array.
if (Opc == Instruction::Sub && TD) {
GlobalValue *GV1, *GV2;
int64_t Offs1, Offs2;
-
+
if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
GV1 == GV2) {
@@ -564,7 +564,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
return ConstantInt::get(Op0->getType(), Offs1-Offs2);
}
}
-
+
return 0;
}
@@ -575,7 +575,7 @@ static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) {
if (!TD) return 0;
- Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(ResultTy);
bool Any = false;
SmallVector<Constant*, 32> NewIdxs;
@@ -628,14 +628,15 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
!Ptr->getType()->isPointerTy())
return 0;
-
- Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
+
+ unsigned AS = cast<PointerType>(Ptr->getType())->getAddressSpace();
+ Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext(), AS);
// If this is a constant expr gep that is effectively computing an
// "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
if (!isa<ConstantInt>(Ops[i])) {
-
+
// If this is "gep i8* Ptr, (sub 0, V)", fold this as:
// "inttoptr (sub (ptrtoint Ptr), V)"
if (Ops.size() == 2 &&
@@ -702,6 +703,8 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// Also, this helps GlobalOpt do SROA on GlobalVariables.
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
+ assert(Ty->getPointerAddressSpace() == AS
+ && "Operand and result of GEP should be in the same address space.");
SmallVector<Constant*, 32> NewIdxs;
do {
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
@@ -709,15 +712,15 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// The only pointer indexing we'll do is on the first index of the GEP.
if (!NewIdxs.empty())
break;
-
+
// Only handle pointers to sized types, not pointers to functions.
if (!ATy->getElementType()->isSized())
return 0;
}
-
+
// Determine which element of the array the offset points into.
APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
- IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
+ IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext(), AS);
if (ElemSize == 0)
// The element size is 0. This may be [0 x Ty]*, so just use a zero
// index for this level and proceed to the next level to see if it can
@@ -837,7 +840,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
TD, TLI);
-
+
if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);
@@ -887,19 +890,19 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
-Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
+Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout *TD,
- const TargetLibraryInfo *TLI) {
+ const TargetLibraryInfo *TLI) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;
-
+
return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
}
-
+
switch (Opcode) {
default: return 0;
case Instruction::ICmp:
@@ -918,7 +921,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
unsigned InWidth = Input->getType()->getScalarSizeInBits();
unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
if (TD->getPointerSizeInBits(AS) < InWidth) {
- Constant *Mask =
+ Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
TD->getPointerSizeInBits(AS)));
Input = ConstantExpr::getAnd(Input, Mask);
@@ -967,7 +970,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
return C;
if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
return C;
-
+
return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
}
}
@@ -977,7 +980,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
- Constant *Ops0, Constant *Ops1,
+ Constant *Ops0, Constant *Ops1,
const DataLayout *TD,
const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0
@@ -988,9 +991,10 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// ConstantExpr::getCompare cannot do this, because it doesn't have TD
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
+ Type *IntPtrTy = NULL;
if (TD && Ops1->isNullValue()) {
- Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
+ IntPtrTy = TD->getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
@@ -998,22 +1002,24 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Null = Constant::getNullValue(C->getType());
return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
}
-
+
// Only do this transformation if the int is intptrty in size, otherwise
// there is a truncation or extension that we aren't modeling.
- if (CE0->getOpcode() == Instruction::PtrToInt &&
- CE0->getType() == IntPtrTy) {
- Constant *C = CE0->getOperand(0);
- Constant *Null = Constant::getNullValue(C->getType());
- return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
+ if (CE0->getOpcode() == Instruction::PtrToInt) {
+ IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
+ if (CE0->getType() == IntPtrTy) {
+ Constant *C = CE0->getOperand(0);
+ Constant *Null = Constant::getNullValue(C->getType());
+ return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
+ }
}
}
-
+
if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
- Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
+ Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
@@ -1022,34 +1028,36 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
IntPtrTy, false);
return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
}
+ }
- // Only do this transformation if the int is intptrty in size, otherwise
- // there is a truncation or extension that we aren't modeling.
- if ((CE0->getOpcode() == Instruction::PtrToInt &&
- CE0->getType() == IntPtrTy &&
- CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
+ // Only do this transformation if the int is intptrty in size, otherwise
+ // there is a truncation or extension that we aren't modeling.
+ if (CE0->getOpcode() == Instruction::PtrToInt) {
+ IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
+ if (CE0->getType() == IntPtrTy &&
+ CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
- CE1->getOperand(0), TD, TLI);
+ CE1->getOperand(0), TD, TLI);
}
}
-
+
// icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
// icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
- Constant *LHS =
+ Constant *LHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
TD, TLI);
- Constant *RHS =
+ Constant *RHS =
ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
TD, TLI);
- unsigned OpC =
+ unsigned OpC =
Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
Constant *Ops[] = { LHS, RHS };
return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
}
}
-
+
return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
@@ -1057,7 +1065,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
-Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
+Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE) {
if (!CE->getOperand(1)->isNullValue())
return 0; // Do not allow stepping over the value!
@@ -1127,14 +1135,14 @@ llvm::canConstantFoldCallTo(const Function *F) {
if (!F->hasName()) return false;
StringRef Name = F->getName();
-
+
// In these cases, the check of the length is required. We don't want to
// return true for a name like "cos\0blah" which strcmp would return equal to
// "cos", but has length 8.
switch (Name[0]) {
default: return false;
case 'a':
- return Name == "acos" || Name == "asin" ||
+ return Name == "acos" || Name == "asin" ||
Name == "atan" || Name == "atan2";
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
@@ -1154,7 +1162,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
}
-static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
+static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
Type *Ty) {
sys::llvm_fenv_clearexcept();
V = NativeFP(V);
@@ -1162,7 +1170,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1178,7 +1186,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
sys::llvm_fenv_clearexcept();
return 0;
}
-
+
if (Ty->isFloatTy())
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
@@ -1272,7 +1280,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case 'e':
if (Name == "exp" && TLI->has(LibFunc::exp))
return ConstantFoldFP(exp, V, Ty);
-
+
if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
@@ -1348,7 +1356,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
// Support ConstantVector in case we have an Undef in the top.
- if (isa<ConstantVector>(Operands[0]) ||
+ if (isa<ConstantVector>(Operands[0]) ||
isa<ConstantDataVector>(Operands[0])) {
Constant *Op = cast<Constant>(Operands[0]);
switch (F->getIntrinsicID()) {
@@ -1367,11 +1375,11 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
case Intrinsic::x86_sse2_cvttsd2si64:
if (ConstantFP *FPOp =
dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
- return ConstantFoldConvertToInt(FPOp->getValueAPF(),
+ return ConstantFoldConvertToInt(FPOp->getValueAPF(),
/*roundTowardZero=*/true, Ty);
}
}
-
+
if (isa<UndefValue>(Operands[0])) {
if (F->getIntrinsicID() == Intrinsic::bswap)
return Operands[0];
@@ -1385,14 +1393,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
if (!Ty->isFloatTy() && !Ty->isDoubleTy())
return 0;
- double Op1V = Ty->isFloatTy() ?
+ double Op1V = Ty->isFloatTy() ?
(double)Op1->getValueAPF().convertToFloat() :
Op1->getValueAPF().convertToDouble();
if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
if (Op2->getType() != Op1->getType())
return 0;
- double Op2V = Ty->isFloatTy() ?
+ double Op2V = Ty->isFloatTy() ?
(double)Op2->getValueAPF().convertToFloat():
Op2->getValueAPF().convertToDouble();
@@ -1419,7 +1427,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
}
return 0;
}
-
+
if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
switch (F->getIntrinsicID()) {
@@ -1469,7 +1477,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
}
}
-
+
return 0;
}
return 0;
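[Note on the compare-folding hunks above] They all follow one rule: derive the intptr type from the cast operand itself rather than once from the context. In sketch form (CE0 as in the code above):

    // inttoptr: check widths against the destination pointer's space.
    Type *IntPtrForDst = TD->getIntPtrType(CE0->getType());
    // ptrtoint: check widths against the source pointer's space.
    Type *IntPtrForSrc = TD->getIntPtrType(CE0->getOperand(0)->getType());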
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 95e58022ca..ede1bf30db 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -788,7 +788,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD->getIntPtrType(V->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(V->getType());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 8e326122fa..016c328e9e 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -728,7 +728,7 @@ static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD.getIntPtrType(V->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(V->getContext(), AS);
return ConstantInt::get(IntPtrTy, Offset);
}
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 6d6d580ed1..d62808e9cd 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -626,8 +626,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
- if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
- Type::getInt64Ty(V->getContext())))
+ if (CI->isNoopCast(*TD))
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
@@ -640,7 +639,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
CE->getOperand(0)->getType(),
CE->getType(),
- TD ? TD->getIntPtrType(V->getContext()) :
+ TD ? TD->getIntPtrType(CE->getType()) :
Type::getInt64Ty(V->getContext())))
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
} else if (CE->getOpcode() == Instruction::ExtractValue) {
diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp
index 0a539fe758..8d903c63af 100644
--- a/lib/Analysis/MemoryBuiltins.cpp
+++ b/lib/Analysis/MemoryBuiltins.cpp
@@ -376,9 +376,10 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
const TargetLibraryInfo *TLI,
LLVMContext &Context,
- bool RoundToAlign)
+ bool RoundToAlign,
+ unsigned AS)
: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
- IntegerType *IntTy = TD->getIntPtrType(Context);
+ IntegerType *IntTy = TD->getIntPtrType(Context, AS);
IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits);
}
@@ -561,9 +562,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
const TargetLibraryInfo *TLI,
- LLVMContext &Context)
+ LLVMContext &Context,
+ unsigned AS)
: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
- IntTy = TD->getIntPtrType(Context);
+ IntTy = TD->getIntPtrType(Context, AS);
Zero = ConstantInt::get(IntTy, 0);
}
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 5400646be1..3e06298882 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -2581,13 +2581,12 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
-const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
+const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy, Type *IntPtrTy) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
- return getConstant(TD->getIntPtrType(getContext()),
- TD->getTypeAllocSize(AllocTy));
+ return getConstant(IntPtrTy, TD->getTypeAllocSize(AllocTy));
Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
@@ -2606,13 +2605,13 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
unsigned FieldNo) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
if (TD)
- return getConstant(TD->getIntPtrType(getContext()),
+ return getConstant(IntPtrTy,
TD->getStructLayout(STy)->getElementOffset(FieldNo));
Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
@@ -2699,7 +2698,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
- if (TD) return TD->getIntPtrType(getContext());
+ if (TD) return TD->getIntPtrType(Ty);
// Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
@@ -3152,13 +3151,13 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
- const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+ const SCEV *FieldOffset = getOffsetOfExpr(STy, IntPtrTy, FieldNo);
// Add the field offset to the running total offset.
TotalOffset = getAddExpr(TotalOffset, FieldOffset);
} else {
// For an array, add the element offset, explicitly scaled.
- const SCEV *ElementSize = getSizeOfExpr(*GTI);
+ const SCEV *ElementSize = getSizeOfExpr(*GTI, IntPtrTy);
const SCEV *IndexS = getSCEV(Index);
// Getelementptr indices are signed.
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 111bfb4a6a..0295da5e4a 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -417,7 +417,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// array indexing.
SmallVector<const SCEV *, 8> ScaledOps;
if (ElTy->isSized()) {
- const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
+ Type *IntPtrTy = SE.TD ? SE.TD->getIntPtrType(PTy) :
+ IntegerType::getInt64Ty(PTy->getContext());
+ const SCEV *ElSize = SE.getSizeOfExpr(ElTy, IntPtrTy);
if (!ElSize->isZero()) {
SmallVector<const SCEV *, 8> NewOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 4de98da655..faadc0fa66 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1505,7 +1505,7 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
// Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0);
- Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
+ Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
false/*ZExt*/);
return lowerConstant(Op, AP);
}
diff --git a/lib/CodeGen/IntrinsicLowering.cpp b/lib/CodeGen/IntrinsicLowering.cpp
index 00b1198563..fcb8c9df3d 100644
--- a/lib/CodeGen/IntrinsicLowering.cpp
+++ b/lib/CodeGen/IntrinsicLowering.cpp
@@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
- TD.getIntPtrType(Context), (Type *)0);
+ TD.getIntPtrType(Context, 0), (Type *)0);
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
- TD.getIntPtrType(Context), (Type *)0);
+ TD.getIntPtrType(Context, 0), (Type *)0);
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt32Ty(M.getContext()),
- TD.getIntPtrType(Context), (Type *)0);
+ TD.getIntPtrType(Context, 0), (Type *)0);
break;
case Intrinsic::sqrt:
EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
@@ -457,7 +457,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic
case Intrinsic::memcpy: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -468,7 +468,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -479,7 +479,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memset: {
- IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 4854cf7b26..2ddc07cc63 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -101,8 +101,7 @@ bool FastISel::hasTrivialKill(const Value *V) const {
// No-op casts are trivially coalesced by fast-isel.
if (const CastInst *Cast = dyn_cast<CastInst>(I))
- if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
- !hasTrivialKill(Cast->getOperand(0)))
+ if (Cast->isNoopCast(TD) && !hasTrivialKill(Cast->getOperand(0)))
return false;
// GEPs with all zero indices are trivially coalesced by fast-isel.
@@ -175,7 +174,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
Reg =
- getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
+ getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getType())));
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue()) {
Reg = TargetMaterializeFloatZero(CF);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 183416f3fd..d661971bb8 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3804,7 +3804,8 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
+ unsigned AS = SrcPtrInfo.getAddrSpace();
+ Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
@@ -3859,7 +3860,8 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call.
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
+ unsigned AS = SrcPtrInfo.getAddrSpace();
+ Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry);
@@ -3908,7 +3910,8 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result;
// Emit a library call.
- Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
+ unsigned AS = DstPtrInfo.getAddrSpace();
+ Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
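[Note on the hunks above] All three libcall paths (memcpy, memmove, memset) now share the same pattern, taking the address space from the MachinePointerInfo of the relevant operand so the emitted call's arguments match that space's pointer width:

    unsigned AS = DstPtrInfo.getAddrSpace();
    Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);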
diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
index b33b3c915a..99d6ec0d03 100644
--- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp
+++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp
@@ -155,7 +155,8 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
TargetLowering::ArgListEntry Entry;
// First argument: data pointer
- Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
+ unsigned AS = DstPtrInfo.getAddrSpace();
+ Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
Entry.Node = Dst;
Entry.Ty = IntPtrTy;
Args.push_back(Entry);
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index c46094569e..5ac9fa5e59 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -151,7 +151,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
// Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0);
- Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
+ Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
false/*ZExt*/);
return LowerConstant(Op, AP);
}
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index b93d50326a..62bd2ffeb7 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1498,9 +1498,10 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
bool isPPC64 = (PtrVT == MVT::i64);
+ unsigned AS = 0;
Type *IntPtrTy =
DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
- *DAG.getContext());
+ *DAG.getContext(), AS);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index 393178a469..7d3dd8f015 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -64,7 +64,7 @@ unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) {
}
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
- return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
+ return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), 0));
}
LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index dbae608352..d319d9e998 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -282,8 +282,9 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM) {
// Handle 'null' like i32/i64 0.
- if (isa<ConstantPointerNull>(Val))
- Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
+ if (isa<ConstantPointerNull>(Val)) {
+ Val = Constant::getNullValue(TD.getIntPtrType(Val->getType()));
+ }
// If this is a store of a simple constant, fold the constant into the store.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
@@ -894,8 +895,9 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
if (Op0Reg == 0) return false;
// Handle 'null' like i32/i64 0.
- if (isa<ConstantPointerNull>(Op1))
- Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
+ if (isa<ConstantPointerNull>(Op1)) {
+ Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getType()));
+ }
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp
index 723e50cc18..4adca83af1 100644
--- a/lib/Target/X86/X86SelectionDAGInfo.cpp
+++ b/lib/Target/X86/X86SelectionDAGInfo.cpp
@@ -54,7 +54,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl,
if (const char *bzeroEntry = V &&
V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
EVT IntPtr = TLI.getPointerTy();
- Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
+ unsigned AS = DstPtrInfo.getAddrSpace();
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst;
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 9e7816e21f..eaa745ba9b 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -477,7 +477,8 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
}
// Lower to a call to __misaligned_load(BasePtr).
- Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
+ unsigned AS = LD->getAddressSpace();
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
@@ -536,7 +537,8 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
}
// Lower to a call to __misaligned_store(BasePtr, Value).
- Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
+ unsigned AS = ST->getAddressSpace();
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 678189b3d6..3d5657fe6a 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1500,7 +1500,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
if (StructType *ST = dyn_cast<StructType>(FieldTy))
TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
- Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(GV->getType());
Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
ConstantInt::get(IntPtrTy, TypeSize),
NElems, 0,
@@ -1730,7 +1730,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
- Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
+ Type *IntPtrTy = TD->getIntPtrType(GV->getType());
unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 44283ddce7..1c6477c022 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -206,9 +206,8 @@ bool FunctionComparator::isEquivalentType(Type *Ty1,
return true;
if (Ty1->getTypeID() != Ty2->getTypeID()) {
if (TD) {
- LLVMContext &Ctx = Ty1->getContext();
- if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ctx)) return true;
- if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ctx)) return true;
+ if (isa<PointerType>(Ty1) && Ty2 == TD->getIntPtrType(Ty1)) return true;
+ if (isa<PointerType>(Ty2) && Ty1 == TD->getIntPtrType(Ty2)) return true;
}
return false;
}
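[Example for the hunk above] On a target where address space 0 has 32-bit pointers but another space has 64-bit pointers, a pointer in the 64-bit space now pairs with i64 rather than with the single context-wide intptr type, so merging no longer conflates pointers of different widths.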
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index 7467eca7ab..0e765f7aaa 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -208,7 +208,7 @@ private:
bool ShouldChangeType(Type *From, Type *To) const;
Value *dyn_castNegVal(Value *V) const;
Value *dyn_castFNegVal(Value *V) const;
- Type *FindElementAtOffset(Type *Ty, int64_t Offset,
+ Type *FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
SmallVectorImpl<Value*> &NewIndices);
Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 5ad6f9111c..359bc488f3 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -996,9 +996,9 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Conversion is ok if changing from one pointer type to another or from
// a pointer to an integer of the same size.
!((OldRetTy->isPointerTy() || !TD ||
- OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
+ OldRetTy == TD->getIntPtrType(NewRetTy)) &&
(NewRetTy->isPointerTy() || !TD ||
- NewRetTy == TD->getIntPtrType(Caller->getContext()))))
+ NewRetTy == TD->getIntPtrType(OldRetTy))))
return false; // Cannot transform this return value.
if (!Caller->use_empty() &&
@@ -1057,11 +1057,13 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Converting from one pointer type to another or between a pointer and an
// integer of the same size is safe even if we do not have a body.
+ // FIXME: Not sure what to do here, so setting AS to 0.
+ // How can the AS for a function call be outside the default?
bool isConvertible = ActTy == ParamTy ||
(TD && ((ParamTy->isPointerTy() ||
- ParamTy == TD->getIntPtrType(Caller->getContext())) &&
+ ParamTy == TD->getIntPtrType(ActTy)) &&
(ActTy->isPointerTy() ||
- ActTy == TD->getIntPtrType(Caller->getContext()))));
+ ActTy == TD->getIntPtrType(ParamTy))));
if (Callee->isDeclaration() && !isConvertible) return false;
}
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index f3f3f8f585..119d2f5c99 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -30,7 +30,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
Scale = 0;
return ConstantInt::get(Val->getType(), 0);
}
-
+
if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
// Cannot look past anything that might overflow.
OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
@@ -47,19 +47,19 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
Offset = 0;
return I->getOperand(0);
}
-
+
if (I->getOpcode() == Instruction::Mul) {
// This value is scaled by 'RHS'.
Scale = RHS->getZExtValue();
Offset = 0;
return I->getOperand(0);
}
-
+
if (I->getOpcode() == Instruction::Add) {
- // We have X+C. Check to see if we really have (X*C2)+C1,
+ // We have X+C. Check to see if we really have (X*C2)+C1,
// where C1 is divisible by C2.
unsigned SubScale;
- Value *SubVal =
+ Value *SubVal =
DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
Offset += RHS->getZExtValue();
Scale = SubScale;
@@ -82,7 +82,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
if (!TD) return 0;
PointerType *PTy = cast<PointerType>(CI.getType());
-
+
BuilderTy AllocaBuilder(*Builder);
AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
@@ -110,7 +110,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
uint64_t ArrayOffset;
Value *NumElements = // See if the array size is a decomposable linear expr.
DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
-
+
// If we can now satisfy the modulus, by using a non-1 scale, we really can
// do the xform.
if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
@@ -125,17 +125,17 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
// Insert before the alloca, not before the cast.
Amt = AllocaBuilder.CreateMul(Amt, NumElements);
}
-
+
if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
Offset, true);
Amt = AllocaBuilder.CreateAdd(Amt, Off);
}
-
+
AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
New->setAlignment(AI.getAlignment());
New->takeName(&AI);
-
+
// If the allocation has multiple real uses, insert a cast and change all
// things that used it to use the new cast. This will also hack on CI, but it
// will die soon.
@@ -148,10 +148,10 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
return ReplaceInstUsesWith(CI, New);
}
-/// EvaluateInDifferentType - Given an expression that
+/// EvaluateInDifferentType - Given an expression that
/// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
/// insert the code to evaluate the expression.
-Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
+Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
bool isSigned) {
if (Constant *C = dyn_cast<Constant>(V)) {
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
@@ -181,7 +181,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
break;
- }
+ }
case Instruction::Trunc:
case Instruction::ZExt:
case Instruction::SExt:
@@ -190,7 +190,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
// new.
if (I->getOperand(0)->getType() == Ty)
return I->getOperand(0);
-
+
// Otherwise, must be the same type of cast, so just reinsert a new one.
// This also handles the case of zext(trunc(x)) -> zext(x).
Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
@@ -212,11 +212,11 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
Res = NPN;
break;
}
- default:
+ default:
// TODO: Can handle more cases here.
llvm_unreachable("Unreachable!");
}
-
+
Res->takeName(I);
return InsertNewInstWith(Res, *I);
}
@@ -224,7 +224,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
-static Instruction::CastOps
+static Instruction::CastOps
isEliminableCastPair(
const CastInst *CI, ///< The first cast instruction
unsigned opcode, ///< The opcode of the second cast instruction
@@ -238,19 +238,18 @@ isEliminableCastPair(
// Get the opcodes of the two Cast instructions
Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
Instruction::CastOps secondOp = Instruction::CastOps(opcode);
-
unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
DstTy,
- TD ? TD->getIntPtrType(CI->getContext()) : 0);
-
+ TD ? TD->getIntPtrType(DstTy) : 0);
+
// We don't want to form an inttoptr or ptrtoint that converts to an integer
// type that differs from the pointer size.
if ((Res == Instruction::IntToPtr &&
- (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
+ (!TD || SrcTy != TD->getIntPtrType(DstTy))) ||
(Res == Instruction::PtrToInt &&
- (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
+ (!TD || DstTy != TD->getIntPtrType(SrcTy))))
Res = 0;
-
+
return Instruction::CastOps(Res);
}
@@ -262,18 +261,18 @@ bool InstCombiner::ShouldOptimizeCast(Instruction::CastOps opc, const Value *V,
Type *Ty) {
// Noop casts and casts of constants should be eliminated trivially.
if (V->getType() == Ty || isa<Constant>(V)) return false;
-
+
// If this is another cast that can be eliminated, we prefer to have it
// eliminated.
if (const CastInst *CI = dyn_cast<CastInst>(V))
if (isEliminableCastPair(CI, opc, Ty, TD))
return false;
-
+
// If this is a vector sext from a compare, then we don't want to break the
// idiom where each element of the extended vector is either zero or all ones.
if (opc == Instruction::SExt && isa<CmpInst>(V) && Ty->isVectorTy())
return false;
-
+
return true;
}
@@ -285,7 +284,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
// Many cases of "cast of a cast" are eliminable. If it's eliminable we just
// eliminate it now.
if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
- if (Instruction::CastOps opc =
+ if (Instruction::CastOps opc =
isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
// The first cast (CSrc) is eliminable so we need to fix up or replace
// the second cast (CI). CSrc will then have a good chance of being dead.
@@ -308,7 +307,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
if (Instruction *NV = FoldOpIntoPhi(CI))
return NV;
}
-
+
return 0;
}
@@ -327,15 +326,15 @@ static bool CanEvaluateTruncated(Value *V, Type *Ty) {
// We can always evaluate constants in another type.
if (isa<Constant>(V))
return true;
-
+
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
-
+
Type *OrigTy = V->getType();
-
+
// If this is an extension from the dest type, we can eliminate it, even if it
// has multiple uses.
- if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
+ if ((isa<ZExtInst>(I) || isa<SExtInst>(I)) &&
I->getOperand(0)->getType() == Ty)
return true;
@@ -420,29 +419,29 @@ static bool CanEvaluateTruncated(Value *V, Type *Ty) {
// TODO: Can handle more cases here.
break;
}
-
+
return false;
}
Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
if (Instruction *Result = commonCastTransforms(CI))
return Result;
-
- // See if we can simplify any instructions used by the input whose sole
+
+ // See if we can simplify any instructions used by the input whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(CI))
return &CI;
-
+
Value *Src = CI.getOperand(0);
Type *DestTy = CI.getType(), *SrcTy = Src->getType();
-
+
// Attempt to truncate the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
// expression tree to something weird like i93 unless the source is also
// strange.
if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
CanEvaluateTruncated(Src, DestTy)) {
-
+
// If this cast is a truncate, evaluating in a different type always
// eliminates the cast, so it is always a win.
DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
@@ -459,7 +458,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
Value *Zero = Constant::getNullValue(Src->getType());
return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
}
-
+
// Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
Value *A = 0; ConstantInt *Cst = 0;
if (Src->hasOneUse() &&
@@ -469,7 +468,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// ASize < MidSize and MidSize > ResultSize, but don't know the relation
// between ASize and ResultSize.
unsigned ASize = A->getType()->getPrimitiveSizeInBits();
-
+
// If the shift amount is larger than the size of A, then the result is
// known to be zero because all the input bits got shifted out.
if (Cst->getZExtValue() >= ASize)
@@ -482,7 +481,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
Shift->takeName(Src);
return CastInst::CreateIntegerCast(Shift, CI.getType(), false);
}
-
+
// Transform "trunc (and X, cst)" -> "and (trunc X), cst" so long as the dest
// type isn't non-native.
if (Src->hasOneUse() && isa<IntegerType>(Src->getType()) &&
@@ -505,7 +504,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// cast to integer to avoid the comparison.
if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
const APInt &Op1CV = Op1C->getValue();
-
+
// zext (x <s 0) to i32 --> x>>u31 true if signbit set.
// zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
@@ -535,14 +534,14 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
// zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
// zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
// zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
- if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
+ if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
// This only works for EQ and NE
ICI->isEquality()) {
// If Op1C is some other power of two, convert:
uint32_t BitWidth = Op1C->getType()->getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);
-
+
APInt KnownZeroMask(~KnownZero);
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
if (!DoXform) return ICI;
@@ -556,7 +555,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
Res = ConstantExpr::getZExt(Res, CI.getType());
return ReplaceInstUsesWith(CI, Res);
}
-
+
uint32_t ShiftAmt = KnownZeroMask.logBase2();
Value *In = ICI->getOperand(0);
if (ShiftAmt) {
@@ -565,12 +564,12 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
In->getName()+".lobit");
}
-
+
if ((Op1CV != 0) == isNE) { // Toggle the low bit.
Constant *One = ConstantInt::get(In->getType(), 1);
In = Builder->CreateXor(In, One);
}
-
+
if (CI.getType() == In->getType())
return ReplaceInstUsesWith(CI, In);
return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
@@ -643,19 +642,19 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
BitsToClear = 0;
if (isa<Constant>(V))
return true;
-
+
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
-
+
// If the input is a truncate from the destination type, we can trivially
// eliminate it.
if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
return true;
-
+
// We can't extend or shrink something that has multiple uses: doing so would
// require duplicating the instruction in general, which isn't profitable.
if (!I->hasOneUse()) return false;
-
+
unsigned Opc = I->getOpcode(), Tmp;
switch (Opc) {
case Instruction::ZExt: // zext(zext(x)) -> zext(x).
@@ -675,7 +674,7 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
// These can all be promoted if neither operand has 'bits to clear'.
if (BitsToClear == 0 && Tmp == 0)
return true;
-
+
// If the operation is an AND/OR/XOR and the bits to clear are zero in the
// other side, BitsToClear is ok.
if (Tmp == 0 &&
@@ -688,10 +687,10 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
APInt::getHighBitsSet(VSize, BitsToClear)))
return true;
}
-
+
// Otherwise, we don't know how to analyze this BitsToClear case yet.
return false;
-
+
case Instruction::LShr:
// We can promote lshr(x, cst) if we can promote x. This requires the
// ultimate 'and' to clear out the high zero bits we're clearing out though.
@@ -713,7 +712,7 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
Tmp != BitsToClear)
return false;
return true;
-
+
case Instruction::PHI: {
// We can change a phi if we can change all operands. Note that we never
// get into trouble with cyclic PHIs here because we only consider
@@ -740,44 +739,44 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// eliminated before we try to optimize this zext.
if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
return 0;
-
+
// If one of the common conversion will work, do it.
if (Instruction *Result = commonCastTransforms(CI))
return Result;
- // See if we can simplify any instructions used by the input whose sole
+ // See if we can simplify any instructions used by the input whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(CI))
return &CI;
-
+
Value *Src = CI.getOperand(0);
Type *SrcTy = Src->getType(), *DestTy = CI.getType();
-
+
// Attempt to extend the entire input expression tree to the destination
// type. Only do this if the dest type is a simple type, don't convert the
// expression tree to something weird like i93 unless the source is also
// strange.
unsigned BitsToClear;
if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
- CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
+ CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
assert(BitsToClear < SrcTy->getScalarSizeInBits() &&
"Unreasonable BitsToClear");
-
+
// Okay, we can transform this! Insert the new expression now.
DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
" to avoid zero extend: " << CI);
Value *Res = EvaluateInDifferentType(Src, DestTy, false);
assert(Res->getType() == DestTy);
-
+
uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
uint32_t DestBitSize = DestTy->getScalarSizeInBits();
-
+
// If the high bits are already filled with zeros, just replace this
// cast with the result.
if (MaskedValueIsZero(Res, APInt::getHighBitsSet(DestBitSize,
DestBitSize-SrcBitsKept)))
return ReplaceInstUsesWith(CI, Res);
-
+
// We need to emit an AND to clear the high bits.
Constant *C = ConstantInt::get(Res->getType(),
APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
@@ -789,7 +788,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
// 'and' which will be much cheaper than the pair of casts.
if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
// TODO: Subsume this into EvaluateInDifferentType.
-
+
// Get the sizes of the types involved. We know that the intermediate type
// will be smaller than A or C, but don't know the relation between A and C.
Value *A = CSrc->getOperand(0);
@@ -806,7 +805,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
return new ZExtInst(And, CI.getType());
}
-
+
if (SrcSize == DstSize) {
APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
@@ -815,7 +814,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
if (SrcSize > DstSize) {
Value *Trunc = Builder->CreateTrunc(A, CI.getType());
APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
- return BinaryOperator::CreateAnd(Trunc,
+ return BinaryOperator::CreateAnd(Trunc,
ConstantInt::get(Trunc->getType(),
AndValue));
}
@@ -873,7 +872,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
Value *New = Builder->CreateZExt(X, CI.getType());
return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
}
-
+
return 0;
}
@@ -986,14 +985,14 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
// If this is a constant, it can be trivially promoted.
if (isa<Constant>(V))
return true;
-
+
Instruction *I = dyn_cast<Instruction>(V);
if (!I) return false;
-
+
// If this is a truncate from the dest type, we can trivially eliminate it.
if (isa<TruncInst>(I) && I->getOperand(0)->getType() == Ty)
return true;
-
+
// We can't extend or shrink something that has multiple uses: doing so would
// require duplicating the instruction in general, which isn't profitable.
if (!I->hasOneUse()) return false;
@@ -1012,14 +1011,14 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
// These operators can all arbitrarily be extended if their inputs can.
return CanEvaluateSExtd(I->getOperand(0), Ty) &&
CanEvaluateSExtd(I->getOperand(1), Ty);
-
+
//case Instruction::Shl: TODO
//case Instruction::LShr: TODO
-
+
case Instruction::Select:
return CanEvaluateSExtd(I->getOperand(1), Ty) &&
CanEvaluateSExtd(I->getOperand(2), Ty);
-
+
case Instruction::PHI: {
// We can change a phi if we can change all operands. Note that we never
// get into trouble with cyclic PHIs here because we only consider
@@ -1033,7 +1032,7 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
// TODO: Can handle more cases here.
break;
}
-
+
return false;
}
@@ -1042,15 +1041,15 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// eliminated before we try to optimize this sext.
if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
return 0;
-
+
if (Instruction *I = commonCastTransforms(CI))
return I;
-
- // See if we can simplify any instructions used by the input whose sole
+
+ // See if we can simplify any instructions used by the input whose sole
// purpose is to compute bits we don't care about.
if (SimplifyDemandedInstructionBits(CI))
return &CI;
-
+
Value *Src = CI.getOperand(0);
Type *SrcTy = Src->getType(), *DestTy = CI.getType();
@@ -1073,7 +1072,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// cast with the result.
if (ComputeNumSignBits(Res) > DestBitSize - SrcBitSize)
return ReplaceInstUsesWith(CI, Res);
-
+
// We need to emit a shl + ashr to do the sign extend.
Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),
@@ -1086,7 +1085,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
if (TI->hasOneUse() && TI->getOperand(0)->getType() == DestTy) {
uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
uint32_t DestBitSize = DestTy->getScalarSizeInBits();
-
+
// We need to emit a shl + ashr to do the sign extend.
Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
Value *Res = Builder->CreateShl(TI->getOperand(0), ShAmt, "sext");
@@ -1122,7 +1121,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
A = Builder->CreateShl(A, ShAmtV, CI.getName());
return BinaryOperator::CreateAShr(A, ShAmtV);
}
-
+
return 0;
}
@@ -1144,7 +1143,7 @@ static Value *LookThroughFPExtensions(Value *V) {
if (Instruction *I = dyn_cast<Instruction>(V))
if (I->getOpcode() == Instruction::FPExt)
return LookThroughFPExtensions(I->getOperand(0));
-
+
// If this value is a constant, return the constant in the smallest FP type
// that can accurately represent it. This allows us to turn
// (float)((double)X+2.0) into X+2.0f.
@@ -1163,14 +1162,14 @@ static Value *LookThroughFPExtensions(Value *V) {
return V;
// Don't try to shrink to various long double types.
}
-
+
return V;
}
Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
if (Instruction *I = commonCastTransforms(CI))
return I;
-
+
// If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
// smaller than the destination type, we can eliminate the truncate by doing
// the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well
@@ -1187,7 +1186,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
Type *SrcTy = OpI->getType();
Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
- if (LHSTrunc->getType() != SrcTy &&
+ if (LHSTrunc->getType() != SrcTy &&
RHSTrunc->getType() != SrcTy) {
unsigned DstSize = CI.getType()->getScalarSizeInBits();
// If the source types were both smaller than the destination type of
@@ -1199,10 +1198,10 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
}
}
- break;
+ break;
}
}
-
+
// Fold (fptrunc (sqrt (fpext x))) -> (sqrtf x)
CallInst *Call = dyn_cast<CallInst>(CI.getOperand(0));
if (Call && Call->getCalledFunction() && TLI->has(LibFunc::sqrtf) &&
@@ -1217,7 +1216,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
Arg->getOperand(0)->getType()->isFloatTy()) {
Function *Callee = Call->getCalledFunction();
Module *M = CI.getParent()->getParent()->getParent();
- Constant *SqrtfFunc = M->getOrInsertFunction("sqrtf",
+ Constant *SqrtfFunc = M->getOrInsertFunction("sqrtf",
Callee->getAttributes(),
Builder->getFloatTy(),
Builder->getFloatTy(),
@@ -1225,15 +1224,15 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
CallInst *ret = CallInst::Create(SqrtfFunc, Arg->getOperand(0),
"sqrtfcall");
ret->setAttributes(Callee->getAttributes());
-
-
+
+
// Remove the old Call. With -fmath-errno, it won't get marked readnone.
ReplaceInstUsesWith(*Call, UndefValue::get(Call->getType()));
EraseInstFromFunction(*Call);
return ret;
}
}
-
+
return 0;
}
@@ -1251,7 +1250,7 @@ Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
// This is safe if the intermediate type has enough bits in its mantissa to
// accurately represent all values of X. For example, do not do this with
// i64->float->i64. This is also safe for sitofp case, because any negative
- // 'X' value would cause an undefined result for the fptoui.
+ // 'X' value would cause an undefined result for the fptoui.
if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
OpI->getOperand(0)->getType() == FI.getType() &&
(int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
@@ -1265,19 +1264,19 @@ Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
if (OpI == 0)
return commonCastTransforms(FI);
-
+
// fptosi(sitofp(X)) --> X
// fptosi(uitofp(X)) --> X
// This is safe if the intermediate type has enough bits in its mantissa to
// accurately represent all values of X. For example, do not do this with
// i64->float->i64. This is also safe for sitofp case, because any negative
- // 'X' value would cause an undefined result for the fptoui.
+ // 'X' value would cause an undefined result for the fptoui.
if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
OpI->getOperand(0)->getType() == FI.getType() &&
(int)FI.getType()->getScalarSizeInBits() <=
OpI->getType()->getFPMantissaWidth())
return ReplaceInstUsesWith(FI, OpI->getOperand(0));
-
+
return commonCastTransforms(FI);
}
@@ -1298,17 +1297,17 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreateTrunc(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()));
+ TD->getIntPtrType(CI.getType()));
return new IntToPtrInst(P, CI.getType());
}
if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreateZExt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()));
+ TD->getIntPtrType(CI.getType()));
return new IntToPtrInst(P, CI.getType());
}
}
-
+
if (Instruction *I = commonCastTransforms(CI))
return I;
@@ -1318,19 +1317,19 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
/// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
Value *Src = CI.getOperand(0);
-
+
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
// If casting the result of a getelementptr instruction with no offset, turn
// this into a cast of the original pointer!
if (GEP->hasAllZeroIndices()) {
// Changing the cast operand is usually not a good idea but it is safe
- // here because the pointer operand is being replaced with another
+ // here because the pointer operand is being replaced with another
// pointer operand so the opcode doesn't need to change.
Worklist.Add(GEP);
CI.setOperand(0, GEP->getOperand(0));
return &CI;
}
-
+
// If the GEP has a single use, and the base pointer is a bitcast, and the
// GEP computes a constant offset, see if we can convert these three
// instructions into fewer. This typically happens with unions and other
@@ -1345,7 +1344,8 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
Type *GEPIdxTy =
cast<PointerType>(OrigBase->getType())->getElementType();
SmallVector<Value*, 8> NewIndices;
- if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
+ Type *IntPtrTy = TD->getIntPtrType(OrigBase->getType());
+ if (FindElementAtOffset(GEPIdxTy, Offset, IntPtrTy, NewIndices)) {
// If we were able to index down into an element, create the GEP
// and bitcast the result. This eliminates one bitcast, potentially
// two.
@@ -1353,15 +1353,15 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
Builder->CreateInBoundsGEP(OrigBase, NewIndices) :
Builder->CreateGEP(OrigBase, NewIndices);
NGEP->takeName(GEP);
-
+
if (isa<BitCastInst>(CI))
return new BitCastInst(NGEP, CI.getType());
assert(isa<PtrToIntInst>(CI));
return new PtrToIntInst(NGEP, CI.getType());
- }
+ }
}
}
-
+
return commonCastTransforms(CI);
}
@@ -1373,16 +1373,16 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
if (TD) {
if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()));
+ TD->getIntPtrType(CI.getContext(), AS));
return new TruncInst(P, CI.getType());
}
if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
- TD->getIntPtrType(CI.getContext()));
+ TD->getIntPtrType(CI.getContext(), AS));
return new ZExtInst(P, CI.getType());
}
}
-
+
return commonPointerCastTransforms(CI);
}
@@ -1397,33 +1397,33 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
// element size, or the input is a multiple of the output element size.
// Convert the input type to have the same element type as the output.
VectorType *SrcTy = cast<VectorType>(InVal->getType());
-
+
if (SrcTy->getElementType() != DestTy->getElementType()) {
// The input types don't need to be identical, but for now they must be the
// same size. There is no specific reason we couldn't handle things like
// <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
- // there yet.
+ // there yet.
if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
DestTy->getElementType()->getPrimitiveSizeInBits())
return 0;
-
+
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
}
-
+
// Now that the element types match, get the shuffle mask and RHS of the
// shuffle to use, which depends on whether we're increasing or decreasing the
// size of the input.
SmallVector<uint32_t, 16> ShuffleMask;
Value *V2;
-
+
if (SrcTy->getNumElements() > DestTy->getNumElements()) {
// If we're shrinking the number of elements, just shuffle in the low
// elements from the input and use undef as the second shuffle input.
V2 = UndefValue::get(SrcTy);
for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
ShuffleMask.push_back(i);
-
+
} else {
// If we're increasing the number of elements, shuffle in all of the
// elements from InVal and fill the rest of the result elements with zeros
@@ -1437,7 +1437,7 @@ static Instruction *OptimizeVectorResize(Value *InVal, VectorType *DestTy,
for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
ShuffleMask.push_back(SrcElts);
}
-
+
return new ShuffleVectorInst(InVal, V2,
ConstantDataVector::get(V2->getContext(),
ShuffleMask));
@@ -1464,7 +1464,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
Type *VecEltTy) {
// Undef values never contribute useful bits to the result.
if (isa<UndefValue>(V)) return true;
-
+
// If we got down to a value of the right type, we win, try inserting into the
// right element.
if (V->getType() == VecEltTy) {
@@ -1472,15 +1472,15 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
if (Constant *C = dyn_cast<Constant>(V))
if (C->isNullValue())
return true;
-
+
// Fail if multiple elements are inserted into this slot.
if (ElementIndex >= Elements.size() || Elements[ElementIndex] != 0)
return false;
-
+
Elements[ElementIndex] = V;
return true;
}
-
+
if (Constant *C = dyn_cast<Constant>(V)) {
// Figure out the # elements this provides, and bitcast it or slice it up
// as required.
@@ -1491,7 +1491,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
if (NumElts == 1)
return CollectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
ElementIndex, Elements, VecEltTy);
-
+
// Okay, this is a constant that covers multiple elements. Slice it up into
// pieces and insert each element-sized piece into the vector.
if (!isa<IntegerType>(C->getType()))
@@ -1499,7 +1499,7 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
C->getType()->getPrimitiveSizeInBits()));
unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
-
+
for (unsigned i = 0; i != NumElts; ++i) {
Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
i*ElementSize));
@@ -1509,23 +1509,23 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
}
return true;
}
-
+
if (!V->hasOneUse()) return false;
-
+
Instruction *I = dyn_cast<Instruction>(V);
if (I == 0) return false;
switch (I->getOpcode()) {
default: return false; // Unhandled case.
case Instruction::BitCast:
return CollectInsertionElements(I->getOperand(0), ElementIndex,
- Elements, VecEltTy);
+ Elements, VecEltTy);
case Instruction::ZExt:
if (!isMultipleOfTypeSize(
I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
VecEltTy))
return false;
return CollectInsertionElements(I->getOperand(0), ElementIndex,
- Elements, VecEltTy);
+ Elements, VecEltTy);
case Instruction::Or:
return CollectInsertionElements(I->getOperand(0), ElementIndex,
Elements, VecEltTy) &&
@@ -1537,11 +1537,11 @@ static bool CollectInsertionElements(Value *V, unsigned ElementIndex,
if (CI == 0) return false;
if (!isMultipleOfTypeSize(CI->getZExtValue(), VecEltTy)) return false;
unsigned IndexShift = getTypeSizeIndex(CI->getZExtValue(), VecEltTy);
-
+
return CollectInsertionElements(I->getOperand(0), ElementIndex+IndexShift,
Elements, VecEltTy);
}
-
+
}
}
@@ -1576,11 +1576,11 @@ static Value *OptimizeIntegerToVectorInsertions(BitCastInst &CI,
Value *Result = Constant::getNullValue(CI.getType());
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
if (Elements[i] == 0) continue; // Unset element.
-
+
Result = IC.Builder->CreateInsertElement(Result, Elements[i],
IC.Builder->getInt32(i));
}
-
+
return Result;
}
@@ -1608,11 +1608,11 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
VecTy->getPrimitiveSizeInBits() / DestWidth);
VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
}
-
+
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(0));
}
}
-
+
// bitcast(trunc(lshr(bitcast(somevector), cst))
ConstantInt *ShAmt = 0;
if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
@@ -1629,7 +1629,7 @@ static Instruction *OptimizeIntToFloatBitCast(BitCastInst &CI,InstCombiner &IC){
VecTy->getPrimitiveSizeInBits() / DestWidth);
VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
}
-
+
unsigned Elt = ShAmt->getZExtValue() / DestWidth;
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
}
@@ -1653,12 +1653,12 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
PointerType *SrcPTy = cast<PointerType>(SrcTy);
Type *DstElTy = DstPTy->getElementType();
Type *SrcElTy = SrcPTy->getElementType();
-
+
// If the address spaces don't match, don't eliminate the bitcast, which is
// required for changing types.
if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
return 0;
-
+
// If we are casting an alloca to a pointer to a type of the same
// size, rewrite the allocation instruction to allocate the "right" type.
// There is no need to modify malloc calls because it is their bitcast that
@@ -1666,14 +1666,14 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
return V;
-
+
// If the source and destination are pointers, and this cast is equivalent
// to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
// This can enhance SROA and other transforms that want type-safe pointers.
Constant *ZeroUInt =
Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
unsigned NumZeros = 0;
- while (SrcElTy != DstElTy &&
+ while (SrcElTy != DstElTy &&
isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
SrcElTy->getNumContainedTypes() /* not "{}" */) {
SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
@@ -1686,7 +1686,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
return GetElementPtrInst::CreateInBounds(Src, Idxs);
}
}
-
+
// Try to optimize int -> float bitcasts.
if ((DestTy->isFloatTy() || DestTy->isDoubleTy()) && isa<IntegerType>(SrcTy))
if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
@@ -1699,7 +1699,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
// FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
}
-
+
if (isa<IntegerType>(SrcTy)) {
// If this is a cast from an integer to vector, check to see if the input
// is a trunc or zext of a bitcast from vector. If so, we can replace all
@@ -1712,7 +1712,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
cast<VectorType>(DestTy), *this))
return I;
}
-
+
// If the input is an 'or' instruction, we may be doing shifts and ors to
// assemble the elements of the vector manually. Try to rip the code out
// and replace it with insertelements.
@@ -1723,7 +1723,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
- Value *Elem =
+ Value *Elem =
Builder->CreateExtractElement(Src,
Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
return CastInst::Create(Instruction::BitCast, Elem, DestTy);
@@ -1733,7 +1733,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
// Okay, we have (bitcast (shuffle ..)). Check to see if this is
// a bitcast to a vector with the same # elts.
- if (SVI->hasOneUse() && DestTy->isVectorTy() &&
+ if (SVI->hasOneUse() && DestTy->isVectorTy() &&
cast<VectorType>(DestTy)->getNumElements() ==
SVI->getType()->getNumElements() &&
SVI->getType()->getNumElements() ==
@@ -1742,9 +1742,9 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If either of the operands is a cast from CI.getType(), then
// evaluating the shuffle in the casted destination's type will allow
// us to eliminate at least one cast.
- if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
+ if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
Tmp->getOperand(0)->getType() == DestTy) ||
- ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
+ ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
Tmp->getOperand(0)->getType() == DestTy)) {
Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
@@ -1754,7 +1754,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
}
}
}
-
+
if (SrcTy->isPointerTy())
return commonPointerCastTransforms(CI);
return commonCastTransforms(CI);
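// ---- Editor's sketch (illustrative, not part of the patch) ----
// isEliminableCastPair and visitIntToPtr/visitPtrToInt above all enforce one
// rule: an inttoptr or ptrtoint only survives folding when the integer side is
// exactly as wide as a pointer in the address space involved. A minimal
// predicate, assuming the Type*-taking getIntPtrType described earlier:
#include "llvm/DataLayout.h"
#include "llvm/Type.h"
using namespace llvm;

static bool intMatchesPtrWidth(const DataLayout &TD, Type *IntTy, Type *PtrTy) {
  // With 64-bit pointers in addrspace(1), only i64 passes here, even if the
  // default address space uses 32-bit pointers.
  return IntTy == TD.getIntPtrType(PtrTy);
}
// ----------------------------------------------------------------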
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index e3e5ddae80..057496d210 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -371,7 +371,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds() &&
Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
- Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
+ Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext(), AS));
// If the comparison is only true for one or two elements, emit direct
// comparisons.
@@ -539,7 +539,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
// we don't need to bother extending: the extension won't affect where the
// computation crosses zero.
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
- Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
}
return VariableIdx;
@@ -561,7 +561,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
return 0;
// Okay, we can do this evaluation. Start by converting the index to intptr.
- Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
+ Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
if (VariableIdx->getType() != IntPtrTy)
VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
true /*Signed*/);
@@ -2251,7 +2251,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
case Instruction::IntToPtr:
// icmp pred inttoptr(X), null -> icmp pred X, 0
if (RHSC->isNullValue() && TD &&
- TD->getIntPtrType(RHSC->getContext()) ==
+ TD->getIntPtrType(LHSI->getType()) ==
LHSI->getOperand(0)->getType())
return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
Constant::getNullValue(LHSI->getOperand(0)->getType()));
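// ---- Editor's sketch (illustrative, not part of the patch) ----
// The compare hunks use a two-argument getIntPtrType(Context, AS), assumed to
// be the overload for when only an address-space number is in scope rather
// than a pointer type. Hypothetical fragment showing when each form fits:
//   Type *A = TD.getIntPtrType(PtrVal->getType());     // have a pointer type
//   Type *B = TD.getIntPtrType(Ctx, /*AddrSpace=*/AS); // have only the AS
// ----------------------------------------------------------------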
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 4ab5b6e4a0..633ad93ad9 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -173,7 +173,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Ensure that the alloca array size argument has type intptr_t, so that
// any casting is exposed early.
if (TD) {
- Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = Builder->CreateIntCast(AI.getArraySize(),
IntPtrTy, false);
@@ -185,7 +185,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (AI.isArrayAllocation()) { // Check C != 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
- Type *NewTy =
+ Type *NewTy =
ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
New->setAlignment(AI.getAlignment());
@@ -311,7 +311,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
Type *SrcPTy = SrcTy->getElementType();
- if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
+ if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
DestPTy->isVectorTy()) {
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
@@ -328,7 +328,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
}
if (IC.getDataLayout() &&
- (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
+ (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
SrcPTy->isVectorTy()) &&
// Do not allow turning this into a load of an integer, which is then
// cast to a pointer; this pessimizes pointer analysis a lot.
@@ -339,7 +339,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
// Okay, we are casting from one integer or pointer type to another of
// the same size. Instead of casting the pointer before the load, cast
// the result of the loaded value.
- LoadInst *NewLoad =
+ LoadInst *NewLoad =
IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
NewLoad->setAlignment(LI.getAlignment());
NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
@@ -376,7 +376,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
// None of the following transforms are legal for volatile/atomic loads.
// FIXME: Some of it is okay for atomic loads; needs refactoring.
if (!LI.isSimple()) return 0;
-
+
// Do really simple store-to-load forwarding and load CSE, to catch cases
// where there are several consecutive memory accesses to the same location,
// separated by a few arithmetic operations.
@@ -397,7 +397,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
Constant::getNullValue(Op->getType()), &LI);
return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
}
- }
+ }
// load null/undef -> unreachable
// TODO: Consider a target hook for valid address spaces for this xform.
@@ -416,7 +416,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (CE->isCast())
if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
return Res;
-
+
if (Op->hasOneUse()) {
// Change select and PHI nodes to select values instead of addresses: this
// helps alias analysis out a lot, allows many others simplifications, and
@@ -470,18 +470,18 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
if (SrcTy == 0) return 0;
-
+
Type *SrcPTy = SrcTy->getElementType();
if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
return 0;
-
+
/// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
/// to its first element. This allows us to handle things like:
/// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
/// on 32-bit hosts.
SmallVector<Value*, 4> NewGEPIndices;
-
+
// If the source is an array, the code below will not succeed. Check to
// see if a trivial 'gep P, 0, 0' will help matters. Only do this for
// constants.
@@ -489,7 +489,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
// Index through pointer.
Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
NewGEPIndices.push_back(Zero);
-
+
while (1) {
if (StructType *STy = dyn_cast<StructType>(SrcPTy)) {
if (!STy->getNumElements()) /* Struct can be empty {} */
@@ -503,24 +503,23 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
break;
}
}
-
+
SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
}
if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
return 0;
-
+
// If the pointers point into different address spaces or if they point to
// values with different sizes, we can't do the transformation.
if (!IC.getDataLayout() ||
- SrcTy->getAddressSpace() !=
- cast<PointerType>(CI->getType())->getAddressSpace() ||
+ SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace() ||
IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
IC.getDataLayout()->getTypeSizeInBits(DestPTy))
return 0;
// Okay, we are casting from one integer or pointer type to another of
- // the same size. Instead of casting the pointer before
+ // the same size. Instead of casting the pointer before
// the store, cast the value to be stored.
Value *NewCast;
Value *SIOp0 = SI.getOperand(0);
@@ -534,12 +533,12 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
if (SIOp0->getType()->isPointerTy())
opcode = Instruction::PtrToInt;
}
-
+
// SIOp0 is a pointer to aggregate and this is a store to the first field,
// emit a GEP to index into its first field.
if (!NewGEPIndices.empty())
CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);
-
+
NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
SIOp0->getName()+".c");
SI.setOperand(0, NewCast);
@@ -558,7 +557,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
static bool equivalentAddressValues(Value *A, Value *B) {
// Test if the values are trivially equivalent.
if (A == B) return true;
-
+
// Test if the values come from identical arithmetic instructions.
// This uses isIdenticalToWhenDefined instead of isIdenticalTo because
// it's only used to compare two uses within the same basic block, which
@@ -571,7 +570,7 @@ static bool equivalentAddressValues(Value *A, Value *B) {
if (Instruction *BI = dyn_cast<Instruction>(B))
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
return true;
-
+
// Otherwise they may not be equivalent.
return false;
}
@@ -602,7 +601,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
// If the RHS is an alloca with a single use, zapify the store, making the
// alloca dead.
if (Ptr->hasOneUse()) {
- if (isa<AllocaInst>(Ptr))
+ if (isa<AllocaInst>(Ptr))
return EraseInstFromFunction(SI);
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
if (isa<AllocaInst>(GEP->getOperand(0))) {
@@ -625,8 +624,8 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
(isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
ScanInsts++;
continue;
- }
-
+ }
+
if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
// Prev store isn't volatile, and stores to the same location?
if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
@@ -638,7 +637,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
}
break;
}
-
+
// If this is a load, we have to stop. However, if the loaded value is from
// the pointer we're loading and is producing the pointer we're storing,
// then *this* store is dead (X = load P; store X -> P).
@@ -646,12 +645,12 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
LI->isSimple())
return EraseInstFromFunction(SI);
-
+
// Otherwise, this is a load from some other location. Stores before it
// may not be dead.
break;
}
-
+
// Don't skip over loads or things that can modify memory.
if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
break;
@@ -681,11 +680,11 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (Instruction *Res = InstCombineStoreToCast(*this, SI))
return Res;
-
+
// If this store is the last instruction in the basic block (possibly
// excepting debug info instructions), and if the block ends with an
// unconditional branch, try to move it to the successor block.
- BBI = &SI;
+ BBI = &SI;
do {
++BBI;
} while (isa<DbgInfoIntrinsic>(BBI) ||
@@ -694,7 +693,7 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
if (BI->isUnconditional())
if (SimplifyStoreAtEndOfBlock(SI))
return 0; // xform done!
-
+
return 0;
}
@@ -708,12 +707,12 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
BasicBlock *StoreBB = SI.getParent();
-
+
// Check to see if the successor block has exactly two incoming edges. If
// so, see if the other predecessor contains a store to the same location.
// If so, insert a PHI node (if needed) and move the stores down.
BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
-
+
// Determine whether Dest has exactly two predecessors and, if so, compute
// the other predecessor.
pred_iterator PI = pred_begin(DestBB);
@@ -725,7 +724,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
if (++PI == pred_end(DestBB))
return false;
-
+
P = *PI;
if (P != StoreBB) {
if (OtherBB)
@@ -745,7 +744,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
if (!OtherBr || BBI == OtherBB->begin())
return false;
-
+
// If the other block ends in an unconditional branch, check for the 'if then
// else' case. There is an instruction before the branch.
StoreInst *OtherStore = 0;
@@ -767,10 +766,10 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
} else {
// Otherwise, the other block ended with a conditional branch. If one of the
// destinations is StoreBB, then we have the if/then case.
- if (OtherBr->getSuccessor(0) != StoreBB &&
+ if (OtherBr->getSuccessor(0) != StoreBB &&
OtherBr->getSuccessor(1) != StoreBB)
return false;
-
+
// Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
// if/then triangle. See if there is a store to the same ptr as SI that
// lives in OtherBB.
@@ -788,7 +787,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
BBI == OtherBB->begin())
return false;
}
-
+
// In order to eliminate the store in OtherBB, we have to
// make sure nothing reads or overwrites the stored value in
// StoreBB.
@@ -798,7 +797,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
return false;
}
}
-
+
// Insert a PHI node now if we need it.
Value *MergedVal = OtherStore->getOperand(0);
if (MergedVal != SI.getOperand(0)) {
@@ -807,7 +806,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
PN->addIncoming(OtherStore->getOperand(0), OtherBB);
MergedVal = InsertNewInstBefore(PN, DestBB->front());
}
-
+
// Advance to a place where it is safe to insert the new store and
// insert it.
BBI = DestBB->getFirstInsertionPt();
@@ -817,7 +816,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
SI.getOrdering(),
SI.getSynchScope());
InsertNewInstBefore(NewSI, *BBI);
- NewSI->setDebugLoc(OtherStore->getDebugLoc());
+ NewSI->setDebugLoc(OtherStore->getDebugLoc());
// Nuke the old stores.
EraseInstFromFunction(SI);
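// ---- Editor's sketch (illustrative, not part of the patch) ----
// Two ideas recur in this file's hunks: visitAllocaInst derives the array-size
// index type from the alloca's own pointer type, and the store-cast check uses
// Type::getPointerAddressSpace() rather than casting to PointerType by hand.
// A small helper under those assumptions (name is hypothetical):
#include "llvm/Type.h"
using namespace llvm;

static bool sameAddressSpace(Type *SrcPtrTy, Type *DstPtrTy) {
  // Equivalent to comparing cast<PointerType>(T)->getAddressSpace() values,
  // as the old code in InstCombineStoreToCast did.
  return SrcPtrTy->getPointerAddressSpace() ==
         DstPtrTy->getPointerAddressSpace();
}
// ----------------------------------------------------------------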
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 7f8c3ae558..00b7fca681 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -738,7 +738,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
/// or not there is a sequence of GEP indices into the type that will land us at
/// the specified offset. If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
-Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
+Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
SmallVectorImpl<Value*> &NewIndices) {
if (!TD) return 0;
if (!Ty->isSized()) return 0;
@@ -746,7 +746,6 @@ Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
// Start with the index over the outer type. Note that the type size
// might be zero (even if the offset isn't zero) if the indexed type
// is something like [0 x {int, int}]
- Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
int64_t FirstIdx = 0;
if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
FirstIdx = Offset/TySize;
@@ -1055,7 +1054,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// by multiples of a zero size type with zero.
if (TD) {
bool MadeChange = false;
- Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
+ Type *IntPtrTy = TD->getIntPtrType(PtrOp->getType());
gep_type_iterator GTI = gep_type_begin(GEP);
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
@@ -1240,7 +1239,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
+ assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@@ -1275,7 +1274,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// Earlier transforms ensure that the index has type IntPtrType, which
// considerably simplifies the logic by eliminating implicit casts.
- assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
+ assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
"Index not cast to pointer width?");
bool NSW;
@@ -1337,7 +1336,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> NewIndices;
Type *InTy =
cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
- if (FindElementAtOffset(InTy, Offset, NewIndices)) {
+ Type *IntPtrTy = TD->getIntPtrType(BCI->getOperand(0)->getType());
+ if (FindElementAtOffset(InTy, Offset, IntPtrTy, NewIndices)) {
Value *NGEP = GEP.isInBounds() ?
Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
Builder->CreateGEP(BCI->getOperand(0), NewIndices);
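// ---- Editor's sketch (illustrative, not part of the patch) ----
// FindElementAtOffset no longer picks the index width itself; each caller
// passes the intptr type of the pointer being indexed, as both call sites
// above now do. Hypothetical calling pattern (Base and ElemTy are stand-ins):
//   Type *IntPtrTy = TD->getIntPtrType(Base->getType());
//   SmallVector<Value*, 8> NewIndices;
//   if (FindElementAtOffset(ElemTy, Offset, IntPtrTy, NewIndices))
//     /* build the GEP: every index already has the right width */;
// ----------------------------------------------------------------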
diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 976b963046..dd36a00070 100644
--- a/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -143,7 +143,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
Value *Offset = SizeOffset.second;
ConstantInt *SizeCI = dyn_cast<ConstantInt>(Size);
- IntegerType *IntTy = TD->getIntPtrType(Inst->getContext());
+ IntegerType *IntTy = TD->getIntPtrType(Ptr->getType());
Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize);
// three checks are required to ensure safety:
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 2b42c75d14..74e310f7e7 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -935,7 +935,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
Type *IntPtrTy =
- TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
+ TLI->getDataLayout()->getIntPtrType(Addr->getType());
Value *Result = 0;
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index eb0da20abb..9271388baa 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -774,13 +774,13 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be bitcast.
if (StoredValTy->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
+ StoredValTy = TD.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
Type *TypeToCastTo = LoadedTy;
if (TypeToCastTo->isPointerTy())
- TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
+ TypeToCastTo = TD.getIntPtrType(StoredValTy);
if (StoredValTy != TypeToCastTo)
StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);
@@ -799,7 +799,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
// Convert source pointers to integers, which can be manipulated.
if (StoredValTy->isPointerTy()) {
- StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
+ StoredValTy = TD.getIntPtrType(StoredValTy);
StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
}
@@ -1020,7 +1020,8 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
// Compute which bits of the stored value are being used by the load. Convert
// to an integer type to start with.
if (SrcVal->getType()->isPointerTy())
- SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx));
+ SrcVal = Builder.CreatePtrToInt(SrcVal,
+ TD.getIntPtrType(SrcVal->getType()));
if (!SrcVal->getType()->isIntegerTy())
SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));
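// ---- Editor's sketch (illustrative, not part of the patch) ----
// GVN's coercion helpers first turn a stored pointer into an integer of its
// own pointer width; getIntPtrType(StoredValTy) keeps that width tied to the
// pointer's address space. Self-contained helper (name is hypothetical):
#include "llvm/DataLayout.h"
#include "llvm/Instructions.h"
using namespace llvm;

static Value *ptrToSizedInt(Value *V, const DataLayout &TD,
                            Instruction *InsertPt) {
  // For a pointer in addrspace(N), IntTy is iM where M is AS N's pointer width.
  Type *IntTy = TD.getIntPtrType(V->getType());
  return new PtrToIntInst(V, IntTy, "", InsertPt);
}
// ----------------------------------------------------------------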
diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp
index 82eb746467..8a2f093629 100644
--- a/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -1430,7 +1430,8 @@ FindLoopCounter(Loop *L, const SCEV *BECount,
/// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
/// holds the RHS of the new loop test.
static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
- SCEVExpander &Rewriter, ScalarEvolution *SE) {
+ SCEVExpander &Rewriter, ScalarEvolution *SE,
+ Type *IntPtrTy) {
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(IndVar));
assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
const SCEV *IVInit = AR->getStart();
@@ -1456,7 +1457,8 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
// We could handle pointer IVs other than i8*, but we need to compensate for
// gep index scaling. See canExpandBackedgeTakenCount comments.
assert(SE->getSizeOfExpr(
- cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
+ cast<PointerType>(GEPBase->getType())->getElementType(),
+ IntPtrTy)->isOne()
&& "unit stride pointer IV must be i8*");
IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
@@ -1555,7 +1557,9 @@ LinearFunctionTestReplace(Loop *L,
CmpIndVar = IndVar;
}
- Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
+ Type *IntPtrTy = TD ? TD->getIntPtrType(IndVar->getType()) :
+ IntegerType::getInt64Ty(IndVar->getContext());
+ Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE, IntPtrTy);
assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
&& "genLoopLimit missed a cast");
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index a44e798f12..e4b40f3d3a 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -486,7 +486,9 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// would be unsafe to do if there is anything else in the loop that may read
// or write to the aliased location. Check for any overlap by generating the
// base pointer and checking the region.
- unsigned AddrSpace = cast<PointerType>(DestPtr->getType())->getAddressSpace();
+ assert(DestPtr->getType()->isPointerTy()
+ && "Must be a pointer type.");
+ unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
Value *BasePtr =
Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
Preheader->getTerminator());
@@ -505,7 +507,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
+ Type *IntPtr = TD->getIntPtrType(DestPtr->getType());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
@@ -611,7 +613,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtr = TD->getIntPtrType(SI->getContext());
+  Type *IntPtr = TD->getIntPtrType(SI->getPointerOperand()->getType());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
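
The two hunks above change the same computation. As a hedged sketch (helper name invented; SE, TD, and BECount come from the surrounding pass), the byte count (BECount + 1) * StoreSize is now built in the integer type matching the destination pointer's address space:

  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/DataLayout.h"
  using namespace llvm;

  // Compute the number of stored bytes in the intptr type of DestPtr's
  // address space, mirroring the expansion done by the pass.
  static const SCEV *getNumStoredBytes(ScalarEvolution *SE,
                                       const DataLayout *TD,
                                       const SCEV *BECount, Value *DestPtr,
                                       unsigned StoreSize) {
    Type *IntPtr = TD->getIntPtrType(DestPtr->getType());
    const SCEV *Count = SE->getTruncateOrZeroExtend(BECount, IntPtr);
    const SCEV *NumBytes =
        SE->getAddExpr(Count, SE->getConstant(IntPtr, 1), SCEV::FlagNUW);
    if (StoreSize != 1)
      NumBytes = SE->getMulExpr(NumBytes, SE->getConstant(IntPtr, StoreSize),
                                SCEV::FlagNUW);
    return NumBytes;
  }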
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index a46d09c320..a5446294e3 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -963,7 +963,7 @@ ConvertScalar_InsertValue(Value *SV, Value *Old,
if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
else if (SV->getType()->isPointerTy())
- SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()));
+ SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
// Zero extend or truncate the value if needed.
if (SV->getType() != AllocaType) {
diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
index c82a00fc2c..f3448bcd87 100644
--- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp
@@ -165,9 +165,10 @@ struct StpCpyOpt: public LibCallOptimization {
uint64_t Len = GetStringLength(Src);
if (Len == 0) return 0;
- Value *LenV = ConstantInt::get(TD->getIntPtrType(*Context), Len);
+ Type *PT = FT->getParamType(0);
+ Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len);
Value *DstEnd = B.CreateGEP(Dst,
- ConstantInt::get(TD->getIntPtrType(*Context),
+ ConstantInt::get(TD->getIntPtrType(PT),
Len - 1));
// We have enough information to now generate the memcpy call to do the
@@ -220,9 +221,10 @@ struct StrNCpyOpt : public LibCallOptimization {
// Let strncpy handle the zero padding
if (Len > SrcLen+1) return 0;
+ Type *PT = FT->getParamType(0);
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
+ ConstantInt::get(TD->getIntPtrType(PT), Len), 1);
return Dst;
}
@@ -508,10 +510,11 @@ struct MemCpyOpt : public LibCallOptimization {
if (!TD) return 0;
FunctionType *FT = Callee->getFunctionType();
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT))
return 0;
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
@@ -530,10 +533,11 @@ struct MemMoveOpt : public LibCallOptimization {
if (!TD) return 0;
FunctionType *FT = Callee->getFunctionType();
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT))
return 0;
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
@@ -552,10 +556,11 @@ struct MemSetOpt : public LibCallOptimization {
if (!TD) return 0;
FunctionType *FT = Callee->getFunctionType();
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(*Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT))
return 0;
// memset(p, v, n) -> llvm.memset(p, v, n, 1)
@@ -980,8 +985,9 @@ struct SPrintFOpt : public LibCallOptimization {
if (!TD) return 0;
// sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
+ Type *AT = CI->getArgOperand(0)->getType();
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
+ ConstantInt::get(TD->getIntPtrType(AT), // Copy the
FormatStr.size() + 1), 1); // nul byte.
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1108,8 +1114,9 @@ struct FPutsOpt : public LibCallOptimization {
uint64_t Len = GetStringLength(CI->getArgOperand(0));
if (!Len) return 0;
// Known to have no uses (see above).
+ Type *PT = FT->getParamType(0);
return EmitFWrite(CI->getArgOperand(0),
- ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
+ ConstantInt::get(TD->getIntPtrType(PT), Len-1),
CI->getArgOperand(1), B, TD, TLI);
}
};
@@ -1134,8 +1141,9 @@ struct FPrintFOpt : public LibCallOptimization {
// These optimizations require DataLayout.
if (!TD) return 0;
+ Type *AT = CI->getArgOperand(1)->getType();
Value *NewCI = EmitFWrite(CI->getArgOperand(1),
- ConstantInt::get(TD->getIntPtrType(*Context),
+ ConstantInt::get(TD->getIntPtrType(AT),
FormatStr.size()),
CI->getArgOperand(0), B, TD, TLI);
return NewCI ? ConstantInt::get(CI->getType(), FormatStr.size()) : 0;
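
Every hunk in this file swaps the same one-liner; distilled as a hedged sketch (helper name invented):

  #include "llvm/Constants.h"
  #include "llvm/DataLayout.h"
  using namespace llvm;

  // Length constants handed to the rewritten libcalls are sized to the
  // intptr type of the relevant pointer argument's address space.
  static Constant *getLenConstant(const DataLayout *TD, Type *PtrArgTy,
                                  uint64_t Len) {
    return ConstantInt::get(TD->getIntPtrType(PtrArgTy), Len);
  }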
diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp
index fa2faa2dad..bd28f10654 100644
--- a/lib/Transforms/Utils/BuildLibCalls.cpp
+++ b/lib/Transforms/Utils/BuildLibCalls.cpp
@@ -46,9 +46,8 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
ArrayRef<Attributes::AttrVal>(AVs, 2));
- LLVMContext &Context = B.GetInsertBlock()->getContext();
Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(Ptr->getType()),
B.getInt8PtrTy(),
NULL);
CallInst *CI = B.CreateCall(StrLen, CastToCStr(Ptr, B), "strlen");
@@ -73,11 +72,10 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
ArrayRef<Attributes::AttrVal>(AVs, 2));
- LLVMContext &Context = B.GetInsertBlock()->getContext();
Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(Ptr->getType()),
B.getInt8PtrTy(),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(Ptr->getType()),
NULL);
CallInst *CI = B.CreateCall2(StrNLen, CastToCStr(Ptr, B), MaxLen, "strnlen");
if (const Function *F = dyn_cast<Function>(StrNLen->stripPointerCasts()))
@@ -126,12 +124,12 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len,
AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
ArrayRef<Attributes::AttrVal>(AVs, 2));
- LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
- TD->getIntPtrType(Context), NULL);
+ TD->getIntPtrType(Ptr1->getType()),
+ NULL);
CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
CastToCStr(Ptr2, B), Len, "strncmp");
@@ -201,14 +199,14 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
AttributeWithIndex AWI;
AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
Attributes::NoUnwind);
- LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
AttrListPtr::get(AWI),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
- TD->getIntPtrType(Context),
- TD->getIntPtrType(Context), NULL);
+ TD->getIntPtrType(Dst->getType()),
+ TD->getIntPtrType(Src->getType()),
+ NULL);
Dst = CastToCStr(Dst, B);
Src = CastToCStr(Src, B);
CallInst *CI = B.CreateCall4(MemCpy, Dst, Src, Len, ObjSize);
@@ -230,12 +228,11 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val,
Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
ArrayRef<Attributes::AttrVal>(AVs, 2));
- LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
B.getInt32Ty(),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(Ptr->getType()),
NULL);
CallInst *CI = B.CreateCall3(MemChr, CastToCStr(Ptr, B), Val, Len, "memchr");
@@ -260,12 +257,12 @@ Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2,
AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
ArrayRef<Attributes::AttrVal>(AVs, 2));
- LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI),
B.getInt32Ty(),
B.getInt8PtrTy(),
B.getInt8PtrTy(),
- TD->getIntPtrType(Context), NULL);
+ TD->getIntPtrType(Ptr1->getType()),
+ NULL);
CallInst *CI = B.CreateCall3(MemCmp, CastToCStr(Ptr1, B), CastToCStr(Ptr2, B),
Len, "memcmp");
@@ -425,24 +422,24 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File,
AWI[1] = AttributeWithIndex::get(M->getContext(), 4, Attributes::NoCapture);
AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
Attributes::NoUnwind);
- LLVMContext &Context = B.GetInsertBlock()->getContext();
StringRef FWriteName = TLI->getName(LibFunc::fwrite);
Constant *F;
+ Type *PtrTy = Ptr->getType();
if (File->getType()->isPointerTy())
F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(PtrTy),
B.getInt8PtrTy(),
- TD->getIntPtrType(Context),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(PtrTy),
+ TD->getIntPtrType(PtrTy),
File->getType(), NULL);
else
- F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(Context),
+ F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(PtrTy),
B.getInt8PtrTy(),
- TD->getIntPtrType(Context),
- TD->getIntPtrType(Context),
+ TD->getIntPtrType(PtrTy),
+ TD->getIntPtrType(PtrTy),
File->getType(), NULL);
CallInst *CI = B.CreateCall4(F, CastToCStr(Ptr, B), Size,
- ConstantInt::get(TD->getIntPtrType(Context), 1), File);
+ ConstantInt::get(TD->getIntPtrType(PtrTy), 1), File);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
@@ -464,12 +461,13 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
IRBuilder<> B(CI);
if (Name == "__memcpy_chk") {
+ Type *PT = FT->getParamType(0);
// Check if this has the right signature.
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT) ||
+ FT->getParamType(3) != TD->getIntPtrType(PT))
return false;
if (isFoldable(3, 2, false)) {
@@ -488,11 +486,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
if (Name == "__memmove_chk") {
// Check if this has the right signature.
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT) ||
+ FT->getParamType(3) != TD->getIntPtrType(PT))
return false;
if (isFoldable(3, 2, false)) {
@@ -506,11 +505,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
if (Name == "__memset_chk") {
// Check if this has the right signature.
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT) ||
+ FT->getParamType(3) != TD->getIntPtrType(PT))
return false;
if (isFoldable(3, 2, false)) {
@@ -525,11 +525,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
if (Name == "__strcpy_chk" || Name == "__stpcpy_chk") {
// Check if this has the right signature.
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 3 ||
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
- FT->getParamType(2) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(PT))
return 0;
@@ -551,11 +552,12 @@ bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD,
if (Name == "__strncpy_chk" || Name == "__stpncpy_chk") {
// Check if this has the right signature.
+ Type *PT = FT->getParamType(0);
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
!FT->getParamType(2)->isIntegerTy() ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(3) != TD->getIntPtrType(PT))
return false;
if (isFoldable(3, 2, false)) {
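
The pattern repeated throughout this file is easiest to see in isolation. A hedged sketch of strlen emission (attribute setup and the CastToCStr call are omitted; the helper name is invented, and Ptr is assumed to already be an i8* in the right address space):

  #include "llvm/DataLayout.h"
  #include "llvm/IRBuilder.h"
  #include "llvm/Module.h"
  using namespace llvm;

  // Declare strlen so that its return type is the intptr type of the
  // string pointer's address space, then call it.
  static Value *emitStrLenSketch(Value *Ptr, Module *M, IRBuilder<> &B,
                                 const DataLayout *TD) {
    Constant *StrLen =
        M->getOrInsertFunction("strlen", TD->getIntPtrType(Ptr->getType()),
                               B.getInt8PtrTy(), NULL);
    return B.CreateCall(StrLen, Ptr, "strlen");
  }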
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index a008da67e9..870e2b2ade 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -392,7 +392,7 @@ static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) {
// This is some kind of pointer constant. Turn it into a pointer-sized
// ConstantInt if possible.
- IntegerType *PtrTy = TD->getIntPtrType(V->getContext());
+ IntegerType *PtrTy = TD->getIntPtrType(V->getType());
// Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*).
if (isa<ConstantPointerNull>(V))
@@ -532,9 +532,13 @@ Value *SimplifyCFGOpt::isValueEqualityComparison(TerminatorInst *TI) {
CV = ICI->getOperand(0);
// Unwrap any lossless ptrtoint cast.
- if (TD && CV && CV->getType() == TD->getIntPtrType(CV->getContext()))
- if (PtrToIntInst *PTII = dyn_cast<PtrToIntInst>(CV))
+ if (TD && CV) {
+ PtrToIntInst *PTII = NULL;
+ if ((PTII = dyn_cast<PtrToIntInst>(CV)) &&
+ CV->getType() == TD->getIntPtrType(CV->getContext(),
+ PTII->getPointerAddressSpace()))
CV = PTII->getOperand(0);
+ }
return CV;
}
@@ -981,7 +985,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI,
// Convert pointer to int before we switch.
if (CV->getType()->isPointerTy()) {
assert(TD && "Cannot switch on pointer without DataLayout");
- CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()),
+ CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
"magicptr");
}
@@ -2709,7 +2713,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD,
if (CompVal->getType()->isPointerTy()) {
assert(TD && "Cannot switch on pointer without DataLayout");
CompVal = Builder.CreatePtrToInt(CompVal,
- TD->getIntPtrType(CompVal->getContext()),
+ TD->getIntPtrType(CompVal->getType()),
"magicptr");
}
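
A hedged sketch of the conversion the last two hunks touch (names invented): the pointer switch condition is converted with a width taken from its own address space rather than the module default.

  #include <cassert>
  #include "llvm/DataLayout.h"
  #include "llvm/IRBuilder.h"
  using namespace llvm;

  // Convert a pointer switch condition to an integer of the pointer's own
  // address-space width so the switch can be built over integers.
  static Value *pointerToSwitchableInt(IRBuilder<> &B, const DataLayout *TD,
                                       Value *CV) {
    assert(CV->getType()->isPointerTy() && "expected a pointer condition");
    return B.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()), "magicptr");
  }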
diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp
index b15acdff63..162b29e829 100644
--- a/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -102,14 +102,13 @@ struct MemCpyChkOpt : public InstFortifiedLibCallOptimization {
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
this->CI = CI;
FunctionType *FT = Callee->getFunctionType();
- LLVMContext &Context = CI->getParent()->getContext();
// Check if this has the right signature.
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
+ FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(1)))
return 0;
if (isFoldable(3, 2, false)) {
@@ -125,14 +124,13 @@ struct MemMoveChkOpt : public InstFortifiedLibCallOptimization {
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
this->CI = CI;
FunctionType *FT = Callee->getFunctionType();
- LLVMContext &Context = CI->getParent()->getContext();
// Check if this has the right signature.
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isPointerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
+ FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(1)))
return 0;
if (isFoldable(3, 2, false)) {
@@ -148,14 +146,13 @@ struct MemSetChkOpt : public InstFortifiedLibCallOptimization {
virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) {
this->CI = CI;
FunctionType *FT = Callee->getFunctionType();
- LLVMContext &Context = CI->getParent()->getContext();
// Check if this has the right signature.
if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
!FT->getParamType(0)->isPointerTy() ||
!FT->getParamType(1)->isIntegerTy() ||
- FT->getParamType(2) != TD->getIntPtrType(Context) ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)) ||
+ FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(0)))
return 0;
if (isFoldable(3, 2, false)) {
@@ -180,7 +177,7 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getReturnType() != FT->getParamType(0) ||
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
- FT->getParamType(2) != TD->getIntPtrType(Context))
+ FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0)))
return 0;
Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1);
@@ -205,8 +202,8 @@ struct StrCpyChkOpt : public InstFortifiedLibCallOptimization {
Value *Ret =
EmitMemCpyChk(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(Context), Len),
- CI->getArgOperand(2), B, TD, TLI);
+                  ConstantInt::get(TD->getIntPtrType(Dst->getType()), Len),
+                  CI->getArgOperand(2), B, TD, TLI);
return Ret;
}
return 0;
@@ -225,7 +222,7 @@ struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization {
FT->getParamType(0) != FT->getParamType(1) ||
FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
!FT->getParamType(2)->isIntegerTy() ||
- FT->getParamType(3) != TD->getIntPtrType(Context))
+ FT->getParamType(3) != TD->getIntPtrType(FT->getParamType(0)))
return 0;
if (isFoldable(3, 2, false)) {
@@ -287,7 +284,8 @@ struct StrCatOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(CpyDst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1);
+ ConstantInt::get(TD->getIntPtrType(Src->getType()),
+ Len + 1), 1);
return Dst;
}
};
@@ -359,8 +357,9 @@ struct StrChrOpt : public LibCallOptimization {
if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32.
return 0;
+ Type *PT = FT->getParamType(0);
return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul.
- ConstantInt::get(TD->getIntPtrType(*Context), Len),
+ ConstantInt::get(TD->getIntPtrType(PT), Len),
B, TD, TLI);
}
@@ -454,8 +453,9 @@ struct StrCmpOpt : public LibCallOptimization {
// These optimizations require DataLayout.
if (!TD) return 0;
+ Type *PT = FT->getParamType(0);
return EmitMemCmp(Str1P, Str2P,
- ConstantInt::get(TD->getIntPtrType(*Context),
+ ConstantInt::get(TD->getIntPtrType(PT),
std::min(Len1, Len2)), B, TD, TLI);
}
@@ -537,7 +537,7 @@ struct StrCpyOpt : public LibCallOptimization {
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(TD->getIntPtrType(*Context), Len), 1);
+ ConstantInt::get(TD->getIntPtrType(Dst->getType()), Len), 1);
return Dst;
}
};
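
All of the optimizers above repeat one signature check; distilled as a hedged sketch (helper name invented; a memcpy-shaped three-argument prototype is assumed):

  #include "llvm/DataLayout.h"
  #include "llvm/DerivedTypes.h"
  using namespace llvm;

  // A memcpy-like prototype is acceptable only if its size parameter is the
  // intptr type of the destination pointer's address space.
  static bool hasMemCpyLikeSignature(FunctionType *FT, const DataLayout *TD) {
    if (FT->getNumParams() != 3)
      return false;
    Type *PT = FT->getParamType(0);
    return FT->getReturnType() == PT && PT->isPointerTy() &&
           FT->getParamType(1)->isPointerTy() &&
           FT->getParamType(2) == TD->getIntPtrType(PT);
  }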
diff --git a/lib/VMCore/DataLayout.cpp b/lib/VMCore/DataLayout.cpp
index e6994be257..8d7a8e267c 100644
--- a/lib/VMCore/DataLayout.cpp
+++ b/lib/VMCore/DataLayout.cpp
@@ -660,13 +660,32 @@ unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const {
return Log2_32(Align);
}
-/// getIntPtrType - Return an unsigned integer type that is the same size or
-/// greater to the host pointer size.
+/// getIntPtrType - Return an integer type that is the same size as, or
+/// greater than, the pointer size for the given address space.
IntegerType *DataLayout::getIntPtrType(LLVMContext &C,
unsigned AddressSpace) const {
return IntegerType::get(C, getPointerSizeInBits(AddressSpace));
}
+/// getIntPtrType - Return an integer type that is the same size as, or
+/// greater than, the pointer size of the given pointer type. For vectors
+/// of pointers, the element's address space is used; for any other type,
+/// the default address space is assumed.
+IntegerType *DataLayout::getIntPtrType(Type *Ty) const {
+ LLVMContext &C = Ty->getContext();
+  // For a pointer, return an integer type as wide as a pointer in its
+  // address space.
+  if (Ty->isPointerTy()) return IntegerType::get(C, getTypeSizeInBits(Ty));
+  // For a vector of pointers, use the width of the element pointer's
+  // address space.
+  if (Ty->isVectorTy() && cast<VectorType>(Ty)->getElementType()->isPointerTy())
+    return IntegerType::get(C,
+      getTypeSizeInBits(cast<VectorType>(Ty)->getElementType()));
+  // Otherwise, return the integer type for the default address space.
+  // This happens, for example, when querying the IntPtr type for every
+  // argument of a function: the address space of a non-pointer type
+  // cannot be determined from the type itself, so the default is used.
+ return getIntPtrType(C, 0);
+}
+
uint64_t DataLayout::getIndexedOffset(Type *ptrTy,
ArrayRef<Value *> Indices) const {
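
A small demonstration of the new overload under a multi-pointer-size layout (demo only; the layout string is trimmed to the two address spaces it uses):

  #include "llvm/DataLayout.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"
  using namespace llvm;

  // With 32-bit default pointers and 64-bit addrspace(1) pointers, the Type
  // overload picks the width from the pointer's own address space, while a
  // non-pointer type falls back to the default address space.
  void demoGetIntPtrType(LLVMContext &C) {
    DataLayout TD("e-p:32:32:32-p1:64:64:64");
    Type *P1 = PointerType::get(Type::getInt8Ty(C), /*AddressSpace=*/1);
    IntegerType *WideTy = TD.getIntPtrType(P1);                  // i64
    IntegerType *DefTy = TD.getIntPtrType(Type::getFloatTy(C));  // i32
    (void)WideTy; (void)DefTy;
  }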
diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp
index 13c4a5d257..e9b96d6cd2 100644
--- a/lib/VMCore/Instructions.cpp
+++ b/lib/VMCore/Instructions.cpp
@@ -2120,6 +2120,17 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const {
return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
}
+/// @brief Determine if a cast is a no-op
+bool CastInst::isNoopCast(const DataLayout &DL) const {
+ unsigned AS = 0;
+ if (getOpcode() == Instruction::PtrToInt)
+ AS = getOperand(0)->getType()->getPointerAddressSpace();
+ else if (getOpcode() == Instruction::IntToPtr)
+ AS = getType()->getPointerAddressSpace();
+ Type *IntPtrTy = DL.getIntPtrType(getContext(), AS);
+ return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
+}
+
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
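
For call sites, the gain of the new overload is that the address space comes off the cast itself. A hedged one-line wrapper (name invented) shows the intended use:

  #include "llvm/DataLayout.h"
  #include "llvm/Instructions.h"
  using namespace llvm;

  // A ptrtoint/inttoptr is a no-op only if the integer side is exactly as
  // wide as a pointer in the cast's own address space, which isNoopCast now
  // checks without the caller pre-computing an intptr type for AS 0.
  static bool isNoopPtrCast(const CastInst *Cast, const DataLayout &DL) {
    return Cast->isNoopCast(DL);
  }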
diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp
index 1a7a650989..54146e118c 100644
--- a/lib/VMCore/Type.cpp
+++ b/lib/VMCore/Type.cpp
@@ -233,7 +233,12 @@ unsigned Type::getVectorNumElements() const {
}
unsigned Type::getPointerAddressSpace() const {
- return cast<PointerType>(this)->getAddressSpace();
+ if (isPointerTy())
+ return cast<PointerType>(this)->getAddressSpace();
+ if (isVectorTy())
+ return getSequentialElementType()->getPointerAddressSpace();
+ llvm_unreachable("Should never reach here!");
+ return 0;
}
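
A demonstration of the widened query (demo only; a live LLVMContext is assumed):

  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"
  using namespace llvm;

  // getPointerAddressSpace now answers for vectors of pointers as well as
  // plain pointers, by looking through to the element type.
  unsigned demoPointerAddressSpace(LLVMContext &C) {
    Type *P1 = PointerType::get(Type::getInt8Ty(C), 1);
    Type *V4P1 = VectorType::get(P1, 4);
    unsigned A = P1->getPointerAddressSpace();   // 1
    unsigned B = V4P1->getPointerAddressSpace(); // 1, via the element type
    return A + B;
  }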
diff --git a/test/Other/multi-pointer-size.ll b/test/Other/multi-pointer-size.ll
new file mode 100644
index 0000000000..95fa54b8f2
--- /dev/null
+++ b/test/Other/multi-pointer-size.ll
@@ -0,0 +1,43 @@
+; RUN: opt -instcombine %s | llvm-dis | FileCheck %s
+target datalayout = "e-p:32:32:32-p1:64:64:64-p2:8:8:8-p3:16:16:16-p4:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
+
+define i32 @test_as0(i32 addrspace(0)* %A) {
+entry:
+; CHECK: %arrayidx = getelementptr i32* %A, i32 1
+ %arrayidx = getelementptr i32 addrspace(0)* %A, i64 1
+ %y = load i32 addrspace(0)* %arrayidx, align 4
+ ret i32 %y
+}
+
+define i32 @test_as1(i32 addrspace(1)* %A) {
+entry:
+; CHECK: %arrayidx = getelementptr i32 addrspace(1)* %A, i64 1
+ %arrayidx = getelementptr i32 addrspace(1)* %A, i32 1
+ %y = load i32 addrspace(1)* %arrayidx, align 4
+ ret i32 %y
+}
+
+define i32 @test_as2(i32 addrspace(2)* %A) {
+entry:
+; CHECK: %arrayidx = getelementptr i32 addrspace(2)* %A, i8 1
+ %arrayidx = getelementptr i32 addrspace(2)* %A, i32 1
+ %y = load i32 addrspace(2)* %arrayidx, align 4
+ ret i32 %y
+}
+
+define i32 @test_as3(i32 addrspace(3)* %A) {
+entry:
+; CHECK: %arrayidx = getelementptr i32 addrspace(3)* %A, i16 1
+ %arrayidx = getelementptr i32 addrspace(3)* %A, i32 1
+ %y = load i32 addrspace(3)* %arrayidx, align 4
+ ret i32 %y
+}
+
+define i32 @test_as4(i32 addrspace(4)* %A) {
+entry:
+; CHECK: %arrayidx = getelementptr i32 addrspace(4)* %A, i96 1
+ %arrayidx = getelementptr i32 addrspace(4)* %A, i32 1
+ %y = load i32 addrspace(4)* %arrayidx, align 4
+ ret i32 %y
+}
+
diff --git a/test/Transforms/InstCombine/constant-fold-gep-as-0.ll b/test/Transforms/InstCombine/constant-fold-gep-as-0.ll
new file mode 100644
index 0000000000..74fe316137
--- /dev/null
+++ b/test/Transforms/InstCombine/constant-fold-gep-as-0.ll
@@ -0,0 +1,235 @@
+; "PLAIN" - No optimizations. This tests the target-independent
+; constant folder.
+; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
+
+target datalayout = "e-p:128:128:128-p1:32:32:32-p2:8:8:8-p3:16:16:16-p4:64:64:64-p5:96:96:96-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:32"
+
+; PLAIN: ModuleID = '<stdin>'
+
+; The automatic constant folder in opt does not have targetdata access, so
+; it can't fold gep arithmetic, in general. However, the constant folder run
+; from instcombine and global opt can use targetdata.
+; PLAIN: @G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1)
+@G8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1)
+; PLAIN: @G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1)
+@G1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -1)
+; PLAIN: @F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2)
+@F8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2)
+; PLAIN: @F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2)
+@F1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 1 to i1 addrspace(2)*), i8 -2)
+; PLAIN: @H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1)
+@H8 = global i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1)
+; PLAIN: @H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i8 -1)
+@H1 = global i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i8 0 to i1 addrspace(2)*), i8 -1)
+
+
+; The target-independent folder should be able to do some clever
+; simplifications on sizeof, alignof, and offsetof expressions. The
+; target-dependent folder should fold these down to constants.
+; PLAIN-X: @a = constant i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310)
+@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]} addrspace(4)* getelementptr ({[7 x double], [7 x double]} addrspace(4)* null, i64 11) to i64), i64 5))
+
+; PLAIN-X: @b = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
+@b = constant i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64)
+
+; PLAIN-X: @c = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2)
+@c = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64)
+
+; PLAIN-X: @d = constant i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11)
+@d = constant i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64)
+
+; PLAIN-X: @e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64)
+@e = constant i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64)
+
+; PLAIN-X: @f = constant i64 1
+@f = constant i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64)
+
+; PLAIN-X: @g = constant i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
+@g = constant i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64)
+
+; PLAIN-X: @h = constant i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64)
+@h = constant i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i64 1) to i64)
+
+; PLAIN-X: @i = constant i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64)
+@i = constant i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double} addrspace(4)* null, i64 0, i32 1) to i64)
+
+; The target-dependent folder should cast GEP indices to pointer-sized integers.
+
+; PLAIN: @M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1)
+; PLAIN: @N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1)
+; PLAIN: @O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1)
+
+@M = constant i64 addrspace(5)* getelementptr (i64 addrspace(5)* null, i32 1)
+@N = constant i64 addrspace(5)* getelementptr ({ i64, i64 } addrspace(5)* null, i32 0, i32 1)
+@O = constant i64 addrspace(5)* getelementptr ([2 x i64] addrspace(5)* null, i32 0, i32 1)
+
+; Fold GEP of a GEP. Very simple cases are folded.
+
+; PLAIN-X: @Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 2)
+@ext = external addrspace(3) global [3 x { i32, i32 }]
+@Y = global [3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 1), i64 1)
+
+; PLAIN-X: @Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1)
+@Z = global i32addrspace(3)* getelementptr inbounds (i32addrspace(3)* getelementptr inbounds ([3 x { i32, i32 }]addrspace(3)* @ext, i64 0, i64 1, i32 0), i64 1)
+
+
+; Duplicate all of the above as function return values rather than
+; global initializers.
+
+; PLAIN: define i8 addrspace(1)* @goo8() nounwind {
+; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
+; PLAIN: ret i8 addrspace(1)* %t
+; PLAIN: }
+; PLAIN: define i1 addrspace(2)* @goo1() nounwind {
+; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
+; PLAIN: ret i1 addrspace(2)* %t
+; PLAIN: }
+; PLAIN: define i8 addrspace(1)* @foo8() nounwind {
+; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)*
+; PLAIN: ret i8 addrspace(1)* %t
+; PLAIN: }
+; PLAIN: define i1 addrspace(2)* @foo1() nounwind {
+; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)*
+; PLAIN: ret i1 addrspace(2)* %t
+; PLAIN: }
+; PLAIN: define i8 addrspace(1)* @hoo8() nounwind {
+; PLAIN: %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* null, i32 -1) to i8 addrspace(1)*
+; PLAIN: ret i8 addrspace(1)* %t
+; PLAIN: }
+; PLAIN: define i1 addrspace(2)* @hoo1() nounwind {
+; PLAIN: %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 -1) to i1 addrspace(2)*
+; PLAIN: ret i1 addrspace(2)* %t
+; PLAIN: }
+define i8 addrspace(1)* @goo8() nounwind {
+ %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
+ ret i8 addrspace(1)* %t
+}
+define i1 addrspace(2)* @goo1() nounwind {
+ %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
+ ret i1 addrspace(2)* %t
+}
+define i8 addrspace(1)* @foo8() nounwind {
+ %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 1 to i8 addrspace(1)*), i32 -2) to i8 addrspace(1)*
+ ret i8 addrspace(1)* %t
+}
+define i1 addrspace(2)* @foo1() nounwind {
+ %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 1 to i1 addrspace(2)*), i32 -2) to i1 addrspace(2)*
+ ret i1 addrspace(2)* %t
+}
+define i8 addrspace(1)* @hoo8() nounwind {
+ %t = bitcast i8 addrspace(1)* getelementptr (i8 addrspace(1)* inttoptr (i32 0 to i8 addrspace(1)*), i32 -1) to i8 addrspace(1)*
+ ret i8 addrspace(1)* %t
+}
+define i1 addrspace(2)* @hoo1() nounwind {
+ %t = bitcast i1 addrspace(2)* getelementptr (i1 addrspace(2)* inttoptr (i32 0 to i1 addrspace(2)*), i32 -1) to i1 addrspace(2)*
+ ret i1 addrspace(2)* %t
+}
+
+; PLAIN-X: define i64 @fa() nounwind {
+; PLAIN-X: %t = bitcast i64 mul (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2310) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fb() nounwind {
+; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fc() nounwind {
+; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 2) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fd() nounwind {
+; PLAIN-X: %t = bitcast i64 mul nuw (i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64), i64 11) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fe() nounwind {
+; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ double, float, double, double }* null, i64 0, i32 2) to i64) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @ff() nounwind {
+; PLAIN-X: %t = bitcast i64 1 to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fg() nounwind {
+; PLAIN-X: %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fh() nounwind {
+; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr (i1 addrspace(2)* null, i32 1) to i64) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+; PLAIN-X: define i64 @fi() nounwind {
+; PLAIN-X: %t = bitcast i64 ptrtoint (i1 addrspace(2)* getelementptr ({ i1, i1 addrspace(2)* }* null, i64 0, i32 1) to i64) to i64
+; PLAIN-X: ret i64 %t
+; PLAIN-X: }
+define i64 @fa() nounwind {
+ %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64
+ ret i64 %t
+}
+define i64 @fb() nounwind {
+ %t = bitcast i64 ptrtoint ([13 x double] addrspace(4)* getelementptr ({i1, [13 x double]} addrspace(4)* null, i64 0, i32 1) to i64) to i64
+ ret i64 %t
+}
+define i64 @fc() nounwind {
+ %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, double, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64
+ ret i64 %t
+}
+define i64 @fd() nounwind {
+ %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ([13 x double] addrspace(4)* null, i64 0, i32 11) to i64) to i64
+ ret i64 %t
+}
+define i64 @fe() nounwind {
+ %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({double, float, double, double} addrspace(4)* null, i64 0, i32 2) to i64) to i64
+ ret i64 %t
+}
+define i64 @ff() nounwind {
+ %t = bitcast i64 ptrtoint (<{ i16, i128 }> addrspace(4)* getelementptr ({i1, <{ i16, i128 }>} addrspace(4)* null, i64 0, i32 1) to i64) to i64
+ ret i64 %t
+}
+define i64 @fg() nounwind {
+ %t = bitcast i64 ptrtoint ({double, double} addrspace(4)* getelementptr ({i1, {double, double}} addrspace(4)* null, i64 0, i32 1) to i64) to i64
+ ret i64 %t
+}
+define i64 @fh() nounwind {
+ %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr (double addrspace(4)* null, i32 1) to i64) to i64
+ ret i64 %t
+}
+define i64 @fi() nounwind {
+ %t = bitcast i64 ptrtoint (double addrspace(4)* getelementptr ({i1, double}addrspace(4)* null, i64 0, i32 1) to i64) to i64
+ ret i64 %t
+}
+
+; PLAIN: define i64* @fM() nounwind {
+; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
+; PLAIN: ret i64* %t
+; PLAIN: }
+; PLAIN: define i64* @fN() nounwind {
+; PLAIN: %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
+; PLAIN: ret i64* %t
+; PLAIN: }
+; PLAIN: define i64* @fO() nounwind {
+; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
+; PLAIN: ret i64* %t
+; PLAIN: }
+
+define i64* @fM() nounwind {
+ %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
+ ret i64* %t
+}
+define i64* @fN() nounwind {
+ %t = bitcast i64* getelementptr ({ i64, i64 }* null, i32 0, i32 1) to i64*
+ ret i64* %t
+}
+define i64* @fO() nounwind {
+ %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
+ ret i64* %t
+}
+
+; PLAIN: define i32 addrspace(1)* @fZ() nounwind {
+; PLAIN: %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)*
+; PLAIN: ret i32 addrspace(1)* %t
+; PLAIN: }
+@ext2 = external addrspace(1) global [3 x { i32, i32 }]
+define i32 addrspace(1)* @fZ() nounwind {
+ %t = bitcast i32 addrspace(1)* getelementptr inbounds (i32 addrspace(1)* getelementptr inbounds ([3 x { i32, i32 }] addrspace(1)* @ext2, i64 0, i64 1, i32 0), i64 1) to i32 addrspace(1)*
+ ret i32 addrspace(1)* %t
+}