author     Micah Villmow <villmow@gmail.com>    2012-10-15 16:24:29 +0000
committer  Micah Villmow <villmow@gmail.com>    2012-10-15 16:24:29 +0000
commit     2c39b15073db81d93bb629303915b7d7e5d088dc (patch)
tree       966e03c76191ad625cf466ea6ccf1238ddc23ed7 /lib
parent     f35c62bf025411393c7df0803851010cc0e597ba (diff)
Resubmit the changes to LLVM core that update these functions to support different pointer sizes on a per-address-space basis.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165941 91177308-0d34-0410-b5e6-96231b3b80d8
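
For context, here is a minimal sketch (not part of this patch) of the per-address-space DataLayout queries that the diff below threads through callers. The p1:32:32:32 layout-string syntax and the llvm/DataLayout.h header path are assumptions about the tree at this revision, not something this commit shows.

// Hypothetical standalone example; the layout string (32-bit pointers in
// address space 1 next to the default 64-bit pointers) is an assumption.
#include "llvm/DataLayout.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

int main() {
  DataLayout DL("e-p:64:64:64-p1:32:32:32");
  // The explicit address-space argument selects the matching pointer width.
  outs() << DL.getPointerSizeInBits(0) << "\n"; // 64
  outs() << DL.getPointerSizeInBits(1) << "\n"; // 32
  outs() << DL.getPointerSize(1) << "\n";       // 4 (bytes)
  return 0;
}

Callers that already have the pointer value or type in hand, as in the hunks below, pass its address space instead of a literal 0.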
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp | 7
-rw-r--r--  lib/Analysis/CodeMetrics.cpp | 6
-rw-r--r--  lib/Analysis/ConstantFolding.cpp | 12
-rw-r--r--  lib/Analysis/InlineCost.cpp | 15
-rw-r--r--  lib/Analysis/InstructionSimplify.cpp | 11
-rw-r--r--  lib/Analysis/ValueTracking.cpp | 6
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 8
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/DIE.cpp | 8
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 18
-rw-r--r--  lib/CodeGen/AsmPrinter/DwarfException.cpp | 2
-rw-r--r--  lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp | 2
-rw-r--r--  lib/CodeGen/MachineFunction.cpp | 4
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 5
-rw-r--r--  lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 4
-rw-r--r--  lib/ExecutionEngine/ExecutionEngine.cpp | 12
-rw-r--r--  lib/ExecutionEngine/Interpreter/Execution.cpp | 3
-rw-r--r--  lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | 2
-rw-r--r--  lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp | 14
-rw-r--r--  lib/Target/ARM/ARMELFWriterInfo.cpp | 2
-rw-r--r--  lib/Target/MBlaze/MBlazeELFWriterInfo.cpp | 2
-rw-r--r--  lib/Target/MSP430/MSP430ISelLowering.cpp | 4
-rw-r--r--  lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 8
-rw-r--r--  lib/Target/PowerPC/PPCAsmPrinter.cpp | 6
-rw-r--r--  lib/Target/PowerPC/PPCRegisterInfo.cpp | 2
-rw-r--r--  lib/Target/Target.cpp | 10
-rw-r--r--  lib/Target/X86/X86AsmPrinter.cpp | 2
-rw-r--r--  lib/Target/X86/X86FrameLowering.cpp | 4
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 12
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp | 10
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp | 9
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp | 2
-rw-r--r--  lib/Transforms/Scalar/MemCpyOptimizer.cpp | 5
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp | 14
-rw-r--r--  lib/Transforms/Utils/Local.cpp | 3
35 files changed, 144 insertions(+), 92 deletions(-)
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 263bfc031f..36903f94e2 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -286,7 +286,8 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
V = GEPOp->getOperand(0);
continue;
}
-
+
+ unsigned AS = GEPOp->getPointerAddressSpace();
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
gep_type_iterator GTI = gep_type_begin(GEPOp);
for (User::const_op_iterator I = GEPOp->op_begin()+1,
@@ -315,7 +316,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
- if (TD->getPointerSizeInBits() > Width)
+ if (TD->getPointerSizeInBits(AS) > Width)
Extension = EK_SignExt;
// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
@@ -344,7 +345,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
// Make sure that we have a scale that makes sense for this target's
// pointer size.
- if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
+ if (unsigned ShiftBits = 64-TD->getPointerSizeInBits(AS)) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}
diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp
index 651a54be1b..d669268496 100644
--- a/lib/Analysis/CodeMetrics.cpp
+++ b/lib/Analysis/CodeMetrics.cpp
@@ -91,14 +91,16 @@ bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
// which doesn't contain values outside the range of a pointer.
if (isa<IntToPtrInst>(CI) && TD &&
TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
- Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits())
+ Op->getType()->getScalarSizeInBits() <= TD->getPointerSizeInBits(
+ cast<IntToPtrInst>(CI)->getAddressSpace()))
return true;
// A ptrtoint cast is free so long as the result is large enough to store
// the pointer, and a legal integer type.
if (isa<PtrToIntInst>(CI) && TD &&
TD->isLegalInteger(Op->getType()->getScalarSizeInBits()) &&
- Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits())
+ Op->getType()->getScalarSizeInBits() >= TD->getPointerSizeInBits(
+ cast<PtrToIntInst>(CI)->getPointerAddressSpace()))
return true;
// trunc to a native type is free (assuming the target has compare and
diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp
index b7bf044a36..146897ad67 100644
--- a/lib/Analysis/ConstantFolding.cpp
+++ b/lib/Analysis/ConstantFolding.cpp
@@ -916,10 +916,11 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
if (TD && CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getScalarSizeInBits();
- if (TD->getPointerSizeInBits() < InWidth) {
+ unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
+ if (TD->getPointerSizeInBits(AS) < InWidth) {
Constant *Mask =
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
- TD->getPointerSizeInBits()));
+ TD->getPointerSizeInBits(AS)));
Input = ConstantExpr::getAnd(Input, Mask);
}
// Do a zext or trunc to get to the dest size.
@@ -932,9 +933,10 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
// the int size is >= the ptr size. This requires knowing the width of a
// pointer, so it can't be done in ConstantExpr::getCast.
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
- if (TD &&
- TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
- CE->getOpcode() == Instruction::PtrToInt)
+ if (TD && CE->getOpcode() == Instruction::PtrToInt &&
+ TD->getPointerSizeInBits(
+ cast<PointerType>(CE->getOperand(0)->getType())->getAddressSpace())
+ <= CE->getType()->getScalarSizeInBits())
return FoldBitCast(CE->getOperand(0), DestTy, *TD);
return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 5f51f775f1..95e58022ca 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -243,7 +243,8 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
if (!TD)
return false;
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned AS = GEP.getPointerAddressSpace();
+ unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
assert(IntPtrWidth == Offset.getBitWidth());
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -391,7 +392,8 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
- if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
+ unsigned AS = I.getPointerAddressSpace();
+ if (TD && IntegerSize >= TD->getPointerSizeInBits(AS)) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
if (BaseAndOffset.first)
@@ -425,7 +427,8 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
- if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
+ unsigned AS = I.getAddressSpace();
+ if (TD && IntegerSize <= TD->getPointerSizeInBits(AS)) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
ConstantOffsetPtrs[&I] = BaseAndOffset;
@@ -760,7 +763,8 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
if (!TD || !V->getType()->isPointerTy())
return 0;
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
+ unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
+ unsigned IntPtrWidth = TD->getPointerSizeInBits(AS);
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
@@ -824,7 +828,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
- unsigned PointerSize = TD->getPointerSizeInBits();
+ unsigned AS = PTy->getAddressSpace();
+ unsigned PointerSize = TD->getPointerSizeInBits(AS);
// Ceiling division.
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index b3d62487fc..8e326122fa 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -666,7 +666,8 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// 'Offset' APInt must be the bitwidth of the target's pointer size.
static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
APInt &Offset) {
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ unsigned AS = GEP->getPointerAddressSpace();
+ unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
assert(IntPtrWidth == Offset.getBitWidth());
gep_type_iterator GTI = gep_type_begin(GEP);
@@ -696,12 +697,14 @@ static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
+/// FIXME: This function also exists in InlineCost.cpp.
static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
Value *&V) {
if (!V->getType()->isPointerTy())
return 0;
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
+ unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
APInt Offset = APInt::getNullValue(IntPtrWidth);
// Even though we don't look through PHI nodes, we could be called on an
@@ -1877,7 +1880,9 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
- Q.TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
+ Q.TD->getPointerSizeInBits(
+ cast<PtrToIntInst>(LI)->getPointerAddressSpace()) ==
+ DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 951b442b87..1d7f0692cb 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -40,7 +40,8 @@ static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!");
- return TD ? TD->getPointerSizeInBits() : 0;
+ return TD ?
+ TD->getPointerSizeInBits(cast<PointerType>(Ty)->getAddressSpace()) : 0;
}
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
@@ -1621,7 +1622,8 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
// Re-sign extend from the pointer size if needed to get overflow edge cases
// right.
- unsigned PtrSize = TD.getPointerSizeInBits();
+ unsigned AS = GEP->getPointerAddressSpace();
+ unsigned PtrSize = TD.getPointerSizeInBits(AS);
if (PtrSize < 64)
Offset = SignExtend64(Offset, PtrSize);
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index d74a70362a..4de98da655 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -385,7 +385,8 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
// - __tlv_bootstrap - used to make sure support exists
// - spare pointer, used when mapped by the runtime
// - pointer to mangled symbol above with initializer
- unsigned PtrSize = TD->getPointerSizeInBits()/8;
+ unsigned AS = GV->getType()->getAddressSpace();
+ unsigned PtrSize = TD->getPointerSizeInBits(AS)/8;
OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"),
PtrSize, 0);
OutStreamer.EmitIntValue(0, PtrSize, 0);
@@ -1299,7 +1300,7 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
// Emit the function pointers in the target-specific order
const DataLayout *TD = TM.getDataLayout();
- unsigned Align = Log2_32(TD->getPointerPrefAlignment());
+ unsigned Align = Log2_32(TD->getPointerPrefAlignment(0));
std::stable_sort(Structors.begin(), Structors.end(), priority_order);
for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
const MCSection *OutputSection =
@@ -1480,8 +1481,9 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
if (Offset == 0)
return Base;
+ unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
// Truncate/sext the offset to the pointer size.
- unsigned Width = TD.getPointerSizeInBits();
+ unsigned Width = TD.getPointerSizeInBits(AS);
if (Width < 64)
Offset = SignExtend64(Offset, Width);
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
index d94e1fe61b..6c17af2e8c 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -112,7 +112,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
switch (Encoding & 0x07) {
default: llvm_unreachable("Invalid encoded value.");
- case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
+ case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize(0);
case dwarf::DW_EH_PE_udata2: return 2;
case dwarf::DW_EH_PE_udata4: return 4;
case dwarf::DW_EH_PE_udata8: return 8;
diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp
index 4d73b3c222..73e18cd817 100644
--- a/lib/CodeGen/AsmPrinter/DIE.cpp
+++ b/lib/CodeGen/AsmPrinter/DIE.cpp
@@ -200,7 +200,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
case dwarf::DW_FORM_addr:
- Size = Asm->getDataLayout().getPointerSize(); break;
+ Size = Asm->getDataLayout().getPointerSize(0); break;
default: llvm_unreachable("DIE Value form not supported yet");
}
Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@@ -222,7 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
case dwarf::DW_FORM_data8: return sizeof(int64_t);
case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
- case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
+ case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize(0);
default: llvm_unreachable("DIE Value form not supported yet");
}
}
@@ -249,7 +249,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const {
unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getDataLayout().getPointerSize();
+ return AP->getDataLayout().getPointerSize(0);
}
#ifndef NDEBUG
@@ -273,7 +273,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const {
unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4;
- return AP->getDataLayout().getPointerSize();
+ return AP->getDataLayout().getPointerSize(0);
}
#ifndef NDEBUG
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 6acf19ee8c..df162e07a8 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -384,7 +384,7 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
// DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size()
- * Asm->getDataLayout().getPointerSize());
+ * Asm->getDataLayout().getPointerSize(0));
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -450,7 +450,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
// DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size()
- * Asm->getDataLayout().getPointerSize());
+ * Asm->getDataLayout().getPointerSize(0));
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@@ -1765,7 +1765,7 @@ void DwarfDebug::emitDebugInfo() {
Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"),
DwarfAbbrevSectionSym);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0));
emitDIE(Die);
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID()));
@@ -1811,14 +1811,14 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
Asm->EmitInt8(0);
Asm->OutStreamer.AddComment("Op size");
- Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1);
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0) + 1);
Asm->OutStreamer.AddComment("DW_LNE_set_address");
Asm->EmitInt8(dwarf::DW_LNE_set_address);
Asm->OutStreamer.AddComment("Section end label");
Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
- Asm->getDataLayout().getPointerSize(),
+ Asm->getDataLayout().getPointerSize(0),
0/*AddrSpace*/);
// Mark end of matrix.
@@ -2047,7 +2047,7 @@ void DwarfDebug::emitDebugLoc() {
// Start the dwarf loc section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection());
- unsigned char Size = Asm->getDataLayout().getPointerSize();
+ unsigned char Size = Asm->getDataLayout().getPointerSize(0);
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
unsigned index = 1;
for (SmallVector<DotDebugLocEntry, 4>::iterator
@@ -2144,7 +2144,7 @@ void DwarfDebug::emitDebugRanges() {
// Start the dwarf ranges section.
Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfRangesSection());
- unsigned char Size = Asm->getDataLayout().getPointerSize();
+ unsigned char Size = Asm->getDataLayout().getPointerSize(0);
for (SmallVector<const MCSymbol *, 8>::iterator
I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
I != E; ++I) {
@@ -2202,7 +2202,7 @@ void DwarfDebug::emitDebugInlineInfo() {
Asm->OutStreamer.AddComment("Dwarf Version");
Asm->EmitInt16(dwarf::DWARF_VERSION);
Asm->OutStreamer.AddComment("Address Size (in bytes)");
- Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
+ Asm->EmitInt8(Asm->getDataLayout().getPointerSize(0));
for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(),
E = InlinedSPNodes.end(); I != E; ++I) {
@@ -2233,7 +2233,7 @@ void DwarfDebug::emitDebugInlineInfo() {
if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc");
Asm->OutStreamer.EmitSymbolValue(LI->first,
- Asm->getDataLayout().getPointerSize(),0);
+ Asm->getDataLayout().getPointerSize(0),0);
}
}
diff --git a/lib/CodeGen/AsmPrinter/DwarfException.cpp b/lib/CodeGen/AsmPrinter/DwarfException.cpp
index 08fb6b3f52..31d07141a1 100644
--- a/lib/CodeGen/AsmPrinter/DwarfException.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfException.cpp
@@ -417,7 +417,7 @@ void DwarfException::EmitExceptionTable() {
// that we're omitting that bit.
TTypeEncoding = dwarf::DW_EH_PE_omit;
// dwarf::DW_EH_PE_absptr
- TypeFormatSize = Asm->getDataLayout().getPointerSize();
+ TypeFormatSize = Asm->getDataLayout().getPointerSize(0);
} else {
// Okay, we have actual filters or typeinfos to emit. As such, we need to
// pick a type encoding for them. We're about to emit a list of pointers to
diff --git a/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
index f7c011968c..d0e27d1d04 100644
--- a/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp
@@ -91,7 +91,7 @@ void OcamlGCMetadataPrinter::beginAssembly(AsmPrinter &AP) {
/// either condition is detected in a function which uses the GC.
///
void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
- unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
+ unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize(0);
AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(getModule(), AP, "code_end");
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index 91d5211857..f11785070b 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -550,7 +550,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// address of a block, in which case it is the pointer size.
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
- return TD.getPointerSize();
+ return TD.getPointerSize(0);
case MachineJumpTableInfo::EK_GPRel64BlockAddress:
return 8;
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
@@ -570,7 +570,7 @@ unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// alignment.
switch (getEntryKind()) {
case MachineJumpTableInfo::EK_BlockAddress:
- return TD.getPointerABIAlignment();
+ return TD.getPointerABIAlignment(0);
case MachineJumpTableInfo::EK_GPRel64BlockAddress:
return TD.getABIIntegerTypeAlignment(64);
case MachineJumpTableInfo::EK_GPRel32BlockAddress:
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 79cfcdfe0e..183416f3fd 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3449,9 +3449,12 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
IsZeroVal, MemcpyStrSrc,
DAG.getMachineFunction());
+ Type *vtType = VT.isExtended() ? VT.getTypeForEVT(*DAG.getContext()) : NULL;
+ unsigned AS = (vtType && vtType->isPointerTy()) ?
+ cast<PointerType>(vtType)->getAddressSpace() : 0;
if (VT == MVT::Other) {
- if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
+ if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment(AS) ||
TLI.allowsUnalignedMemoryAccesses(VT)) {
VT = TLI.getPointerTy();
} else {
diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 8f5d770f66..bf26a6d592 100644
--- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -77,9 +77,9 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
Flags,
SectionKind::getDataRel(),
0, Label->getName());
- unsigned Size = TM.getDataLayout()->getPointerSize();
+ unsigned Size = TM.getDataLayout()->getPointerSize(0);
Streamer.SwitchSection(Sec);
- Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
+ Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment(0));
Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
const MCExpr *E = MCConstantExpr::Create(Size, getContext());
Streamer.EmitELFSize(Label, E);
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index c5c46815a2..94a2542e7a 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -17,6 +17,7 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
#include "llvm/Module.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ADT/SmallString.h"
@@ -267,7 +268,7 @@ public:
void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
const std::vector<std::string> &InputArgv) {
clear(); // Free the old contents.
- unsigned PtrSize = EE->getDataLayout()->getPointerSize();
+ unsigned PtrSize = EE->getDataLayout()->getPointerSize(0);
Array = new char[(InputArgv.size()+1)*PtrSize];
DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
@@ -342,7 +343,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
#ifndef NDEBUG
/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
- unsigned PtrSize = EE->getDataLayout()->getPointerSize();
+ unsigned PtrSize = EE->getDataLayout()->getPointerSize(0);
for (unsigned i = 0; i < PtrSize; ++i)
if (*(i + (uint8_t*)Loc))
return false;
@@ -644,13 +645,16 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::PtrToInt: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getPointerSizeInBits();
+ unsigned AS = cast<PointerType>(CE->getOperand(0)->getType())
+ ->getAddressSpace();
+ uint32_t PtrWidth = TD->getPointerSizeInBits(AS);
GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
return GV;
}
case Instruction::IntToPtr: {
GenericValue GV = getConstantValue(Op0);
- uint32_t PtrWidth = TD->getPointerSizeInBits();
+ unsigned AS = cast<PointerType>(CE->getType())->getAddressSpace();
+ uint32_t PtrWidth = TD->getPointerSizeInBits(AS);
if (PtrWidth != GV.IntVal.getBitWidth())
GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 5202b09165..326bf79c58 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1054,7 +1054,8 @@ GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
GenericValue Dest, Src = getOperandValue(SrcVal, SF);
assert(DstTy->isPointerTy() && "Invalid PtrToInt instruction");
- uint32_t PtrSize = TD.getPointerSizeInBits();
+ unsigned AS = cast<PointerType>(DstTy)->getAddressSpace();
+ uint32_t PtrSize = TD.getPointerSizeInBits(AS);
if (PtrSize != Src.IntVal.getBitWidth())
Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
index e16e2d112a..f58adbe1e1 100644
--- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
+++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -376,7 +376,7 @@ GenericValue lle_X_sprintf(FunctionType *FT,
case 'x': case 'X':
if (HowLong >= 1) {
if (HowLong == 1 &&
- TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 &&
+ TheInterpreter->getDataLayout()->getPointerSizeInBits(0) == 64 &&
sizeof(long) < sizeof(int64_t)) {
// Make sure we use %lld with a 64 bit argument because we might be
// compiling LLI on a 32 bit compiler.
diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
index 19c197903a..bcd5b26365 100644
--- a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
+++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp
@@ -14,7 +14,9 @@
#include "JIT.h"
#include "JITDwarfEmitter.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineFunction.h"
@@ -66,7 +68,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
void
JITDwarfEmitter::EmitFrameMoves(intptr_t BaseLabelPtr,
const std::vector<MachineMove> &Moves) const {
- unsigned PointerSize = TD->getPointerSize();
+ unsigned PointerSize = TD->getPointerSize(0);
int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ?
PointerSize : -PointerSize;
MCSymbol *BaseLabel = 0;
@@ -378,7 +380,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
for (unsigned i = 0, e = CallSites.size(); i < e; ++i)
SizeSites += MCAsmInfo::getULEB128Size(CallSites[i].Action);
- unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize();
+ unsigned SizeTypes = TypeInfos.size() * TD->getPointerSize(0);
unsigned TypeOffset = sizeof(int8_t) + // Call site format
// Call-site table length
@@ -454,12 +456,12 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
const GlobalVariable *GV = TypeInfos[M - 1];
if (GV) {
- if (TD->getPointerSize() == sizeof(int32_t))
+ if (TD->getPointerSize(GV->getType()->getAddressSpace()) == sizeof(int32_t))
JCE->emitInt32((intptr_t)Jit.getOrEmitGlobalVariable(GV));
else
JCE->emitInt64((intptr_t)Jit.getOrEmitGlobalVariable(GV));
} else {
- if (TD->getPointerSize() == sizeof(int32_t))
+ if (TD->getPointerSize(0) == sizeof(int32_t))
JCE->emitInt32(0);
else
JCE->emitInt64(0);
@@ -481,7 +483,7 @@ unsigned char* JITDwarfEmitter::EmitExceptionTable(MachineFunction* MF,
unsigned char*
JITDwarfEmitter::EmitCommonEHFrame(const Function* Personality) const {
- unsigned PointerSize = TD->getPointerSize();
+ unsigned PointerSize = TD->getPointerSize(0);
int stackGrowth = stackGrowthDirection == TargetFrameLowering::StackGrowsUp ?
PointerSize : -PointerSize;
@@ -541,7 +543,7 @@ JITDwarfEmitter::EmitEHFrame(const Function* Personality,
unsigned char* StartFunction,
unsigned char* EndFunction,
unsigned char* ExceptionTable) const {
- unsigned PointerSize = TD->getPointerSize();
+ unsigned PointerSize = TD->getPointerSize(0);
// EH frame header.
unsigned char* StartEHPtr = (unsigned char*)JCE->getCurrentPCValue();
diff --git a/lib/Target/ARM/ARMELFWriterInfo.cpp b/lib/Target/ARM/ARMELFWriterInfo.cpp
index d88bf0c8fa..7bca0edf91 100644
--- a/lib/Target/ARM/ARMELFWriterInfo.cpp
+++ b/lib/Target/ARM/ARMELFWriterInfo.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
ARMELFWriterInfo::ARMELFWriterInfo(TargetMachine &TM)
- : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits() == 64,
+ : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits(0) == 64,
TM.getDataLayout()->isLittleEndian()) {
}
diff --git a/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp b/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
index 4ca30ba81f..6b575099e5 100644
--- a/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
+++ b/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
//===----------------------------------------------------------------------===//
MBlazeELFWriterInfo::MBlazeELFWriterInfo(TargetMachine &TM)
- : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits() == 64,
+ : TargetELFWriterInfo(TM.getDataLayout()->getPointerSizeInBits(0) == 64,
TM.getDataLayout()->isLittleEndian()) {
}
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index fc677aec38..113378a5f3 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -881,7 +881,7 @@ MSP430TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
- uint64_t SlotSize = TD->getPointerSize();
+ uint64_t SlotSize = TD->getPointerSize(0);
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
true);
FuncInfo->setRAIndex(ReturnAddrIndex);
@@ -901,7 +901,7 @@ SDValue MSP430TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
- DAG.getConstant(TD->getPointerSize(), MVT::i16);
+ DAG.getConstant(TD->getPointerSize(0), MVT::i16);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
FrameAddr, Offset),
diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index d3dfb35e26..c46094569e 100644
--- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -126,8 +126,10 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) {
return Base;
// Truncate/sext the offset to the pointer size.
- if (TD.getPointerSizeInBits() != 64) {
- int SExtAmount = 64-TD.getPointerSizeInBits();
+ unsigned AS = PtrVal->getType()->isPointerTy() ?
+ cast<PointerType>(PtrVal->getType())->getAddressSpace() : 0;
+ if (TD.getPointerSizeInBits(AS) != 64) {
+ int SExtAmount = 64-TD.getPointerSizeInBits(AS);
Offset = (Offset << SExtAmount) >> SExtAmount;
}
@@ -1378,7 +1380,7 @@ getOpenCLAlignment(const DataLayout *TD,
const FunctionType *FTy = dyn_cast<FunctionType>(Ty);
if (FTy)
- return TD->getPointerPrefAlignment();
+ return TD->getPointerPrefAlignment(0);
return TD->getPrefTypeAlignment(Ty);
}
diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp
index d877e45fad..d8abd9fba0 100644
--- a/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -439,7 +439,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
const DataLayout *TD = TM.getDataLayout();
- bool isPPC64 = TD->getPointerSizeInBits() == 64;
+ bool isPPC64 = TD->getPointerSizeInBits(0) == 64;
if (isPPC64 && !TOC.empty()) {
const MCSectionELF *Section = OutStreamer.getContext().getELFSection(".toc",
@@ -545,7 +545,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) {
void PPCDarwinAsmPrinter::
EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
- bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
+ bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits(0) == 64;
const TargetLoweringObjectFileMachO &TLOFMacho =
static_cast<const TargetLoweringObjectFileMachO &>(getObjFileLowering());
@@ -640,7 +640,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) {
bool PPCDarwinAsmPrinter::doFinalization(Module &M) {
- bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64;
+ bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits(0) == 64;
// Darwin/PPC always uses mach-o.
const TargetLoweringObjectFileMachO &TLOFMacho =
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 459c3589d3..d123211473 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -498,7 +498,7 @@ PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
} else if (CRSpillFrameIdx) {
FrameIdx = CRSpillFrameIdx;
} else {
- MachineFrameInfo *MFI = ((MachineFunction &)MF).getFrameInfo();
+ MachineFrameInfo *MFI = (const_cast<MachineFunction &>(MF)).getFrameInfo();
FrameIdx = MFI->CreateFixedObject((uint64_t)4, (int64_t)-4, true);
CRSpillFrameIdx = FrameIdx;
}
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index 219cbf1afc..393178a469 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -56,13 +56,21 @@ LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD) {
}
unsigned LLVMPointerSize(LLVMTargetDataRef TD) {
- return unwrap(TD)->getPointerSize();
+ return unwrap(TD)->getPointerSize(0);
+}
+
+unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) {
+ return unwrap(TD)->getPointerSize(AS);
}
LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
}
+LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
+ return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS));
+}
+
unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty) {
return unwrap(TD)->getTypeSizeInBits(unwrap(Ty));
}
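
The Target.cpp hunks above also expose the new queries through the C API (LLVMPointerSizeForAS, LLVMIntPtrTypeForAS). A small hypothetical usage sketch follows; the layout string and the matching declarations in llvm-c/Target.h (not shown here because this diff is limited to lib) are assumptions.

// Hypothetical usage of the C-API bindings added above; the layout
// string and the presence of the header declarations are assumptions.
#include "llvm-c/Core.h"
#include "llvm-c/Target.h"
#include <stdio.h>

int main() {
  LLVMTargetDataRef TD = LLVMCreateTargetData("e-p:64:64:64-p1:32:32:32");
  printf("AS0 pointer: %u bytes\n", LLVMPointerSize(TD));           // 8
  printf("AS1 pointer: %u bytes\n", LLVMPointerSizeForAS(TD, 1));   // 4
  printf("AS1 intptr width: %u bits\n",
         LLVMGetIntTypeWidth(LLVMIntPtrTypeForAS(TD, 1)));          // 32
  LLVMDisposeTargetData(TD);
  return 0;
}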
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index c704ca1701..92afac62e6 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -693,7 +693,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
for (unsigned i = 0, e = Stubs.size(); i != e; ++i) {
OutStreamer.EmitLabel(Stubs[i].first);
OutStreamer.EmitSymbolValue(Stubs[i].second.getPointer(),
- TD->getPointerSize(), 0);
+ TD->getPointerSize(0), 0);
}
Stubs.clear();
}
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 8acef9dc7b..e7f817e3a9 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -317,7 +317,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
bool HasFP = hasFP(MF);
// Calculate amount of bytes used for return address storing.
- int stackGrowth = -TD->getPointerSize();
+ int stackGrowth = -TD->getPointerSize(0);
// FIXME: This is dirty hack. The code itself is pretty mess right now.
// It should be rewritten from scratch and generalized sometimes.
@@ -717,7 +717,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
std::vector<MachineMove> &Moves = MMI.getFrameMoves();
const DataLayout *TD = MF.getTarget().getDataLayout();
uint64_t NumBytes = 0;
- int stackGrowth = -TD->getPointerSize();
+ int stackGrowth = -TD->getPointerSize(0);
if (HasFP) {
// Calculate required stack adjustment.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 9ec03bf8f7..7916ccdeb3 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -2652,7 +2652,7 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
- uint64_t SlotSize = TD->getPointerSize();
+ uint64_t SlotSize = TD->getPointerSize(0);
if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
// Number smaller than 12 so just add the difference.
Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
@@ -3020,7 +3020,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
- uint64_t SlotSize = TD->getPointerSize();
+ uint64_t SlotSize = TD->getPointerSize(0);
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize,
false);
FuncInfo->setRAIndex(ReturnAddrIndex);
@@ -7643,7 +7643,7 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
false, false, false, 0);
- SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
+ SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize(0)),
getPointerTy());
IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
@@ -10293,7 +10293,7 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset =
- DAG.getConstant(TD->getPointerSize(),
+ DAG.getConstant(TD->getPointerSize(0),
Subtarget->is64Bit() ? MVT::i64 : MVT::i32);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, getPointerTy(),
@@ -10325,7 +10325,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
- return DAG.getIntPtrConstant(2*TD->getPointerSize());
+ return DAG.getIntPtrConstant(2*TD->getPointerSize(0));
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
@@ -10340,7 +10340,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame,
- DAG.getIntPtrConstant(TD->getPointerSize()));
+ DAG.getIntPtrConstant(TD->getPointerSize(0)));
StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset);
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
false, false, 0);
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index b59210a9df..f3f3f8f585 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1293,15 +1293,16 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
// If the source integer type is not the intptr_t type for this target, do a
// trunc or zext to the intptr_t type, then inttoptr of it. This allows the
// cast to be exposed to other transforms.
+ unsigned AS = CI.getAddressSpace();
if (TD) {
if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
- TD->getPointerSizeInBits()) {
+ TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreateTrunc(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new IntToPtrInst(P, CI.getType());
}
if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
- TD->getPointerSizeInBits()) {
+ TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreateZExt(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new IntToPtrInst(P, CI.getType());
@@ -1368,13 +1369,14 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
// If the destination integer type is not the intptr_t type for this target,
// do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
// to be exposed to other transforms.
+ unsigned AS = CI.getPointerAddressSpace();
if (TD) {
- if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
+ if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new TruncInst(P, CI.getType());
}
- if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits()) {
+ if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
TD->getIntPtrType(CI.getContext()));
return new ZExtInst(P, CI.getType());
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 4d5ffddc4c..e3e5ddae80 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -365,11 +365,12 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
// order the state machines in complexity of the generated code.
Value *Idx = GEP->getOperand(2);
+ unsigned AS = GEP->getPointerAddressSpace();
// If the index is larger than the pointer size of the target, truncate the
// index down like the GEP would do implicitly. We don't have to do this for
// an inbounds GEP because the index can't be out of range.
if (!GEP->isInBounds() &&
- Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits())
+ Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
// If the comparison is only true for one or two elements, emit direct
@@ -528,10 +529,11 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) {
}
}
+ unsigned AS = cast<GetElementPtrInst>(GEP)->getPointerAddressSpace();
// Okay, we know we have a single variable index, which must be a
// pointer/array/vector index. If there is no offset, life is simple, return
// the index.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
if (Offset == 0) {
// Cast to intptrty in case a truncation occurs. If an extension is needed,
// we don't need to bother extending: the extension won't affect where the
@@ -1552,7 +1554,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
// Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
// integer type is the same size as the pointer type.
if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
- TD->getPointerSizeInBits() ==
+ TD->getPointerSizeInBits(
+ cast<PtrToIntInst>(LHSCI)->getPointerAddressSpace()) ==
cast<IntegerType>(DestTy)->getBitWidth()) {
Value *RHSOp = 0;
if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 6071ee5a6a..b566994edf 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -735,7 +735,7 @@ bool AddressSanitizer::doInitialization(Module &M) {
BL.reset(new BlackList(ClBlackListFile));
C = &(M.getContext());
- LongSize = TD->getPointerSizeInBits();
+ LongSize = TD->getPointerSizeInBits(0);
IntptrTy = Type::getIntNTy(*C, LongSize);
IntptrPtrTy = PointerType::get(IntptrTy, 0);
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 517657cf52..97fff9edd6 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -174,10 +174,11 @@ bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const {
// this width can be stored. If so, check to see whether we will end up
// actually reducing the number of stores used.
unsigned Bytes = unsigned(End-Start);
- unsigned NumPointerStores = Bytes/TD.getPointerSize();
+ unsigned AS = cast<StoreInst>(TheStores[0])->getPointerAddressSpace();
+ unsigned NumPointerStores = Bytes/TD.getPointerSize(AS);
// Assume the remaining bytes if any are done a byte at a time.
- unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
+ unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize(AS);
// If we will reduce the # stores (according to this heuristic), do the
// transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 7d2ce098aa..3e84a91c1d 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -447,6 +447,7 @@ protected:
bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
GEPOffset = Offset;
+ unsigned int AS = GEPI.getPointerAddressSpace();
for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
GTI != GTE; ++GTI) {
ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
@@ -476,7 +477,7 @@ protected:
continue;
}
- APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
+ APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits(AS));
Index *= APInt(Index.getBitWidth(),
TD.getTypeAllocSize(GTI.getIndexedType()));
Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
@@ -1784,7 +1785,9 @@ static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
break;
if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
ElementTy = SeqTy->getElementType();
- Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0)));
+ Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(
+ ElementTy->isPointerTy() ?
+ cast<PointerType>(ElementTy)->getAddressSpace() : 0), 0)));
} else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
if (STy->element_begin() == STy->element_end())
break; // Nothing left to descend into.
@@ -2322,7 +2325,8 @@ private:
Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
assert(BeginOffset >= NewAllocaBeginOffset);
- APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset);
+ unsigned AS = cast<PointerType>(PointerTy)->getAddressSpace();
+ APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
}
@@ -2677,8 +2681,10 @@ private:
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(II);
+ assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
+ unsigned AS = cast<PointerType>(OldPtr->getType())->getAddressSpace();
// Compute the relative offset within the transfer.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
: MTO.SourceBegin));
diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp
index a954d82c05..9729687a83 100644
--- a/lib/Transforms/Utils/Local.cpp
+++ b/lib/Transforms/Utils/Local.cpp
@@ -806,7 +806,8 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD) {
assert(V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!");
- unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
+ unsigned AS = cast<PointerType>(V->getType())->getAddressSpace();
+ unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64;
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD);
unsigned TrailZ = KnownZero.countTrailingOnes();