author    Rafael Espindola <rafael.espindola@gmail.com>  2014-02-18 22:05:46 +0000
committer Rafael Espindola <rafael.espindola@gmail.com>  2014-02-18 22:05:46 +0000
commit    1836fe5651722c638e32ac94c5b4fe885afa9f15 (patch)
tree      01c74e3b3be668c0653cdd4a2a570396e7a98ce3 /lib
parent    25818a6e26710e555dd8a32e8a6475ec20b60d65 (diff)
Rename a DebugLoc variable to DbgLoc and a DataLayout to DL.
This is quite a bit less confusing now that TargetData was renamed DataLayout.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201606 91177308-0d34-0410-b5e6-96231b3b80d8
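For readers skimming the hunks below, here is a minimal sketch of what the rename amounts to. The types are stubs standing in for llvm::DataLayout and llvm::DebugLoc, not the real FastISel declarations:

```cpp
// Illustrative sketch only -- stub types, not the actual LLVM headers.
struct DataLayout {}; // successor of the old TargetData class
struct DebugLoc {};   // source-level debug location

// Before: "TD" was a holdover from the TargetData name, and "DL" -- which
// now reads as "DataLayout" -- actually held the DebugLoc.
struct FastISelBefore {
  const DataLayout &TD;
  DebugLoc DL;
};

// After: each member name matches the type it abbreviates.
struct FastISelAfter {
  const DataLayout &DL; // was TD
  DebugLoc DbgLoc;      // was DL
};
```

Every mechanical `TD.` → `DL.` and `DL` → `DbgLoc` substitution in the diff follows from these two member renames.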
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp  | 146
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp         | 296
-rw-r--r--  lib/Target/PowerPC/PPCFastISel.cpp     | 134
-rw-r--r--  lib/Target/X86/X86FastISel.cpp         | 163
4 files changed, 384 insertions, 355 deletions
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index a6f746140d..bcea6ad218 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -118,7 +118,7 @@ bool FastISel::hasTrivialKill(const Value *V) const {
// No-op casts are trivially coalesced by fast-isel.
if (const CastInst *Cast = dyn_cast<CastInst>(I))
- if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
+ if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
!hasTrivialKill(Cast->getOperand(0)))
return false;
@@ -192,7 +192,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
Reg =
- getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
+ getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getContext())));
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
if (CF->isNullValue()) {
Reg = TargetMaterializeFloatZero(CF);
@@ -229,7 +229,7 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
Reg = lookUpRegForValue(Op);
} else if (isa<UndefValue>(V)) {
Reg = createResultReg(TLI.getRegClassFor(VT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
}
@@ -335,9 +335,9 @@ void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
FastISel::SavePoint FastISel::enterLocalValueArea() {
MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
- DebugLoc OldDL = DL;
+ DebugLoc OldDL = DbgLoc;
recomputeInsertPt();
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
SavePoint SP = { OldInsertPt, OldDL };
return SP;
}
@@ -348,7 +348,7 @@ void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
// Restore the previous insert position.
FuncInfo.InsertPt = OldInsertPt.InsertPt;
- DL = OldInsertPt.DL;
+ DbgLoc = OldInsertPt.DL;
}
/// SelectBinaryOp - Select and emit code for a binary operator instruction,
@@ -484,7 +484,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
- TotalOffs += TD.getStructLayout(StTy)->getElementOffset(Field);
+ TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
if (TotalOffs >= MaxOffs) {
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (N == 0)
@@ -503,7 +503,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
if (CI->isZero()) continue;
// N = N + Offset
TotalOffs +=
- TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
+ DL.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
if (TotalOffs >= MaxOffs) {
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
if (N == 0)
@@ -524,7 +524,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
}
// N = N + Idx * ElementSize;
- uint64_t ElementSize = TD.getTypeAllocSize(Ty);
+ uint64_t ElementSize = DL.getTypeAllocSize(Ty);
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
unsigned IdxN = Pair.first;
bool IdxNIsKill = Pair.second;
@@ -572,7 +572,7 @@ bool FastISel::SelectCall(const User *I) {
if (IA->isAlignStack())
ExtraInfo |= InlineAsm::Extra_IsAlignStack;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::INLINEASM))
.addExternalSymbol(IA->getAsmString().c_str())
.addImm(ExtraInfo);
@@ -643,11 +643,11 @@ bool FastISel::SelectCall(const User *I) {
if (Op) {
if (Op->isReg()) {
Op->setIsDebug(true);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
DI->getVariable());
} else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::DBG_VALUE))
.addOperand(*Op)
.addImm(0)
@@ -667,26 +667,26 @@ bool FastISel::SelectCall(const User *I) {
if (!V) {
// Currently the optimizer can produce this; insert an undef to
// help debugging. Probably the optimizer should not do this.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(0U).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addCImm(CI).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(CI->getZExtValue()).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addFPImm(CF).addImm(DI->getOffset())
.addMetadata(DI->getVariable());
} else if (unsigned Reg = lookUpRegForValue(V)) {
// FIXME: This does not handle register-indirect values at offset 0.
bool IsIndirect = DI->getOffset() != 0;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect,
Reg, DI->getOffset(), DI->getVariable());
} else {
// We can't yet handle anything else here because it would require
@@ -798,8 +798,8 @@ bool FastISel::SelectBitCast(const User *I) {
// Don't attempt a cross-class copy. It will likely fail.
if (SrcClass == DstClass) {
ResultReg = createResultReg(DstClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(Op0);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
}
}
@@ -822,7 +822,7 @@ FastISel::SelectInstruction(const Instruction *I) {
if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
return false;
- DL = I->getDebugLoc();
+ DbgLoc = I->getDebugLoc();
MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;
@@ -840,7 +840,7 @@ FastISel::SelectInstruction(const Instruction *I) {
// First, try doing target-independent selection.
if (SelectOperator(I, I->getOpcode())) {
++NumFastIselSuccessIndependent;
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
return true;
}
// Remove dead code. However, ignore call instructions since we've flushed
@@ -855,7 +855,7 @@ FastISel::SelectInstruction(const Instruction *I) {
SavedInsertPt = FuncInfo.InsertPt;
if (TargetSelectInstruction(I)) {
++NumFastIselSuccessTarget;
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
return true;
}
// Check for dead code and remove as necessary.
@@ -863,7 +863,7 @@ FastISel::SelectInstruction(const Instruction *I) {
if (SavedInsertPt != FuncInfo.InsertPt)
removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
return false;
}
@@ -871,7 +871,7 @@ FastISel::SelectInstruction(const Instruction *I) {
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
-FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
+FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
@@ -881,7 +881,7 @@ FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DL) {
} else {
// The unconditional branch case.
TII.InsertBranch(*FuncInfo.MBB, MSucc, NULL,
- SmallVector<MachineOperand, 0>(), DL);
+ SmallVector<MachineOperand, 0>(), DbgLoc);
}
FuncInfo.MBB->addSuccessor(MSucc);
}
@@ -1096,7 +1096,7 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo,
MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()),
- TD(*TM.getDataLayout()),
+ DL(*TM.getDataLayout()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
TRI(*TM.getRegisterInfo()),
@@ -1209,7 +1209,7 @@ unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
return ResultReg;
}
@@ -1220,13 +1220,13 @@ unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
@@ -1240,15 +1240,15 @@ unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1262,17 +1262,17 @@ unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1285,15 +1285,15 @@ unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1306,17 +1306,17 @@ unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm1)
.addImm(Imm2);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm1)
.addImm(Imm2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1329,15 +1329,15 @@ unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1351,17 +1351,17 @@ unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1375,17 +1375,17 @@ unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm1).addImm(Imm2);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm1).addImm(Imm2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1397,11 +1397,11 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg).addImm(Imm);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1413,12 +1413,12 @@ unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
.addImm(Imm1).addImm(Imm2);
else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II).addImm(Imm1).addImm(Imm2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ResultReg).addReg(II.ImplicitDefs[0]);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1).addImm(Imm2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
}
return ResultReg;
}
@@ -1432,7 +1432,7 @@ unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
const TargetRegisterClass *RC = MRI.getRegClass(Op0);
MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(TargetOpcode::COPY), ResultReg)
+ DbgLoc, TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill), Idx);
return ResultReg;
}
@@ -1498,9 +1498,9 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
// Set the DebugLoc for the copy. Prefer the location of the operand
// if there is one; use the location of the PHI otherwise.
- DL = PN->getDebugLoc();
+ DbgLoc = PN->getDebugLoc();
if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
- DL = Inst->getDebugLoc();
+ DbgLoc = Inst->getDebugLoc();
unsigned Reg = getRegForValue(PHIOp);
if (Reg == 0) {
@@ -1508,7 +1508,7 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
return false;
}
FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
- DL = DebugLoc();
+ DbgLoc = DebugLoc();
}
}
@@ -1576,8 +1576,8 @@ bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
if (!isa<AddOperator>(Add))
return false;
// Type size needs to match.
- if (TD.getTypeSizeInBits(GEP->getType()) !=
- TD.getTypeSizeInBits(Add->getType()))
+ if (DL.getTypeSizeInBits(GEP->getType()) !=
+ DL.getTypeSizeInBits(Add->getType()))
return false;
// Must be in the same basic block.
if (isa<Instruction>(Add) &&
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 68fcee7d24..c5a04268f5 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -304,7 +304,7 @@ unsigned ARMFastISel::constrainOperandRegClass(const MCInstrDesc &II,
// If it's not legal to COPY between the register classes, something
// has gone very wrong before we got here.
unsigned NewOp = createResultReg(RegClass);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), NewOp).addReg(Op));
return NewOp;
}
@@ -317,7 +317,8 @@ unsigned ARMFastISel::FastEmitInst_(unsigned MachineInstOpcode,
unsigned ResultReg = createResultReg(RC);
const MCInstrDesc &II = TII.get(MachineInstOpcode);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg));
return ResultReg;
}
@@ -331,12 +332,12 @@ unsigned ARMFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
// for this instruction.
Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+ ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -356,14 +357,15 @@ unsigned ARMFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
Op1 = constrainOperandRegClass(II, Op1, 2);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -385,16 +387,17 @@ unsigned ARMFastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
Op2 = constrainOperandRegClass(II, Op1, 3);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill)
- .addReg(Op2, Op2IsKill * RegState::Kill));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addReg(Op2, Op2IsKill * RegState::Kill));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addReg(Op2, Op2IsKill * RegState::Kill));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -412,14 +415,15 @@ unsigned ARMFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
// for this instruction.
Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addImm(Imm));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addImm(Imm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addImm(Imm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -437,14 +441,15 @@ unsigned ARMFastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
// for this instruction.
Op0 = constrainOperandRegClass(II, Op0, 1);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addFPImm(FPImm));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addFPImm(FPImm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addFPImm(FPImm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -464,16 +469,17 @@ unsigned ARMFastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
Op0 = constrainOperandRegClass(II, Op0, 1);
Op1 = constrainOperandRegClass(II, Op1, 2);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addReg(Op0, Op0IsKill * RegState::Kill)
- .addReg(Op1, Op1IsKill * RegState::Kill)
- .addImm(Imm));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addReg(Op0, Op0IsKill * RegState::Kill)
+ .addReg(Op1, Op1IsKill * RegState::Kill)
+ .addImm(Imm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(Op0, Op0IsKill * RegState::Kill)
.addReg(Op1, Op1IsKill * RegState::Kill)
.addImm(Imm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -487,12 +493,12 @@ unsigned ARMFastISel::FastEmitInst_i(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addImm(Imm));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II,
+ ResultReg).addImm(Imm));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(Imm));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(II.ImplicitDefs[0]));
}
@@ -506,12 +512,14 @@ unsigned ARMFastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
const MCInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1) {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg)
- .addImm(Imm1).addImm(Imm2));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
+ .addImm(Imm1)
+ .addImm(Imm2));
} else {
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addImm(Imm1).addImm(Imm2));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY),
ResultReg)
.addReg(II.ImplicitDefs[0]));
@@ -527,7 +535,7 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
"Cannot yet extract from physregs");
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(TargetOpcode::COPY), ResultReg)
+ DbgLoc, TII.get(TargetOpcode::COPY), ResultReg)
.addReg(Op0, getKillRegState(Op0IsKill), Idx));
return ResultReg;
}
@@ -538,7 +546,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::f64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(SrcReg));
return MoveReg;
@@ -548,7 +556,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
if (VT == MVT::i64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
return MoveReg;
@@ -574,9 +582,8 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
Opc = ARM::FCONSTS;
}
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- DestReg)
- .addImm(Imm));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), DestReg).addImm(Imm));
return DestReg;
}
@@ -584,20 +591,20 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
if (!Subtarget->hasVFP2()) return false;
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
if (Align == 0) {
// TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(CFP->getType());
+ Align = DL.getTypeAllocSize(CFP->getType());
}
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;
// The extra reg is for addrmode5.
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- DestReg)
- .addConstantPoolIndex(Idx)
- .addReg(0));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
+ .addConstantPoolIndex(Idx)
+ .addReg(0));
return DestReg;
}
@@ -614,7 +621,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
&ARM::GPRRegClass;
unsigned ImmReg = createResultReg(RC);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(CI->getZExtValue()));
return ImmReg;
@@ -628,7 +635,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
if (UseImm) {
unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
unsigned ImmReg = createResultReg(TLI.getRegClassFor(MVT::i32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ImmReg)
.addImm(Imm));
return ImmReg;
@@ -642,21 +649,21 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(C->getType());
+ unsigned Align = DL.getPrefTypeAlignment(C->getType());
if (Align == 0) {
// TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(C->getType());
+ Align = DL.getTypeAllocSize(C->getType());
}
unsigned Idx = MCP.getConstantPoolIndex(C, Align);
if (isThumb2)
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRpci), DestReg)
.addConstantPoolIndex(Idx));
else {
// The extra immediate is for addrmode2.
DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::LDRcp), DestReg)
.addConstantPoolIndex(Idx)
.addImm(0));
@@ -698,14 +705,14 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
break;
}
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- DestReg).addGlobalAddress(GV, 0, TF));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
} else {
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(GV->getType());
+ unsigned Align = DL.getPrefTypeAlignment(GV->getType());
if (Align == 0) {
// TODO: Figure out if this is correct.
- Align = TD.getTypeAllocSize(GV->getType());
+ Align = DL.getTypeAllocSize(GV->getType());
}
if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_)
@@ -724,18 +731,18 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
MachineInstrBuilder MIB;
if (isThumb2) {
unsigned Opc = (RelocM!=Reloc::PIC_) ? ARM::t2LDRpci : ARM::t2LDRpci_pic;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
- .addConstantPoolIndex(Idx);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ DestReg).addConstantPoolIndex(Idx);
if (RelocM == Reloc::PIC_)
MIB.addImm(Id);
AddOptionalDefs(MIB);
} else {
// The extra immediate is for addrmode2.
DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp),
- DestReg)
- .addConstantPoolIndex(Idx)
- .addImm(0);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(ARM::LDRcp), DestReg)
+ .addConstantPoolIndex(Idx)
+ .addImm(0);
AddOptionalDefs(MIB);
if (RelocM == Reloc::PIC_) {
@@ -743,7 +750,7 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), NewDestReg)
+ DbgLoc, TII.get(Opc), NewDestReg)
.addReg(DestReg)
.addImm(Id);
AddOptionalDefs(MIB);
@@ -756,15 +763,15 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
MachineInstrBuilder MIB;
unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT));
if (isThumb2)
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRi12), NewDestReg)
.addReg(DestReg)
.addImm(0);
else
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRi12),
- NewDestReg)
- .addReg(DestReg)
- .addImm(0);
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(ARM::LDRi12), NewDestReg)
+ .addReg(DestReg)
+ .addImm(0);
DestReg = NewDestReg;
AddOptionalDefs(MIB);
}
@@ -809,7 +816,7 @@ unsigned ARMFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
unsigned ResultReg = createResultReg(RC);
ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addFrameIndex(SI->second)
.addImm(0));
@@ -893,11 +900,11 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
i != e; ++i, ++GTI) {
const Value *Op = *i;
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
} else {
- uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
@@ -983,7 +990,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
(const TargetRegisterClass*)&ARM::GPRRegClass;
unsigned ResultReg = createResultReg(RC);
unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addFrameIndex(Addr.Base.FI)
.addImm(0));
@@ -1134,7 +1141,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
if (allocReg)
ResultReg = createResultReg(RC);
assert (ResultReg > 255 && "Expected an allocated virtual register.");
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg);
AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
@@ -1142,7 +1149,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
// load. Now we must move from the GRP to the FP register.
if (needVMOV) {
unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVSR), MoveReg)
.addReg(ResultReg));
ResultReg = MoveReg;
@@ -1184,7 +1191,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
(const TargetRegisterClass*)&ARM::GPRRegClass);
unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), Res)
.addReg(SrcReg).addImm(1));
SrcReg = Res;
@@ -1231,7 +1238,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// Unaligned stores need special handling. Floats require word-alignment.
if (Alignment && Alignment < 4) {
unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRS), MoveReg)
.addReg(SrcReg));
SrcReg = MoveReg;
@@ -1256,7 +1263,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
// Create the base instruction, then add the operands.
SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(StrOpc))
.addReg(SrcReg);
AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
@@ -1367,9 +1374,9 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
return false;
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
.addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
@@ -1380,7 +1387,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
unsigned OpReg = getRegForValue(TI->getOperand(0));
OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TstOpc))
.addReg(OpReg).addImm(1));
@@ -1391,10 +1398,10 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
@@ -1402,7 +1409,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
dyn_cast<ConstantInt>(BI->getCondition())) {
uint64_t Imm = CI->getZExtValue();
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
- FastEmitBranch(Target, DL);
+ FastEmitBranch(Target, DbgLoc);
return true;
}
@@ -1418,8 +1425,10 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
// the one-bit value left in the virtual register.
unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TstOpc))
- .addReg(CmpReg).addImm(1));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TstOpc))
+ .addReg(CmpReg)
+ .addImm(1));
unsigned CCMode = ARMCC::NE;
if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
@@ -1428,9 +1437,9 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
}
unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BrOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BrOpc))
.addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
}
@@ -1440,8 +1449,8 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
if (AddrReg == 0) return false;
unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
- .addReg(AddrReg));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc)).addReg(AddrReg));
const IndirectBrInst *IB = cast<IndirectBrInst>(I);
for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
@@ -1546,11 +1555,11 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
if (!UseImm) {
SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(SrcReg1).addReg(SrcReg2));
} else {
MachineInstrBuilder MIB;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
.addReg(SrcReg1);
// Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
@@ -1562,7 +1571,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
// For floating point we need to move the result to a comparison register
// that we can then use for branches.
if (Ty->isFloatTy() || Ty->isDoubleTy())
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::FMSTAT)));
return true;
}
@@ -1590,7 +1599,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
unsigned ZeroReg = TargetMaterializeConstant(Zero);
// ARMEmitCmp emits a FMSTAT when necessary, so it's always safe to use CPSR.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc), DestReg)
.addReg(ZeroReg).addImm(1)
.addImm(ARMPred).addReg(ARM::CPSR);
@@ -1610,7 +1619,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) {
if (Op == 0) return false;
unsigned Result = createResultReg(&ARM::DPRRegClass);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTDS), Result)
.addReg(Op));
UpdateValueMap(I, Result);
@@ -1629,7 +1638,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
if (Op == 0) return false;
unsigned Result = createResultReg(&ARM::SPRRegClass);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VCVTSD), Result)
.addReg(Op));
UpdateValueMap(I, Result);
@@ -1674,9 +1683,8 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
else return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- ResultReg)
- .addReg(FP));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg).addReg(FP));
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1701,9 +1709,8 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
// f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
unsigned ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc),
- ResultReg)
- .addReg(Op));
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(Opc), ResultReg).addReg(Op));
// This result needs to be in an integer register, but the conversion only
// takes place in fp-regs.
@@ -1750,8 +1757,10 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
unsigned CmpOpc = isThumb2 ? ARM::t2CMPri : ARM::CMPri;
CondReg = constrainOperandRegClass(TII.get(CmpOpc), CondReg, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc))
- .addReg(CondReg).addImm(0));
+ AddOptionalDefs(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc))
+ .addReg(CondReg)
+ .addImm(0));
unsigned MovCCOpc;
const TargetRegisterClass *RC;
@@ -1769,12 +1778,20 @@ bool ARMFastISel::SelectSelect(const Instruction *I) {
if (!UseImm) {
Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op2Reg).addReg(Op1Reg).addImm(ARMCC::NE).addReg(ARM::CPSR);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
+ ResultReg)
+ .addReg(Op2Reg)
+ .addReg(Op1Reg)
+ .addImm(ARMCC::NE)
+ .addReg(ARM::CPSR);
} else {
Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovCCOpc), ResultReg)
- .addReg(Op1Reg).addImm(Imm).addImm(ARMCC::EQ).addReg(ARM::CPSR);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovCCOpc),
+ ResultReg)
+ .addReg(Op1Reg)
+ .addImm(Imm)
+ .addImm(ARMCC::EQ)
+ .addReg(ARM::CPSR);
}
UpdateValueMap(I, ResultReg);
return true;
@@ -1863,7 +1880,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(SrcReg1).addReg(SrcReg2));
UpdateValueMap(I, ResultReg);
@@ -1905,7 +1922,7 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
if (Op2 == 0) return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(Op1).addReg(Op2));
UpdateValueMap(I, ResultReg);
@@ -2017,7 +2034,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackDown))
.addImm(NumBytes));
@@ -2062,9 +2079,8 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
// Now copy/store arg to correct locations.
if (VA.isRegLoc() && !VA.needsCustom()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- VA.getLocReg())
- .addReg(Arg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else if (VA.needsCustom()) {
// TODO: We need custom lowering for vector (v2f64) args.
@@ -2076,7 +2092,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
assert(VA.isRegLoc() && NextVA.isRegLoc() &&
"We only handle register args!");
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVRRD), VA.getLocReg())
.addReg(NextVA.getLocReg(), RegState::Define)
.addReg(Arg));
@@ -2103,7 +2119,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
unsigned &NumBytes, bool isVarArg) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(AdjStackUp))
.addImm(NumBytes).addImm(0));
@@ -2120,7 +2136,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
MVT DestVT = RVLocs[0].getValVT();
const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
unsigned ResultReg = createResultReg(DstRC);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::VMOVDRR), ResultReg)
.addReg(RVLocs[0].getLocReg())
.addReg(RVLocs[1].getLocReg()));
@@ -2141,7 +2157,8 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
unsigned ResultReg = createResultReg(DstRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(RVLocs[0].getLocReg());
UsedRegs.push_back(RVLocs[0].getLocReg());
@@ -2218,15 +2235,15 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
// Avoid a cross-class copy. This is very unlikely.
if (!SrcRC->contains(DstReg))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- DstReg).addReg(SrcReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
// Add register to return instruction.
RetRegs.push_back(VA.getLocReg());
}
unsigned RetOpc = isThumb2 ? ARM::tBX_RET : ARM::BX_RET;
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(RetOpc));
AddOptionalDefs(MIB);
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
@@ -2299,7 +2316,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
if (!isTypeLegal(ArgTy, ArgVT)) return false;
ISD::ArgFlagsTy Flags;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(Op);
@@ -2324,7 +2341,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
// Issue the call.
unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(CallOpc));
+ DbgLoc, TII.get(CallOpc));
// BL / BLX don't take a predicate, but tBL / tBLX do.
if (isThumb2)
AddDefaultPred(MIB);
@@ -2432,7 +2449,7 @@ bool ARMFastISel::SelectCall(const Instruction *I,
if (Arg == 0)
return false;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(*i);
@@ -2465,7 +2482,7 @@ bool ARMFastISel::SelectCall(const Instruction *I,
// Issue the call.
unsigned CallOpc = ARMSelectCallOp(UseReg);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(CallOpc));
+ DbgLoc, TII.get(CallOpc));
unsigned char OpFlags = 0;
@@ -2582,7 +2599,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
while (Depth--) {
DestReg = createResultReg(RC);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(LdrOpc), DestReg)
.addReg(SrcReg).addImm(0));
SrcReg = DestReg;
@@ -2639,7 +2656,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
return SelectCall(&I, "memset");
}
case Intrinsic::trap: {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(
Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP));
return true;
}
@@ -2792,7 +2809,7 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
bool isKill = 1 == Instr;
MachineInstrBuilder MIB = BuildMI(
- *FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opcode), ResultReg);
+ *FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opcode), ResultReg);
if (setsCPSR)
MIB.addReg(ARM::CPSR, RegState::Define);
SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
@@ -2870,7 +2887,7 @@ bool ARMFastISel::SelectShift(const Instruction *I,
unsigned ResultReg = createResultReg(&ARM::GPRnopcRegClass);
if(ResultReg == 0) return false;
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg)
.addReg(Reg1);
@@ -3031,7 +3048,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
// Load value.
if (isThumb2) {
DestReg1 = constrainOperandRegClass(TII.get(ARM::t2LDRpci), DestReg1, 0);
- AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(ARM::t2LDRpci), DestReg1)
.addConstantPoolIndex(Idx));
Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs;
@@ -3039,7 +3056,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
// The extra immediate is for addrmode2.
DestReg1 = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg1, 0);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(ARM::LDRcp), DestReg1)
+ DbgLoc, TII.get(ARM::LDRcp), DestReg1)
.addConstantPoolIndex(Idx).addImm(0));
Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs;
}
@@ -3055,7 +3072,7 @@ unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
DestReg1 = constrainOperandRegClass(TII.get(Opc), DestReg1, 1);
GlobalBaseReg = constrainOperandRegClass(TII.get(Opc), GlobalBaseReg, 2);
MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), DestReg2)
+ DbgLoc, TII.get(Opc), DestReg2)
.addReg(DestReg1)
.addReg(GlobalBaseReg);
if (!UseGOTOFF)
@@ -3129,7 +3146,8 @@ bool ARMFastISel::FastLowerArguments() {
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(DstReg, getKillRegState(true));
UpdateValueMap(I, ResultReg);
}
diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
index 806bc0a953..07e3b64f7a 100644
--- a/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -325,11 +325,11 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
II != IE; ++II, ++GTI) {
const Value *Op = *II;
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
} else {
- uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
@@ -407,7 +407,7 @@ void PPCFastISel::PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
// register and continue. This should almost never happen.
if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
Addr.Base.Reg = ResultReg;
Addr.BaseType = Address::RegBase;
@@ -499,13 +499,13 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI),
MFI.getObjectAlignment(Addr.Base.FI));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
// Base reg with offset in range.
} else if (UseOffset) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addImm(Addr.Offset).addReg(Addr.Base.Reg);
// Indexed form.
@@ -529,7 +529,7 @@ bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
case PPC::LFS: Opc = PPC::LFSX; break;
case PPC::LFD: Opc = PPC::LFDX; break;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Addr.Base.Reg).addReg(IndexReg);
}
@@ -615,12 +615,15 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI),
MFI.getObjectAlignment(Addr.Base.FI));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)).addReg(SrcReg)
- .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
+ .addReg(SrcReg)
+ .addImm(Addr.Offset)
+ .addFrameIndex(Addr.Base.FI)
+ .addMemOperand(MMO);
// Base reg with offset in range.
} else if (UseOffset)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);
// Indexed form.
@@ -640,7 +643,7 @@ bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
case PPC::STFS: Opc = PPC::STFSX; break;
case PPC::STFD: Opc = PPC::STFDX; break;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))
.addReg(SrcReg).addReg(Addr.Base.Reg).addReg(IndexReg);
}
@@ -704,9 +707,9 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
CondReg))
return false;
- BuildMI(*BrBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCC))
+ BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCC))
.addImm(PPCPred).addReg(CondReg).addMBB(TBB);
- FastEmitBranch(FBB, DL);
+ FastEmitBranch(FBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TBB);
return true;
@@ -714,7 +717,7 @@ bool PPCFastISel::SelectBranch(const Instruction *I) {
dyn_cast<ConstantInt>(BI->getCondition())) {
uint64_t Imm = CI->getZExtValue();
MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
- FastEmitBranch(Target, DL);
+ FastEmitBranch(Target, DbgLoc);
return true;
}
@@ -811,10 +814,10 @@ bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
}
if (!UseImm)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg)
.addReg(SrcReg1).addReg(SrcReg2);
else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CmpOpc), DestReg)
.addReg(SrcReg1).addImm(Imm);
return true;
@@ -853,7 +856,7 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
// Round the result to single precision.
unsigned DestReg = createResultReg(&PPC::F4RCRegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP), DestReg)
.addReg(SrcReg);
UpdateValueMap(I, DestReg);
@@ -970,7 +973,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU;
// Generate the convert.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(FPReg);
UpdateValueMap(I, DestReg);
@@ -1042,7 +1045,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg);
if (InRC == &PPC::F4RCRegClass) {
unsigned TmpReg = createResultReg(&PPC::F8RCRegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY_TO_REGCLASS), TmpReg)
.addReg(SrcReg).addImm(PPC::F8RCRegClassID);
SrcReg = TmpReg;
@@ -1062,7 +1065,7 @@ bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
Opc = IsSigned ? PPC::FCTIDZ : PPC::FCTIDUZ;
// Generate the convert.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(SrcReg);
// Now move the integer value from a float register to an integer register.
@@ -1155,8 +1158,10 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
}
if (UseImm) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
- .addReg(SrcReg1).addImm(Imm);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg)
+ .addReg(SrcReg1)
+ .addImm(Imm);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1171,7 +1176,7 @@ bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
if (ISDOpcode == ISD::SUB)
std::swap(SrcReg1, SrcReg2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(SrcReg1).addReg(SrcReg2);
UpdateValueMap(I, ResultReg);
return true;
@@ -1211,7 +1216,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
NumBytes = CCInfo.getNextStackOffset();
// Issue CALLSEQ_START.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TII.getCallFrameSetupOpcode()))
.addImm(NumBytes);
@@ -1270,9 +1275,9 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
++NextGPR;
} else
ArgReg = NextGPR++;
-
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- ArgReg).addReg(Arg);
+
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), ArgReg).addReg(Arg);
RegArgs.push_back(ArgReg);
}
@@ -1285,7 +1290,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const Instruction *I, CallingConv::ID CC,
unsigned &NumBytes, bool IsVarArg) {
  // Issue CALLSEQ_END.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TII.getCallFrameDestroyOpcode()))
.addImm(NumBytes).addImm(0);
@@ -1315,14 +1320,14 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
const TargetRegisterClass *CpyRC = TLI.getRegClassFor(CopyVT);
ResultReg = createResultReg(CpyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(SourcePhysReg);
// If necessary, round the floating result to single precision.
} else if (CopyVT == MVT::f64) {
ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::FRSP),
ResultReg).addReg(SourcePhysReg);
// If only the low half of a general register is needed, generate
@@ -1333,7 +1338,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
ResultReg = createResultReg(&PPC::GPRCRegClass);
// Convert physical register from G8RC to GPRC.
SourcePhysReg -= PPC::X0 - PPC::R0;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), ResultReg)
.addReg(SourcePhysReg);
}
@@ -1440,7 +1445,7 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
if (Arg == 0)
return false;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(*II);
@@ -1465,7 +1470,7 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
// Build direct call with NOP for TOC restore.
// FIXME: We can and should optimize away the NOP for local calls.
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(PPC::BL8_NOP));
// Add callee.
MIB.addGlobalAddress(GV);
@@ -1522,8 +1527,8 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
const Constant *C = cast<Constant>(RV);
unsigned SrcReg = PPCMaterializeInt(C, MVT::i64);
unsigned RetReg = ValLocs[0].getLocReg();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- RetReg).addReg(SrcReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg);
RetRegs.push_back(RetReg);
} else {
@@ -1578,14 +1583,14 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
}
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::COPY), RetRegs[i])
.addReg(SrcReg);
}
}
}
- MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(PPC::BLR));
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
@@ -1615,7 +1620,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??");
Opc = PPC::EXTSW_32_64;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addReg(SrcReg);
// Unsigned 32-bit extensions use RLWINM.
@@ -1627,7 +1632,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??");
MB = 16;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLWINM),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLWINM),
DestReg)
.addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31);
@@ -1640,7 +1645,7 @@ bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
MB = 48;
else
MB = 32;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(PPC::RLDICL_32_64), DestReg)
.addReg(SrcReg).addImm(/*SH=*/0).addImm(MB);
}
@@ -1654,9 +1659,9 @@ bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
if (AddrReg == 0)
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::MTCTR8))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::MTCTR8))
.addReg(AddrReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCTR8));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::BCTR8));
const IndirectBrInst *IB = cast<IndirectBrInst>(I);
for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
@@ -1684,7 +1689,8 @@ bool PPCFastISel::SelectTrunc(const Instruction *I) {
// The only interesting case is when we need to switch register classes.
if (SrcVT == MVT::i64) {
unsigned ResultReg = createResultReg(&PPC::GPRCRegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(SrcReg, 0, PPC::sub_32);
SrcReg = ResultReg;
}
@@ -1791,7 +1797,7 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
return 0;
// All FP constants are loaded from the constant pool.
- unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
+ unsigned Align = DL.getPrefTypeAlignment(CFP->getType());
assert(Align > 0 && "Unexpectedly missing alignment information!");
unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
@@ -1807,25 +1813,25 @@ unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
// For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocCPT),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocCPT),
TmpReg)
.addConstantPoolIndex(Idx).addReg(PPC::X2);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addImm(0).addReg(TmpReg).addMemOperand(MMO);
} else {
// Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)).
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx);
// But for large code model, we must generate a LDtocL followed
// by the LF[SD].
if (CModel == CodeModel::Large) {
unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addImm(0).addReg(TmpReg2);
} else
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), DestReg)
.addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO)
.addReg(TmpReg)
.addMemOperand(MMO);
@@ -1863,8 +1869,10 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// For small code model, generate a simple TOC load.
if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtoc), DestReg)
- .addGlobalAddress(GV).addReg(PPC::X2);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtoc),
+ DestReg)
+ .addGlobalAddress(GV)
+ .addReg(PPC::X2);
else {
// If the address is an externally defined symbol, a symbol with
// common or externally available linkage, a function address, or a
@@ -1875,7 +1883,7 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// ADDItocL(ADDIStocHA(%X2, GV), GV)
// Either way, start with the ADDIStocHA:
unsigned HighPartReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDIStocHA),
HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);
// !GVar implies a function address. An external variable is one
@@ -1884,11 +1892,11 @@ unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
// on the "if" path here.
if (CModel == CodeModel::Large || !GVar || !GVar->hasInitializer() ||
GVar->hasCommonLinkage() || GVar->hasAvailableExternallyLinkage())
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::LDtocL),
DestReg).addGlobalAddress(GV).addReg(HighPartReg);
else
// Otherwise generate the ADDItocL.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDItocL),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDItocL),
DestReg).addReg(HighPartReg).addGlobalAddress(GV);
}
@@ -1906,21 +1914,21 @@ unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
if (isInt<16>(Imm))
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg)
.addImm(Imm);
else if (Lo) {
// Both Lo and Hi have nonzero bits.
unsigned TmpReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
.addImm(Hi);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg)
.addReg(TmpReg).addImm(Lo);
} else
// Just Hi bits.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg)
.addImm(Hi);
@@ -1960,7 +1968,7 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
unsigned TmpReg2;
if (Imm) {
TmpReg2 = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLDICR),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::RLDICR),
TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift);
} else
TmpReg2 = TmpReg1;
@@ -1968,14 +1976,14 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
unsigned TmpReg3, Hi, Lo;
if ((Hi = (Remainder >> 16) & 0xFFFF)) {
TmpReg3 = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORIS8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORIS8),
TmpReg3).addReg(TmpReg2).addImm(Hi);
} else
TmpReg3 = TmpReg2;
if ((Lo = Remainder & 0xFFFF)) {
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORI8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ORI8),
ResultReg).addReg(TmpReg3).addImm(Lo);
return ResultReg;
}
@@ -2000,7 +2008,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
if (isInt<16>(CI->getSExtValue())) {
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
unsigned ImmReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ImmReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
.addImm(CI->getSExtValue());
return ImmReg;
}
@@ -2049,7 +2057,7 @@ unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
if (SI != FuncInfo.StaticAllocaMap.end()) {
unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(PPC::ADDI8),
ResultReg).addFrameIndex(SI->second).addImm(0);
return ResultReg;
}
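
The DataLayout half of the rename is just as mechanical: every former TD.foo()
query becomes DL.foo(), as in the GEP walks in PPCComputeAddress above and in
X86's handleConstantAddresses below. A short sketch of that query pattern under
the new name, assuming GTI, Op, and TotalOffs are set up as in those loops
(only the DataLayout calls are the point here):

    // Accumulate a constant GEP offset via the renamed DL (DataLayout).
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      // Struct field: ask the layout for the field's byte offset.
      const StructLayout *SL = DL.getStructLayout(STy);
      TotalOffs += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
      // Array index: scale by the allocation size of the indexed type.
      TotalOffs += DL.getTypeAllocSize(GTI.getIndexedType()) *
                   (uint64_t)CI->getSExtValue();
    }
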
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 9b842b74e4..4209fb9af1 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -229,7 +229,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
ResultReg = createResultReg(RC);
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc), ResultReg), AM);
+ DbgLoc, TII.get(Opc), ResultReg), AM);
return true;
}
@@ -248,7 +248,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
case MVT::i1: {
// Mask out all but lowest bit.
unsigned AndResult = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::AND8ri), AndResult).addReg(ValReg).addImm(1);
ValReg = AndResult;
}
@@ -289,7 +289,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg,
}
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM).addReg(ValReg);
+ DbgLoc, TII.get(Opc)), AM).addReg(ValReg);
return true;
}
@@ -297,7 +297,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
const X86AddressMode &AM, bool Aligned) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
- Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
+ Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
// If this is a store of a simple constant, fold the constant into the store.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
@@ -318,7 +318,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
if (Opc) {
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
- DL, TII.get(Opc)), AM)
+ DbgLoc, TII.get(Opc)), AM)
.addImm(Signed ? (uint64_t) CI->getSExtValue() :
CI->getZExtValue());
return true;
@@ -428,7 +428,7 @@ bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
LoadReg = createResultReg(RC);
MachineInstrBuilder LoadMI =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), LoadReg);
addFullAddress(LoadMI, StubAM);
// Ok, back to normal mode.
@@ -547,14 +547,14 @@ redo_gep:
i != e; ++i, ++GTI) {
const Value *Op = *i;
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- const StructLayout *SL = TD.getStructLayout(STy);
+ const StructLayout *SL = DL.getStructLayout(STy);
Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
continue;
}
    // An array/variable index is always of the form i*S where S is the
// constant scale size. See if we can push the scale into immediates.
- uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
+ uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
for (;;) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
// Constant-offset addressing.
@@ -696,7 +696,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
(AM.Base.Reg != 0 || AM.IndexReg != 0))
return false;
  // Can't handle DLLImport.
if (GV->hasDLLImportStorageClass())
return false;
@@ -750,7 +750,7 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
return false;
unsigned SABIAlignment =
- TD.getABITypeAlignment(S->getValueOperand()->getType());
+ DL.getABITypeAlignment(S->getValueOperand()->getType());
bool Aligned = S->getAlignment() == 0 || S->getAlignment() >= SABIAlignment;
MVT VT;
@@ -864,7 +864,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
// Avoid a cross-class copy. This is very unlikely.
if (!SrcRC->contains(DstReg))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
DstReg).addReg(SrcReg);
// Add register to return instruction.
@@ -881,14 +881,14 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
assert(Reg &&
"SRetReturnReg should have been set in LowerFormalArguments()!");
unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
RetReg).addReg(Reg);
RetRegs.push_back(RetReg);
}
// Now emit the RET.
MachineInstrBuilder MIB =
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Subtarget->is64Bit() ? X86::RETQ : X86::RETL));
for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
MIB.addReg(RetRegs[i], RegState::Implicit);
return true;
@@ -961,14 +961,14 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Op1))
- Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
+ Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr.
if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareImmOpc))
.addReg(Op0Reg)
.addImm(Op1C->getSExtValue());
return true;
@@ -980,7 +980,7 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CompareOpc))
.addReg(Op0Reg)
.addReg(Op1Reg);
@@ -1004,10 +1004,10 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETEr), EReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::SETNPr), NPReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -1018,9 +1018,9 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
unsigned NEReg = createResultReg(&X86::GR8RegClass);
unsigned PReg = createResultReg(&X86::GR8RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETNEr), NEReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETPr), PReg);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::OR8rr),ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETNEr), NEReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETPr), PReg);
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::OR8rr), ResultReg)
.addReg(PReg).addReg(NEReg);
UpdateValueMap(I, ResultReg);
return true;
@@ -1060,7 +1060,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg);
return true;
}
@@ -1097,11 +1097,11 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
}
unsigned Result32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(MovInst), Result32)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(MovInst), Result32)
.addReg(ResultReg);
ResultReg = createResultReg(&X86::GR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::SUBREG_TO_REG),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::SUBREG_TO_REG),
ResultReg)
.addImm(0).addReg(Result32).addImm(X86::sub_32bit);
} else if (DstVT != MVT::i8) {
@@ -1181,17 +1181,17 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
if (!X86FastEmitCompare(Op0, Op1, VT))
return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(BranchOpc))
.addMBB(TrueMBB);
if (Predicate == CmpInst::FCMP_UNE) {
// X86 requires a second branch to handle UNE (and OEQ,
// which is mapped to UNE above).
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JP_4))
.addMBB(TrueMBB);
}
- FastEmitBranch(FalseMBB, DL);
+ FastEmitBranch(FalseMBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
@@ -1212,7 +1212,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
if (TestOpc) {
unsigned OpReg = getRegForValue(TI->getOperand(0));
if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TestOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TestOpc))
.addReg(OpReg).addImm(1);
unsigned JmpOpc = X86::JNE_4;
@@ -1221,9 +1221,9 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
JmpOpc = X86::JE_4;
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(JmpOpc))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(JmpOpc))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB, DL);
+ FastEmitBranch(FalseMBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
@@ -1236,11 +1236,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
unsigned OpReg = getRegForValue(BI->getCondition());
if (OpReg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(OpReg).addImm(1);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_4))
.addMBB(TrueMBB);
- FastEmitBranch(FalseMBB, DL);
+ FastEmitBranch(FalseMBB, DbgLoc);
FuncInfo.MBB->addSuccessor(TrueMBB);
return true;
}
@@ -1297,18 +1297,18 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
CReg).addReg(Op1Reg);
// The shift instruction uses X86::CL. If we defined a super-register
// of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
if (CReg != X86::CL)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::KILL), X86::CL)
.addReg(CReg, RegState::Kill);
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpReg), ResultReg)
.addReg(Op0Reg);
UpdateValueMap(I, ResultReg);
return true;
@@ -1409,38 +1409,38 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
return false;
// Move op0 into low-order input register.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
// Zero-extend or sign-extend into high-order input register.
if (OpEntry.OpSignExtend) {
if (OpEntry.IsOpSigned)
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpSignExtend));
else {
unsigned Zero32 = createResultReg(&X86::GR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::MOV32r0), Zero32);
// Copy the zero into the appropriate sub/super/identical physical
// register. Unfortunately the operations needed are not uniform enough to
// fit neatly into the table above.
if (VT.SimpleTy == MVT::i16) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), TypeEntry.HighInReg)
.addReg(Zero32, 0, X86::sub_16bit);
} else if (VT.SimpleTy == MVT::i32) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), TypeEntry.HighInReg)
.addReg(Zero32);
} else if (VT.SimpleTy == MVT::i64) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
.addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
}
}
}
// Generate the DIV/IDIV instruction.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
// For i8 remainder, we can't reference AH directly, as we'll end
// up with bogus copies like %R9B = COPY %AH. Reference AX
@@ -1456,11 +1456,11 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
unsigned SourceSuperReg = createResultReg(&X86::GR16RegClass);
unsigned ResultSuperReg = createResultReg(&X86::GR16RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Copy), SourceSuperReg).addReg(X86::AX);
// Shift AX right by 8 bits instead of using AH.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SHR16ri),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SHR16ri),
ResultSuperReg).addReg(SourceSuperReg).addImm(8);
// Now reference the 8-bit subreg of the result.
@@ -1470,7 +1470,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
// Copy the result out of the physreg if we haven't already.
if (!ResultReg) {
ResultReg = createResultReg(TypeEntry.RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Copy), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Copy), ResultReg)
.addReg(OpEntry.DivRemResultReg);
}
UpdateValueMap(I, ResultReg);
@@ -1513,10 +1513,10 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
// If we read more than the lsb, we may see non-zero values whereas lsb
// is zero. Therefore, we have to truncate Op0Reg to i1 for the select.
// This is achieved by performing TEST against 1.
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
.addReg(Op0Reg).addImm(1);
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
.addReg(Op1Reg).addReg(Op2Reg);
UpdateValueMap(I, ResultReg);
return true;
@@ -1531,7 +1531,7 @@ bool X86FastISel::X86SelectFPExt(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(&X86::FR64RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::CVTSS2SDrr), ResultReg)
.addReg(OpReg);
UpdateValueMap(I, ResultReg);
@@ -1550,7 +1550,7 @@ bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
unsigned OpReg = getRegForValue(V);
if (OpReg == 0) return false;
unsigned ResultReg = createResultReg(&X86::FR32RegClass);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(X86::CVTSD2SSrr), ResultReg)
.addReg(OpReg);
UpdateValueMap(I, ResultReg);
@@ -1590,7 +1590,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
(const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
(const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
unsigned CopyReg = createResultReg(CopyRC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
CopyReg).addReg(InputReg);
InputReg = CopyReg;
}
@@ -1718,12 +1718,12 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
// FIXME may need to add RegState::Debug to any registers produced,
// although ESP/EBP should be the only ones at the moment.
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II), AM).
addImm(0).addMetadata(DI->getVariable());
return true;
}
case Intrinsic::trap: {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TRAP));
return true;
}
case Intrinsic::sadd_with_overflow:
@@ -1760,13 +1760,14 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
  // The call to CreateRegs builds two sequential registers, to store
  // both the returned values.
unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(OpC), ResultReg)
.addReg(Reg1).addReg(Reg2);
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr;
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg+1);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),
+ ResultReg + 1);
UpdateValueMap(&I, ResultReg, 2);
return true;
@@ -1840,7 +1841,8 @@ bool X86FastISel::FastLowerArguments() {
// Without this, EmitLiveInCopies may eliminate the livein if its only
// use is a bitcast (which isn't turned into an instruction).
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
ResultReg).addReg(DstReg, getKillRegState(true));
UpdateValueMap(I, ResultReg);
}
@@ -1969,7 +1971,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
PointerType *Ty = cast<PointerType>(ArgVal->getType());
Type *ElementTy = Ty->getElementType();
- unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
+ unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
unsigned FrameAlign = CS.getParamAlignment(AttrInd);
if (!FrameAlign)
FrameAlign = TLI.getByValTypeAlignment(ElementTy);
@@ -2026,7 +2028,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
return false;
if (ArgVT == MVT::x86mmx)
return false;
- unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
+ unsigned OriginalAlignment = DL.getABITypeAlignment(ArgTy);
Flags.setOrigAlign(OriginalAlignment);
Args.push_back(ArgReg);
@@ -2051,7 +2053,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Issue CALLSEQ_START
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
.addImm(NumBytes);
// Process argument: walk the register/memloc assignments, inserting
@@ -2120,8 +2122,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
}
if (VA.isRegLoc()) {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- VA.getLocReg()).addReg(Arg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
RegArgs.push_back(VA.getLocReg());
} else {
unsigned LocMemOffset = VA.getLocMemOffset();
@@ -2155,8 +2157,8 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// GOT pointer.
if (Subtarget->isPICStyleGOT()) {
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
- X86::EBX).addReg(Base);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
}
if (Subtarget->is64Bit() && isVarArg && !isWin64) {
@@ -2166,7 +2168,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::MOV8ri),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::MOV8ri),
X86::AL).addImm(NumXMMRegs);
}
@@ -2179,7 +2181,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
CallOpc = X86::CALL64r;
else
CallOpc = X86::CALL32r;
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc))
.addReg(CalleeOp);
} else {
@@ -2213,7 +2215,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
}
- MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc));
+ MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CallOpc));
if (MemIntName)
MIB.addExternalSymbol(MemIntName, OpFlags);
else
@@ -2238,7 +2240,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
// Issue CALLSEQ_END
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
const unsigned NumBytesCallee = computeBytesPoppedByCallee(*Subtarget, CS);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
.addImm(NumBytes).addImm(NumBytesCallee);
// Build info for return calling conv lowering code.
@@ -2284,10 +2286,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
CopyVT = MVT::f80;
CopyReg = createResultReg(&X86::RFP80RegClass);
}
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
- CopyReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(X86::FpPOP_RETVAL), CopyReg);
} else {
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+ TII.get(TargetOpcode::COPY),
CopyReg).addReg(RVLocs[i].getLocReg());
UsedRegs.push_back(RVLocs[i].getLocReg());
}
@@ -2300,11 +2303,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI.CreateStackObject(MemSize, MemSize, false);
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc)), FI)
.addReg(CopyReg);
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
- addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg + i), FI);
}
}
@@ -2439,7 +2442,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg), AM);
return ResultReg;
}
@@ -2447,10 +2450,10 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
}
// MachineConstantPool wants an explicit alignment.
- unsigned Align = TD.getPrefTypeAlignment(C->getType());
+ unsigned Align = DL.getPrefTypeAlignment(C->getType());
if (Align == 0) {
// Alignment of vector types. FIXME!
- Align = TD.getTypeAllocSize(C->getType());
+ Align = DL.getTypeAllocSize(C->getType());
}
// x86-32 PIC requires a PIC base register for constant pools.
@@ -2470,7 +2473,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// Create the load from the constant pool.
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
unsigned ResultReg = createResultReg(RC);
- addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg),
MCPOffset, PICBase, OpFlag);
@@ -2495,7 +2498,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned ResultReg = createResultReg(RC);
- addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
+ addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(Opc), ResultReg), AM);
return ResultReg;
}
@@ -2534,7 +2537,7 @@ unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
}
unsigned ResultReg = createResultReg(RC);
- BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
return ResultReg;
}
@@ -2547,7 +2550,7 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const X86InstrInfo &XII = (const X86InstrInfo&)TII;
- unsigned Size = TD.getTypeAllocSize(LI->getType());
+ unsigned Size = DL.getTypeAllocSize(LI->getType());
unsigned Alignment = LI->getAlignment();
SmallVector<MachineOperand, 8> AddrOps;