-rw-r--r--  include/llvm/CodeGen/StackMaps.h          16
-rw-r--r--  include/llvm/Target/TargetLowering.h        4
-rw-r--r--  lib/CodeGen/PrologEpilogInserter.cpp       15
-rw-r--r--  lib/CodeGen/StackMaps.cpp                  64
-rw-r--r--  lib/CodeGen/TargetInstrInfo.cpp            90
-rw-r--r--  lib/CodeGen/TargetLoweringBase.cpp         56
-rw-r--r--  lib/Target/X86/X86AsmPrinter.h             12
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp         45
-rw-r--r--  lib/Target/X86/X86ISelLowering.h            3
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp            74
-rw-r--r--  lib/Target/X86/X86MCInstLower.cpp          84
11 files changed, 222 insertions, 241 deletions
diff --git a/include/llvm/CodeGen/StackMaps.h b/include/llvm/CodeGen/StackMaps.h
index e90f22e5b6..ef4657b139 100644
--- a/include/llvm/CodeGen/StackMaps.h
+++ b/include/llvm/CodeGen/StackMaps.h
@@ -92,19 +92,12 @@ public:
: LocType(LocType), Size(Size), Reg(Reg), Offset(Offset) {}
};
- // Typedef a function pointer for functions that parse sequences of operands
- // and return a Location, plus a new "next" operand iterator.
- typedef std::pair<Location, MachineInstr::const_mop_iterator>
- (*OperandParser)(MachineInstr::const_mop_iterator,
- MachineInstr::const_mop_iterator, const TargetMachine&);
-
// OpTypes are used to encode information about the following logical
// operand (which may consist of several MachineOperands) for the
// OpParser.
typedef enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp } OpType;
- StackMaps(AsmPrinter &AP, OperandParser OpParser)
- : AP(AP), OpParser(OpParser) {}
+ StackMaps(AsmPrinter &AP) : AP(AP) {}
/// \brief Generate a stackmap record for a stackmap instruction.
///
@@ -155,10 +148,15 @@ private:
};
AsmPrinter &AP;
- OperandParser OpParser;
CallsiteInfoList CSInfos;
ConstantPool ConstPool;
+ /// Parse one logical operand (which may span several MachineOperands) and
+ /// return its Location, plus an iterator to the operand immediately
+ /// following the ones consumed.
+ std::pair<Location, MachineInstr::const_mop_iterator>
+ parseOperand(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE);
+
/// This should be called by the MC lowering code _immediately_ before
/// lowering the MI to an MCInst. It records where the operands for the
/// instruction are stored, and outputs a label to record the offset of
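
With the OperandParser callback gone, a target AsmPrinter now just hands StackMaps a reference to itself and the shared StackMaps::parseOperand does the decoding. A minimal sketch of the new construction pattern, assuming a hypothetical target printer (the X86AsmPrinter change further down is the real instance):

    #include "llvm/CodeGen/AsmPrinter.h"
    #include "llvm/CodeGen/StackMaps.h"
    using namespace llvm;

    class MyTargetAsmPrinter : public AsmPrinter {   // hypothetical target printer
      StackMaps SM;
    public:
      explicit MyTargetAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
          : AsmPrinter(TM, Streamer), SM(*this) {}   // no per-target parser callback
    };
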
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 5ab04f7944..6f643c3583 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -1685,6 +1685,10 @@ protected:
/// Return true if the value types that can be represented by the specified
/// register class are all legal.
bool isLegalRC(const TargetRegisterClass *RC) const;
+
+ /// Replace/modify any TargetFrameIndex operands with a target-dependent
+ /// sequence of memory operands that is recognized by PrologEpilogInserter.
+ MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
};
/// This class defines information used to lower LLVM code to legal SelectionDAG
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index b0e494ffcd..0107a9cb44 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -731,15 +731,18 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
// Frame indicies in debug values are encoded in a target independent
// way with simply the frame index and offset rather than any
// target-specific addressing mode.
- if (MI->isDebugValue()) {
- assert(i == 0 && "Frame indicies can only appear as the first "
- "operand of a DBG_VALUE machine instruction");
+ if (MI->isDebugValue() ||
+ MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ assert((!MI->isDebugValue() || i == 0) &&
+ "Frame indicies can only appear as the first operand of a "
+ "DBG_VALUE machine instruction");
unsigned Reg;
- MachineOperand &Offset = MI->getOperand(1);
+ MachineOperand &Offset = MI->getOperand(i + 1);
Offset.setImm(Offset.getImm() +
TFI->getFrameIndexReference(
- Fn, MI->getOperand(0).getIndex(), Reg));
- MI->getOperand(0).ChangeToRegister(Reg, false /*isDef*/);
+ Fn, MI->getOperand(i).getIndex(), Reg));
+ MI->getOperand(i).ChangeToRegister(Reg, false /*isDef*/);
continue;
}
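
The new branch relies on the operand layout produced by TargetLoweringBase::emitPatchPoint later in this patch: every frame-index reference in a STACKMAP or PATCHPOINT is a (DirectMemRefOp, frame index, offset) triple, so the immediate that absorbs the frame offset always sits at i + 1. A hedged sketch of how such a triple is appended while the instruction is built (MIB and FI are placeholder names):

    MIB.addImm(StackMaps::DirectMemRefOp); // location-type tag, left untouched by PEI
    MIB.addFrameIndex(FI);                 // operand i: PEI rewrites this to a register
    MIB.addImm(0);                         // operand i + 1: PEI folds in the frame offset
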
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp
index 3b9dd65bb9..bec3021423 100644
--- a/lib/CodeGen/StackMaps.cpp
+++ b/lib/CodeGen/StackMaps.cpp
@@ -13,6 +13,7 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectFileInfo.h"
@@ -65,6 +66,60 @@ unsigned PatchPointOpers::getNextScratchIdx(unsigned StartIdx) const {
return ScratchIdx;
}
+std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE) {
+ const MachineOperand &MOP = *MOI;
+ assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
+ "Register mask and implicit operands should not be processed.");
+
+ if (MOP.isImm()) {
+ // Verify anyregcc
+ // [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
+
+ switch (MOP.getImm()) {
+ default: llvm_unreachable("Unrecognized operand type.");
+ case StackMaps::DirectMemRefOp: {
+ unsigned Size = AP.TM.getDataLayout()->getPointerSizeInBits();
+ assert((Size % 8) == 0 && "Need pointer size in bytes.");
+ Size /= 8;
+ unsigned Reg = (++MOI)->getReg();
+ int64_t Imm = (++MOI)->getImm();
+ return std::make_pair(
+ Location(StackMaps::Location::Direct, Size, Reg, Imm), ++MOI);
+ }
+ case StackMaps::IndirectMemRefOp: {
+ int64_t Size = (++MOI)->getImm();
+ assert(Size > 0 && "Need a valid size for indirect memory locations.");
+ unsigned Reg = (++MOI)->getReg();
+ int64_t Imm = (++MOI)->getImm();
+ return std::make_pair(
+ Location(StackMaps::Location::Indirect, Size, Reg, Imm), ++MOI);
+ }
+ case StackMaps::ConstantOp: {
+ ++MOI;
+ assert(MOI->isImm() && "Expected constant operand.");
+ int64_t Imm = MOI->getImm();
+ return std::make_pair(
+ Location(Location::Constant, sizeof(int64_t), 0, Imm), ++MOI);
+ }
+ }
+ }
+
+ // Otherwise this is a reg operand. The physical register number will
+ // ultimately be encoded as a DWARF regno. The stack map also records the size
+ // of a spill slot that can hold the register content. (The runtime can
+ // track the actual size of the data type if it needs to.)
+ assert(MOP.isReg() && "Expected register operand here.");
+ assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
+ "Virtreg operands should have been rewritten before now.");
+ const TargetRegisterClass *RC =
+ AP.TM.getRegisterInfo()->getMinimalPhysRegClass(MOP.getReg());
+ assert(!MOP.getSubReg() && "Physical subreg still around.");
+ return std::make_pair(
+ Location(Location::Register, RC->getSize(), MOP.getReg(), 0), ++MOI);
+}
+
void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint32_t ID,
MachineInstr::const_mop_iterator MOI,
MachineInstr::const_mop_iterator MOE,
@@ -78,7 +133,7 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint32_t ID,
if (recordResult) {
std::pair<Location, MachineInstr::const_mop_iterator> ParseResult =
- OpParser(MI.operands_begin(), llvm::next(MI.operands_begin()), AP.TM);
+ parseOperand(MI.operands_begin(), llvm::next(MI.operands_begin()));
Location &Loc = ParseResult.first;
assert(Loc.LocType == Location::Register &&
@@ -87,10 +142,8 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint32_t ID,
}
while (MOI != MOE) {
- std::pair<Location, MachineInstr::const_mop_iterator> ParseResult =
- OpParser(MOI, MOE, AP.TM);
-
- Location &Loc = ParseResult.first;
+ Location Loc;
+ tie(Loc, MOI) = parseOperand(MOI, MOE);
// Move large constants into the constant pool.
if (Loc.LocType == Location::Constant && (Loc.Offset & ~0xFFFFFFFFULL)) {
@@ -99,7 +152,6 @@ void StackMaps::recordStackMapOpers(const MachineInstr &MI, uint32_t ID,
}
CallsiteLocs.push_back(Loc);
- MOI = ParseResult.second;
}
const MCExpr *CSOffsetExpr = MCBinaryExpr::CreateSub(
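
parseOperand now accepts the same operand grammar on every target: (DirectMemRefOp, reg, imm), (IndirectMemRefOp, size, reg, imm), (ConstantOp, imm), or a bare physical-register operand. A hedged sketch of live-value operands as the parser would see them, here one register plus one tagged constant (MIB, ID, ShadowBytes, and LiveReg are placeholders):

    MIB.addImm(ID);                     // stackmap ID
    MIB.addImm(ShadowBytes);            // nop-shadow size
    MIB.addReg(LiveReg);                // decoded as Location::Register
    MIB.addImm(StackMaps::ConstantOp);  // tag
    MIB.addImm(0x12345678);             // decoded as Location::Constant (small, stays inline)
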
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index bf4fd6587e..7c5092bfa2 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -13,10 +13,12 @@
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
@@ -372,6 +374,65 @@ canFoldMemoryOperand(const MachineInstr *MI,
return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
+static MachineInstr* foldPatchpoint(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex,
+ const TargetInstrInfo &TII) {
+ unsigned StartIdx = 0;
+ switch (MI->getOpcode()) {
+ case TargetOpcode::STACKMAP:
+ StartIdx = 2; // Skip ID, nShadowBytes.
+ break;
+ case TargetOpcode::PATCHPOINT: {
+ // For PatchPoint, the call args are not foldable.
+ PatchPointOpers opers(MI);
+ StartIdx = opers.getVarIdx();
+ break;
+ }
+ default:
+ llvm_unreachable("unexpected stackmap opcode");
+ }
+
+ // Return 0 if any operands requested for folding are not foldable (not
+ // part of the stackmap's live values).
+ for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
+ I != E; ++I) {
+ if (*I < StartIdx)
+ return 0;
+ }
+
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+ MachineInstrBuilder MIB(MF, NewMI);
+
+ // No need to fold the return value, the meta operands, or the call arguments.
+ for (unsigned i = 0; i < StartIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
+ unsigned SpillSize;
+ unsigned SpillOffset;
+ // Compute the spill slot size and offset.
+ const TargetRegisterClass *RC =
+ MF.getRegInfo().getRegClass(MO.getReg());
+ bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
+ SpillOffset, &MF.getTarget());
+ if (!Valid)
+ report_fatal_error("cannot spill patchpoint subregister operand");
+ MIB.addImm(StackMaps::IndirectMemRefOp);
+ MIB.addImm(SpillSize);
+ MIB.addFrameIndex(FrameIndex);
+ MIB.addImm(SpillOffset);
+ }
+ else
+ MIB.addOperand(MO);
+ }
+ return NewMI;
+}
+
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
@@ -393,8 +454,18 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
assert(MBB && "foldMemoryOperand needs an inserted instruction");
MachineFunction &MF = *MBB->getParent();
- // Ask the target to do the actual folding.
- if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
+ MachineInstr *NewMI = 0;
+
+ if (MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ // Fold stackmap/patchpoint.
+ NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+ } else {
+ // Ask the target to do the actual folding.
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
+ }
+
+ if (NewMI) {
NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
assert((!(Flags & MachineMemOperand::MOStore) ||
@@ -450,7 +521,20 @@ TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MachineFunction &MF = *MBB.getParent();
// Ask the target to do the actual folding.
- MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+ MachineInstr *NewMI = 0;
+ int FrameIndex = 0;
+
+ if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
+ MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
+ isLoadFromStackSlot(LoadMI, FrameIndex)) {
+ // Fold stackmap/patchpoint.
+ NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ } else {
+ // Ask the target to do the actual folding.
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+ }
+
if (!NewMI) return 0;
NewMI = MBB.insert(MI, NewMI);
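
Handling this at the generic TargetInstrInfo level lets the register allocator's spill path fold stackmap and patchpoint operands without any target hook. A hedged sketch from the caller's side (UseOpIdx and StackSlotFI are placeholders):

    // Replace a register use inside a STACKMAP/PATCHPOINT with its spill slot.
    SmallVector<unsigned, 4> Ops;
    Ops.push_back(UseOpIdx);  // operand index of the register use in MI
    if (MachineInstr *Folded = TII->foldMemoryOperand(MI, Ops, StackSlotFI)) {
      // 'Folded' carries an (IndirectMemRefOp, SpillSize, <fi>, offset) quad in
      // place of the register operand; erasing the original MI is the caller's job.
    }
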
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 30305af211..fa1f526b1b 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -18,7 +18,9 @@
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
@@ -894,6 +896,60 @@ bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
return false;
}
+/// Replace/modify any TargetFrameIndex operands with a target-dependent
+/// sequence of memory operands that is recognized by PrologEpilogInserter.
+MachineBasicBlock*
+TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
+ MachineBasicBlock *MBB) const {
+ const TargetMachine &TM = getTargetMachine();
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ MachineFunction &MF = *MI->getParent()->getParent();
+
+ // MI changes inside this loop as we grow operands.
+ for (unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
+ MachineOperand &MO = MI->getOperand(OperIdx);
+ if (!MO.isFI())
+ continue;
+
+ // Build a new MI in which the single FI operand is replaced by the
+ // target-independent (DirectMemRefOp, frame index, offset) triple that
+ // PrologEpilogInserter and StackMaps::parseOperand understand.
+ int FI = MO.getIndex();
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());
+
+ // Copy operands before the frame-index.
+ for (unsigned i = 0; i < OperIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+ // Add frame index operands: direct-mem-ref tag, #FI, offset.
+ MIB.addImm(StackMaps::DirectMemRefOp);
+ MIB.addOperand(MI->getOperand(OperIdx));
+ MIB.addImm(0);
+ // Copy the operands after the frame index.
+ for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ // Inherit previous memory operands.
+ MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+ assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
+
+ // Add a new memory operand for this FI.
+ const MachineFrameInfo &MFI = *MF.getFrameInfo();
+ assert(MFI.getObjectOffset(FI) != -1);
+ MachineMemOperand *MMO =
+ MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
+ MachineMemOperand::MOLoad,
+ TM.getDataLayout()->getPointerSize(),
+ MFI.getObjectAlignment(FI));
+ MIB->addMemOperand(MF, MMO);
+
+ // Replace the instruction and update the operand index.
+ MBB->insert(MachineBasicBlock::iterator(MI), MIB);
+ OperIdx += (MIB->getNumOperands() - MI->getNumOperands()) - 1;
+ MI->eraseFromParent();
+ MI = MIB;
+ }
+ return MBB;
+}
+
/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
std::pair<const TargetRegisterClass*, uint8_t>
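
Because the helper now lives on TargetLoweringBase, a target only has to route the two pseudo-instructions to it from its custom inserter; this is what X86TargetLowering::EmitInstrWithCustomInserter does in place of the X86-local copy removed below:

    case TargetOpcode::STACKMAP:
    case TargetOpcode::PATCHPOINT:
      return emitPatchPoint(MI, BB);
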
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 99be066df3..cf47dc48bc 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -27,19 +27,9 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
StackMaps SM;
- // Parses operands of PATCHPOINT and STACKMAP to produce stack map Location
- // structures. Returns a result location and an iterator to the operand
- // immediately following the operands consumed.
- //
- // This method is implemented in X86MCInstLower.cpp.
- static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
- stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- const TargetMachine &TM);
-
public:
explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), SM(*this, stackmapOperandParser) {
+ : AsmPrinter(TM, Streamer), SM(*this) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 40d5f2b0c7..825f216c42 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -15819,51 +15819,6 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
return MBB;
}
-/// Convert any TargetFrameIndex operands into the x86-specific pattern of five
-/// memory operands that is recognized by PrologEpilogInserter.
-MachineBasicBlock *
-X86TargetLowering::emitPatchPoint(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetMachine &TM = getTargetMachine();
- const X86InstrInfo *TII = static_cast<const X86InstrInfo*>(TM.getInstrInfo());
-
- // MI changes inside this loop as we grow operands.
- for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
- MachineOperand &MO = MI->getOperand(OperIdx);
- if (!MO.isFI())
- continue;
-
- // foldMemoryOperand builds a new MI after replacing a single FI operand
- // with the canonical set of five x86 addressing-mode operands.
- int FI = MO.getIndex();
- MachineFunction &MF = *MBB->getParent();
- SmallVector<unsigned, 1> FIOps(1, OperIdx);
- MachineInstr *NewMI = TII->foldMemoryOperandImpl(MF, MI, FIOps, FI);
- assert(NewMI && "Cannot fold frame index operand into stackmap.");
-
- // Inherit previous memory operands.
- NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
- assert(NewMI->mayLoad() && "Folded a stackmap use to a non-load!");
-
- // Add a new memory operand for this FI.
- const MachineFrameInfo &MFI = *MF.getFrameInfo();
- assert(MFI.getObjectOffset(FI) != -1);
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
- MachineMemOperand::MOLoad,
- TM.getDataLayout()->getPointerSize(),
- MFI.getObjectAlignment(FI));
- NewMI->addMemOperand(MF, MMO);
-
- // Replace the instruction and update the operand index.
- MBB->insert(MachineBasicBlock::iterator(MI), NewMI);
- OperIdx += (NewMI->getNumOperands() - MI->getNumOperands()) - 1;
- MI->eraseFromParent();
- MI = NewMI;
- }
- return MBB;
-}
-
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 6231e253d2..bc3dd608da 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -973,9 +973,6 @@ namespace llvm {
MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
-
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index ad46c10c7d..45af24b50d 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -4198,84 +4198,10 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
MI->addRegisterKilled(Reg, TRI, true);
}
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex,
- const TargetInstrInfo &TII) {
- unsigned StartIdx = 0;
- switch (MI->getOpcode()) {
- case TargetOpcode::STACKMAP:
- StartIdx = 2; // Skip ID, nShadowBytes.
- break;
- case TargetOpcode::PATCHPOINT: {
- // For PatchPoint, the call args are not foldable.
- PatchPointOpers opers(MI);
- StartIdx = opers.getVarIdx();
- break;
- }
- default:
- llvm_unreachable("unexpected stackmap opcode");
- }
-
- // Return false if any operands requested for folding are not foldable (not
- // part of the stackmap's live values).
- for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
- I != E; ++I) {
- if (*I < StartIdx)
- return 0;
- }
-
- MachineInstr *NewMI =
- MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
- MachineInstrBuilder MIB(MF, NewMI);
-
- // No need to fold return, the meta data, and function arguments
- for (unsigned i = 0; i < StartIdx; ++i)
- MIB.addOperand(MI->getOperand(i));
-
- for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
- unsigned SpillSize;
- unsigned SpillOffset;
- if (MO.isReg()) {
- // Compute the spill slot size and offset.
- const TargetRegisterClass *RC =
- MF.getRegInfo().getRegClass(MO.getReg());
- bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
- SpillOffset, &MF.getTarget());
- if (!Valid)
- report_fatal_error("cannot spill patchpoint subregister operand");
- MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
- MIB.addOperand(MachineOperand::CreateImm(SpillSize));
- }
- else {
- // ExpandISelPseudos is converting a simple frame index into a 5-operand
- // frame index.
- assert(MO.isFI() && MO.getIndex() == FrameIndex &&
- "patchpoint can only fold a vreg operand or frame index");
- SpillOffset = 0;
- MIB.addOperand(MachineOperand::CreateImm(StackMaps::DirectMemRefOp));
- }
- MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
- addOffset(MIB, SpillOffset);
- }
- else
- MIB.addOperand(MO);
- }
- return NewMI;
-}
-
MachineInstr*
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
- // Special case stack map and patch point intrinsics.
- if (MI->getOpcode() == TargetOpcode::STACKMAP
- || MI->getOpcode() == TargetOpcode::PATCHPOINT) {
- return foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
- }
// Check switch flag
if (NoFusing) return NULL;
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index e6efbcd729..51ff713e63 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -674,90 +674,6 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
.addExpr(tlsRef));
}
-static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
-parseMemoryOperand(StackMaps::Location::LocationType LocTy, unsigned Size,
- MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE) {
-
- typedef StackMaps::Location Location;
-
- assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
-
- const MachineOperand &Base = *MOI;
- const MachineOperand &Scale = *(++MOI);
- const MachineOperand &Index = *(++MOI);
- const MachineOperand &Disp = *(++MOI);
- const MachineOperand &ZeroReg = *(++MOI);
-
- // Sanity check for supported operand format.
- assert(Base.isReg() &&
- Scale.isImm() && Scale.getImm() == 1 &&
- Index.isReg() && Index.getReg() == 0 &&
- Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
- "Unsupported x86 memory operand sequence.");
- (void)Scale;
- (void)Index;
- (void)ZeroReg;
-
- return std::make_pair(
- Location(LocTy, Size, Base.getReg(), Disp.getImm()), ++MOI);
-}
-
-std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
-X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- const TargetMachine &TM) {
-
- typedef StackMaps::Location Location;
-
- const MachineOperand &MOP = *MOI;
- assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
- "Register mask and implicit operands should not be processed.");
-
- if (MOP.isImm()) {
- // Verify anyregcc
- // [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
-
- switch (MOP.getImm()) {
- default: llvm_unreachable("Unrecognized operand type.");
- case StackMaps::DirectMemRefOp: {
- unsigned Size = TM.getDataLayout()->getPointerSizeInBits();
- assert((Size % 8) == 0 && "Need pointer size in bytes.");
- Size /= 8;
- return parseMemoryOperand(StackMaps::Location::Direct, Size,
- llvm::next(MOI), MOE);
- }
- case StackMaps::IndirectMemRefOp: {
- ++MOI;
- int64_t Size = MOI->getImm();
- assert(Size > 0 && "Need a valid size for indirect memory locations.");
- return parseMemoryOperand(StackMaps::Location::Indirect, Size,
- llvm::next(MOI), MOE);
- }
- case StackMaps::ConstantOp: {
- ++MOI;
- assert(MOI->isImm() && "Expected constant operand.");
- int64_t Imm = MOI->getImm();
- return std::make_pair(
- Location(Location::Constant, sizeof(int64_t), 0, Imm), ++MOI);
- }
- }
- }
-
- // Otherwise this is a reg operand. The physical register number will
- // ultimately be encoded as a DWARF regno. The stack map also records the size
- // of a spill slot that can hold the register content. (The runtime can
- // track the actual size of the data type if it needs to.)
- assert(MOP.isReg() && "Expected register operand here.");
- assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
- "Virtreg operands should have been rewritten before now.");
- const TargetRegisterClass *RC =
- TM.getRegisterInfo()->getMinimalPhysRegClass(MOP.getReg());
- assert(!MOP.getSubReg() && "Physical subreg still around.");
- return std::make_pair(
- Location(Location::Register, RC->getSize(), MOP.getReg(), 0), ++MOI);
-}
-
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
static void LowerSTACKMAP(MCStreamer &OutStreamer,