summaryrefslogtreecommitdiff
path: root/lib/Target/X86
diff options
context:
space:
mode:
authorLang Hames <lhames@gmail.com>2013-11-29 03:07:54 +0000
committerLang Hames <lhames@gmail.com>2013-11-29 03:07:54 +0000
commit1cbca515b6804a24d778fc0cab04ea0c53900141 (patch)
tree4373adbf32f0dd251035a76616dcfcd68b49c6a7 /lib/Target/X86
parent7fd70e7b0c9c12183d9a3b084e5789622d0414fa (diff)
downloadllvm-1cbca515b6804a24d778fc0cab04ea0c53900141.tar.gz
llvm-1cbca515b6804a24d778fc0cab04ea0c53900141.tar.bz2
llvm-1cbca515b6804a24d778fc0cab04ea0c53900141.tar.xz
Refactor a lot of patchpoint/stackmap related code to simplify and make it
target independent. Most of the x86 specific stackmap/patchpoint handling was necessitated by the use of the native address-mode format for frame index operands. PEI has now been modified to treat stackmap/patchpoint similarly to DEBUG_INFO, allowing us to use a simple, platform independent register/offset pair for frame indexes on stackmap/patchpoints. Notes: - Folding is now platform independent and automatically supported. - Emitting patchpoints with direct memory references now just involves calling the TargetLoweringBase::emitPatchPoint utility method from the target's XXXTargetLowering::EmitInstrWithCustomInserter method. (See X86TargetLowering for an example). - No more ugly platform-specific operand parsers. This patch shouldn't change the generated output for X86. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@195944 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/X86')
-rw-r--r--lib/Target/X86/X86AsmPrinter.h12
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp45
-rw-r--r--lib/Target/X86/X86ISelLowering.h3
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp74
-rw-r--r--lib/Target/X86/X86MCInstLower.cpp84
5 files changed, 1 insertions, 217 deletions
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 99be066df3..cf47dc48bc 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -27,19 +27,9 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
StackMaps SM;
- // Parses operands of PATCHPOINT and STACKMAP to produce stack map Location
- // structures. Returns a result location and an iterator to the operand
- // immediately following the operands consumed.
- //
- // This method is implemented in X86MCInstLower.cpp.
- static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
- stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- const TargetMachine &TM);
-
public:
explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer), SM(*this, stackmapOperandParser) {
+ : AsmPrinter(TM, Streamer), SM(*this) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 40d5f2b0c7..825f216c42 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -15819,51 +15819,6 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
return MBB;
}
-/// Convert any TargetFrameIndex operands into the x86-specific pattern of five
-/// memory operands that is recognized by PrologEpilogInserter.
-MachineBasicBlock *
-X86TargetLowering::emitPatchPoint(MachineInstr *MI,
- MachineBasicBlock *MBB) const {
- const TargetMachine &TM = getTargetMachine();
- const X86InstrInfo *TII = static_cast<const X86InstrInfo*>(TM.getInstrInfo());
-
- // MI changes inside this loop as we grow operands.
- for(unsigned OperIdx = 0; OperIdx != MI->getNumOperands(); ++OperIdx) {
- MachineOperand &MO = MI->getOperand(OperIdx);
- if (!MO.isFI())
- continue;
-
- // foldMemoryOperand builds a new MI after replacing a single FI operand
- // with the canonical set of five x86 addressing-mode operands.
- int FI = MO.getIndex();
- MachineFunction &MF = *MBB->getParent();
- SmallVector<unsigned, 1> FIOps(1, OperIdx);
- MachineInstr *NewMI = TII->foldMemoryOperandImpl(MF, MI, FIOps, FI);
- assert(NewMI && "Cannot fold frame index operand into stackmap.");
-
- // Inherit previous memory operands.
- NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
- assert(NewMI->mayLoad() && "Folded a stackmap use to a non-load!");
-
- // Add a new memory operand for this FI.
- const MachineFrameInfo &MFI = *MF.getFrameInfo();
- assert(MFI.getObjectOffset(FI) != -1);
- MachineMemOperand *MMO =
- MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
- MachineMemOperand::MOLoad,
- TM.getDataLayout()->getPointerSize(),
- MFI.getObjectAlignment(FI));
- NewMI->addMemOperand(MF, MMO);
-
- // Replace the instruction and update the operand index.
- MBB->insert(MachineBasicBlock::iterator(MI), NewMI);
- OperIdx += (NewMI->getNumOperands() - MI->getNumOperands()) - 1;
- MI->eraseFromParent();
- MI = NewMI;
- }
- return MBB;
-}
-
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *BB) const {
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 6231e253d2..bc3dd608da 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -973,9 +973,6 @@ namespace llvm {
MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
-
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index ad46c10c7d..45af24b50d 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -4198,84 +4198,10 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
MI->addRegisterKilled(Reg, TRI, true);
}
-static MachineInstr* foldPatchpoint(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex,
- const TargetInstrInfo &TII) {
- unsigned StartIdx = 0;
- switch (MI->getOpcode()) {
- case TargetOpcode::STACKMAP:
- StartIdx = 2; // Skip ID, nShadowBytes.
- break;
- case TargetOpcode::PATCHPOINT: {
- // For PatchPoint, the call args are not foldable.
- PatchPointOpers opers(MI);
- StartIdx = opers.getVarIdx();
- break;
- }
- default:
- llvm_unreachable("unexpected stackmap opcode");
- }
-
- // Return false if any operands requested for folding are not foldable (not
- // part of the stackmap's live values).
- for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
- I != E; ++I) {
- if (*I < StartIdx)
- return 0;
- }
-
- MachineInstr *NewMI =
- MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
- MachineInstrBuilder MIB(MF, NewMI);
-
- // No need to fold return, the meta data, and function arguments
- for (unsigned i = 0; i < StartIdx; ++i)
- MIB.addOperand(MI->getOperand(i));
-
- for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
- unsigned SpillSize;
- unsigned SpillOffset;
- if (MO.isReg()) {
- // Compute the spill slot size and offset.
- const TargetRegisterClass *RC =
- MF.getRegInfo().getRegClass(MO.getReg());
- bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
- SpillOffset, &MF.getTarget());
- if (!Valid)
- report_fatal_error("cannot spill patchpoint subregister operand");
- MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
- MIB.addOperand(MachineOperand::CreateImm(SpillSize));
- }
- else {
- // ExpandISelPseudos is converting a simple frame index into a 5-operand
- // frame index.
- assert(MO.isFI() && MO.getIndex() == FrameIndex &&
- "patchpoint can only fold a vreg operand or frame index");
- SpillOffset = 0;
- MIB.addOperand(MachineOperand::CreateImm(StackMaps::DirectMemRefOp));
- }
- MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
- addOffset(MIB, SpillOffset);
- }
- else
- MIB.addOperand(MO);
- }
- return NewMI;
-}
-
MachineInstr*
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
- // Special case stack map and patch point intrinsics.
- if (MI->getOpcode() == TargetOpcode::STACKMAP
- || MI->getOpcode() == TargetOpcode::PATCHPOINT) {
- return foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
- }
// Check switch flag
if (NoFusing) return NULL;
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index e6efbcd729..51ff713e63 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -674,90 +674,6 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
.addExpr(tlsRef));
}
-static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
-parseMemoryOperand(StackMaps::Location::LocationType LocTy, unsigned Size,
- MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE) {
-
- typedef StackMaps::Location Location;
-
- assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
-
- const MachineOperand &Base = *MOI;
- const MachineOperand &Scale = *(++MOI);
- const MachineOperand &Index = *(++MOI);
- const MachineOperand &Disp = *(++MOI);
- const MachineOperand &ZeroReg = *(++MOI);
-
- // Sanity check for supported operand format.
- assert(Base.isReg() &&
- Scale.isImm() && Scale.getImm() == 1 &&
- Index.isReg() && Index.getReg() == 0 &&
- Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
- "Unsupported x86 memory operand sequence.");
- (void)Scale;
- (void)Index;
- (void)ZeroReg;
-
- return std::make_pair(
- Location(LocTy, Size, Base.getReg(), Disp.getImm()), ++MOI);
-}
-
-std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
-X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
- MachineInstr::const_mop_iterator MOE,
- const TargetMachine &TM) {
-
- typedef StackMaps::Location Location;
-
- const MachineOperand &MOP = *MOI;
- assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
- "Register mask and implicit operands should not be processed.");
-
- if (MOP.isImm()) {
- // Verify anyregcc
- // [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
-
- switch (MOP.getImm()) {
- default: llvm_unreachable("Unrecognized operand type.");
- case StackMaps::DirectMemRefOp: {
- unsigned Size = TM.getDataLayout()->getPointerSizeInBits();
- assert((Size % 8) == 0 && "Need pointer size in bytes.");
- Size /= 8;
- return parseMemoryOperand(StackMaps::Location::Direct, Size,
- llvm::next(MOI), MOE);
- }
- case StackMaps::IndirectMemRefOp: {
- ++MOI;
- int64_t Size = MOI->getImm();
- assert(Size > 0 && "Need a valid size for indirect memory locations.");
- return parseMemoryOperand(StackMaps::Location::Indirect, Size,
- llvm::next(MOI), MOE);
- }
- case StackMaps::ConstantOp: {
- ++MOI;
- assert(MOI->isImm() && "Expected constant operand.");
- int64_t Imm = MOI->getImm();
- return std::make_pair(
- Location(Location::Constant, sizeof(int64_t), 0, Imm), ++MOI);
- }
- }
- }
-
- // Otherwise this is a reg operand. The physical register number will
- // ultimately be encoded as a DWARF regno. The stack map also records the size
- // of a spill slot that can hold the register content. (The runtime can
- // track the actual size of the data type if it needs to.)
- assert(MOP.isReg() && "Expected register operand here.");
- assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
- "Virtreg operands should have been rewritten before now.");
- const TargetRegisterClass *RC =
- TM.getRegisterInfo()->getMinimalPhysRegClass(MOP.getReg());
- assert(!MOP.getSubReg() && "Physical subreg still around.");
- return std::make_pair(
- Location(Location::Register, RC->getSize(), MOP.getReg(), 0), ++MOI);
-}
-
// Lower a stackmap of the form:
// <id>, <shadowBytes>, ...
static void LowerSTACKMAP(MCStreamer &OutStreamer,