author     Andrew Trick <atrick@apple.com>  2013-10-31 22:11:56 +0000
committer  Andrew Trick <atrick@apple.com>  2013-10-31 22:11:56 +0000
commit     3d74dea4bddc84d1881efc21eb5eefbddbfa9aed (patch)
tree       76963fde5a977fb0fa931940c4db9351d916d0e4 /lib/Target/X86
parent     53446e50a08451df106f76205c6081bf5a103c9f (diff)
Add support for stack map generation in the X86 backend.
Originally implemented by Lang Hames.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193811 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/X86AsmPrinter.cpp   |   2
-rw-r--r--  lib/Target/X86/X86AsmPrinter.h     |  14
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp    |  43
-rw-r--r--  lib/Target/X86/X86MCInstLower.cpp  | 124
4 files changed, 178 insertions, 5 deletions
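
For context, the STACKMAP and PATCHPOINT machine pseudo-instructions lowered in this patch originate from the experimental stack map intrinsics that this patch series builds on. Below is a rough sketch of how a client might request a stack map from C++ with IRBuilder; the names Builder, M, LiveA and LiveB are illustrative, and the intrinsic is assumed to be llvm.experimental.stackmap:

    // Sketch: requesting a stack map record for two live values. Instruction
    // selection turns the call into a STACKMAP pseudo whose operands are
    // <id>, <numShadowBytes>, followed by the live values, matching the
    // lowering in X86MCInstLower.cpp below.
    Value *StackMap =
        Intrinsic::getDeclaration(M, Intrinsic::experimental_stackmap);
    Value *Args[] = { Builder.getInt64(42),   // stack map ID
                      Builder.getInt32(4),    // shadow bytes (NOP padding)
                      LiveA, LiveB };         // live values to record
    Builder.CreateCall(StackMap, Args);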
diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp
index 84f633fa84..1a81ba8f41 100644
--- a/lib/Target/X86/X86AsmPrinter.cpp
+++ b/lib/Target/X86/X86AsmPrinter.cpp
@@ -626,6 +626,8 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
OutStreamer.AddBlankLine();
}
+ SM.serializeToStackMapSection();
+
// Funny Darwin hack: This flag tells the linker that no global symbols
// contain code that falls through to other global symbols (e.g. the obvious
// implementation of multiple entry points). If this doesn't occur, the
diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h
index 6eed5ce08c..050c78df6a 100644
--- a/lib/Target/X86/X86AsmPrinter.h
+++ b/lib/Target/X86/X86AsmPrinter.h
@@ -16,6 +16,7 @@
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -24,9 +25,20 @@ class MCStreamer;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
+ StackMaps SM;
+
+ // Parses operands of PATCHPOINT and STACKMAP to produce stack map Location
+ // structures. Returns a result location and an iterator to the operand
+ // immediately following the operands consumed.
+ //
+ // This method is implemented in X86MCInstLower.cpp.
+ static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+ stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE);
+
public:
explicit X86AsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
- : AsmPrinter(TM, Streamer) {
+ : AsmPrinter(TM, Streamer), SM(*this, stackmapOperandParser) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
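
The comment above fixes the parser's contract: given a range of machine operands, it returns one decoded Location plus an iterator just past the operands it consumed. Here is a minimal sketch of a consumer driving such a callback; it is illustrative only (the real loop lives in the target-independent StackMaps code), and OpParser, LiveBegin and LiveEnd are stand-in names for the registered callback and the live-value operand range:

    // Sketch: decode every live-value operand of a STACKMAP/PATCHPOINT.
    SmallVector<StackMaps::Location, 8> Locations;
    MachineInstr::const_mop_iterator MOI = LiveBegin, MOE = LiveEnd;
    while (MOI != MOE) {
      std::pair<StackMaps::Location, MachineInstr::const_mop_iterator> Res =
          OpParser(MOI, MOE);
      Locations.push_back(Res.first); // one register/constant/memory location
      MOI = Res.second;               // resume right after the consumed operands
    }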
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 7f66c6ef11..369b031113 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -24,6 +24,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
@@ -4192,10 +4193,44 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
MI->addRegisterKilled(Reg, TRI, true);
}
-MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
+static MachineInstr* foldPatchpoint(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex,
+ const TargetInstrInfo &TII) {
+ MachineInstr *NewMI =
+ MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+ MachineInstrBuilder MIB(MF, NewMI);
+
+ bool isPatchPoint = MI->getOpcode() == TargetOpcode::PATCHPOINT;
+ unsigned StartIdx = isPatchPoint ? MI->getOperand(3).getImm() + 4 : 2;
+
+ // No need to fold the metadata and function arguments
+ for (unsigned i = 0; i < StartIdx; ++i)
+ MIB.addOperand(MI->getOperand(i));
+
+ for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
+ MIB.addOperand(MachineOperand::CreateImm(StackMaps::IndirectMemRefOp));
+ MIB.addOperand(MachineOperand::CreateFI(FrameIndex));
+ addOffset(MIB, 0);
+ }
+ else
+ MIB.addOperand(MO);
+ }
+ return NewMI;
+}
+
+MachineInstr*
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
+ // Special case stack map and patch point intrinsics.
+ if (MI->getOpcode() == TargetOpcode::STACKMAP
+ || MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+ return foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ }
// Check switch flag
if (NoFusing) return NULL;
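
foldPatchpoint above copies the fixed operands through unchanged and only rewrites the spilled live-value operands; the StartIdx arithmetic encodes where those live values begin. The following sketch of that layout is inferred from the code itself rather than from separate documentation, and getLiveValueStartIdx is a hypothetical helper, not part of the patch:

    // Sketch: where the foldable live-value operands start for each pseudo.
    //   STACKMAP   <id>, <numShadowBytes>, <live values...>
    //   PATCHPOINT <id>, <numBytes>, <target>, <numArgs>, <call args...>,
    //              <live values...>
    static unsigned getLiveValueStartIdx(const MachineInstr *MI) {
      if (MI->getOpcode() == TargetOpcode::PATCHPOINT)
        // Skip the four metadata operands plus the <numArgs> call arguments.
        return MI->getOperand(3).getImm() + 4;
      // STACKMAP: skip just the ID and the shadow-byte count.
      return 2;
    }
    // Each folded live value is replaced by the three operands that the
    // parser in X86MCInstLower.cpp decodes as an indirect memory reference:
    //   StackMaps::IndirectMemRefOp, <frame index>, <offset 0>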
diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp
index bd2be3e6c9..04d1ad57a7 100644
--- a/lib/Target/X86/X86MCInstLower.cpp
+++ b/lib/Target/X86/X86MCInstLower.cpp
@@ -17,6 +17,7 @@
#include "X86COFFMachineModuleInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
@@ -686,6 +687,123 @@ static void LowerTlsAddr(MCStreamer &OutStreamer,
.addExpr(tlsRef));
}
+static std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+parseMemoryOperand(StackMaps::Location::LocationType LocTy,
+ MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE) {
+
+ typedef StackMaps::Location Location;
+
+ assert(std::distance(MOI, MOE) >= 5 && "Too few operands to encode mem op.");
+
+ const MachineOperand &Base = *MOI;
+ const MachineOperand &Scale = *(++MOI);
+ const MachineOperand &Index = *(++MOI);
+ const MachineOperand &Disp = *(++MOI);
+ const MachineOperand &ZeroReg = *(++MOI);
+
+ // Sanity check for supported operand format.
+ assert(Base.isReg() &&
+ Scale.isImm() && Scale.getImm() == 1 &&
+ Index.isReg() && Index.getReg() == 0 &&
+ Disp.isImm() && ZeroReg.isReg() && (ZeroReg.getReg() == 0) &&
+ "Unsupported x86 memory operand sequence.");
+
+ return std::make_pair(
+ Location(LocTy, Base.getReg(), Disp.getImm()), ++MOI);
+}
+
+std::pair<StackMaps::Location, MachineInstr::const_mop_iterator>
+X86AsmPrinter::stackmapOperandParser(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE) {
+
+ typedef StackMaps::Location Location;
+
+ const MachineOperand &MOP = *MOI;
+ assert(!MOP.isRegMask() && (!MOP.isReg() || !MOP.isImplicit()) &&
+ "Register mask and implicit operands should not be processed.");
+
+ if (MOP.isImm()) {
+ switch (MOP.getImm()) {
+ default: llvm_unreachable("Unrecognized operand type.");
+ case StackMaps::DirectMemRefOp:
+ return parseMemoryOperand(StackMaps::Location::Direct,
+ llvm::next(MOI), MOE);
+ case StackMaps::IndirectMemRefOp:
+ return parseMemoryOperand(StackMaps::Location::Indirect,
+ llvm::next(MOI), MOE);
+ case StackMaps::ConstantOp: {
+ ++MOI;
+ assert(MOI->isImm() && "Expected constant operand.");
+ int64_t Imm = MOI->getImm();
+ return std::make_pair(Location(Location::Constant, 0, Imm), ++MOI);
+ }
+ }
+ }
+
+ // Otherwise this is a reg operand.
+ assert(MOP.isReg() && "Expected register operand here.");
+ assert(TargetRegisterInfo::isPhysicalRegister(MOP.getReg()) &&
+ "Virtreg operands should have been rewritten before now.");
+ return std::make_pair(Location(Location::Register, MOP.getReg(), 0), ++MOI);
+}
+
+static MachineInstr::const_mop_iterator
+getStackMapEndMOP(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE) {
+ for (; MOI != MOE; ++MOI)
+ if (MOI->isRegMask() || (MOI->isReg() && MOI->isImplicit()))
+ break;
+
+ return MOI;
+}
+
+static void LowerSTACKMAP(MCStreamer &OutStreamer,
+ X86MCInstLower &MCInstLowering,
+ StackMaps &SM,
+ const MachineInstr &MI)
+{
+ int64_t ID = MI.getOperand(0).getImm();
+ unsigned NumNOPBytes = MI.getOperand(1).getImm();
+
+ assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
+ SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), 2),
+ getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
+ // Emit padding.
+ for (unsigned i = 0; i < NumNOPBytes; ++i)
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+}
+
+static void LowerPATCHPOINT(MCStreamer &OutStreamer,
+ X86MCInstLower &MCInstLowering,
+ StackMaps &SM,
+ const MachineInstr &MI)
+{
+ int64_t ID = MI.getOperand(0).getImm();
+ assert((int32_t)ID == ID && "Stack maps hold 32-bit IDs");
+
+ // Get the number of arguments participating in the call. This number was
+ // adjusted during call lowering by subtracting stack args.
+ int64_t StackMapIdx = MI.getOperand(3).getImm() + 4;
+ assert(StackMapIdx <= MI.getNumOperands() && "Patchpoint dropped args.");
+
+ SM.recordStackMap(MI, ID, llvm::next(MI.operands_begin(), StackMapIdx),
+ getStackMapEndMOP(MI.operands_begin(), MI.operands_end()));
+
+ // Emit call. We need to know how many bytes we encoded here.
+ unsigned EncodedBytes = 2;
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::CALL64r)
+ .addReg(MI.getOperand(2).getReg()));
+
+ // Emit padding.
+ unsigned NumNOPBytes = MI.getOperand(1).getImm();
+ assert(NumNOPBytes >= EncodedBytes &&
+ "Patchpoint can't request size less than the length of a call.");
+
+ for (unsigned i = EncodedBytes; i < NumNOPBytes; ++i)
+ OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));
+}
+
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
switch (MI->getOpcode()) {
@@ -775,6 +893,12 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
.addExpr(DotExpr));
return;
}
+
+ case TargetOpcode::STACKMAP:
+ return LowerSTACKMAP(OutStreamer, MCInstLowering, SM, *MI);
+
+ case TargetOpcode::PATCHPOINT:
+ return LowerPATCHPOINT(OutStreamer, MCInstLowering, SM, *MI);
}
MCInst TmpInst;
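
One detail of LowerPATCHPOINT worth spelling out: the <numBytes> operand is a total encoding budget for the patchable region. The register-indirect call is assumed to encode in exactly two bytes, and the rest of the budget is filled with one-byte NOPs so the region has a fixed size that a runtime can later overwrite in place. A sketch of that accounting, mirroring the code above; with numBytes = 8, for example, the 2-byte call is followed by six NOPs:

    // Sketch: byte budget for a PATCHPOINT (mirrors LowerPATCHPOINT above).
    // Assumes the CALL64r through a register encodes in 2 bytes, as the
    // patch does.
    unsigned NumBytes     = MI.getOperand(1).getImm(); // total patchable size
    unsigned EncodedBytes = 2;                         // the emitted call
    assert(NumBytes >= EncodedBytes && "patch region smaller than the call");
    for (unsigned i = EncodedBytes; i < NumBytes; ++i)  // one-byte NOP padding
      OutStreamer.EmitInstruction(MCInstBuilder(X86::NOOP));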