author     Abdoulaye Walsimou Gaye <awg@embtoolkit.org>  2013-05-12 19:55:19 +0200
committer  Abdoulaye Walsimou Gaye <awg@embtoolkit.org>  2013-05-12 19:55:19 +0200
commit     23d8d191eff180ba312a4d1b4fec8597e5a988d5 (patch)
tree       25022dea79a8f9b830aad4d2845af6a2a9fa081b /lib/Target
parent     0824091315296ab3da27856b76e7422348d3850d (diff)
parent     fa49d7d6e4384381e4307a0d2495e6e5b15821e3 (diff)
Merge branch 'master' into embtk-support-master
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/AArch64/AArch64FrameLowering.cpp  9
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp  2
-rw-r--r--  lib/Target/ARM/AsmParser/ARMAsmParser.cpp  255
-rw-r--r--  lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp  56
-rw-r--r--  lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp  2
-rw-r--r--  lib/Target/Hexagon/Hexagon.td  30
-rw-r--r--  lib/Target/Hexagon/HexagonFrameLowering.cpp  61
-rw-r--r--  lib/Target/Hexagon/HexagonInstrFormats.td  2
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.cpp  182
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.h  4
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfoV4.td  87
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfoV5.td  23
-rw-r--r--  lib/Target/Hexagon/HexagonRegisterInfo.cpp  10
-rw-r--r--  lib/Target/Hexagon/HexagonRegisterInfo.h  1
-rw-r--r--  lib/Target/Hexagon/HexagonVLIWPacketizer.cpp  1279
-rw-r--r--  lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h  4
-rw-r--r--  lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp  4
-rw-r--r--  lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp  2
-rw-r--r--  lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp  2
-rw-r--r--  lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h  3
-rw-r--r--  lib/Target/Mips/CMakeLists.txt  1
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h  3
-rw-r--r--  lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp  4
-rw-r--r--  lib/Target/Mips/Mips16FrameLowering.cpp  9
-rw-r--r--  lib/Target/Mips/Mips16HardFloat.cpp  141
-rw-r--r--  lib/Target/Mips/Mips16HardFloat.h  54
-rw-r--r--  lib/Target/Mips/Mips16ISelLowering.cpp  11
-rw-r--r--  lib/Target/Mips/MipsCallingConv.td  10
-rw-r--r--  lib/Target/Mips/MipsISelLowering.cpp  40
-rw-r--r--  lib/Target/Mips/MipsISelLowering.h  14
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.cpp  4
-rw-r--r--  lib/Target/Mips/MipsRegisterInfo.h  1
-rw-r--r--  lib/Target/Mips/MipsSEFrameLowering.cpp  13
-rw-r--r--  lib/Target/Mips/MipsSubtarget.cpp  8
-rw-r--r--  lib/Target/Mips/MipsSubtarget.h  11
-rw-r--r--  lib/Target/Mips/MipsTargetMachine.cpp  3
-rw-r--r--  lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h  2
-rw-r--r--  lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp  2
-rw-r--r--  lib/Target/PowerPC/PPCFrameLowering.cpp  16
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h  3
-rw-r--r--  lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h  3
-rw-r--r--  lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h  3
-rw-r--r--  lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp  4
-rw-r--r--  lib/Target/SystemZ/SystemZFrameLowering.cpp  9
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmParser.cpp  8
-rw-r--r--  lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp  2
-rw-r--r--  lib/Target/X86/X86FrameLowering.cpp  18
-rw-r--r--  lib/Target/X86/X86InstrInfo.td  7
-rw-r--r--  lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp  2
-rw-r--r--  lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h  2
-rw-r--r--  lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp  4
-rw-r--r--  lib/Target/XCore/XCoreFrameLowering.cpp  14
58 files changed, 896 insertions, 1560 deletions
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index daa7f1d5ef..0474aaf487 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -54,7 +54,6 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
bool NeedsFrameMoves = MMI.hasDebugInfo()
|| MF.getFunction()->needsUnwindTableEntry();
@@ -98,7 +97,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation Dst(MachineLocation::VirtualFP);
MachineLocation Src(AArch64::XSP, NumInitialBytes);
- Moves.push_back(MachineMove(SPLabel, Dst, Src));
+ MMI.addFrameMove(SPLabel, Dst, Src);
}
// Otherwise we need to set the frame pointer and/or add a second stack
@@ -133,7 +132,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(FPLabel);
MachineLocation Dst(MachineLocation::VirtualFP);
MachineLocation Src(AArch64::X29, -MFI->getObjectOffset(X29FrameIdx));
- Moves.push_back(MachineMove(FPLabel, Dst, Src));
+ MMI.addFrameMove(FPLabel, Dst, Src);
}
FPNeedsSetting = false;
@@ -165,7 +164,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation Dst(MachineLocation::VirtualFP);
MachineLocation Src(AArch64::XSP, NumResidualBytes + NumInitialBytes);
- Moves.push_back(MachineMove(CSLabel, Dst, Src));
+ MMI.addFrameMove(CSLabel, Dst, Src);
}
// And any callee-saved registers (it's fine to leave them to the end here,
@@ -183,7 +182,7 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation Dst(MachineLocation::VirtualFP,
MFI->getObjectOffset(I->getFrameIdx()));
MachineLocation Src(I->getReg());
- Moves.push_back(MachineMove(CSLabel, Dst, Src));
+ MMI.addFrameMove(CSLabel, Dst, Src);
}
}
}
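The AArch64 hunks above replace direct pushes onto MMI's frame-move vector with the new MMI.addFrameMove() helper. A minimal sketch of the two idioms side by side, assuming (as the mechanical substitution suggests) that addFrameMove simply appends to the same list; the function name recordCFAOffset is illustrative and not part of the commit:

#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MachineLocation.h"
using namespace llvm;

static void recordCFAOffset(MachineModuleInfo &MMI, MCSymbol *Label,
                            unsigned SPReg, int Offset) {
  MachineLocation Dst(MachineLocation::VirtualFP); // rule: the virtual FP (CFA) ...
  MachineLocation Src(SPReg, Offset);              // ... is defined as SPReg + Offset
  // Old idiom (removed above):
  //   MMI.getFrameMoves().push_back(MachineMove(Label, Dst, Src));
  // New idiom:
  MMI.addFrameMove(Label, Dst, Src);
}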
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
index 819eeadb44..3435217bb2 100644
--- a/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -57,7 +57,7 @@ static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) {
return X;
}
-static MCAsmInfo *createAArch64MCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createAArch64MCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
MCAsmInfo *MAI = new AArch64ELFMCAsmInfo();
diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
index 1dd2953b29..4d3bf34d48 100644
--- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
+++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp
@@ -49,6 +49,20 @@ class ARMAsmParser : public MCTargetAsmParser {
MCAsmParser &Parser;
const MCRegisterInfo *MRI;
+ // Unwind directives state
+ SMLoc FnStartLoc;
+ SMLoc CantUnwindLoc;
+ SMLoc PersonalityLoc;
+ SMLoc HandlerDataLoc;
+ int FPReg;
+ void resetUnwindDirectiveParserState() {
+ FnStartLoc = SMLoc();
+ CantUnwindLoc = SMLoc();
+ PersonalityLoc = SMLoc();
+ HandlerDataLoc = SMLoc();
+ FPReg = -1;
+ }
+
// Map of register aliases registers via the .req directive.
StringMap<unsigned> RegisterReqs;
@@ -113,6 +127,14 @@ class ARMAsmParser : public MCTargetAsmParser {
bool parseDirectiveUnreq(SMLoc L);
bool parseDirectiveArch(SMLoc L);
bool parseDirectiveEabiAttr(SMLoc L);
+ bool parseDirectiveFnStart(SMLoc L);
+ bool parseDirectiveFnEnd(SMLoc L);
+ bool parseDirectiveCantUnwind(SMLoc L);
+ bool parseDirectivePersonality(SMLoc L);
+ bool parseDirectiveHandlerData(SMLoc L);
+ bool parseDirectiveSetFP(SMLoc L);
+ bool parseDirectivePad(SMLoc L);
+ bool parseDirectiveRegSave(SMLoc L, bool IsVector);
StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
bool &CarrySetting, unsigned &ProcessorIMod,
@@ -242,7 +264,7 @@ public:
};
ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
- : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
+ : MCTargetAsmParser(), STI(_STI), Parser(_Parser), FPReg(-1) {
MCAsmParserExtension::Initialize(_Parser);
// Cache the MCRegisterInfo.
@@ -7658,6 +7680,24 @@ bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
return parseDirectiveArch(DirectiveID.getLoc());
else if (IDVal == ".eabi_attribute")
return parseDirectiveEabiAttr(DirectiveID.getLoc());
+ else if (IDVal == ".fnstart")
+ return parseDirectiveFnStart(DirectiveID.getLoc());
+ else if (IDVal == ".fnend")
+ return parseDirectiveFnEnd(DirectiveID.getLoc());
+ else if (IDVal == ".cantunwind")
+ return parseDirectiveCantUnwind(DirectiveID.getLoc());
+ else if (IDVal == ".personality")
+ return parseDirectivePersonality(DirectiveID.getLoc());
+ else if (IDVal == ".handlerdata")
+ return parseDirectiveHandlerData(DirectiveID.getLoc());
+ else if (IDVal == ".setfp")
+ return parseDirectiveSetFP(DirectiveID.getLoc());
+ else if (IDVal == ".pad")
+ return parseDirectivePad(DirectiveID.getLoc());
+ else if (IDVal == ".save")
+ return parseDirectiveRegSave(DirectiveID.getLoc(), false);
+ else if (IDVal == ".vsave")
+ return parseDirectiveRegSave(DirectiveID.getLoc(), true);
return true;
}
@@ -7858,6 +7898,219 @@ bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
return true;
}
+/// parseDirectiveFnStart
+/// ::= .fnstart
+bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
+ if (FnStartLoc.isValid()) {
+ Error(L, ".fnstart starts before the end of previous one");
+ Error(FnStartLoc, "previous .fnstart starts here");
+ return true;
+ }
+
+ FnStartLoc = L;
+ getParser().getStreamer().EmitFnStart();
+ return false;
+}
+
+/// parseDirectiveFnEnd
+/// ::= .fnend
+bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .fnend directive");
+
+ // Reset the unwind directives parser state
+ resetUnwindDirectiveParserState();
+
+ getParser().getStreamer().EmitFnEnd();
+ return false;
+}
+
+/// parseDirectiveCantUnwind
+/// ::= .cantunwind
+bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
+ // Check the ordering of unwind directives
+ CantUnwindLoc = L;
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .cantunwind directive");
+ if (HandlerDataLoc.isValid()) {
+ Error(L, ".cantunwind can't be used with .handlerdata directive");
+ Error(HandlerDataLoc, ".handlerdata was specified here");
+ return true;
+ }
+ if (PersonalityLoc.isValid()) {
+ Error(L, ".cantunwind can't be used with .personality directive");
+ Error(PersonalityLoc, ".personality was specified here");
+ return true;
+ }
+
+ getParser().getStreamer().EmitCantUnwind();
+ return false;
+}
+
+/// parseDirectivePersonality
+/// ::= .personality name
+bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
+ // Check the ordering of unwind directives
+ PersonalityLoc = L;
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .personality directive");
+ if (CantUnwindLoc.isValid()) {
+ Error(L, ".personality can't be used with .cantunwind directive");
+ Error(CantUnwindLoc, ".cantunwind was specified here");
+ return true;
+ }
+ if (HandlerDataLoc.isValid()) {
+ Error(L, ".personality must precede .handlerdata directive");
+ Error(HandlerDataLoc, ".handlerdata was specified here");
+ return true;
+ }
+
+ // Parse the name of the personality routine
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Parser.eatToEndOfStatement();
+ return Error(L, "unexpected input in .personality directive.");
+ }
+ StringRef Name(Parser.getTok().getIdentifier());
+ Parser.Lex();
+
+ MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
+ getParser().getStreamer().EmitPersonality(PR);
+ return false;
+}
+
+/// parseDirectiveHandlerData
+/// ::= .handlerdata
+bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
+ // Check the ordering of unwind directives
+ HandlerDataLoc = L;
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .personality directive");
+ if (CantUnwindLoc.isValid()) {
+ Error(L, ".handlerdata can't be used with .cantunwind directive");
+ Error(CantUnwindLoc, ".cantunwind was specified here");
+ return true;
+ }
+
+ getParser().getStreamer().EmitHandlerData();
+ return false;
+}
+
+/// parseDirectiveSetFP
+/// ::= .setfp fpreg, spreg [, offset]
+bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .setfp directive");
+ if (HandlerDataLoc.isValid())
+ return Error(L, ".setfp must precede .handlerdata directive");
+
+ // Parse fpreg
+ SMLoc NewFPRegLoc = Parser.getTok().getLoc();
+ int NewFPReg = tryParseRegister();
+ if (NewFPReg == -1)
+ return Error(NewFPRegLoc, "frame pointer register expected");
+
+ // Consume comma
+ if (!Parser.getTok().is(AsmToken::Comma))
+ return Error(Parser.getTok().getLoc(), "comma expected");
+ Parser.Lex(); // skip comma
+
+ // Parse spreg
+ SMLoc NewSPRegLoc = Parser.getTok().getLoc();
+ int NewSPReg = tryParseRegister();
+ if (NewSPReg == -1)
+ return Error(NewSPRegLoc, "stack pointer register expected");
+
+ if (NewSPReg != ARM::SP && NewSPReg != FPReg)
+ return Error(NewSPRegLoc,
+ "register should be either $sp or the latest fp register");
+
+ // Update the frame pointer register
+ FPReg = NewFPReg;
+
+ // Parse offset
+ int64_t Offset = 0;
+ if (Parser.getTok().is(AsmToken::Comma)) {
+ Parser.Lex(); // skip comma
+
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
+ return Error(Parser.getTok().getLoc(), "'#' expected");
+ }
+ Parser.Lex(); // skip hash token.
+
+ const MCExpr *OffsetExpr;
+ SMLoc ExLoc = Parser.getTok().getLoc();
+ SMLoc EndLoc;
+ if (getParser().parseExpression(OffsetExpr, EndLoc))
+ return Error(ExLoc, "malformed setfp offset");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
+ if (!CE)
+ return Error(ExLoc, "setfp offset must be an immediate");
+
+ Offset = CE->getValue();
+ }
+
+ getParser().getStreamer().EmitSetFP(static_cast<unsigned>(NewFPReg),
+ static_cast<unsigned>(NewSPReg),
+ Offset);
+ return false;
+}
+
+/// parseDirective
+/// ::= .pad offset
+bool ARMAsmParser::parseDirectivePad(SMLoc L) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .pad directive");
+ if (HandlerDataLoc.isValid())
+ return Error(L, ".pad must precede .handlerdata directive");
+
+ // Parse the offset
+ if (Parser.getTok().isNot(AsmToken::Hash) &&
+ Parser.getTok().isNot(AsmToken::Dollar)) {
+ return Error(Parser.getTok().getLoc(), "'#' expected");
+ }
+ Parser.Lex(); // skip hash token.
+
+ const MCExpr *OffsetExpr;
+ SMLoc ExLoc = Parser.getTok().getLoc();
+ SMLoc EndLoc;
+ if (getParser().parseExpression(OffsetExpr, EndLoc))
+ return Error(ExLoc, "malformed pad offset");
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
+ if (!CE)
+ return Error(ExLoc, "pad offset must be an immediate");
+
+ getParser().getStreamer().EmitPad(CE->getValue());
+ return false;
+}
+
+/// parseDirectiveRegSave
+/// ::= .save { registers }
+/// ::= .vsave { registers }
+bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
+ // Check the ordering of unwind directives
+ if (!FnStartLoc.isValid())
+ return Error(L, ".fnstart must precede .save or .vsave directives");
+ if (HandlerDataLoc.isValid())
+ return Error(L, ".save or .vsave must precede .handlerdata directive");
+
+ // Parse the register list
+ SmallVector<MCParsedAsmOperand*, 1> Operands;
+ if (parseRegisterList(Operands))
+ return true;
+ ARMOperand *Op = (ARMOperand*)Operands[0];
+ if (!IsVector && !Op->isRegList())
+ return Error(L, ".save expects GPR registers");
+ if (IsVector && !Op->isDPRRegList())
+ return Error(L, ".vsave expects DPR registers");
+
+ getParser().getStreamer().EmitRegSave(Op->getRegList(), IsVector);
+ return false;
+}
+
/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
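The new .fnstart/.fnend/.cantunwind/.personality/.handlerdata/.setfp/.pad/.save parsers above mostly enforce ordering constraints between the unwind directives before forwarding to the streamer. A standalone C++ sketch of those ordering rules, with illustrative names and simplified error handling (the real parser reports source locations through Error() instead of throwing):

#include <stdexcept>

struct UnwindDirectiveState {
  bool SeenFnStart = false, SeenCantUnwind = false;
  bool SeenPersonality = false, SeenHandlerData = false;

  void fnstart() {
    if (SeenFnStart) throw std::runtime_error(".fnstart before previous .fnend");
    SeenFnStart = true;
  }
  void cantunwind() {
    if (!SeenFnStart) throw std::runtime_error(".fnstart must precede .cantunwind");
    if (SeenPersonality || SeenHandlerData)
      throw std::runtime_error(".cantunwind conflicts with .personality/.handlerdata");
    SeenCantUnwind = true;
  }
  void personality() {
    if (!SeenFnStart) throw std::runtime_error(".fnstart must precede .personality");
    if (SeenCantUnwind) throw std::runtime_error(".personality conflicts with .cantunwind");
    if (SeenHandlerData) throw std::runtime_error(".personality must precede .handlerdata");
    SeenPersonality = true;
  }
  void handlerdata() {
    if (!SeenFnStart) throw std::runtime_error(".fnstart must precede .handlerdata");
    if (SeenCantUnwind) throw std::runtime_error(".handlerdata conflicts with .cantunwind");
    SeenHandlerData = true;
  }
  // .fnend clears everything, as resetUnwindDirectiveParserState() does above.
  void fnend() { *this = UnwindDirectiveState(); }
};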
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 6c3d247668..82d296ffd7 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -204,6 +204,7 @@ private:
void EmitPersonalityFixup(StringRef Name);
void CollectUnwindOpcodes();
+ void FlushUnwindOpcodes(bool AllowCompactModel0);
void SwitchToEHSection(const char *Prefix, unsigned Type, unsigned Flags,
SectionKind Kind, const MCSymbol &Fn);
@@ -333,22 +334,8 @@ void ARMELFStreamer::EmitFnEnd() {
assert(FnStart && ".fnstart must preceeds .fnend");
// Emit unwind opcodes if there is no .handlerdata directive
- if (!ExTab && !CantUnwind) {
- CollectUnwindOpcodes();
-
- unsigned PersonalityIndex = UnwindOpAsm.getPersonalityIndex();
- if (PersonalityIndex == AEABI_UNWIND_CPP_PR1 ||
- PersonalityIndex == AEABI_UNWIND_CPP_PR2) {
- // For the __aeabi_unwind_cpp_pr1 and __aeabi_unwind_cpp_pr2, we have to
- // emit the unwind opcodes in the corresponding ".ARM.extab" section, and
- // then emit a reference to these unwind opcodes in the second word of
- // the exception index table entry.
- SwitchToExTabSection(*FnStart);
- ExTab = getContext().CreateTempSymbol();
- EmitLabel(ExTab);
- EmitBytes(UnwindOpAsm.data(), 0);
- }
- }
+ if (!ExTab && !CantUnwind)
+ FlushUnwindOpcodes(true);
// Emit the exception index table entry
SwitchToExIdxSection(*FnStart);
@@ -384,6 +371,9 @@ void ARMELFStreamer::EmitFnEnd() {
EmitBytes(UnwindOpAsm.data(), 0);
}
+ // Switch to the section containing FnStart
+ SwitchSection(&FnStart->getSection());
+
// Clean exception handling frame information
Reset();
}
@@ -392,7 +382,18 @@ void ARMELFStreamer::EmitCantUnwind() {
CantUnwind = true;
}
-void ARMELFStreamer::EmitHandlerData() {
+void ARMELFStreamer::FlushUnwindOpcodes(bool AllowCompactModel0) {
+ // Collect and finalize the unwind opcodes
+ CollectUnwindOpcodes();
+
+ // For compact model 0, we have to emit the unwind opcodes in the .ARM.exidx
+ // section. Thus, we don't have to create an entry in the .ARM.extab
+ // section.
+ if (AllowCompactModel0 &&
+ UnwindOpAsm.getPersonalityIndex() == AEABI_UNWIND_CPP_PR0)
+ return;
+
+ // Switch to .ARM.extab section.
SwitchToExTabSection(*FnStart);
// Create .ARM.extab label for offset in .ARM.exidx
@@ -400,21 +401,24 @@ void ARMELFStreamer::EmitHandlerData() {
ExTab = getContext().CreateTempSymbol();
EmitLabel(ExTab);
- // Emit Personality
- assert(Personality && ".personality directive must preceed .handlerdata");
-
- const MCSymbolRefExpr *PersonalityRef =
- MCSymbolRefExpr::Create(Personality,
- MCSymbolRefExpr::VK_ARM_PREL31,
- getContext());
+ // Emit personality
+ if (Personality) {
+ const MCSymbolRefExpr *PersonalityRef =
+ MCSymbolRefExpr::Create(Personality,
+ MCSymbolRefExpr::VK_ARM_PREL31,
+ getContext());
- EmitValue(PersonalityRef, 4, 0);
+ EmitValue(PersonalityRef, 4, 0);
+ }
// Emit unwind opcodes
- CollectUnwindOpcodes();
EmitBytes(UnwindOpAsm.data(), 0);
}
+void ARMELFStreamer::EmitHandlerData() {
+ FlushUnwindOpcodes(false);
+}
+
void ARMELFStreamer::EmitPersonality(const MCSymbol *Per) {
Personality = Per;
UnwindOpAsm.setPersonality(Per);
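FlushUnwindOpcodes() above folds the emission logic previously duplicated between EmitFnEnd() and EmitHandlerData() into one place. The key decision it encodes is when an .ARM.extab entry is needed at all; a small sketch of that predicate, with names mirroring the AEABI personality indexes used in the hunk:

enum PersonalityIndex {
  AEABI_UNWIND_CPP_PR0,  // compact model 0: opcodes fit in the .ARM.exidx entry itself
  AEABI_UNWIND_CPP_PR1,
  AEABI_UNWIND_CPP_PR2
};

// .fnend may use the compact encoding (AllowCompactModel0 == true); an explicit
// .handlerdata must always materialize an .ARM.extab entry (== false).
static bool needsExTabEntry(PersonalityIndex PI, bool AllowCompactModel0) {
  return !(AllowCompactModel0 && PI == AEABI_UNWIND_CPP_PR0);
}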
diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
index f09fb5a94f..57239f8011 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp
@@ -159,7 +159,7 @@ static MCRegisterInfo *createARMMCRegisterInfo(StringRef Triple) {
return X;
}
-static MCAsmInfo *createARMMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createARMMCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
if (TheTriple.isOSDarwin())
diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td
index 9b3a64316a..568798c3a4 100644
--- a/lib/Target/Hexagon/Hexagon.td
+++ b/lib/Target/Hexagon/Hexagon.td
@@ -120,15 +120,39 @@ def getPredNewOpcode : InstrMapping {
}
//===----------------------------------------------------------------------===//
+// Generate mapping table to relate .new predicated instructions with their old
+// format.
+//
+def getPredOldOpcode : InstrMapping {
+ let FilterClass = "PredNewRel";
+ let RowFields = ["BaseOpcode", "PredSense", "isNVStore"];
+ let ColFields = ["PNewValue"];
+ let KeyCol = ["new"];
+ let ValueCols = [[""]];
+}
+
+//===----------------------------------------------------------------------===//
// Generate mapping table to relate store instructions with their new-value
// format.
//
def getNewValueOpcode : InstrMapping {
let FilterClass = "NewValueRel";
let RowFields = ["BaseOpcode", "PredSense", "PNewValue"];
- let ColFields = ["isNVStore"];
- let KeyCol = ["0"];
- let ValueCols = [["1"]];
+ let ColFields = ["NValueST"];
+ let KeyCol = ["false"];
+ let ValueCols = [["true"]];
+}
+
+//===----------------------------------------------------------------------===//
+// Generate mapping table to relate new-value store instructions with their old
+// format.
+//
+def getNonNVStore : InstrMapping {
+ let FilterClass = "NewValueRel";
+ let RowFields = ["BaseOpcode", "PredSense", "PNewValue"];
+ let ColFields = ["NValueST"];
+ let KeyCol = ["true"];
+ let ValueCols = [["false"]];
}
def getBasedWithImmOffset : InstrMapping {
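The three InstrMapping tables above (getPredOldOpcode, getNewValueOpcode, getNonNVStore) let the backend flip an opcode between its .new and plain forms by table lookup instead of hand-written switches, which is what the HexagonInstrInfo.cpp changes later in this diff rely on. A toy, self-contained illustration of the round-trip relationship the generated lookups provide; the opcodes and the -1 "no mapping" sentinel here are hypothetical stand-ins:

#include <cassert>

enum Opcode { ST_old, ST_prednew, ST_nv, OP_NONE = -1 };

// Stand-ins for the TableGen-generated lookup functions.
int getPredOldOpcode(int Opc)  { return Opc == ST_prednew ? ST_old : OP_NONE; }
int getNewValueOpcode(int Opc) { return Opc == ST_old     ? ST_nv  : OP_NONE; }
int getNonNVStore(int Opc)     { return Opc == ST_nv      ? ST_old : OP_NONE; }

int main() {
  // Promoting a store to its new-value form and demoting it again is lossless.
  assert(getNonNVStore(getNewValueOpcode(ST_old)) == ST_old);
  return 0;
}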
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index de993ee875..f9bc83bb20 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -113,8 +113,6 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
MO.setImm(MFI->getMaxCallFrameSize());
}
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-
if (needsFrameMoves) {
// Advance CFA. DW_CFA_def_cfa
unsigned FPReg = QRI->getFrameRegister();
@@ -122,17 +120,17 @@ void HexagonFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation Dst(MachineLocation::VirtualFP);
MachineLocation Src(FPReg, -8);
- Moves.push_back(MachineMove(0, Dst, Src));
+ MMI.addFrameMove(0, Dst, Src);
// R31 = (R31 - #4)
MachineLocation LRDst(RAReg, -4);
MachineLocation LRSrc(RAReg);
- Moves.push_back(MachineMove(0, LRDst, LRSrc));
+ MMI.addFrameMove(0, LRDst, LRSrc);
// R30 = (R30 - #8)
MachineLocation SPDst(FPReg, -8);
MachineLocation SPSrc(FPReg);
- Moves.push_back(MachineMove(0, SPDst, SPSrc));
+ MMI.addFrameMove(0, SPDst, SPSrc);
}
//
@@ -174,30 +172,55 @@ void HexagonFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock::iterator MBBI = prior(MBB.end());
DebugLoc dl = MBBI->getDebugLoc();
//
- // Only insert deallocframe if we need to.
+ // Only insert deallocframe if we need to. Also at -O0. See comment
+ // in emitPrologue above.
//
- if (hasFP(MF)) {
+ if (hasFP(MF) || MF.getTarget().getOptLevel() == CodeGenOpt::None) {
MachineBasicBlock::iterator MBBI = prior(MBB.end());
MachineBasicBlock::iterator MBBI_end = MBB.end();
- //
- // For Hexagon, we don't need the frame size.
- //
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int NumBytes = (int) MFI->getStackSize();
const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
-
+ // Handle EH_RETURN.
+ if (MBBI->getOpcode() == Hexagon::EH_RETURN_JMPR) {
+ assert(MBBI->getOperand(0).isReg() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME));
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::ADD_rr),
+ Hexagon::R29).addReg(Hexagon::R29).addReg(Hexagon::R28);
+ return;
+ }
// Replace 'jumpr r31' instruction with dealloc_return for V4 and higher
// versions.
if (STI.hasV4TOps() && MBBI->getOpcode() == Hexagon::JMPret
&& !DisableDeallocRet) {
- // Remove jumpr node.
- MBB.erase(MBBI);
+ // Check for RESTORE_DEALLOC_RET_JMP_V4 call. Don't emit an extra DEALLOC
+ // instruction if we encounter it.
+ MachineBasicBlock::iterator BeforeJMPR =
+ MBB.begin() == MBBI ? MBBI : prior(MBBI);
+ if (BeforeJMPR != MBBI &&
+ BeforeJMPR->getOpcode() == Hexagon::RESTORE_DEALLOC_RET_JMP_V4) {
+ // Remove the JMPR node.
+ MBB.erase(MBBI);
+ return;
+ }
+
// Add dealloc_return.
- BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4))
- .addImm(NumBytes);
- } else { // Add deallocframe for V2 and V3.
- BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME)).addImm(NumBytes);
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI_end, dl, TII.get(Hexagon::DEALLOC_RET_V4));
+ // Transfer the function live-out registers.
+ MIB->copyImplicitOps(*MBB.getParent(), &*MBBI);
+ // Remove the JUMPR node.
+ MBB.erase(MBBI);
+ } else { // Add deallocframe for V2 and V3, and V4 tail calls.
+ // Check for RESTORE_DEALLOC_BEFORE_TAILCALL_V4. We don't need an extra
+ // DEALLOCFRAME instruction after it.
+ MachineBasicBlock::iterator Term = MBB.getFirstTerminator();
+ MachineBasicBlock::iterator I =
+ Term == MBB.begin() ? MBB.end() : prior(Term);
+ if (I != MBB.end() &&
+ I->getOpcode() == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4)
+ return;
+
+ BuildMI(MBB, MBBI, dl, TII.get(Hexagon::DEALLOCFRAME));
}
}
}
diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td
index f97143ff01..e71386ada2 100644
--- a/lib/Target/Hexagon/HexagonInstrFormats.td
+++ b/lib/Target/Hexagon/HexagonInstrFormats.td
@@ -158,6 +158,7 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
string CextOpcode = "";
string PredSense = "";
string PNewValue = "";
+ string NValueST = ""; // Set to "true" for new-value stores.
string InputType = ""; // Input is "imm" or "reg" type.
string isMEMri = "false"; // Set to "true" for load/store with MEMri operand.
string isFloat = "false"; // Set to "true" for the floating-point load/store.
@@ -166,6 +167,7 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
let PredSense = !if(isPredicated, !if(isPredicatedFalse, "false", "true"),
"");
let PNewValue = !if(isPredicatedNew, "new", "");
+ let NValueST = !if(isNVStore, "true", "false");
// *** Must match MCTargetDesc/HexagonBaseInfo.h ***
}
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index 469e8021fa..33a937308d 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -630,111 +630,6 @@ bool HexagonInstrInfo::isBranch (const MachineInstr *MI) const {
return MI->getDesc().isBranch();
}
-bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
- switch (MI->getOpcode()) {
- default: return false;
- // Store Byte
- case Hexagon::STrib_nv_V4:
- case Hexagon::STrib_indexed_nv_V4:
- case Hexagon::STrib_indexed_shl_nv_V4:
- case Hexagon::STrib_shl_nv_V4:
- case Hexagon::STb_GP_nv_V4:
- case Hexagon::POST_STbri_nv_V4:
- case Hexagon::STrib_cPt_nv_V4:
- case Hexagon::STrib_cdnPt_nv_V4:
- case Hexagon::STrib_cNotPt_nv_V4:
- case Hexagon::STrib_cdnNotPt_nv_V4:
- case Hexagon::STrib_indexed_cPt_nv_V4:
- case Hexagon::STrib_indexed_cdnPt_nv_V4:
- case Hexagon::STrib_indexed_cNotPt_nv_V4:
- case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::POST_STbri_cPt_nv_V4:
- case Hexagon::POST_STbri_cdnPt_nv_V4:
- case Hexagon::POST_STbri_cNotPt_nv_V4:
- case Hexagon::POST_STbri_cdnNotPt_nv_V4:
- case Hexagon::STb_GP_cPt_nv_V4:
- case Hexagon::STb_GP_cNotPt_nv_V4:
- case Hexagon::STb_GP_cdnPt_nv_V4:
- case Hexagon::STb_GP_cdnNotPt_nv_V4:
- case Hexagon::STrib_abs_nv_V4:
- case Hexagon::STrib_abs_cPt_nv_V4:
- case Hexagon::STrib_abs_cdnPt_nv_V4:
- case Hexagon::STrib_abs_cNotPt_nv_V4:
- case Hexagon::STrib_abs_cdnNotPt_nv_V4:
-
- // Store Halfword
- case Hexagon::STrih_nv_V4:
- case Hexagon::STrih_indexed_nv_V4:
- case Hexagon::STrih_indexed_shl_nv_V4:
- case Hexagon::STrih_shl_nv_V4:
- case Hexagon::STh_GP_nv_V4:
- case Hexagon::POST_SThri_nv_V4:
- case Hexagon::STrih_cPt_nv_V4:
- case Hexagon::STrih_cdnPt_nv_V4:
- case Hexagon::STrih_cNotPt_nv_V4:
- case Hexagon::STrih_cdnNotPt_nv_V4:
- case Hexagon::STrih_indexed_cPt_nv_V4:
- case Hexagon::STrih_indexed_cdnPt_nv_V4:
- case Hexagon::STrih_indexed_cNotPt_nv_V4:
- case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
- case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::POST_SThri_cPt_nv_V4:
- case Hexagon::POST_SThri_cdnPt_nv_V4:
- case Hexagon::POST_SThri_cNotPt_nv_V4:
- case Hexagon::POST_SThri_cdnNotPt_nv_V4:
- case Hexagon::STh_GP_cPt_nv_V4:
- case Hexagon::STh_GP_cNotPt_nv_V4:
- case Hexagon::STh_GP_cdnPt_nv_V4:
- case Hexagon::STh_GP_cdnNotPt_nv_V4:
- case Hexagon::STrih_abs_nv_V4:
- case Hexagon::STrih_abs_cPt_nv_V4:
- case Hexagon::STrih_abs_cdnPt_nv_V4:
- case Hexagon::STrih_abs_cNotPt_nv_V4:
- case Hexagon::STrih_abs_cdnNotPt_nv_V4:
-
- // Store Word
- case Hexagon::STriw_nv_V4:
- case Hexagon::STriw_indexed_nv_V4:
- case Hexagon::STriw_indexed_shl_nv_V4:
- case Hexagon::STriw_shl_nv_V4:
- case Hexagon::STw_GP_nv_V4:
- case Hexagon::POST_STwri_nv_V4:
- case Hexagon::STriw_cPt_nv_V4:
- case Hexagon::STriw_cdnPt_nv_V4:
- case Hexagon::STriw_cNotPt_nv_V4:
- case Hexagon::STriw_cdnNotPt_nv_V4:
- case Hexagon::STriw_indexed_cPt_nv_V4:
- case Hexagon::STriw_indexed_cdnPt_nv_V4:
- case Hexagon::STriw_indexed_cNotPt_nv_V4:
- case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
- case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::POST_STwri_cPt_nv_V4:
- case Hexagon::POST_STwri_cdnPt_nv_V4:
- case Hexagon::POST_STwri_cNotPt_nv_V4:
- case Hexagon::POST_STwri_cdnNotPt_nv_V4:
- case Hexagon::STw_GP_cPt_nv_V4:
- case Hexagon::STw_GP_cNotPt_nv_V4:
- case Hexagon::STw_GP_cdnPt_nv_V4:
- case Hexagon::STw_GP_cdnNotPt_nv_V4:
- case Hexagon::STriw_abs_nv_V4:
- case Hexagon::STriw_abs_cPt_nv_V4:
- case Hexagon::STriw_abs_cdnPt_nv_V4:
- case Hexagon::STriw_abs_cNotPt_nv_V4:
- case Hexagon::STriw_abs_cdnNotPt_nv_V4:
- return true;
- }
-}
-
bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
if (isNewValueJump(MI))
return true;
@@ -862,6 +757,18 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
}
}
+// New Value Store instructions.
+bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return ((F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask);
+}
+
+bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
+ const uint64_t F = get(Opcode).TSFlags;
+
+ return ((F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask);
+}
int HexagonInstrInfo::
getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
@@ -1120,6 +1027,16 @@ bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
}
+// Returns true, if a ST insn can be promoted to a new-value store.
+bool HexagonInstrInfo::mayBeNewStore(const MachineInstr *MI) const {
+ const HexagonRegisterInfo& QRI = getRegisterInfo();
+ const uint64_t F = MI->getDesc().TSFlags;
+
+ return ((F >> HexagonII::mayNVStorePos) &
+ HexagonII::mayNVStoreMask &
+ QRI.Subtarget.hasV4TOps());
+}
+
bool
HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
std::vector<MachineOperand> &Pred) const {
@@ -1304,6 +1221,8 @@ isValidAutoIncImm(const EVT VT, const int Offset) const {
bool HexagonInstrInfo::
isMemOp(const MachineInstr *MI) const {
+// return MI->getDesc().mayLoad() && MI->getDesc().mayStore();
+
switch (MI->getOpcode())
{
default: return false;
@@ -1611,6 +1530,59 @@ bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const {
(isPredicated(MI) && isPredicatedNew(MI)));
}
+// Returns the most basic instruction for the .new predicated instructions and
+// new-value stores.
+// For example, all of the following instructions will be converted back to the
+// same instruction:
+// 1) if (p0.new) memw(R0+#0) = R1.new --->
+// 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
+// 3) if (p0.new) memw(R0+#0) = R1 --->
+//
+
+int HexagonInstrInfo::GetDotOldOp(const int opc) const {
+ int NewOp = opc;
+ if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
+ NewOp = Hexagon::getPredOldOpcode(NewOp);
+ if (NewOp < 0)
+ assert(0 && "Couldn't change predicate new instruction to its old form.");
+ }
+
+ if (isNewValueStore(NewOp)) { // Convert into non new-value format
+ NewOp = Hexagon::getNonNVStore(NewOp);
+ if (NewOp < 0)
+ assert(0 && "Couldn't change new-value store to its old form.");
+ }
+ return NewOp;
+}
+
+// Return the new value instruction for a given store.
+int HexagonInstrInfo::GetDotNewOp(const MachineInstr* MI) const {
+ int NVOpcode = Hexagon::getNewValueOpcode(MI->getOpcode());
+ if (NVOpcode >= 0) // Valid new-value store instruction.
+ return NVOpcode;
+
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Unknown .new type");
+ // store new value byte
+ case Hexagon::STrib_shl_V4:
+ return Hexagon::STrib_shl_nv_V4;
+
+ case Hexagon::STrih_shl_V4:
+ return Hexagon::STrih_shl_nv_V4;
+
+ case Hexagon::STriw_f:
+ return Hexagon::STriw_nv_V4;
+
+ case Hexagon::STriw_indexed_f:
+ return Hexagon::STriw_indexed_nv_V4;
+
+ case Hexagon::STriw_shl_V4:
+ return Hexagon::STriw_shl_nv_V4;
+
+ }
+ return 0;
+}
+
// Return .new predicate version for an instruction.
int HexagonInstrInfo::GetDotNewPredOp(MachineInstr *MI,
const MachineBranchProbabilityInfo
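With the new-value relationships encoded in TSFlags and the generated mapping tables, isNewValueStore() above shrinks from a roughly 100-case switch to a single bit test, and GetDotOldOp()/GetDotNewOp() become thin wrappers over the lookups. A minimal sketch of the flag test, assuming NVStorePos/NVStoreMask are the constants from MCTargetDesc/HexagonBaseInfo.h that the InstrFormats comment earlier in this diff says the flags must match:

#include <cstdint>

// Generic TSFlags query: shift the field into place, then mask it out.
static bool testInstrFlag(uint64_t TSFlags, unsigned Pos, uint64_t Mask) {
  return (TSFlags >> Pos) & Mask;  // e.g. Pos = HexagonII::NVStorePos, Mask = NVStoreMask
}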
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.h b/lib/Target/Hexagon/HexagonInstrInfo.h
index 2becc02570..d1209dec3b 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.h
+++ b/lib/Target/Hexagon/HexagonInstrInfo.h
@@ -185,15 +185,19 @@ public:
bool isNewValueInst(const MachineInstr* MI) const;
bool isNewValue(const MachineInstr* MI) const;
bool isDotNewInst(const MachineInstr* MI) const;
+ int GetDotOldOp(const int opc) const;
+ int GetDotNewOp(const MachineInstr* MI) const;
int GetDotNewPredOp(MachineInstr *MI,
const MachineBranchProbabilityInfo
*MBPI) const;
+ bool mayBeNewStore(const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
bool isExtendable(const MachineInstr* MI) const;
bool isExtended(const MachineInstr* MI) const;
bool isPostIncrement(const MachineInstr* MI) const;
bool isNewValueStore(const MachineInstr* MI) const;
+ bool isNewValueStore(unsigned Opcode) const;
bool isNewValueJump(const MachineInstr* MI) const;
bool isNewValueJumpCandidate(const MachineInstr *MI) const;
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV4.td b/lib/Target/Hexagon/HexagonInstrInfoV4.td
index c7f4edfe67..022a7f6136 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV4.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV4.td
@@ -2564,8 +2564,9 @@ def NTSTBIT_ri : SInst<(outs PredRegs:$dst), (ins IntRegs:$src1, u5Imm:$src2),
//Deallocate frame and return.
// dealloc_return
let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1 in {
- def DEALLOC_RET_V4 : NVInst_V4<(outs), (ins i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_V4 : LD0Inst<(outs), (ins),
"dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -2574,9 +2575,10 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1, isPredicable = 1,
// Restore registers and dealloc return function call.
let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
Defs = [R29, R30, R31, PC] in {
+let validSubTargets = HasV4SubT in
def RESTORE_DEALLOC_RET_JMP_V4 : JInst<(outs),
(ins calltarget:$dst),
- "jump $dst // Restore_and_dealloc_return",
+ "jump $dst",
[]>,
Requires<[HasV4T]>;
}
@@ -2584,9 +2586,10 @@ let isCall = 1, isBarrier = 1, isReturn = 1, isTerminator = 1,
// Restore registers and dealloc frame before a tail call.
let isCall = 1, isBarrier = 1,
Defs = [R29, R30, R31, PC] in {
+let validSubTargets = HasV4SubT in
def RESTORE_DEALLOC_BEFORE_TAILCALL_V4 : JInst<(outs),
(ins calltarget:$dst),
- "call $dst // Restore_and_dealloc_before_tailcall",
+ "call $dst",
[]>,
Requires<[HasV4T]>;
}
@@ -2603,10 +2606,11 @@ let isCall = 1, isBarrier = 1,
// if (Ps) dealloc_return
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
isPredicated = 1 in {
- def DEALLOC_RET_cPt_V4 : NVInst_V4<(outs),
- (ins PredRegs:$src1, i32imm:$amt1),
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cPt_V4 : LD0Inst<(outs),
+ (ins PredRegs:$src1),
"if ($src1) dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -2614,10 +2618,10 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps) dealloc_return
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
- isPredicated = 1 in {
- def DEALLOC_RET_cNotPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
+ isPredicated = 1, isPredicatedFalse = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cNotPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if (!$src1) dealloc_return",
[]>,
Requires<[HasV4T]>;
@@ -2625,10 +2629,10 @@ let isReturn = 1, isTerminator = 1,
// if (Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
isPredicated = 1 in {
- def DEALLOC_RET_cdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cdnPnt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if ($src1.new) dealloc_return:nt",
[]>,
Requires<[HasV4T]>;
@@ -2636,10 +2640,10 @@ let isReturn = 1, isTerminator = 1,
// if (!Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
- isPredicated = 1 in {
- def DEALLOC_RET_cNotdnPnt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
+ isPredicated = 1, isPredicatedFalse = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cNotdnPnt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if (!$src1.new) dealloc_return:nt",
[]>,
Requires<[HasV4T]>;
@@ -2647,21 +2651,21 @@ let isReturn = 1, isTerminator = 1,
// if (Ps.new) dealloc_return:t
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
isPredicated = 1 in {
- def DEALLOC_RET_cdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cdnPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if ($src1.new) dealloc_return:t",
[]>,
Requires<[HasV4T]>;
}
-// if (!Ps.new) dealloc_return:nt
+// if (!Ps.new) dealloc_return:nt
let isReturn = 1, isTerminator = 1,
- Defs = [R29, R30, R31, PC], Uses = [R29, R31], neverHasSideEffects = 1,
- isPredicated = 1 in {
- def DEALLOC_RET_cNotdnPt_V4 : NVInst_V4<(outs), (ins PredRegs:$src1,
- i32imm:$amt1),
+ Defs = [R29, R30, R31, PC], Uses = [R30], neverHasSideEffects = 1,
+ isPredicated = 1, isPredicatedFalse = 1 in {
+let validSubTargets = HasV4SubT in
+ def DEALLOC_RET_cNotdnPt_V4 : LD0Inst<(outs), (ins PredRegs:$src1),
"if (!$src1.new) dealloc_return:t",
[]>,
Requires<[HasV4T]>;
@@ -3063,37 +3067,42 @@ def : Pat<(HexagonCONST32_GP tblockaddress:$src1),
(TFRI_V4 tblockaddress:$src1)>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if($src1) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if($src1) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50, isPredicatedFalse = 1,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if(!$src1) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if(!$src1) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cdnPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if($src1.new) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if($src1.new) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
-let AddedComplexity=50, neverHasSideEffects = 1, isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, AddedComplexity=50, isPredicatedFalse = 1,
+neverHasSideEffects = 1, isPredicated = 1, validSubTargets = HasV4SubT in
def TFRI_cdnNotPt_V4 : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, globaladdress:$src2),
- "if(!$src1.new) $dst = ##$src2",
+ (ins PredRegs:$src1, s16Ext:$src2),
+ "if(!$src1.new) $dst = #$src2",
[]>,
Requires<[HasV4T]>;
let AddedComplexity = 50, Predicates = [HasV4T] in
def : Pat<(HexagonCONST32_GP tglobaladdr:$src1),
- (TFRI_V4 tglobaladdr:$src1)>;
+ (TFRI_V4 tglobaladdr:$src1)>,
+ Requires<[HasV4T]>;
// Load - Indirect with long offset: These instructions take global address
diff --git a/lib/Target/Hexagon/HexagonInstrInfoV5.td b/lib/Target/Hexagon/HexagonInstrInfoV5.td
index 92d098cc04..9da6074558 100644
--- a/lib/Target/Hexagon/HexagonInstrInfoV5.td
+++ b/lib/Target/Hexagon/HexagonInstrInfoV5.td
@@ -26,22 +26,29 @@ def CONST32_Float_Real : LDInst<(outs IntRegs:$dst), (ins f32imm:$src1),
// Only works with single precision fp value.
// For double precision, use CONST64_float_real, as 64bit transfer
// can only hold 40-bit values - 32 from const ext + 8 bit immediate.
-let isMoveImm = 1, isReMaterializable = 1, isPredicable = 1 in
-def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32imm:$src1),
- "$dst = ##$src1",
+// Make sure that complexity is more than the CONST32 pattern in
+// HexagonInstrInfo.td patterns.
+let isExtended = 1, opExtendable = 1, isMoveImm = 1, isReMaterializable = 1,
+isPredicable = 1, AddedComplexity = 30, validSubTargets = HasV5SubT,
+isCodeGenOnly = 1 in
+def TFRI_f : ALU32_ri<(outs IntRegs:$dst), (ins f32Ext:$src1),
+ "$dst = #$src1",
[(set IntRegs:$dst, fpimm:$src1)]>,
Requires<[HasV5T]>;
+let isExtended = 1, opExtendable = 2, isPredicated = 1,
+neverHasSideEffects = 1, validSubTargets = HasV5SubT in
def TFRI_cPt_f : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, f32imm:$src2),
- "if ($src1) $dst = ##$src2",
+ (ins PredRegs:$src1, f32Ext:$src2),
+ "if ($src1) $dst = #$src2",
[]>,
Requires<[HasV5T]>;
-let isPredicated = 1 in
+let isExtended = 1, opExtendable = 2, isPredicated = 1, isPredicatedFalse = 1,
+neverHasSideEffects = 1, validSubTargets = HasV5SubT in
def TFRI_cNotPt_f : ALU32_ri<(outs IntRegs:$dst),
- (ins PredRegs:$src1, f32imm:$src2),
- "if (!$src1) $dst = ##$src2",
+ (ins PredRegs:$src1, f32Ext:$src2),
+ "if (!$src1) $dst =#$src2",
[]>,
Requires<[HasV5T]>;
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
index d8b4e2fcb3..d59d7d3382 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp
@@ -295,16 +295,6 @@ unsigned HexagonRegisterInfo::getStackRegister() const {
return Hexagon::R29;
}
-void HexagonRegisterInfo::getInitialFrameState(std::vector<MachineMove>
- &Moves) const
-{
- // VirtualFP = (R30 + #0).
- unsigned FPReg = getFrameRegister();
- MachineLocation Dst(MachineLocation::VirtualFP);
- MachineLocation Src(FPReg, 0);
- Moves.push_back(MachineMove(0, Dst, Src));
-}
-
unsigned HexagonRegisterInfo::getEHExceptionRegister() const {
llvm_unreachable("What is the exception register");
}
diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.h b/lib/Target/Hexagon/HexagonRegisterInfo.h
index 8a3f94a3fd..c74155ac3b 100644
--- a/lib/Target/Hexagon/HexagonRegisterInfo.h
+++ b/lib/Target/Hexagon/HexagonRegisterInfo.h
@@ -78,7 +78,6 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo {
unsigned getRARegister() const;
unsigned getFrameRegister(const MachineFunction &MF) const;
unsigned getFrameRegister() const;
- void getInitialFrameState(std::vector<MachineMove> &Moves) const;
unsigned getStackRegister() const;
// Exception handling queries.
diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
index fbe67e3882..c508d124b3 100644
--- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
+++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp
@@ -164,7 +164,6 @@ namespace {
unsigned, std::map <MachineInstr*, SUnit*>);
bool isNewifiable(MachineInstr* MI);
bool isCondInst(MachineInstr* MI);
- bool IsNewifyStore (MachineInstr* MI);
bool tryAllocateResourcesForConstExt(MachineInstr* MI);
bool canReserveResourcesForConstExt(MachineInstr *MI);
void reserveResourcesForConstExt(MachineInstr* MI);
@@ -383,104 +382,6 @@ static bool IsControlFlow(MachineInstr* MI) {
return (MI->getDesc().isTerminator() || MI->getDesc().isCall());
}
-// Function returns true if an instruction can be promoted to the new-value
-// store. It will always return false for v2 and v3.
-// It lists all the conditional and unconditional stores that can be promoted
-// to the new-value stores.
-
-bool HexagonPacketizerList::IsNewifyStore (MachineInstr* MI) {
- const HexagonRegisterInfo* QRI =
- (const HexagonRegisterInfo *) TM.getRegisterInfo();
- switch (MI->getOpcode())
- {
- // store byte
- case Hexagon::STrib:
- case Hexagon::STrib_indexed:
- case Hexagon::STrib_indexed_shl_V4:
- case Hexagon::STrib_shl_V4:
- case Hexagon::STb_GP_V4:
- case Hexagon::POST_STbri:
- case Hexagon::STrib_cPt:
- case Hexagon::STrib_cdnPt_V4:
- case Hexagon::STrib_cNotPt:
- case Hexagon::STrib_cdnNotPt_V4:
- case Hexagon::STrib_indexed_cPt:
- case Hexagon::STrib_indexed_cdnPt_V4:
- case Hexagon::STrib_indexed_cNotPt:
- case Hexagon::STrib_indexed_cdnNotPt_V4:
- case Hexagon::STrib_indexed_shl_cPt_V4:
- case Hexagon::STrib_indexed_shl_cdnPt_V4:
- case Hexagon::STrib_indexed_shl_cNotPt_V4:
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
- case Hexagon::POST_STbri_cPt:
- case Hexagon::POST_STbri_cdnPt_V4:
- case Hexagon::POST_STbri_cNotPt:
- case Hexagon::POST_STbri_cdnNotPt_V4:
- case Hexagon::STb_GP_cPt_V4:
- case Hexagon::STb_GP_cNotPt_V4:
- case Hexagon::STb_GP_cdnPt_V4:
- case Hexagon::STb_GP_cdnNotPt_V4:
-
- // store halfword
- case Hexagon::STrih:
- case Hexagon::STrih_indexed:
- case Hexagon::STrih_indexed_shl_V4:
- case Hexagon::STrih_shl_V4:
- case Hexagon::STh_GP_V4:
- case Hexagon::POST_SThri:
- case Hexagon::STrih_cPt:
- case Hexagon::STrih_cdnPt_V4:
- case Hexagon::STrih_cNotPt:
- case Hexagon::STrih_cdnNotPt_V4:
- case Hexagon::STrih_indexed_cPt:
- case Hexagon::STrih_indexed_cdnPt_V4:
- case Hexagon::STrih_indexed_cNotPt:
- case Hexagon::STrih_indexed_cdnNotPt_V4:
- case Hexagon::STrih_indexed_shl_cPt_V4:
- case Hexagon::STrih_indexed_shl_cdnPt_V4:
- case Hexagon::STrih_indexed_shl_cNotPt_V4:
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
- case Hexagon::POST_SThri_cPt:
- case Hexagon::POST_SThri_cdnPt_V4:
- case Hexagon::POST_SThri_cNotPt:
- case Hexagon::POST_SThri_cdnNotPt_V4:
- case Hexagon::STh_GP_cPt_V4:
- case Hexagon::STh_GP_cNotPt_V4:
- case Hexagon::STh_GP_cdnPt_V4:
- case Hexagon::STh_GP_cdnNotPt_V4:
-
- // store word
- case Hexagon::STriw:
- case Hexagon::STriw_indexed:
- case Hexagon::STriw_indexed_shl_V4:
- case Hexagon::STriw_shl_V4:
- case Hexagon::STw_GP_V4:
- case Hexagon::POST_STwri:
- case Hexagon::STriw_cPt:
- case Hexagon::STriw_cdnPt_V4:
- case Hexagon::STriw_cNotPt:
- case Hexagon::STriw_cdnNotPt_V4:
- case Hexagon::STriw_indexed_cPt:
- case Hexagon::STriw_indexed_cdnPt_V4:
- case Hexagon::STriw_indexed_cNotPt:
- case Hexagon::STriw_indexed_cdnNotPt_V4:
- case Hexagon::STriw_indexed_shl_cPt_V4:
- case Hexagon::STriw_indexed_shl_cdnPt_V4:
- case Hexagon::STriw_indexed_shl_cNotPt_V4:
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
- case Hexagon::POST_STwri_cPt:
- case Hexagon::POST_STwri_cdnPt_V4:
- case Hexagon::POST_STwri_cNotPt:
- case Hexagon::POST_STwri_cdnNotPt_V4:
- case Hexagon::STw_GP_cPt_V4:
- case Hexagon::STw_GP_cNotPt_V4:
- case Hexagon::STw_GP_cdnPt_V4:
- case Hexagon::STw_GP_cdnNotPt_V4:
- return QRI->Subtarget.hasV4TOps();
- }
- return false;
-}
-
static bool IsLoopN(MachineInstr *MI) {
return (MI->getOpcode() == Hexagon::LOOP0_i ||
MI->getOpcode() == Hexagon::LOOP0_r);
@@ -498,254 +399,11 @@ static bool DoesModifyCalleeSavedReg(MachineInstr *MI,
return false;
}
-// Return the new value instruction for a given store.
-static int GetDotNewOp(const int opc) {
- switch (opc) {
- default: llvm_unreachable("Unknown .new type");
- // store new value byte
- case Hexagon::STrib:
- return Hexagon::STrib_nv_V4;
-
- case Hexagon::STrib_indexed:
- return Hexagon::STrib_indexed_nv_V4;
-
- case Hexagon::STrib_indexed_shl_V4:
- return Hexagon::STrib_indexed_shl_nv_V4;
-
- case Hexagon::STrib_shl_V4:
- return Hexagon::STrib_shl_nv_V4;
-
- case Hexagon::STb_GP_V4:
- return Hexagon::STb_GP_nv_V4;
-
- case Hexagon::POST_STbri:
- return Hexagon::POST_STbri_nv_V4;
-
- case Hexagon::STrib_cPt:
- return Hexagon::STrib_cPt_nv_V4;
-
- case Hexagon::STrib_cdnPt_V4:
- return Hexagon::STrib_cdnPt_nv_V4;
-
- case Hexagon::STrib_cNotPt:
- return Hexagon::STrib_cNotPt_nv_V4;
-
- case Hexagon::STrib_cdnNotPt_V4:
- return Hexagon::STrib_cdnNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_cPt:
- return Hexagon::STrib_indexed_cPt_nv_V4;
-
- case Hexagon::STrib_indexed_cdnPt_V4:
- return Hexagon::STrib_indexed_cdnPt_nv_V4;
-
- case Hexagon::STrib_indexed_cNotPt:
- return Hexagon::STrib_indexed_cNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_cdnNotPt_V4:
- return Hexagon::STrib_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cPt_V4:
- return Hexagon::STrib_indexed_shl_cPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cdnPt_V4:
- return Hexagon::STrib_indexed_shl_cdnPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cNotPt_V4:
- return Hexagon::STrib_indexed_shl_cNotPt_nv_V4;
-
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4:
- return Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_STbri_cPt:
- return Hexagon::POST_STbri_cPt_nv_V4;
-
- case Hexagon::POST_STbri_cdnPt_V4:
- return Hexagon::POST_STbri_cdnPt_nv_V4;
-
- case Hexagon::POST_STbri_cNotPt:
- return Hexagon::POST_STbri_cNotPt_nv_V4;
-
- case Hexagon::POST_STbri_cdnNotPt_V4:
- return Hexagon::POST_STbri_cdnNotPt_nv_V4;
-
- case Hexagon::STb_GP_cPt_V4:
- return Hexagon::STb_GP_cPt_nv_V4;
-
- case Hexagon::STb_GP_cNotPt_V4:
- return Hexagon::STb_GP_cNotPt_nv_V4;
-
- case Hexagon::STb_GP_cdnPt_V4:
- return Hexagon::STb_GP_cdnPt_nv_V4;
-
- case Hexagon::STb_GP_cdnNotPt_V4:
- return Hexagon::STb_GP_cdnNotPt_nv_V4;
-
- // store new value halfword
- case Hexagon::STrih:
- return Hexagon::STrih_nv_V4;
-
- case Hexagon::STrih_indexed:
- return Hexagon::STrih_indexed_nv_V4;
-
- case Hexagon::STrih_indexed_shl_V4:
- return Hexagon::STrih_indexed_shl_nv_V4;
-
- case Hexagon::STrih_shl_V4:
- return Hexagon::STrih_shl_nv_V4;
-
- case Hexagon::STh_GP_V4:
- return Hexagon::STh_GP_nv_V4;
-
- case Hexagon::POST_SThri:
- return Hexagon::POST_SThri_nv_V4;
-
- case Hexagon::STrih_cPt:
- return Hexagon::STrih_cPt_nv_V4;
-
- case Hexagon::STrih_cdnPt_V4:
- return Hexagon::STrih_cdnPt_nv_V4;
-
- case Hexagon::STrih_cNotPt:
- return Hexagon::STrih_cNotPt_nv_V4;
-
- case Hexagon::STrih_cdnNotPt_V4:
- return Hexagon::STrih_cdnNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_cPt:
- return Hexagon::STrih_indexed_cPt_nv_V4;
-
- case Hexagon::STrih_indexed_cdnPt_V4:
- return Hexagon::STrih_indexed_cdnPt_nv_V4;
-
- case Hexagon::STrih_indexed_cNotPt:
- return Hexagon::STrih_indexed_cNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_cdnNotPt_V4:
- return Hexagon::STrih_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cPt_V4:
- return Hexagon::STrih_indexed_shl_cPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cdnPt_V4:
- return Hexagon::STrih_indexed_shl_cdnPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cNotPt_V4:
- return Hexagon::STrih_indexed_shl_cNotPt_nv_V4;
-
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4:
- return Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_SThri_cPt:
- return Hexagon::POST_SThri_cPt_nv_V4;
-
- case Hexagon::POST_SThri_cdnPt_V4:
- return Hexagon::POST_SThri_cdnPt_nv_V4;
-
- case Hexagon::POST_SThri_cNotPt:
- return Hexagon::POST_SThri_cNotPt_nv_V4;
-
- case Hexagon::POST_SThri_cdnNotPt_V4:
- return Hexagon::POST_SThri_cdnNotPt_nv_V4;
-
- case Hexagon::STh_GP_cPt_V4:
- return Hexagon::STh_GP_cPt_nv_V4;
-
- case Hexagon::STh_GP_cNotPt_V4:
- return Hexagon::STh_GP_cNotPt_nv_V4;
-
- case Hexagon::STh_GP_cdnPt_V4:
- return Hexagon::STh_GP_cdnPt_nv_V4;
-
- case Hexagon::STh_GP_cdnNotPt_V4:
- return Hexagon::STh_GP_cdnNotPt_nv_V4;
-
- // store new value word
- case Hexagon::STriw:
- return Hexagon::STriw_nv_V4;
-
- case Hexagon::STriw_indexed:
- return Hexagon::STriw_indexed_nv_V4;
-
- case Hexagon::STriw_indexed_shl_V4:
- return Hexagon::STriw_indexed_shl_nv_V4;
-
- case Hexagon::STriw_shl_V4:
- return Hexagon::STriw_shl_nv_V4;
-
- case Hexagon::STw_GP_V4:
- return Hexagon::STw_GP_nv_V4;
-
- case Hexagon::POST_STwri:
- return Hexagon::POST_STwri_nv_V4;
-
- case Hexagon::STriw_cPt:
- return Hexagon::STriw_cPt_nv_V4;
-
- case Hexagon::STriw_cdnPt_V4:
- return Hexagon::STriw_cdnPt_nv_V4;
-
- case Hexagon::STriw_cNotPt:
- return Hexagon::STriw_cNotPt_nv_V4;
-
- case Hexagon::STriw_cdnNotPt_V4:
- return Hexagon::STriw_cdnNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_cPt:
- return Hexagon::STriw_indexed_cPt_nv_V4;
-
- case Hexagon::STriw_indexed_cdnPt_V4:
- return Hexagon::STriw_indexed_cdnPt_nv_V4;
-
- case Hexagon::STriw_indexed_cNotPt:
- return Hexagon::STriw_indexed_cNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_cdnNotPt_V4:
- return Hexagon::STriw_indexed_cdnNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cPt_V4:
- return Hexagon::STriw_indexed_shl_cPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cdnPt_V4:
- return Hexagon::STriw_indexed_shl_cdnPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cNotPt_V4:
- return Hexagon::STriw_indexed_shl_cNotPt_nv_V4;
-
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4:
- return Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4;
-
- case Hexagon::POST_STwri_cPt:
- return Hexagon::POST_STwri_cPt_nv_V4;
-
- case Hexagon::POST_STwri_cdnPt_V4:
- return Hexagon::POST_STwri_cdnPt_nv_V4;
-
- case Hexagon::POST_STwri_cNotPt:
- return Hexagon::POST_STwri_cNotPt_nv_V4;
-
- case Hexagon::POST_STwri_cdnNotPt_V4:
- return Hexagon::POST_STwri_cdnNotPt_nv_V4;
-
- case Hexagon::STw_GP_cPt_V4:
- return Hexagon::STw_GP_cPt_nv_V4;
-
- case Hexagon::STw_GP_cNotPt_V4:
- return Hexagon::STw_GP_cNotPt_nv_V4;
-
- case Hexagon::STw_GP_cdnPt_V4:
- return Hexagon::STw_GP_cdnPt_nv_V4;
-
- case Hexagon::STw_GP_cdnNotPt_V4:
- return Hexagon::STw_GP_cdnNotPt_nv_V4;
-
- }
-}
-
// Returns true if an instruction can be promoted to .new predicate
// or new-value store.
bool HexagonPacketizerList::isNewifiable(MachineInstr* MI) {
- if ( isCondInst(MI) || IsNewifyStore(MI))
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+ if ( isCondInst(MI) || QII->mayBeNewStore(MI))
return true;
else
return false;
@@ -781,894 +439,36 @@ bool HexagonPacketizerList::PromoteToDotNew(MachineInstr* MI,
if (RC == &Hexagon::PredRegsRegClass)
NewOpcode = QII->GetDotNewPredOp(MI, MBPI);
else
- NewOpcode = GetDotNewOp(MI->getOpcode());
+ NewOpcode = QII->GetDotNewOp(MI);
MI->setDesc(QII->get(NewOpcode));
return true;
}
-// Returns the most basic instruction for the .new predicated instructions and
-// new-value stores.
-// For example, all of the following instructions will be converted back to the
-// same instruction:
-// 1) if (p0.new) memw(R0+#0) = R1.new --->
-// 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
-// 3) if (p0.new) memw(R0+#0) = R1 --->
-//
-// To understand the translation of instruction 1 to its original form, consider
-// a packet with 3 instructions.
-// { p0 = cmp.eq(R0,R1)
-// if (p0.new) R2 = add(R3, R4)
-// R5 = add (R3, R1)
-// }
-// if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
-//
-// This instruction can be part of the previous packet only if both p0 and R2
-// are promoted to .new values. This promotion happens in steps, first
-// predicate register is promoted to .new and in the next iteration R2 is
-// promoted. Therefore, in case of dependence check failure (due to R5) during
-// next iteration, it should be converted back to its most basic form.
-
-static int GetDotOldOp(const int opc) {
- switch (opc) {
- default: llvm_unreachable("Unknown .old type");
- case Hexagon::TFR_cdnPt:
- return Hexagon::TFR_cPt;
-
- case Hexagon::TFR_cdnNotPt:
- return Hexagon::TFR_cNotPt;
-
- case Hexagon::TFRI_cdnPt:
- return Hexagon::TFRI_cPt;
-
- case Hexagon::TFRI_cdnNotPt:
- return Hexagon::TFRI_cNotPt;
-
- case Hexagon::JMP_tnew_t:
- return Hexagon::JMP_t;
-
- case Hexagon::JMP_fnew_t:
- return Hexagon::JMP_f;
-
- case Hexagon::JMPR_tnew_tV3:
- return Hexagon::JMPR_t;
-
- case Hexagon::JMPR_fnew_tV3:
- return Hexagon::JMPR_f;
-
- // Load double word
-
- case Hexagon::LDrid_cdnPt :
- return Hexagon::LDrid_cPt;
-
- case Hexagon::LDrid_cdnNotPt :
- return Hexagon::LDrid_cNotPt;
-
- case Hexagon::LDrid_indexed_cdnPt :
- return Hexagon::LDrid_indexed_cPt;
-
- case Hexagon::LDrid_indexed_cdnNotPt :
- return Hexagon::LDrid_indexed_cNotPt;
-
- case Hexagon::POST_LDrid_cdnPt_V4 :
- return Hexagon::POST_LDrid_cPt;
-
- case Hexagon::POST_LDrid_cdnNotPt_V4 :
- return Hexagon::POST_LDrid_cNotPt;
-
- // Load word
-
- case Hexagon::LDriw_cdnPt :
- return Hexagon::LDriw_cPt;
-
- case Hexagon::LDriw_cdnNotPt :
- return Hexagon::LDriw_cNotPt;
-
- case Hexagon::LDriw_indexed_cdnPt :
- return Hexagon::LDriw_indexed_cPt;
-
- case Hexagon::LDriw_indexed_cdnNotPt :
- return Hexagon::LDriw_indexed_cNotPt;
-
- case Hexagon::POST_LDriw_cdnPt_V4 :
- return Hexagon::POST_LDriw_cPt;
-
- case Hexagon::POST_LDriw_cdnNotPt_V4 :
- return Hexagon::POST_LDriw_cNotPt;
-
- // Load half
-
- case Hexagon::LDrih_cdnPt :
- return Hexagon::LDrih_cPt;
-
- case Hexagon::LDrih_cdnNotPt :
- return Hexagon::LDrih_cNotPt;
-
- case Hexagon::LDrih_indexed_cdnPt :
- return Hexagon::LDrih_indexed_cPt;
-
- case Hexagon::LDrih_indexed_cdnNotPt :
- return Hexagon::LDrih_indexed_cNotPt;
-
- case Hexagon::POST_LDrih_cdnPt_V4 :
- return Hexagon::POST_LDrih_cPt;
-
- case Hexagon::POST_LDrih_cdnNotPt_V4 :
- return Hexagon::POST_LDrih_cNotPt;
-
- // Load byte
-
- case Hexagon::LDrib_cdnPt :
- return Hexagon::LDrib_cPt;
-
- case Hexagon::LDrib_cdnNotPt :
- return Hexagon::LDrib_cNotPt;
-
- case Hexagon::LDrib_indexed_cdnPt :
- return Hexagon::LDrib_indexed_cPt;
-
- case Hexagon::LDrib_indexed_cdnNotPt :
- return Hexagon::LDrib_indexed_cNotPt;
-
- case Hexagon::POST_LDrib_cdnPt_V4 :
- return Hexagon::POST_LDrib_cPt;
-
- case Hexagon::POST_LDrib_cdnNotPt_V4 :
- return Hexagon::POST_LDrib_cNotPt;
-
- // Load unsigned half
-
- case Hexagon::LDriuh_cdnPt :
- return Hexagon::LDriuh_cPt;
-
- case Hexagon::LDriuh_cdnNotPt :
- return Hexagon::LDriuh_cNotPt;
-
- case Hexagon::LDriuh_indexed_cdnPt :
- return Hexagon::LDriuh_indexed_cPt;
-
- case Hexagon::LDriuh_indexed_cdnNotPt :
- return Hexagon::LDriuh_indexed_cNotPt;
-
- case Hexagon::POST_LDriuh_cdnPt_V4 :
- return Hexagon::POST_LDriuh_cPt;
-
- case Hexagon::POST_LDriuh_cdnNotPt_V4 :
- return Hexagon::POST_LDriuh_cNotPt;
-
- // Load unsigned byte
- case Hexagon::LDriub_cdnPt :
- return Hexagon::LDriub_cPt;
-
- case Hexagon::LDriub_cdnNotPt :
- return Hexagon::LDriub_cNotPt;
-
- case Hexagon::LDriub_indexed_cdnPt :
- return Hexagon::LDriub_indexed_cPt;
-
- case Hexagon::LDriub_indexed_cdnNotPt :
- return Hexagon::LDriub_indexed_cNotPt;
-
- case Hexagon::POST_LDriub_cdnPt_V4 :
- return Hexagon::POST_LDriub_cPt;
-
- case Hexagon::POST_LDriub_cdnNotPt_V4 :
- return Hexagon::POST_LDriub_cNotPt;
-
- // V4 indexed+scaled Load
-
- case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
- return Hexagon::LDrid_indexed_shl_cPt_V4;
-
- case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDrid_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
- return Hexagon::LDrib_indexed_shl_cPt_V4;
-
- case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDrib_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
- return Hexagon::LDriub_indexed_shl_cPt_V4;
-
- case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDriub_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
- return Hexagon::LDrih_indexed_shl_cPt_V4;
-
- case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDrih_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
- return Hexagon::LDriuh_indexed_shl_cPt_V4;
-
- case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
- return Hexagon::LDriw_indexed_shl_cPt_V4;
-
- case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
- return Hexagon::LDriw_indexed_shl_cNotPt_V4;
-
- // V4 global address load
-
- case Hexagon::LDd_GP_cdnPt_V4:
- return Hexagon::LDd_GP_cPt_V4;
-
- case Hexagon::LDd_GP_cdnNotPt_V4:
- return Hexagon::LDd_GP_cNotPt_V4;
-
- case Hexagon::LDb_GP_cdnPt_V4:
- return Hexagon::LDb_GP_cPt_V4;
-
- case Hexagon::LDb_GP_cdnNotPt_V4:
- return Hexagon::LDb_GP_cNotPt_V4;
-
- case Hexagon::LDub_GP_cdnPt_V4:
- return Hexagon::LDub_GP_cPt_V4;
-
- case Hexagon::LDub_GP_cdnNotPt_V4:
- return Hexagon::LDub_GP_cNotPt_V4;
-
- case Hexagon::LDh_GP_cdnPt_V4:
- return Hexagon::LDh_GP_cPt_V4;
-
- case Hexagon::LDh_GP_cdnNotPt_V4:
- return Hexagon::LDh_GP_cNotPt_V4;
-
- case Hexagon::LDuh_GP_cdnPt_V4:
- return Hexagon::LDuh_GP_cPt_V4;
-
- case Hexagon::LDuh_GP_cdnNotPt_V4:
- return Hexagon::LDuh_GP_cNotPt_V4;
-
- case Hexagon::LDw_GP_cdnPt_V4:
- return Hexagon::LDw_GP_cPt_V4;
-
- case Hexagon::LDw_GP_cdnNotPt_V4:
- return Hexagon::LDw_GP_cNotPt_V4;
-
- // Conditional add
-
- case Hexagon::ADD_ri_cdnPt :
- return Hexagon::ADD_ri_cPt;
- case Hexagon::ADD_ri_cdnNotPt :
- return Hexagon::ADD_ri_cNotPt;
-
- case Hexagon::ADD_rr_cdnPt :
- return Hexagon::ADD_rr_cPt;
- case Hexagon::ADD_rr_cdnNotPt:
- return Hexagon::ADD_rr_cNotPt;
-
- // Conditional logical Operations
-
- case Hexagon::XOR_rr_cdnPt :
- return Hexagon::XOR_rr_cPt;
- case Hexagon::XOR_rr_cdnNotPt :
- return Hexagon::XOR_rr_cNotPt;
-
- case Hexagon::AND_rr_cdnPt :
- return Hexagon::AND_rr_cPt;
- case Hexagon::AND_rr_cdnNotPt :
- return Hexagon::AND_rr_cNotPt;
-
- case Hexagon::OR_rr_cdnPt :
- return Hexagon::OR_rr_cPt;
- case Hexagon::OR_rr_cdnNotPt :
- return Hexagon::OR_rr_cNotPt;
-
- // Conditional Subtract
-
- case Hexagon::SUB_rr_cdnPt :
- return Hexagon::SUB_rr_cPt;
- case Hexagon::SUB_rr_cdnNotPt :
- return Hexagon::SUB_rr_cNotPt;
-
- // Conditional combine
-
- case Hexagon::COMBINE_rr_cdnPt :
- return Hexagon::COMBINE_rr_cPt;
- case Hexagon::COMBINE_rr_cdnNotPt :
- return Hexagon::COMBINE_rr_cNotPt;
-
-// Conditional shift operations
-
- case Hexagon::ASLH_cdnPt_V4 :
- return Hexagon::ASLH_cPt_V4;
- case Hexagon::ASLH_cdnNotPt_V4 :
- return Hexagon::ASLH_cNotPt_V4;
-
- case Hexagon::ASRH_cdnPt_V4 :
- return Hexagon::ASRH_cPt_V4;
- case Hexagon::ASRH_cdnNotPt_V4 :
- return Hexagon::ASRH_cNotPt_V4;
-
- case Hexagon::SXTB_cdnPt_V4 :
- return Hexagon::SXTB_cPt_V4;
- case Hexagon::SXTB_cdnNotPt_V4 :
- return Hexagon::SXTB_cNotPt_V4;
-
- case Hexagon::SXTH_cdnPt_V4 :
- return Hexagon::SXTH_cPt_V4;
- case Hexagon::SXTH_cdnNotPt_V4 :
- return Hexagon::SXTH_cNotPt_V4;
-
- case Hexagon::ZXTB_cdnPt_V4 :
- return Hexagon::ZXTB_cPt_V4;
- case Hexagon::ZXTB_cdnNotPt_V4 :
- return Hexagon::ZXTB_cNotPt_V4;
-
- case Hexagon::ZXTH_cdnPt_V4 :
- return Hexagon::ZXTH_cPt_V4;
- case Hexagon::ZXTH_cdnNotPt_V4 :
- return Hexagon::ZXTH_cNotPt_V4;
-
- // Store byte
-
- case Hexagon::STrib_imm_cdnPt_V4 :
- return Hexagon::STrib_imm_cPt_V4;
-
- case Hexagon::STrib_imm_cdnNotPt_V4 :
- return Hexagon::STrib_imm_cNotPt_V4;
-
- case Hexagon::STrib_cdnPt_nv_V4 :
- case Hexagon::STrib_cPt_nv_V4 :
- case Hexagon::STrib_cdnPt_V4 :
- return Hexagon::STrib_cPt;
-
- case Hexagon::STrib_cdnNotPt_nv_V4 :
- case Hexagon::STrib_cNotPt_nv_V4 :
- case Hexagon::STrib_cdnNotPt_V4 :
- return Hexagon::STrib_cNotPt;
-
- case Hexagon::STrib_indexed_cdnPt_V4 :
- case Hexagon::STrib_indexed_cPt_nv_V4 :
- case Hexagon::STrib_indexed_cdnPt_nv_V4 :
- return Hexagon::STrib_indexed_cPt;
-
- case Hexagon::STrib_indexed_cdnNotPt_V4 :
- case Hexagon::STrib_indexed_cNotPt_nv_V4 :
- case Hexagon::STrib_indexed_cdnNotPt_nv_V4 :
- return Hexagon::STrib_indexed_cNotPt;
-
- case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cPt_nv_V4 :
- case Hexagon::STrib_indexed_shl_cdnPt_V4 :
- return Hexagon::STrib_indexed_shl_cPt_V4;
-
- case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
- case Hexagon::STrib_indexed_shl_cNotPt_nv_V4 :
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STrib_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_STbri_cdnPt_nv_V4 :
- case Hexagon::POST_STbri_cPt_nv_V4 :
- case Hexagon::POST_STbri_cdnPt_V4 :
- return Hexagon::POST_STbri_cPt;
-
- case Hexagon::POST_STbri_cdnNotPt_nv_V4 :
- case Hexagon::POST_STbri_cNotPt_nv_V4:
- case Hexagon::POST_STbri_cdnNotPt_V4 :
- return Hexagon::POST_STbri_cNotPt;
-
- case Hexagon::STb_GP_cdnPt_nv_V4:
- case Hexagon::STb_GP_cdnPt_V4:
- case Hexagon::STb_GP_cPt_nv_V4:
- return Hexagon::STb_GP_cPt_V4;
-
- case Hexagon::STb_GP_cdnNotPt_nv_V4:
- case Hexagon::STb_GP_cdnNotPt_V4:
- case Hexagon::STb_GP_cNotPt_nv_V4:
- return Hexagon::STb_GP_cNotPt_V4;
-
- // Store new-value byte - unconditional
- case Hexagon::STrib_nv_V4:
- return Hexagon::STrib;
-
- case Hexagon::STrib_indexed_nv_V4:
- return Hexagon::STrib_indexed;
-
- case Hexagon::STrib_indexed_shl_nv_V4:
- return Hexagon::STrib_indexed_shl_V4;
-
- case Hexagon::STrib_shl_nv_V4:
- return Hexagon::STrib_shl_V4;
-
- case Hexagon::STb_GP_nv_V4:
- return Hexagon::STb_GP_V4;
-
- case Hexagon::POST_STbri_nv_V4:
- return Hexagon::POST_STbri;
-
- // Store halfword
- case Hexagon::STrih_imm_cdnPt_V4 :
- return Hexagon::STrih_imm_cPt_V4;
-
- case Hexagon::STrih_imm_cdnNotPt_V4 :
- return Hexagon::STrih_imm_cNotPt_V4;
-
- case Hexagon::STrih_cdnPt_nv_V4 :
- case Hexagon::STrih_cPt_nv_V4 :
- case Hexagon::STrih_cdnPt_V4 :
- return Hexagon::STrih_cPt;
-
- case Hexagon::STrih_cdnNotPt_nv_V4 :
- case Hexagon::STrih_cNotPt_nv_V4 :
- case Hexagon::STrih_cdnNotPt_V4 :
- return Hexagon::STrih_cNotPt;
-
- case Hexagon::STrih_indexed_cdnPt_nv_V4:
- case Hexagon::STrih_indexed_cPt_nv_V4 :
- case Hexagon::STrih_indexed_cdnPt_V4 :
- return Hexagon::STrih_indexed_cPt;
-
- case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
- case Hexagon::STrih_indexed_cNotPt_nv_V4 :
- case Hexagon::STrih_indexed_cdnNotPt_V4 :
- return Hexagon::STrih_indexed_cNotPt;
-
- case Hexagon::STrih_indexed_shl_cdnPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cdnPt_V4 :
- return Hexagon::STrih_indexed_shl_cPt_V4;
-
- case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cNotPt_nv_V4 :
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STrih_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_SThri_cdnPt_nv_V4 :
- case Hexagon::POST_SThri_cPt_nv_V4 :
- case Hexagon::POST_SThri_cdnPt_V4 :
- return Hexagon::POST_SThri_cPt;
-
- case Hexagon::POST_SThri_cdnNotPt_nv_V4 :
- case Hexagon::POST_SThri_cNotPt_nv_V4 :
- case Hexagon::POST_SThri_cdnNotPt_V4 :
- return Hexagon::POST_SThri_cNotPt;
-
- case Hexagon::STh_GP_cdnPt_nv_V4:
- case Hexagon::STh_GP_cdnPt_V4:
- case Hexagon::STh_GP_cPt_nv_V4:
- return Hexagon::STh_GP_cPt_V4;
-
- case Hexagon::STh_GP_cdnNotPt_nv_V4:
- case Hexagon::STh_GP_cdnNotPt_V4:
- case Hexagon::STh_GP_cNotPt_nv_V4:
- return Hexagon::STh_GP_cNotPt_V4;
-
- // Store new-value halfword - unconditional
-
- case Hexagon::STrih_nv_V4:
- return Hexagon::STrih;
-
- case Hexagon::STrih_indexed_nv_V4:
- return Hexagon::STrih_indexed;
-
- case Hexagon::STrih_indexed_shl_nv_V4:
- return Hexagon::STrih_indexed_shl_V4;
-
- case Hexagon::STrih_shl_nv_V4:
- return Hexagon::STrih_shl_V4;
-
- case Hexagon::STh_GP_nv_V4:
- return Hexagon::STh_GP_V4;
-
- case Hexagon::POST_SThri_nv_V4:
- return Hexagon::POST_SThri;
-
- // Store word
-
- case Hexagon::STriw_imm_cdnPt_V4 :
- return Hexagon::STriw_imm_cPt_V4;
-
- case Hexagon::STriw_imm_cdnNotPt_V4 :
- return Hexagon::STriw_imm_cNotPt_V4;
-
- case Hexagon::STriw_cdnPt_nv_V4 :
- case Hexagon::STriw_cPt_nv_V4 :
- case Hexagon::STriw_cdnPt_V4 :
- return Hexagon::STriw_cPt;
-
- case Hexagon::STriw_cdnNotPt_nv_V4 :
- case Hexagon::STriw_cNotPt_nv_V4 :
- case Hexagon::STriw_cdnNotPt_V4 :
- return Hexagon::STriw_cNotPt;
-
- case Hexagon::STriw_indexed_cdnPt_nv_V4 :
- case Hexagon::STriw_indexed_cPt_nv_V4 :
- case Hexagon::STriw_indexed_cdnPt_V4 :
- return Hexagon::STriw_indexed_cPt;
-
- case Hexagon::STriw_indexed_cdnNotPt_nv_V4 :
- case Hexagon::STriw_indexed_cNotPt_nv_V4 :
- case Hexagon::STriw_indexed_cdnNotPt_V4 :
- return Hexagon::STriw_indexed_cNotPt;
-
- case Hexagon::STriw_indexed_shl_cdnPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cdnPt_V4 :
- return Hexagon::STriw_indexed_shl_cPt_V4;
-
- case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cNotPt_nv_V4 :
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STriw_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_STwri_cdnPt_nv_V4 :
- case Hexagon::POST_STwri_cPt_nv_V4 :
- case Hexagon::POST_STwri_cdnPt_V4 :
- return Hexagon::POST_STwri_cPt;
-
- case Hexagon::POST_STwri_cdnNotPt_nv_V4 :
- case Hexagon::POST_STwri_cNotPt_nv_V4 :
- case Hexagon::POST_STwri_cdnNotPt_V4 :
- return Hexagon::POST_STwri_cNotPt;
-
- case Hexagon::STw_GP_cdnPt_nv_V4:
- case Hexagon::STw_GP_cdnPt_V4:
- case Hexagon::STw_GP_cPt_nv_V4:
- return Hexagon::STw_GP_cPt_V4;
-
- case Hexagon::STw_GP_cdnNotPt_nv_V4:
- case Hexagon::STw_GP_cdnNotPt_V4:
- case Hexagon::STw_GP_cNotPt_nv_V4:
- return Hexagon::STw_GP_cNotPt_V4;
-
- // Store new-value word - unconditional
-
- case Hexagon::STriw_nv_V4:
- return Hexagon::STriw;
-
- case Hexagon::STriw_indexed_nv_V4:
- return Hexagon::STriw_indexed;
-
- case Hexagon::STriw_indexed_shl_nv_V4:
- return Hexagon::STriw_indexed_shl_V4;
-
- case Hexagon::STriw_shl_nv_V4:
- return Hexagon::STriw_shl_V4;
-
- case Hexagon::STw_GP_nv_V4:
- return Hexagon::STw_GP_V4;
-
- case Hexagon::POST_STwri_nv_V4:
- return Hexagon::POST_STwri;
-
- // Store doubleword
-
- case Hexagon::STrid_cdnPt_V4 :
- return Hexagon::STrid_cPt;
-
- case Hexagon::STrid_cdnNotPt_V4 :
- return Hexagon::STrid_cNotPt;
-
- case Hexagon::STrid_indexed_cdnPt_V4 :
- return Hexagon::STrid_indexed_cPt;
-
- case Hexagon::STrid_indexed_cdnNotPt_V4 :
- return Hexagon::STrid_indexed_cNotPt;
-
- case Hexagon::STrid_indexed_shl_cdnPt_V4 :
- return Hexagon::STrid_indexed_shl_cPt_V4;
-
- case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
- return Hexagon::STrid_indexed_shl_cNotPt_V4;
-
- case Hexagon::POST_STdri_cdnPt_V4 :
- return Hexagon::POST_STdri_cPt;
-
- case Hexagon::POST_STdri_cdnNotPt_V4 :
- return Hexagon::POST_STdri_cNotPt;
-
- case Hexagon::STd_GP_cdnPt_V4 :
- return Hexagon::STd_GP_cPt_V4;
-
- case Hexagon::STd_GP_cdnNotPt_V4 :
- return Hexagon::STd_GP_cNotPt_V4;
-
- }
-}
-
bool HexagonPacketizerList::DemoteToDotOld(MachineInstr* MI) {
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
- int NewOpcode = GetDotOldOp(MI->getOpcode());
+ int NewOpcode = QII->GetDotOldOp(MI->getOpcode());
MI->setDesc(QII->get(NewOpcode));
return true;
}
-// Returns true if an instruction is predicated on p0 and false if it's
-// predicated on !p0.
+enum PredicateKind {
+ PK_False,
+ PK_True,
+ PK_Unknown
+};
-static bool GetPredicateSense(MachineInstr* MI,
- const HexagonInstrInfo *QII) {
+/// Returns PK_True if an instruction is predicated on p0, PK_False if it is
+/// predicated on !p0, and PK_Unknown if it is not predicated at all.
+static PredicateKind getPredicateSense(MachineInstr* MI,
+ const HexagonInstrInfo *QII) {
+ if (!QII->isPredicated(MI))
+ return PK_Unknown;
- switch (MI->getOpcode()) {
- default: llvm_unreachable("Unknown predicate sense of the instruction");
- case Hexagon::TFR_cPt:
- case Hexagon::TFR_cdnPt:
- case Hexagon::TFRI_cPt:
- case Hexagon::TFRI_cdnPt:
- case Hexagon::STrib_cPt :
- case Hexagon::STrib_cdnPt_V4 :
- case Hexagon::STrib_indexed_cPt :
- case Hexagon::STrib_indexed_cdnPt_V4 :
- case Hexagon::STrib_indexed_shl_cPt_V4 :
- case Hexagon::STrib_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_STbri_cPt :
- case Hexagon::POST_STbri_cdnPt_V4 :
- case Hexagon::STrih_cPt :
- case Hexagon::STrih_cdnPt_V4 :
- case Hexagon::STrih_indexed_cPt :
- case Hexagon::STrih_indexed_cdnPt_V4 :
- case Hexagon::STrih_indexed_shl_cPt_V4 :
- case Hexagon::STrih_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_SThri_cPt :
- case Hexagon::POST_SThri_cdnPt_V4 :
- case Hexagon::STriw_cPt :
- case Hexagon::STriw_cdnPt_V4 :
- case Hexagon::STriw_indexed_cPt :
- case Hexagon::STriw_indexed_cdnPt_V4 :
- case Hexagon::STriw_indexed_shl_cPt_V4 :
- case Hexagon::STriw_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_STwri_cPt :
- case Hexagon::POST_STwri_cdnPt_V4 :
- case Hexagon::STrib_imm_cPt_V4 :
- case Hexagon::STrib_imm_cdnPt_V4 :
- case Hexagon::STrid_cPt :
- case Hexagon::STrid_cdnPt_V4 :
- case Hexagon::STrid_indexed_cPt :
- case Hexagon::STrid_indexed_cdnPt_V4 :
- case Hexagon::STrid_indexed_shl_cPt_V4 :
- case Hexagon::STrid_indexed_shl_cdnPt_V4 :
- case Hexagon::POST_STdri_cPt :
- case Hexagon::POST_STdri_cdnPt_V4 :
- case Hexagon::STrih_imm_cPt_V4 :
- case Hexagon::STrih_imm_cdnPt_V4 :
- case Hexagon::STriw_imm_cPt_V4 :
- case Hexagon::STriw_imm_cdnPt_V4 :
- case Hexagon::JMP_tnew_t :
- case Hexagon::LDrid_cPt :
- case Hexagon::LDrid_cdnPt :
- case Hexagon::LDrid_indexed_cPt :
- case Hexagon::LDrid_indexed_cdnPt :
- case Hexagon::POST_LDrid_cPt :
- case Hexagon::POST_LDrid_cdnPt_V4 :
- case Hexagon::LDriw_cPt :
- case Hexagon::LDriw_cdnPt :
- case Hexagon::LDriw_indexed_cPt :
- case Hexagon::LDriw_indexed_cdnPt :
- case Hexagon::POST_LDriw_cPt :
- case Hexagon::POST_LDriw_cdnPt_V4 :
- case Hexagon::LDrih_cPt :
- case Hexagon::LDrih_cdnPt :
- case Hexagon::LDrih_indexed_cPt :
- case Hexagon::LDrih_indexed_cdnPt :
- case Hexagon::POST_LDrih_cPt :
- case Hexagon::POST_LDrih_cdnPt_V4 :
- case Hexagon::LDrib_cPt :
- case Hexagon::LDrib_cdnPt :
- case Hexagon::LDrib_indexed_cPt :
- case Hexagon::LDrib_indexed_cdnPt :
- case Hexagon::POST_LDrib_cPt :
- case Hexagon::POST_LDrib_cdnPt_V4 :
- case Hexagon::LDriuh_cPt :
- case Hexagon::LDriuh_cdnPt :
- case Hexagon::LDriuh_indexed_cPt :
- case Hexagon::LDriuh_indexed_cdnPt :
- case Hexagon::POST_LDriuh_cPt :
- case Hexagon::POST_LDriuh_cdnPt_V4 :
- case Hexagon::LDriub_cPt :
- case Hexagon::LDriub_cdnPt :
- case Hexagon::LDriub_indexed_cPt :
- case Hexagon::LDriub_indexed_cdnPt :
- case Hexagon::POST_LDriub_cPt :
- case Hexagon::POST_LDriub_cdnPt_V4 :
- case Hexagon::LDrid_indexed_shl_cPt_V4 :
- case Hexagon::LDrid_indexed_shl_cdnPt_V4 :
- case Hexagon::LDrib_indexed_shl_cPt_V4 :
- case Hexagon::LDrib_indexed_shl_cdnPt_V4 :
- case Hexagon::LDriub_indexed_shl_cPt_V4 :
- case Hexagon::LDriub_indexed_shl_cdnPt_V4 :
- case Hexagon::LDrih_indexed_shl_cPt_V4 :
- case Hexagon::LDrih_indexed_shl_cdnPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cdnPt_V4 :
- case Hexagon::LDriw_indexed_shl_cPt_V4 :
- case Hexagon::LDriw_indexed_shl_cdnPt_V4 :
- case Hexagon::ADD_ri_cPt :
- case Hexagon::ADD_ri_cdnPt :
- case Hexagon::ADD_rr_cPt :
- case Hexagon::ADD_rr_cdnPt :
- case Hexagon::XOR_rr_cPt :
- case Hexagon::XOR_rr_cdnPt :
- case Hexagon::AND_rr_cPt :
- case Hexagon::AND_rr_cdnPt :
- case Hexagon::OR_rr_cPt :
- case Hexagon::OR_rr_cdnPt :
- case Hexagon::SUB_rr_cPt :
- case Hexagon::SUB_rr_cdnPt :
- case Hexagon::COMBINE_rr_cPt :
- case Hexagon::COMBINE_rr_cdnPt :
- case Hexagon::ASLH_cPt_V4 :
- case Hexagon::ASLH_cdnPt_V4 :
- case Hexagon::ASRH_cPt_V4 :
- case Hexagon::ASRH_cdnPt_V4 :
- case Hexagon::SXTB_cPt_V4 :
- case Hexagon::SXTB_cdnPt_V4 :
- case Hexagon::SXTH_cPt_V4 :
- case Hexagon::SXTH_cdnPt_V4 :
- case Hexagon::ZXTB_cPt_V4 :
- case Hexagon::ZXTB_cdnPt_V4 :
- case Hexagon::ZXTH_cPt_V4 :
- case Hexagon::ZXTH_cdnPt_V4 :
- case Hexagon::LDd_GP_cPt_V4 :
- case Hexagon::LDb_GP_cPt_V4 :
- case Hexagon::LDub_GP_cPt_V4 :
- case Hexagon::LDh_GP_cPt_V4 :
- case Hexagon::LDuh_GP_cPt_V4 :
- case Hexagon::LDw_GP_cPt_V4 :
- case Hexagon::STd_GP_cPt_V4 :
- case Hexagon::STb_GP_cPt_V4 :
- case Hexagon::STh_GP_cPt_V4 :
- case Hexagon::STw_GP_cPt_V4 :
- case Hexagon::LDd_GP_cdnPt_V4 :
- case Hexagon::LDb_GP_cdnPt_V4 :
- case Hexagon::LDub_GP_cdnPt_V4 :
- case Hexagon::LDh_GP_cdnPt_V4 :
- case Hexagon::LDuh_GP_cdnPt_V4 :
- case Hexagon::LDw_GP_cdnPt_V4 :
- case Hexagon::STd_GP_cdnPt_V4 :
- case Hexagon::STb_GP_cdnPt_V4 :
- case Hexagon::STh_GP_cdnPt_V4 :
- case Hexagon::STw_GP_cdnPt_V4 :
- return true;
+ if (QII->isPredicatedTrue(MI))
+ return PK_True;
- case Hexagon::TFR_cNotPt:
- case Hexagon::TFR_cdnNotPt:
- case Hexagon::TFRI_cNotPt:
- case Hexagon::TFRI_cdnNotPt:
- case Hexagon::STrib_cNotPt :
- case Hexagon::STrib_cdnNotPt_V4 :
- case Hexagon::STrib_indexed_cNotPt :
- case Hexagon::STrib_indexed_cdnNotPt_V4 :
- case Hexagon::STrib_indexed_shl_cNotPt_V4 :
- case Hexagon::STrib_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_STbri_cNotPt :
- case Hexagon::POST_STbri_cdnNotPt_V4 :
- case Hexagon::STrih_cNotPt :
- case Hexagon::STrih_cdnNotPt_V4 :
- case Hexagon::STrih_indexed_cNotPt :
- case Hexagon::STrih_indexed_cdnNotPt_V4 :
- case Hexagon::STrih_indexed_shl_cNotPt_V4 :
- case Hexagon::STrih_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_SThri_cNotPt :
- case Hexagon::POST_SThri_cdnNotPt_V4 :
- case Hexagon::STriw_cNotPt :
- case Hexagon::STriw_cdnNotPt_V4 :
- case Hexagon::STriw_indexed_cNotPt :
- case Hexagon::STriw_indexed_cdnNotPt_V4 :
- case Hexagon::STriw_indexed_shl_cNotPt_V4 :
- case Hexagon::STriw_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_STwri_cNotPt :
- case Hexagon::POST_STwri_cdnNotPt_V4 :
- case Hexagon::STrib_imm_cNotPt_V4 :
- case Hexagon::STrib_imm_cdnNotPt_V4 :
- case Hexagon::STrid_cNotPt :
- case Hexagon::STrid_cdnNotPt_V4 :
- case Hexagon::STrid_indexed_cdnNotPt_V4 :
- case Hexagon::STrid_indexed_cNotPt :
- case Hexagon::STrid_indexed_shl_cNotPt_V4 :
- case Hexagon::STrid_indexed_shl_cdnNotPt_V4 :
- case Hexagon::POST_STdri_cNotPt :
- case Hexagon::POST_STdri_cdnNotPt_V4 :
- case Hexagon::STrih_imm_cNotPt_V4 :
- case Hexagon::STrih_imm_cdnNotPt_V4 :
- case Hexagon::STriw_imm_cNotPt_V4 :
- case Hexagon::STriw_imm_cdnNotPt_V4 :
- case Hexagon::JMP_fnew_t :
- case Hexagon::LDrid_cNotPt :
- case Hexagon::LDrid_cdnNotPt :
- case Hexagon::LDrid_indexed_cNotPt :
- case Hexagon::LDrid_indexed_cdnNotPt :
- case Hexagon::POST_LDrid_cNotPt :
- case Hexagon::POST_LDrid_cdnNotPt_V4 :
- case Hexagon::LDriw_cNotPt :
- case Hexagon::LDriw_cdnNotPt :
- case Hexagon::LDriw_indexed_cNotPt :
- case Hexagon::LDriw_indexed_cdnNotPt :
- case Hexagon::POST_LDriw_cNotPt :
- case Hexagon::POST_LDriw_cdnNotPt_V4 :
- case Hexagon::LDrih_cNotPt :
- case Hexagon::LDrih_cdnNotPt :
- case Hexagon::LDrih_indexed_cNotPt :
- case Hexagon::LDrih_indexed_cdnNotPt :
- case Hexagon::POST_LDrih_cNotPt :
- case Hexagon::POST_LDrih_cdnNotPt_V4 :
- case Hexagon::LDrib_cNotPt :
- case Hexagon::LDrib_cdnNotPt :
- case Hexagon::LDrib_indexed_cNotPt :
- case Hexagon::LDrib_indexed_cdnNotPt :
- case Hexagon::POST_LDrib_cNotPt :
- case Hexagon::POST_LDrib_cdnNotPt_V4 :
- case Hexagon::LDriuh_cNotPt :
- case Hexagon::LDriuh_cdnNotPt :
- case Hexagon::LDriuh_indexed_cNotPt :
- case Hexagon::LDriuh_indexed_cdnNotPt :
- case Hexagon::POST_LDriuh_cNotPt :
- case Hexagon::POST_LDriuh_cdnNotPt_V4 :
- case Hexagon::LDriub_cNotPt :
- case Hexagon::LDriub_cdnNotPt :
- case Hexagon::LDriub_indexed_cNotPt :
- case Hexagon::LDriub_indexed_cdnNotPt :
- case Hexagon::POST_LDriub_cNotPt :
- case Hexagon::POST_LDriub_cdnNotPt_V4 :
- case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrid_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrib_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriub_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
- case Hexagon::LDrih_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriuh_indexed_shl_cdnNotPt_V4 :
- case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
- case Hexagon::LDriw_indexed_shl_cdnNotPt_V4 :
- case Hexagon::ADD_ri_cNotPt :
- case Hexagon::ADD_ri_cdnNotPt :
- case Hexagon::ADD_rr_cNotPt :
- case Hexagon::ADD_rr_cdnNotPt :
- case Hexagon::XOR_rr_cNotPt :
- case Hexagon::XOR_rr_cdnNotPt :
- case Hexagon::AND_rr_cNotPt :
- case Hexagon::AND_rr_cdnNotPt :
- case Hexagon::OR_rr_cNotPt :
- case Hexagon::OR_rr_cdnNotPt :
- case Hexagon::SUB_rr_cNotPt :
- case Hexagon::SUB_rr_cdnNotPt :
- case Hexagon::COMBINE_rr_cNotPt :
- case Hexagon::COMBINE_rr_cdnNotPt :
- case Hexagon::ASLH_cNotPt_V4 :
- case Hexagon::ASLH_cdnNotPt_V4 :
- case Hexagon::ASRH_cNotPt_V4 :
- case Hexagon::ASRH_cdnNotPt_V4 :
- case Hexagon::SXTB_cNotPt_V4 :
- case Hexagon::SXTB_cdnNotPt_V4 :
- case Hexagon::SXTH_cNotPt_V4 :
- case Hexagon::SXTH_cdnNotPt_V4 :
- case Hexagon::ZXTB_cNotPt_V4 :
- case Hexagon::ZXTB_cdnNotPt_V4 :
- case Hexagon::ZXTH_cNotPt_V4 :
- case Hexagon::ZXTH_cdnNotPt_V4 :
-
- case Hexagon::LDd_GP_cNotPt_V4 :
- case Hexagon::LDb_GP_cNotPt_V4 :
- case Hexagon::LDub_GP_cNotPt_V4 :
- case Hexagon::LDh_GP_cNotPt_V4 :
- case Hexagon::LDuh_GP_cNotPt_V4 :
- case Hexagon::LDw_GP_cNotPt_V4 :
- case Hexagon::STd_GP_cNotPt_V4 :
- case Hexagon::STb_GP_cNotPt_V4 :
- case Hexagon::STh_GP_cNotPt_V4 :
- case Hexagon::STw_GP_cNotPt_V4 :
- case Hexagon::LDd_GP_cdnNotPt_V4 :
- case Hexagon::LDb_GP_cdnNotPt_V4 :
- case Hexagon::LDub_GP_cdnNotPt_V4 :
- case Hexagon::LDh_GP_cdnNotPt_V4 :
- case Hexagon::LDuh_GP_cdnNotPt_V4 :
- case Hexagon::LDw_GP_cdnNotPt_V4 :
- case Hexagon::STd_GP_cdnNotPt_V4 :
- case Hexagon::STb_GP_cdnNotPt_V4 :
- case Hexagon::STh_GP_cdnNotPt_V4 :
- case Hexagon::STw_GP_cdnNotPt_V4 :
- return false;
- }
- // return *some value* to avoid compiler warning
- return false;
+ return PK_False;
}
static MachineOperand& GetPostIncrementOperand(MachineInstr *MI,
@@ -1737,10 +537,10 @@ static MachineOperand& GetStoreValueOperand(MachineInstr *MI) {
// Arch Spec: 3.4.4.2
bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
MachineInstr *PacketMI, unsigned DepReg,
- std::map <MachineInstr*, SUnit*> MIToSUnit)
-{
- // Make sure we are looking at the store
- if (!IsNewifyStore(MI))
+ std::map <MachineInstr*, SUnit*> MIToSUnit) {
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
+  // Make sure we are looking at a store that can be promoted.
+ if (!QII->mayBeNewStore(MI))
return false;
// Make sure there is a dependency and the value can be new-value'd
@@ -1748,12 +548,11 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
GetStoreValueOperand(MI).getReg() != DepReg)
return false;
- const HexagonRegisterInfo* QRI =
+ const HexagonRegisterInfo* QRI =
(const HexagonRegisterInfo *) TM.getRegisterInfo();
const MCInstrDesc& MCID = PacketMI->getDesc();
// first operand is always the result
- const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
const TargetRegisterClass* PacketRC = QII->getRegClass(MCID, 0, QRI, MF);
// If there is already a store in the packet, we cannot do a new-value store
@@ -1796,7 +595,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
}
// If the source that feeds the store is predicated, new value store must
- // also be also predicated.
+ // also be predicated.
if (QII->isPredicated(PacketMI)) {
if (!QII->isPredicated(MI))
return false;
@@ -1842,7 +641,7 @@ bool HexagonPacketizerList::CanPromoteToNewValueStore( MachineInstr *MI,
if (( predRegNumDst != predRegNumSrc) ||
QII->isDotNewInst(PacketMI) != QII->isDotNewInst(MI) ||
- GetPredicateSense(MI, QII) != GetPredicateSense(PacketMI, QII)) {
+ getPredicateSense(MI, QII) != getPredicateSense(PacketMI, QII)) {
return false;
}
}
@@ -1923,10 +722,11 @@ bool HexagonPacketizerList::CanPromoteToNewValue( MachineInstr *MI,
MachineBasicBlock::iterator &MII)
{
+ const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
const HexagonRegisterInfo* QRI =
(const HexagonRegisterInfo *) TM.getRegisterInfo();
if (!QRI->Subtarget.hasV4TOps() ||
- !IsNewifyStore(MI))
+ !QII->mayBeNewStore(MI))
return false;
MachineInstr *PacketMI = PacketSU->getInstr();
@@ -1953,7 +753,7 @@ bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
{
const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
// Already a dot new instruction.
- if (QII->isDotNewInst(MI) && !IsNewifyStore(MI))
+ if (QII->isDotNewInst(MI) && !QII->mayBeNewStore(MI))
return false;
if (!isNewifiable(MI))
@@ -1963,12 +763,12 @@ bool HexagonPacketizerList::CanPromoteToDotNew( MachineInstr *MI,
if (RC == &Hexagon::PredRegsRegClass && isCondInst(MI))
return true;
else if (RC != &Hexagon::PredRegsRegClass &&
- !IsNewifyStore(MI)) // MI is not a new-value store
+ !QII->mayBeNewStore(MI)) // MI is not a new-value store
return false;
else {
// Create a dot new machine instruction to see if resources can be
// allocated. If not, bail out now.
- int NewOpcode = GetDotNewOp(MI->getOpcode());
+ int NewOpcode = QII->GetDotNewOp(MI);
const MCInstrDesc &desc = QII->get(NewOpcode);
DebugLoc dl;
MachineInstr *NewMI =
@@ -2109,7 +909,7 @@ bool HexagonPacketizerList::ArePredicatesComplements (MachineInstr* MI1,
// We also need to differentiate .old vs. .new:
// !p0 is not complementary to p0.new
return ((MI1->getOperand(1).getReg() == MI2->getOperand(1).getReg()) &&
- (GetPredicateSense(MI1, QII) != GetPredicateSense(MI2, QII)) &&
+ (getPredicateSense(MI1, QII) != getPredicateSense(MI2, QII)) &&
(QII->isDotNewInst(MI1) == QII->isDotNewInst(MI2)));
}
@@ -2207,24 +1007,21 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
}
// A LoopN instruction cannot appear in the same packet as a jump or call.
- if (IsLoopN(I) && ( IsDirectJump(J)
- || MCIDJ.isCall()
- || QII->isDeallocRet(J))) {
+ if (IsLoopN(I) &&
+ (IsDirectJump(J) || MCIDJ.isCall() || QII->isDeallocRet(J))) {
Dependence = true;
return false;
}
- if (IsLoopN(J) && ( IsDirectJump(I)
- || MCIDI.isCall()
- || QII->isDeallocRet(I))) {
+ if (IsLoopN(J) &&
+ (IsDirectJump(I) || MCIDI.isCall() || QII->isDeallocRet(I))) {
Dependence = true;
return false;
}
// dealloc_return cannot appear in the same packet as a conditional or
// unconditional jump.
- if (QII->isDeallocRet(I) && ( MCIDJ.isBranch()
- || MCIDJ.isCall()
- || MCIDJ.isBarrier())) {
+ if (QII->isDeallocRet(I) &&
+ (MCIDJ.isBranch() || MCIDJ.isCall() || MCIDJ.isBarrier())) {
Dependence = true;
return false;
}
@@ -2249,7 +1046,7 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
}
//if dealloc_return
- if (MCIDJ.mayStore() && QII->isDeallocRet(I)){
+ if (MCIDJ.mayStore() && QII->isDeallocRet(I)) {
Dependence = true;
return false;
}
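
The packetizer hunks above replace the large per-opcode switches with queries on HexagonInstrInfo (mayBeNewStore, GetDotNewOp, GetDotOldOp, isPredicated, isPredicatedTrue), and getPredicateSense now reports a three-valued PredicateKind instead of a bool. The sketch below is not part of the patch: it is a minimal standalone model in which the Pred struct and the haveComplementarySense helper are hypothetical stand-ins for the MachineInstr queries, showing one way the extra PK_Unknown state can be used when comparing predicate senses.

// Minimal standalone model of the three-valued predicate sense used above.
// "Pred" is a hypothetical stand-in for what HexagonInstrInfo::isPredicated()
// and isPredicatedTrue() report for a MachineInstr; it is not LLVM API.
#include <cassert>

enum PredicateKind { PK_False, PK_True, PK_Unknown };

struct Pred {
  bool Predicated;   // is the instruction predicated at all?
  bool SenseIsTrue;  // if predicated, "if (p)" rather than "if (!p)"
};

static PredicateKind getPredicateSense(const Pred &P) {
  if (!P.Predicated)
    return PK_Unknown;                      // no predicate, no sense to report
  return P.SenseIsTrue ? PK_True : PK_False;
}

// One way to use the third state: only treat two instructions as having
// complementary predicates when both senses are known and they differ.
static bool haveComplementarySense(const Pred &A, const Pred &B) {
  PredicateKind KA = getPredicateSense(A);
  PredicateKind KB = getPredicateSense(B);
  return KA != PK_Unknown && KB != PK_Unknown && KA != KB;
}

int main() {
  Pred P0  = {true, true};    // if (p0) ...
  Pred NP0 = {true, false};   // if (!p0) ...
  Pred Unc = {false, false};  // not predicated
  assert(haveComplementarySense(P0, NP0));
  assert(!haveComplementarySense(Unc, P0));
  return 0;
}
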
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
index 3deb8d1deb..495dbb97b0 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp
@@ -15,7 +15,7 @@
using namespace llvm;
-HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) {
+HexagonMCAsmInfo::HexagonMCAsmInfo(StringRef TT) {
Data16bitsDirective = "\t.half\t";
Data32bitsDirective = "\t.word\t";
Data64bitsDirective = 0; // .xword is only supported by V9.
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
index d336cd5be9..0b94d2141c 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.h
@@ -18,11 +18,9 @@
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
- class Target;
-
class HexagonMCAsmInfo : public MCAsmInfo {
public:
- explicit HexagonMCAsmInfo(const Target &T, StringRef TT);
+ explicit HexagonMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
index 6b1d2d1619..273bc22b8e 100644
--- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
+++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp
@@ -54,8 +54,8 @@ static MCSubtargetInfo *createHexagonMCSubtargetInfo(StringRef TT,
return X;
}
-static MCAsmInfo *createHexagonMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new HexagonMCAsmInfo(T, TT);
+static MCAsmInfo *createHexagonMCAsmInfo(StringRef TT) {
+ MCAsmInfo *MAI = new HexagonMCAsmInfo(TT);
// VirtualFP = (R30 + #0).
MachineLocation Dst(MachineLocation::VirtualFP);
diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
index 380750d50f..ec76dba491 100644
--- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
+++ b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.cpp
@@ -53,7 +53,7 @@ static MCSubtargetInfo *createMBlazeMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createMCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
switch (TheTriple.getOS()) {
default:
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
index 3c95760569..d213a45bb9 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.cpp
@@ -17,7 +17,7 @@ using namespace llvm;
void MSP430MCAsmInfo::anchor() { }
-MSP430MCAsmInfo::MSP430MCAsmInfo(const Target &T, StringRef TT) {
+MSP430MCAsmInfo::MSP430MCAsmInfo(StringRef TT) {
PointerSize = CalleeSaveStackSlotSize = 2;
PrivateGlobalPrefix = ".L";
diff --git a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
index e5c2fc283b..feb040d13b 100644
--- a/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
+++ b/lib/Target/MSP430/MCTargetDesc/MSP430MCAsmInfo.h
@@ -18,12 +18,11 @@
namespace llvm {
class StringRef;
- class Target;
class MSP430MCAsmInfo : public MCAsmInfo {
virtual void anchor();
public:
- explicit MSP430MCAsmInfo(const Target &T, StringRef TT);
+ explicit MSP430MCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt
index 78a9f70c66..8be68c5b4f 100644
--- a/lib/Target/Mips/CMakeLists.txt
+++ b/lib/Target/Mips/CMakeLists.txt
@@ -15,6 +15,7 @@ add_public_tablegen_target(MipsCommonTableGen)
add_llvm_target(MipsCodeGen
Mips16FrameLowering.cpp
+ Mips16HardFloat.cpp
Mips16InstrInfo.cpp
Mips16ISelDAGToDAG.cpp
Mips16ISelLowering.cpp
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
index 5d4b32d305..33f6f9620e 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp
@@ -18,7 +18,7 @@ using namespace llvm;
void MipsMCAsmInfo::anchor() { }
-MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) {
+MipsMCAsmInfo::MipsMCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
if ((TheTriple.getArch() == Triple::mips) ||
(TheTriple.getArch() == Triple::mips64))
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
index e1d878936f..772234eb71 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h
@@ -18,12 +18,11 @@
namespace llvm {
class StringRef;
- class Target;
class MipsMCAsmInfo : public MCAsmInfo {
virtual void anchor();
public:
- explicit MipsMCAsmInfo(const Target &T, StringRef TT);
+ explicit MipsMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
index be83b54b61..26694ffdac 100644
--- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
+++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp
@@ -93,8 +93,8 @@ static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createMipsMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new MipsMCAsmInfo(T, TT);
+static MCAsmInfo *createMipsMCAsmInfo(StringRef TT) {
+ MCAsmInfo *MAI = new MipsMCAsmInfo(TT);
MachineLocation Dst(MachineLocation::VirtualFP);
MachineLocation Src(Mips::SP, 0);
diff --git a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp
index 1bb6fe4629..d9a7487bf7 100644
--- a/lib/Target/Mips/Mips16FrameLowering.cpp
+++ b/lib/Target/Mips/Mips16FrameLowering.cpp
@@ -40,7 +40,6 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize == 0 && !MFI->adjustsStack()) return;
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
MachineLocation DstML, SrcML;
// Adjust stack.
@@ -52,22 +51,22 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const {
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
DstML = MachineLocation(MachineLocation::VirtualFP);
SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+ MMI.addFrameMove(AdjustSPLabel, DstML, SrcML);
MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);
DstML = MachineLocation(MachineLocation::VirtualFP, -8);
SrcML = MachineLocation(Mips::S1);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ MMI.addFrameMove(CSLabel, DstML, SrcML);
DstML = MachineLocation(MachineLocation::VirtualFP, -12);
SrcML = MachineLocation(Mips::S0);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ MMI.addFrameMove(CSLabel, DstML, SrcML);
DstML = MachineLocation(MachineLocation::VirtualFP, -4);
SrcML = MachineLocation(Mips::RA);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ MMI.addFrameMove(CSLabel, DstML, SrcML);
if (hasFP(MF))
BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0)
diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp
new file mode 100644
index 0000000000..4d1e61bb99
--- /dev/null
+++ b/lib/Target/Mips/Mips16HardFloat.cpp
@@ -0,0 +1,141 @@
+//===---- Mips16HardFloat.cpp for Mips16 Hard Float --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass needed for Mips16 Hard Float
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mips16-hard-float"
+#include "Mips16HardFloat.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+//
+// Return types that matter for hard float are:
+// float, double, complex float, and complex double
+//
+enum FPReturnVariant {
+ FRet, DRet, CFRet, CDRet, NoFPRet
+};
+
+//
+// Determine which FP return type this function has
+//
+static FPReturnVariant whichFPReturnVariant(Type *T) {
+ switch (T->getTypeID()) {
+ case Type::FloatTyID:
+ return FRet;
+ case Type::DoubleTyID:
+ return DRet;
+ case Type::StructTyID:
+ if (T->getStructNumElements() != 2)
+ break;
+ if ((T->getContainedType(0)->isFloatTy()) &&
+ (T->getContainedType(1)->isFloatTy()))
+ return CFRet;
+ if ((T->getContainedType(0)->isDoubleTy()) &&
+ (T->getContainedType(1)->isDoubleTy()))
+ return CDRet;
+ break;
+ default:
+ break;
+ }
+ return NoFPRet;
+}
+
+//
+// Returns of float, double and complex need to be handled with a helper
+// function. The "AndCall" part is coming in a later patch.
+//
+static bool fixupFPReturnAndCall
+ (Function &F, Module *M, const MipsSubtarget &Subtarget) {
+ bool Modified = false;
+ LLVMContext &C = M->getContext();
+ Type *MyVoid = Type::getVoidTy(C);
+ for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end();
+ I != E; ++I) {
+ Instruction &Inst = *I;
+ if (const ReturnInst *RI = dyn_cast<ReturnInst>(I)) {
+ Value *RVal = RI->getReturnValue();
+ if (!RVal) continue;
+ //
+ // If there is a return value and it needs a helper function,
+ // figure out which one and add a call before the actual
+ // return to this helper. The purpose of the helper is to move
+ // floating point values from their soft float return mapping to
+ // where they would have been mapped to in floating point registers.
+ //
+ Type *T = RVal->getType();
+ FPReturnVariant RV = whichFPReturnVariant(T);
+ if (RV == NoFPRet) continue;
+ static const char* Helper[NoFPRet] =
+ {"__mips16_ret_sf", "__mips16_ret_df", "__mips16_ret_sc",
+ "__mips16_ret_dc"};
+ const char *Name = Helper[RV];
+ AttributeSet A;
+ Value *Params[] = {RVal};
+ Modified = true;
+ //
+ // These helper functions have a different calling ABI, so the
+ // __Mips16RetHelper attribute marks them so that, later during call setup,
+ // the proper call lowering to the helper functions will take place.
+ //
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ "__Mips16RetHelper");
+ A = A.addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::ReadNone);
+ Value *F = (M->getOrInsertFunction(Name, A, MyVoid, T, NULL));
+ CallInst::Create(F, Params, "", &Inst );
+ }
+ }
+ return Modified;
+}
+
+namespace llvm {
+
+//
+// This pass only makes sense when the underlying chip has floating point but
+// we are compiling as mips16.
+// For all mips16 functions (that are not stubs we have already generated and
+// are not declared nomips16 via attributes), we must:
+// 1) fixup all returns of float, double, single and double complex
+// by calling a helper function before the actual return.
+// 2) generate helper functions (stubs) that can be called by mips32 functions
+//    and that will move parameters normally passed in floating point
+//    registers to their soft float equivalents. (Coming in a later patch).
+// 3) in the case of static relocation, generate helper functions so that
+// mips16 functions can call extern functions of unknown type (mips16 or
+// mips32). (Coming in a later patch).
+// 4) TBD. For pic, calls to extern functions of unknown type are handled by
+//    predefined helper functions in libc; this work is currently done during
+//    call lowering, but it should be moved here in the future.
+//
+bool Mips16HardFloat::runOnModule(Module &M) {
+ DEBUG(errs() << "Run on Module Mips16HardFloat\n");
+ bool Modified = false;
+ for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
+ if (F->isDeclaration() || F->hasFnAttribute("mips16_fp_stub") ||
+ F->hasFnAttribute("nomips16")) continue;
+ Modified |= fixupFPReturnAndCall(*F, &M, Subtarget);
+ }
+ return Modified;
+}
+
+char Mips16HardFloat::ID = 0;
+
+}
+
+ModulePass *llvm::createMips16HardFloat(MipsTargetMachine &TM) {
+ return new Mips16HardFloat(TM);
+}
+
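
The new Mips16HardFloat.cpp above classifies a function's floating point return type and inserts a call to the matching __mips16_ret_* helper before each return. The following standalone sketch is not taken from the pass; the Ty tag is a hypothetical stand-in for llvm::Type, and it only restates the classification and the helper-name mapping.

// Standalone sketch of the return-type classification done by
// whichFPReturnVariant() above, with a hypothetical Ty tag instead of
// llvm::Type.
#include <cstdio>

enum FPReturnVariant { FRet, DRet, CFRet, CDRet, NoFPRet };

enum class Ty { Float, Double, ComplexFloat, ComplexDouble, Other };

static FPReturnVariant classify(Ty T) {
  switch (T) {
  case Ty::Float:         return FRet;   // float            -> __mips16_ret_sf
  case Ty::Double:        return DRet;   // double           -> __mips16_ret_df
  case Ty::ComplexFloat:  return CFRet;  // {float, float}   -> __mips16_ret_sc
  case Ty::ComplexDouble: return CDRet;  // {double, double} -> __mips16_ret_dc
  default:                return NoFPRet;
  }
}

int main() {
  static const char *Helper[NoFPRet] = {"__mips16_ret_sf", "__mips16_ret_df",
                                        "__mips16_ret_sc", "__mips16_ret_dc"};
  FPReturnVariant RV = classify(Ty::ComplexDouble);
  if (RV != NoFPRet)
    std::printf("insert a call to %s before the return\n", Helper[RV]);
  return 0;
}
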
diff --git a/lib/Target/Mips/Mips16HardFloat.h b/lib/Target/Mips/Mips16HardFloat.h
new file mode 100644
index 0000000000..b7f712af5b
--- /dev/null
+++ b/lib/Target/Mips/Mips16HardFloat.h
@@ -0,0 +1,54 @@
+//===---- Mips16HardFloat.h for Mips16 Hard Float --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a phase which implements part of the floating point
+// interoperability between Mips16 and Mips32 code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/MipsMCTargetDesc.h"
+#include "MipsTargetMachine.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetMachine.h"
+
+
+#ifndef MIPS16HARDFLOAT_H
+#define MIPS16HARDFLOAT_H
+
+using namespace llvm;
+
+namespace llvm {
+
+class Mips16HardFloat : public ModulePass {
+
+public:
+ static char ID;
+
+ Mips16HardFloat(MipsTargetMachine &TM_) : ModulePass(ID),
+ TM(TM_), Subtarget(TM.getSubtarget<MipsSubtarget>()) {
+ }
+
+ virtual const char *getPassName() const {
+ return "MIPS16 Hard Float Pass";
+ }
+
+ virtual bool runOnModule(Module &M);
+
+protected:
+ /// Keep a pointer to the MipsSubtarget around so that we can make the right
+ /// decision when generating code for different targets.
+ const TargetMachine &TM;
+ const MipsSubtarget &Subtarget;
+
+};
+
+ModulePass *createMips16HardFloat(MipsTargetMachine &TM);
+
+}
+#endif
diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp
index f63318f1e6..c633d312bb 100644
--- a/lib/Target/Mips/Mips16ISelLowering.cpp
+++ b/lib/Target/Mips/Mips16ISelLowering.cpp
@@ -13,6 +13,7 @@
#define DEBUG_TYPE "mips-lower"
#include "Mips16ISelLowering.h"
#include "MipsRegisterInfo.h"
+#include "MipsTargetMachine.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CommandLine.h"
@@ -21,11 +22,6 @@
using namespace llvm;
-static cl::opt<bool>
-Mips16HardFloat("mips16-hard-float", cl::NotHidden,
- cl::desc("MIPS: mips16 hard float enable."),
- cl::init(false));
-
static cl::opt<bool> DontExpandCondPseudos16(
"mips16-dont-expand-cond-pseudo",
cl::init(false),
@@ -50,7 +46,7 @@ Mips16TargetLowering::Mips16TargetLowering(MipsTargetMachine &TM)
// Set up the register classes
addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
- if (Mips16HardFloat)
+ if (Subtarget->inMips16HardFloat())
setMips16HardFloatLibCalls();
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
@@ -374,7 +370,8 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
const char* Mips16HelperFunction = 0;
bool NeedMips16Helper = false;
- if (getTargetMachine().Options.UseSoftFloat && Mips16HardFloat) {
+ if (getTargetMachine().Options.UseSoftFloat &&
+ Subtarget->inMips16HardFloat()) {
//
// currently we don't have symbols tagged with the mips16 or mips32
// qualifier so we will assume that we don't know what kind it is.
diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index 462def76cc..6e8c5d24a8 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -196,6 +196,13 @@ def CC_Mips_FastCC : CallingConv<[
CCDelegateTo<CC_MipsN_FastCC>
]>;
+//==
+
+def CC_Mips16RetHelper : CallingConv<[
+ // Integer arguments are passed in integer registers.
+ CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>
+]>;
+
//===----------------------------------------------------------------------===//
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//
@@ -223,3 +230,6 @@ def CSR_N32 : CalleeSavedRegs<(add D31_64, D29_64, D27_64, D25_64, D24_64,
def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64,
GP_64, (sequence "S%u_64", 7, 0))>;
+
+def CSR_Mips16RetHelper :
+ CalleeSavedRegs<(add V0, V1, (sequence "A%u", 3, 0), S0, S1)>;
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index 4d76181f92..ab105b3655 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -2229,6 +2229,15 @@ getOpndList(SmallVectorImpl<SDValue> &Ops,
const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
+ if (Subtarget->inMips16HardFloat()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
+ llvm::StringRef Sym = G->getGlobal()->getName();
+ Function *F = G->getGlobal()->getParent()->getFunction(Sym);
+ if (F->hasFnAttribute("__Mips16RetHelper")) {
+ Mask = MipsRegisterInfo::getMips16RetHelperMask();
+ }
+ }
+ }
Ops.push_back(CLI.DAG.getRegisterMask(Mask));
if (InFlag.getNode())
@@ -2260,7 +2269,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
getTargetMachine(), ArgLocs, *DAG.getContext());
- MipsCC MipsCCInfo(CallConv, IsO32, CCInfo);
+ MipsCC::SpecialCallingConvType SpecialCallingConv =
+ getSpecialCallingConv(Callee);
+ MipsCC MipsCCInfo(CallConv, IsO32, CCInfo, SpecialCallingConv);
MipsCCInfo.analyzeCallOperands(Outs, IsVarArg,
getTargetMachine().Options.UseSoftFloat,
@@ -3029,13 +3040,32 @@ static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
}
-MipsTargetLowering::MipsCC::MipsCC(CallingConv::ID CC, bool IsO32_,
- CCState &Info)
- : CCInfo(Info), CallConv(CC), IsO32(IsO32_) {
+MipsTargetLowering::MipsCC::SpecialCallingConvType
+ MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const {
+ MipsCC::SpecialCallingConvType SpecialCallingConv =
+ MipsCC::NoSpecialCallingConv;
+ if (Subtarget->inMips16HardFloat()) {
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ llvm::StringRef Sym = G->getGlobal()->getName();
+ Function *F = G->getGlobal()->getParent()->getFunction(Sym);
+ if (F->hasFnAttribute("__Mips16RetHelper")) {
+ SpecialCallingConv = MipsCC::Mips16RetHelperConv;
+ }
+ }
+ }
+ return SpecialCallingConv;
+}
+
+MipsTargetLowering::MipsCC::MipsCC(
+ CallingConv::ID CC, bool IsO32_, CCState &Info,
+ MipsCC::SpecialCallingConvType SpecialCallingConv_)
+ : CCInfo(Info), CallConv(CC), IsO32(IsO32_),
+ SpecialCallingConv(SpecialCallingConv_){
// Pre-allocate reserved argument area.
CCInfo.AllocateStack(reservedArgArea(), 1);
}
+
void MipsTargetLowering::MipsCC::
analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args,
bool IsVarArg, bool IsSoftFloat, const SDNode *CallNode,
@@ -3183,6 +3213,8 @@ llvm::CCAssignFn *MipsTargetLowering::MipsCC::fixedArgFn() const {
if (CallConv == CallingConv::Fast)
return CC_Mips_FastCC;
+ if (SpecialCallingConv == Mips16RetHelperConv)
+ return CC_Mips16RetHelper;
return IsO32 ? CC_MipsO32 : CC_MipsN;
}
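
The MipsISelLowering.cpp changes above make direct calls to functions carrying the "__Mips16RetHelper" attribute use a dedicated calling convention. Below is a simplified standalone sketch of that dispatch; the Callee record is hypothetical, and the fallback is reduced to O32 only (the patch also handles FastCC and the N ABIs).

// Standalone sketch of the special-calling-convention selection added above.
// "Callee" is a hypothetical record; in the patch the attribute is read from
// the GlobalAddressSDNode's function.
#include <cassert>

enum SpecialCallingConvType { Mips16RetHelperConv, NoSpecialCallingConv };
enum CCAssignFn { CC_MipsO32, CC_Mips16RetHelper };

struct Callee {
  bool IsGlobal;          // direct call to a known global function?
  bool HasRetHelperAttr;  // function carries "__Mips16RetHelper"?
};

static SpecialCallingConvType getSpecialCallingConv(const Callee &C,
                                                    bool InMips16HardFloat) {
  if (InMips16HardFloat && C.IsGlobal && C.HasRetHelperAttr)
    return Mips16RetHelperConv;
  return NoSpecialCallingConv;
}

// Simplified from MipsCC::fixedArgFn(): the special convention overrides the
// default; FastCC and the N ABIs are omitted in this sketch.
static CCAssignFn fixedArgFn(SpecialCallingConvType S) {
  return S == Mips16RetHelperConv ? CC_Mips16RetHelper : CC_MipsO32;
}

int main() {
  Callee HelperFn = {true, true};
  Callee PlainFn  = {true, false};
  assert(fixedArgFn(getSpecialCallingConv(HelperFn, true)) ==
         CC_Mips16RetHelper);
  assert(fixedArgFn(getSpecialCallingConv(PlainFn, true)) == CC_MipsO32);
  return 0;
}
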
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 5587e8f581..233c11f789 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -240,7 +240,14 @@ namespace llvm {
/// arguments and inquire about calling convention information.
class MipsCC {
public:
- MipsCC(CallingConv::ID CallConv, bool IsO32, CCState &Info);
+ enum SpecialCallingConvType {
+ Mips16RetHelperConv, NoSpecialCallingConv
+ };
+
+ MipsCC(
+ CallingConv::ID CallConv, bool IsO32, CCState &Info,
+ SpecialCallingConvType SpecialCallingConv = NoSpecialCallingConv);
+
void analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
bool IsVarArg, bool IsSoftFloat,
@@ -313,15 +320,18 @@ namespace llvm {
CCState &CCInfo;
CallingConv::ID CallConv;
bool IsO32;
+ SpecialCallingConvType SpecialCallingConv;
SmallVector<ByValArgInfo, 2> ByValArgs;
};
-
+ protected:
// Subtarget Info
const MipsSubtarget *Subtarget;
bool HasMips64, IsN64, IsO32;
private:
+
+ MipsCC::SpecialCallingConvType getSpecialCallingConv(SDValue Callee) const;
// Lower Operand helpers
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool isVarArg,
diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp
index dead07bacd..ae25e456d5 100644
--- a/lib/Target/Mips/MipsRegisterInfo.cpp
+++ b/lib/Target/Mips/MipsRegisterInfo.cpp
@@ -100,6 +100,10 @@ MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const {
return CSR_N64_RegMask;
}
+const uint32_t *MipsRegisterInfo::getMips16RetHelperMask() {
+ return CSR_Mips16RetHelper_RegMask;
+}
+
BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
static const uint16_t ReservedCPURegs[] = {
diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h
index 5ed5124139..20ba41d7fd 100644
--- a/lib/Target/Mips/MipsRegisterInfo.h
+++ b/lib/Target/Mips/MipsRegisterInfo.h
@@ -46,6 +46,7 @@ public:
MachineFunction &MF) const;
const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
+ static const uint32_t *getMips16RetHelperMask();
BitVector getReservedRegs(const MachineFunction &MF) const;
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index b295e911bd..535c17cd62 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -262,7 +262,6 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize == 0 && !MFI->adjustsStack()) return;
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
MachineLocation DstML, SrcML;
// Adjust stack.
@@ -274,7 +273,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel);
DstML = MachineLocation(MachineLocation::VirtualFP);
SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize);
- Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML));
+ MMI.addFrameMove(AdjustSPLabel, DstML, SrcML);
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
@@ -306,13 +305,13 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
if (!STI.isLittle())
std::swap(SrcML0, SrcML1);
- Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0));
- Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1));
+ MMI.addFrameMove(CSLabel, DstML0, SrcML0);
+ MMI.addFrameMove(CSLabel, DstML1, SrcML1);
} else {
// Reg is either in CPURegs or FGR32.
DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
SrcML = MachineLocation(Reg);
- Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
+ MMI.addFrameMove(CSLabel, DstML, SrcML);
}
}
}
@@ -337,7 +336,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
int64_t Offset = MFI->getObjectOffset(MipsFI->getEhDataRegFI(I));
DstML = MachineLocation(MachineLocation::VirtualFP, Offset);
SrcML = MachineLocation(ehDataReg(I));
- Moves.push_back(MachineMove(CSLabel2, DstML, SrcML));
+ MMI.addFrameMove(CSLabel2, DstML, SrcML);
}
}
@@ -352,7 +351,7 @@ void MipsSEFrameLowering::emitPrologue(MachineFunction &MF) const {
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);
DstML = MachineLocation(FP);
SrcML = MachineLocation(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML));
+ MMI.addFrameMove(SetFPLabel, DstML, SrcML);
}
}
diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp
index 14a2b27795..259e68df32 100644
--- a/lib/Target/Mips/MipsSubtarget.cpp
+++ b/lib/Target/Mips/MipsSubtarget.cpp
@@ -48,6 +48,11 @@ static cl::opt<bool> Mips_Os16(
"floating point as Mips 16"),
cl::Hidden);
+static cl::opt<bool>
+Mips16HardFloat("mips16-hard-float", cl::NotHidden,
+ cl::desc("MIPS: mips16 hard float enable."),
+ cl::init(false));
+
void MipsSubtarget::anchor() { }
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
@@ -58,7 +63,8 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasSwap(false),
HasBitCount(false), HasFPIdx(false),
- InMips16Mode(false), InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
+ InMips16Mode(false), InMips16HardFloat(Mips16HardFloat),
+ InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
RM(_RM), OverrideMode(NoOverride), TM(_TM)
{
diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h
index f2f0e15887..ef7568a813 100644
--- a/lib/Target/Mips/MipsSubtarget.h
+++ b/lib/Target/Mips/MipsSubtarget.h
@@ -93,6 +93,9 @@ protected:
// InMips16 -- can process Mips16 instructions
bool InMips16Mode;
+ // Mips16 hard float
+ bool InMips16HardFloat;
+
// PreviousInMips16 -- the function we just processed was in Mips 16 Mode
bool PreviousInMips16Mode;
@@ -170,9 +173,12 @@ public:
}
llvm_unreachable("Unexpected mode");
}
- bool inMips16ModeDefault() {
+ bool inMips16ModeDefault() const {
return InMips16Mode;
}
+ bool inMips16HardFloat() const {
+ return inMips16Mode() && InMips16HardFloat;
+ }
bool inMicroMipsMode() const { return InMicroMipsMode; }
bool hasDSP() const { return HasDSP; }
bool hasDSPR2() const { return HasDSPR2; }
@@ -188,7 +194,8 @@ public:
bool hasBitCount() const { return HasBitCount; }
bool hasFPIdx() const { return HasFPIdx; }
- bool allowMixed16_32() const { return AllowMixed16_32;};
+  bool allowMixed16_32() const { return inMips16ModeDefault() ||
+                                 AllowMixed16_32; }
bool os16() const { return Os16;};
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index ee28e2a122..a876f1c7f0 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -22,6 +22,7 @@
#include "MipsSEISelLowering.h"
#include "MipsSEISelDAGToDAG.h"
#include "Mips16FrameLowering.h"
+#include "Mips16HardFloat.h"
#include "Mips16InstrInfo.h"
#include "Mips16ISelDAGToDAG.h"
#include "Mips16ISelLowering.h"
@@ -156,6 +157,8 @@ void MipsPassConfig::addIRPasses() {
TargetPassConfig::addIRPasses();
if (getMipsSubtarget().os16())
addPass(createMipsOs16(getMipsTargetMachine()));
+ if (getMipsSubtarget().inMips16HardFloat())
+ addPass(createMips16HardFloat(getMipsTargetMachine()));
}
// Install an instruction selector pass using
// the ISelDag to gen Mips code.
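
Mips16HardFloat.h itself is not part of this excerpt; judging only from the call site above, it is assumed to declare a pass factory along these lines (the ModulePass return type is a guess, labelled as such):

    namespace llvm {
      class ModulePass;
      class MipsTargetMachine;
      // Assumed shape: addPass() needs a Pass*, and the call site hands in
      // the result of getMipsTargetMachine().
      ModulePass *createMips16HardFloat(MipsTargetMachine &TM);
    }

The os16() case two lines earlier follows the same conditional-scheduling idiom, so any subtarget-gated IR pass slots into addIRPasses() this way.
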
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
index 459cd96cb0..fa33a75c61 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.cpp
@@ -27,7 +27,7 @@ Debug("debug-compile", cl::desc("Compile for debugging"), cl::Hidden,
void NVPTXMCAsmInfo::anchor() {}
-NVPTXMCAsmInfo::NVPTXMCAsmInfo(const Target &T, const StringRef &TT) {
+NVPTXMCAsmInfo::NVPTXMCAsmInfo(const StringRef &TT) {
Triple TheTriple(TT);
if (TheTriple.getArch() == Triple::nvptx64) {
PointerSize = CalleeSaveStackSlotSize = 8;
diff --git a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
index 82097daa4a..7d1633f60d 100644
--- a/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
+++ b/lib/Target/NVPTX/MCTargetDesc/NVPTXMCAsmInfo.h
@@ -23,7 +23,7 @@ class StringRef;
class NVPTXMCAsmInfo : public MCAsmInfo {
virtual void anchor();
public:
- explicit NVPTXMCAsmInfo(const Target &T, const StringRef &TT);
+ explicit NVPTXMCAsmInfo(const StringRef &TT);
};
} // namespace llvm
diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
index 2209f936ec..a01fa44a9a 100644
--- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
+++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp
@@ -58,7 +58,7 @@ static MCSubtargetInfo *createPPCMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createPPCMCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createPPCMCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
bool isPPC64 = TheTriple.getArch() == Triple::ppc64;
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 9ec10f62f5..cd70aeed87 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -515,8 +515,6 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
}
}
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
-
// Add the "machine moves" for the instructions we generated above, but in
// reverse order.
if (needsFrameMoves) {
@@ -528,22 +526,22 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
if (NegFrameSize) {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP, NegFrameSize);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ MMI.addFrameMove(FrameLabel, SPDst, SPSrc);
} else {
MachineLocation SP(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabel, SP, SP));
+ MMI.addFrameMove(FrameLabel, SP, SP);
}
if (HasFP) {
MachineLocation FPDst(MachineLocation::VirtualFP, FPOffset);
MachineLocation FPSrc(isPPC64 ? PPC::X31 : PPC::R31);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ MMI.addFrameMove(FrameLabel, FPDst, FPSrc);
}
if (MustSaveLR) {
MachineLocation LRDst(MachineLocation::VirtualFP, LROffset);
MachineLocation LRSrc(isPPC64 ? PPC::LR8 : PPC::LR);
- Moves.push_back(MachineMove(FrameLabel, LRDst, LRSrc));
+ MMI.addFrameMove(FrameLabel, LRDst, LRSrc);
}
}
@@ -570,7 +568,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation FPDst(HasFP ? (isPPC64 ? PPC::X31 : PPC::R31) :
(isPPC64 ? PPC::X1 : PPC::R1));
MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(ReadyLabel, FPDst, FPSrc));
+ MMI.addFrameMove(ReadyLabel, FPDst, FPSrc);
}
}
@@ -602,14 +600,14 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
&& (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
MachineLocation CSDst(PPC::X1, 8);
MachineLocation CSSrc(PPC::CR2);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ MMI.addFrameMove(Label, CSDst, CSSrc);
continue;
}
int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ MMI.addFrameMove(Label, CSDst, CSSrc);
}
}
}
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
index 2aae26aa12..f1c44df975 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.cpp
@@ -11,7 +11,7 @@
#include "AMDGPUMCAsmInfo.h"
using namespace llvm;
-AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(const Target &T, StringRef &TT) : MCAsmInfo() {
+AMDGPUMCAsmInfo::AMDGPUMCAsmInfo(StringRef &TT) : MCAsmInfo() {
HasSingleParameterDotFile = false;
WeakDefDirective = 0;
//===------------------------------------------------------------------===//
diff --git a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
index 3ad0fa6824..485167b3da 100644
--- a/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
+++ b/lib/Target/R600/MCTargetDesc/AMDGPUMCAsmInfo.h
@@ -17,12 +17,11 @@
#include "llvm/MC/MCAsmInfo.h"
namespace llvm {
-class Target;
class StringRef;
class AMDGPUMCAsmInfo : public MCAsmInfo {
public:
- explicit AMDGPUMCAsmInfo(const Target &T, StringRef &TT);
+ explicit AMDGPUMCAsmInfo(StringRef &TT);
const char* getDataASDirective(unsigned int Size, unsigned int AS) const;
const MCSection* getNonexecutableStackSection(MCContext &CTX) const;
};
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
index 3d4bfdcd5e..5a52abee03 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -18,7 +18,7 @@ using namespace llvm;
void SparcELFMCAsmInfo::anchor() { }
-SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Target &T, StringRef TT) {
+SparcELFMCAsmInfo::SparcELFMCAsmInfo(StringRef TT) {
IsLittleEndian = false;
Triple TheTriple(TT);
if (TheTriple.getArch() == Triple::sparcv9) {
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
index f0e1354c21..621e8ffadf 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
+++ b/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
@@ -18,12 +18,11 @@
namespace llvm {
class StringRef;
- class Target;
class SparcELFMCAsmInfo : public MCAsmInfo {
virtual void anchor();
public:
- explicit SparcELFMCAsmInfo(const Target &T, StringRef TT);
+ explicit SparcELFMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
index c96a0d4c67..9e27aa004a 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.cpp
@@ -13,7 +13,7 @@
using namespace llvm;
-SystemZMCAsmInfo::SystemZMCAsmInfo(const Target &T, StringRef TT) {
+SystemZMCAsmInfo::SystemZMCAsmInfo(StringRef TT) {
PointerSize = 8;
CalleeSaveStackSlotSize = 8;
IsLittleEndian = false;
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
index bac1bca381..d440787de5 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCAsmInfo.h
@@ -14,12 +14,11 @@
#include "llvm/Support/Compiler.h"
namespace llvm {
-class Target;
class StringRef;
class SystemZMCAsmInfo : public MCAsmInfo {
public:
- explicit SystemZMCAsmInfo(const Target &T, StringRef TT);
+ explicit SystemZMCAsmInfo(StringRef TT);
// Override MCAsmInfo;
virtual const MCSection *getNonexecutableStackSection(MCContext &Ctx) const
diff --git a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
index 49a7f47e7d..6844f92ec9 100644
--- a/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
+++ b/lib/Target/SystemZ/MCTargetDesc/SystemZMCTargetDesc.cpp
@@ -27,8 +27,8 @@
using namespace llvm;
-static MCAsmInfo *createSystemZMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new SystemZMCAsmInfo(T, TT);
+static MCAsmInfo *createSystemZMCAsmInfo(StringRef TT) {
+ MCAsmInfo *MAI = new SystemZMCAsmInfo(TT);
MachineLocation FPDst(MachineLocation::VirtualFP);
MachineLocation FPSrc(SystemZ::R15D, -SystemZMC::CFAOffsetFromInitialSP);
MAI->addInitialFrameState(0, FPDst, FPSrc);
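
After this series, every MCAsmInfo factory takes only the triple string, so nothing threads a Target reference through any more. A hedged sketch of the new factory shape for a made-up backend (the 'Foo' names and the register number are placeholders; a real backend constructs its own MCAsmInfo subclass):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/MC/MCAsmInfo.h"
    #include "llvm/MC/MachineLocation.h"

    static llvm::MCAsmInfo *createFooMCAsmInfo(llvm::StringRef TT) {
      using namespace llvm;
      (void)TT;                                   // a real factory would inspect the triple here
      MCAsmInfo *MAI = new MCAsmInfo();           // real targets new their own subclass
      // Seed the unwind tables: on entry the CFA is the stack pointer, offset 0.
      MachineLocation Dst(MachineLocation::VirtualFP);
      MachineLocation Src(/*hypothetical SP regnum=*/1, 0);
      MAI->addInitialFrameState(0, Dst, Src);     // 0 = no label, as in the hunk above
      return MAI;
    }

The matching TargetRegistry::RegisterMCAsmInfo call keeps its shape; only the function-pointer type it expects loses the Target parameter, which is why the PPC, X86 and XCore factories below change the same way.
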
diff --git a/lib/Target/SystemZ/SystemZFrameLowering.cpp b/lib/Target/SystemZ/SystemZFrameLowering.cpp
index fda33dee48..ab1d45f128 100644
--- a/lib/Target/SystemZ/SystemZFrameLowering.cpp
+++ b/lib/Target/SystemZ/SystemZFrameLowering.cpp
@@ -297,7 +297,6 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
SystemZMachineFunctionInfo *ZFI = MF.getInfo<SystemZMachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = MBB.begin();
MachineModuleInfo &MMI = MF.getMMI();
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
const std::vector<CalleeSavedInfo> &CSI = MFFrame->getCalleeSavedInfo();
bool HasFP = hasFP(MF);
DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
@@ -323,7 +322,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
int64_t Offset = SPOffsetFromCFA + RegSpillOffsets[Reg];
MachineLocation StackSlot(MachineLocation::VirtualFP, Offset);
MachineLocation RegValue(Reg);
- Moves.push_back(MachineMove(GPRSaveLabel, StackSlot, RegValue));
+ MMI.addFrameMove(GPRSaveLabel, StackSlot, RegValue);
}
}
}
@@ -340,7 +339,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(AdjustSPLabel);
MachineLocation FPDest(MachineLocation::VirtualFP);
MachineLocation FPSrc(MachineLocation::VirtualFP, SPOffsetFromCFA + Delta);
- Moves.push_back(MachineMove(AdjustSPLabel, FPDest, FPSrc));
+ MMI.addFrameMove(AdjustSPLabel, FPDest, FPSrc);
SPOffsetFromCFA += Delta;
}
@@ -355,7 +354,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
.addSym(SetFPLabel);
MachineLocation HardFP(SystemZ::R11D);
MachineLocation VirtualFP(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(SetFPLabel, HardFP, VirtualFP));
+ MMI.addFrameMove(SetFPLabel, HardFP, VirtualFP);
// Mark the FramePtr as live at the beginning of every block except
// the entry block. (We'll have marked R11 as live on entry when
@@ -386,7 +385,7 @@ void SystemZFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation Slot(MachineLocation::VirtualFP,
SPOffsetFromCFA + Offset);
MachineLocation RegValue(Reg);
- Moves.push_back(MachineMove(FPRSaveLabel, Slot, RegValue));
+ MMI.addFrameMove(FPRSaveLabel, Slot, RegValue);
}
}
// Complete the CFI for the FPR saves, modelling them as taking effect
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 520c4c0048..019a670083 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2308,25 +2308,25 @@ MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
unsigned Match1, Match2, Match3, Match4;
Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match1 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
Tmp[Base.size()] = Suffixes[1];
Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match2 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
Tmp[Base.size()] = Suffixes[2];
Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match3 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
Tmp[Base.size()] = Suffixes[3];
Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore,
- isParsingIntelSyntax());
+ MatchingInlineAsm, isParsingIntelSyntax());
// If this returned as a missing feature failure, remember that.
if (Match4 == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 5e84530cd7..226ebca8cb 100644
--- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -263,7 +263,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(StringRef TT) {
return X;
}
-static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) {
+static MCAsmInfo *createX86MCAsmInfo(StringRef TT) {
Triple TheTriple(TT);
bool is64Bit = TheTriple.getArch() == Triple::x86_64;
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 42b4e73509..b9254d2ce3 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -312,7 +312,6 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
if (CSI.empty()) return;
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
bool HasFP = hasFP(MF);
@@ -362,7 +361,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(Label, CSDst, CSSrc));
+ MMI.addFrameMove(Label, CSDst, CSSrc);
}
}
@@ -732,7 +731,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// REG < 64 => DW_CFA_offset + Reg
// ELSE => DW_CFA_offset_extended
- std::vector<MachineMove> &Moves = MMI.getFrameMoves();
uint64_t NumBytes = 0;
int stackGrowth = -SlotSize;
@@ -768,17 +766,17 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
if (StackSize) {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ MMI.addFrameMove(FrameLabel, SPDst, SPSrc);
} else {
MachineLocation SPDst(StackPtr);
MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ MMI.addFrameMove(FrameLabel, SPDst, SPSrc);
}
// Change the rule for the FramePtr to be an "offset" rule.
MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
MachineLocation FPSrc(FramePtr);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ MMI.addFrameMove(FrameLabel, FPDst, FPSrc);
}
// Update EBP with the new base value.
@@ -796,7 +794,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// Define the current CFA to use the EBP/RBP register.
MachineLocation FPDst(FramePtr);
MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
+ MMI.addFrameMove(FrameLabel, FPDst, FPSrc);
}
// Mark the FramePtr as live-in in every block except the entry.
@@ -827,7 +825,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr;
MachineLocation SPDst(Ptr);
MachineLocation SPSrc(Ptr, StackOffset);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ MMI.addFrameMove(Label, SPDst, SPSrc);
StackOffset += stackGrowth;
}
}
@@ -965,11 +963,11 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP,
-StackSize + stackGrowth);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ MMI.addFrameMove(Label, SPDst, SPSrc);
} else {
MachineLocation SPDst(StackPtr);
MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(Label, SPDst, SPSrc));
+ MMI.addFrameMove(Label, SPDst, SPSrc);
}
}
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 3380d8c64e..ad26bce01b 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -884,12 +884,12 @@ def PUSHF64 : I<0x9C, RawFrm, (outs), (ins), "pushfq", [], IIC_PUSH_F>,
let Defs = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP], Uses = [ESP],
mayLoad = 1, neverHasSideEffects = 1, SchedRW = [WriteLoad] in {
-def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l|d}", [], IIC_POP_A>,
+def POPA32 : I<0x61, RawFrm, (outs), (ins), "popa{l}", [], IIC_POP_A>,
Requires<[In32BitMode]>;
}
let Defs = [ESP], Uses = [EDI, ESI, EBP, EBX, EDX, ECX, EAX, ESP],
mayStore = 1, neverHasSideEffects = 1, SchedRW = [WriteStore] in {
-def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l|d}", [], IIC_PUSH_A>,
+def PUSHA32 : I<0x60, RawFrm, (outs), (ins), "pusha{l}", [], IIC_PUSH_A>,
Requires<[In32BitMode]>;
}
@@ -1867,6 +1867,9 @@ def : MnemonicAlias<"pushf", "pushfl", "att">, Requires<[In32BitMode]>;
def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
def : MnemonicAlias<"pushfd", "pushfl", "att">;
+def : MnemonicAlias<"popad", "popa", "intel">, Requires<[In32BitMode]>;
+def : MnemonicAlias<"pushad", "pusha", "intel">, Requires<[In32BitMode]>;
+
def : MnemonicAlias<"repe", "rep", "att">;
def : MnemonicAlias<"repz", "rep", "att">;
def : MnemonicAlias<"repnz", "repne", "att">;
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
index 1cfdbda003..6f4455117a 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.cpp
@@ -13,7 +13,7 @@ using namespace llvm;
void XCoreMCAsmInfo::anchor() { }
-XCoreMCAsmInfo::XCoreMCAsmInfo(const Target &T, StringRef TT) {
+XCoreMCAsmInfo::XCoreMCAsmInfo(StringRef TT) {
SupportsDebugInformation = true;
Data16bitsDirective = "\t.short\t";
Data32bitsDirective = "\t.long\t";
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
index 076777541e..b5a9660d5f 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCAsmInfo.h
@@ -23,7 +23,7 @@ namespace llvm {
class XCoreMCAsmInfo : public MCAsmInfo {
virtual void anchor();
public:
- explicit XCoreMCAsmInfo(const Target &T, StringRef TT);
+ explicit XCoreMCAsmInfo(StringRef TT);
};
} // namespace llvm
diff --git a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
index c1773653f5..e38da34a81 100644
--- a/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
+++ b/lib/Target/XCore/MCTargetDesc/XCoreMCTargetDesc.cpp
@@ -51,8 +51,8 @@ static MCSubtargetInfo *createXCoreMCSubtargetInfo(StringRef TT, StringRef CPU,
return X;
}
-static MCAsmInfo *createXCoreMCAsmInfo(const Target &T, StringRef TT) {
- MCAsmInfo *MAI = new XCoreMCAsmInfo(T, TT);
+static MCAsmInfo *createXCoreMCAsmInfo(StringRef TT) {
+ MCAsmInfo *MAI = new XCoreMCAsmInfo(TT);
// Initial state of the frame pointer is SP.
MachineLocation Dst(MachineLocation::VirtualFP);
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index 8cd10b4e74..9798411381 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -132,7 +132,6 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(Opcode)).addImm(FrameSize);
if (emitFrameMoves) {
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
// Show update of SP.
MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
@@ -140,12 +139,12 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP, -FrameSize * 4);
- Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ MMI->addFrameMove(FrameLabel, SPDst, SPSrc);
if (LRSavedOnEntry) {
MachineLocation CSDst(MachineLocation::VirtualFP, 0);
MachineLocation CSSrc(XCore::LR);
- Moves.push_back(MachineMove(FrameLabel, CSDst, CSSrc));
+ MMI->addFrameMove(FrameLabel, CSDst, CSSrc);
}
}
}
@@ -159,7 +158,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveLRLabel);
MachineLocation CSDst(MachineLocation::VirtualFP, LRSpillOffset);
MachineLocation CSSrc(XCore::LR);
- MMI->getFrameMoves().push_back(MachineMove(SaveLRLabel, CSDst, CSSrc));
+ MMI->addFrameMove(SaveLRLabel, CSDst, CSSrc);
}
}
@@ -174,7 +173,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(SaveR10Label);
MachineLocation CSDst(MachineLocation::VirtualFP, FPSpillOffset);
MachineLocation CSSrc(XCore::R10);
- MMI->getFrameMoves().push_back(MachineMove(SaveR10Label, CSDst, CSSrc));
+ MMI->addFrameMove(SaveR10Label, CSDst, CSSrc);
}
// Set the FP from the SP.
unsigned FramePtr = XCore::R10;
@@ -186,13 +185,12 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
BuildMI(MBB, MBBI, dl, TII.get(XCore::PROLOG_LABEL)).addSym(FrameLabel);
MachineLocation SPDst(FramePtr);
MachineLocation SPSrc(MachineLocation::VirtualFP);
- MMI->getFrameMoves().push_back(MachineMove(FrameLabel, SPDst, SPSrc));
+ MMI->addFrameMove(FrameLabel, SPDst, SPSrc);
}
}
if (emitFrameMoves) {
// Frame moves for callee saved.
- std::vector<MachineMove> &Moves = MMI->getFrameMoves();
std::vector<std::pair<MCSymbol*, CalleeSavedInfo> >&SpillLabels =
XFI->getSpillLabels();
for (unsigned I = 0, E = SpillLabels.size(); I != E; ++I) {
@@ -202,7 +200,7 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const {
unsigned Reg = CSI.getReg();
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(SpillLabel, CSDst, CSSrc));
+ MMI->addFrameMove(SpillLabel, CSDst, CSSrc);
}
}
}