author     Tim Northover <Tim.Northover@arm.com>   2013-01-31 12:12:40 +0000
committer  Tim Northover <Tim.Northover@arm.com>   2013-01-31 12:12:40 +0000
commit     72062f5744557e270a38192554c3126ea5f97434 (patch)
tree       ae6e4c8abb4e0572745e7849b4948c58fea3e8d0 /lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
parent     d72b4d321e317327330e1e82d0f652d4e237c171 (diff)
Add AArch64 as an experimental target.
This patch adds support for AArch64 (ARM's 64-bit architecture) to LLVM in the "experimental" category. Currently, it won't be built unless requested explicitly.

This initial commit should have support for:

+ Assembly of all scalar (i.e. non-NEON, non-Crypto) instructions (except the late-addition CRC instructions).
+ CodeGen features required for C++03 and C99.
+ Compilation for the "small" memory model: code + static data < 4GB.
+ Absolute and position-independent code.
+ GNU-style (i.e. "__thread") TLS.
+ Debugging information.

The principal omission, currently, is performance tuning.

This patch excludes the NEON support also reviewed, due to an outbreak of batshit insanity in our legal department. That will be committed soon, bringing the changes to precisely what has been approved.

Further reviews would be gratefully received.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174054 91177308-0d34-0410-b5e6-96231b3b80d8
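As a concrete illustration of the GNU-style TLS support listed above, a minimal example (not taken from the patch itself) of the kind of C++ source this enables for AArch64:

    __thread int tls_counter;   // one copy per thread (GNU "__thread" TLS)

    int bump() {
      return ++tls_counter;     // lowered to AArch64 TLS relocation sequences
    }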
Diffstat (limited to 'lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp')
-rw-r--r--  lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp | 517
1 file changed, 517 insertions, 0 deletions
diff --git a/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
new file mode 100644
index 0000000000..f2bbd85209
--- /dev/null
+++ b/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -0,0 +1,517 @@
+//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AArch64MCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "mccodeemitter"
+#include "MCTargetDesc/AArch64BaseInfo.h"
+#include "MCTargetDesc/AArch64FixupKinds.h"
+#include "MCTargetDesc/AArch64MCExpr.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+namespace {
+class AArch64MCCodeEmitter : public MCCodeEmitter {
+ AArch64MCCodeEmitter(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
+ void operator=(const AArch64MCCodeEmitter &); // DO NOT IMPLEMENT
+ const MCInstrInfo &MCII;
+ const MCSubtargetInfo &STI;
+ MCContext &Ctx;
+
+public:
+ AArch64MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
+ MCContext &ctx)
+ : MCII(mcii), STI(sti), Ctx(ctx) {
+ }
+
+ ~AArch64MCCodeEmitter() {}
+
+ unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ template<int MemSize>
+ unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize);
+ }
+
+ unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ int MemSize) const;
+
+ unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+ unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ // Labels are handled mostly the same way: a symbol is needed, and
+ // just gets some fixup attached.
+ template<AArch64::Fixups fixupDesired>
+ unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ unsigned getAddressWithFixup(const MCOperand &MO,
+ unsigned FixupKind,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+
+ void EmitByte(unsigned char C, raw_ostream &OS) const {
+ OS << (char)C;
+ }
+
+ void EmitInstruction(uint32_t Val, raw_ostream &OS) const {
+ // Output the constant in little endian byte order.
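+ // For example, Val == 0x91001C20 ("add x0, x1, #7") is written to the
+ // stream as the byte sequence 20 1C 00 91.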
+ for (unsigned i = 0; i != 4; ++i) {
+ EmitByte(Val & 0xff, OS);
+ Val >>= 8;
+ }
+ }
+
+
+ void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const;
+
+ unsigned fixFCMPImm(const MCInst &MI, unsigned EncodedValue) const;
+
+ template<int hasRs, int hasRt2> unsigned
+ fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const;
+
+ unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const;
+
+ unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const;
+
+
+};
+
+} // end anonymous namespace
+
+unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO,
+ unsigned FixupKind,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (!MO.isExpr()) {
+ // This can occur for manually decoded or constructed MCInsts, but neither
+ // the assembly-parser nor instruction selection will currently produce an
+ // MCInst that's not a symbol reference.
+ assert(MO.isImm() && "Unexpected address requested");
+ return MO.getImm();
+ }
+
+ const MCExpr *Expr = MO.getExpr();
+ MCFixupKind Kind = MCFixupKind(FixupKind);
+ Fixups.push_back(MCFixup::Create(0, Expr, Kind));
+
+ return 0;
+}
+
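+// For example, "ldr x0, [x1, #:lo12:var]" reaches the VK_AARCH64_LO12 case
+// below with MemSize == 8, so FixupsBySize[Log2_32(8)] selects
+// fixup_a64_ldst64_lo12 and the imm12 field is left for the linker to fill.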
+unsigned AArch64MCCodeEmitter::
+getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups,
+ int MemSize) const {
+ const MCOperand &ImmOp = MI.getOperand(OpIdx);
+ if (ImmOp.isImm())
+ return ImmOp.getImm();
+
+ assert(ImmOp.isExpr() && "Unexpected operand type");
+ const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr());
+ unsigned FixupKind;
+
+
+ switch (Expr->getKind()) {
+ default: llvm_unreachable("Unexpected operand modifier");
+ case AArch64MCExpr::VK_AARCH64_LO12: {
+ unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12,
+ AArch64::fixup_a64_ldst16_lo12,
+ AArch64::fixup_a64_ldst32_lo12,
+ AArch64::fixup_a64_ldst64_lo12,
+ AArch64::fixup_a64_ldst128_lo12 };
+ assert(MemSize <= 16 && "Invalid fixup for operation");
+ FixupKind = FixupsBySize[Log2_32(MemSize)];
+ break;
+ }
+ case AArch64MCExpr::VK_AARCH64_GOT_LO12:
+ assert(MemSize == 8 && "Invalid fixup for operation");
+ FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc;
+ break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: {
+ unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12,
+ AArch64::fixup_a64_ldst16_dtprel_lo12,
+ AArch64::fixup_a64_ldst32_dtprel_lo12,
+ AArch64::fixup_a64_ldst64_dtprel_lo12 };
+ assert(MemSize <= 8 && "Invalid fixup for operation");
+ FixupKind = FixupsBySize[Log2_32(MemSize)];
+ break;
+ }
+ case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: {
+ unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_dtprel_lo12_nc,
+ AArch64::fixup_a64_ldst16_dtprel_lo12_nc,
+ AArch64::fixup_a64_ldst32_dtprel_lo12_nc,
+ AArch64::fixup_a64_ldst64_dtprel_lo12_nc };
+ assert(MemSize <= 8 && "Invalid fixup for operation");
+ FixupKind = FixupsBySize[Log2_32(MemSize)];
+ break;
+ }
+ case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12:
+ assert(MemSize == 8 && "Invalid fixup for operation");
+ FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc;
+ break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_LO12: {
+ unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12,
+ AArch64::fixup_a64_ldst16_tprel_lo12,
+ AArch64::fixup_a64_ldst32_tprel_lo12,
+ AArch64::fixup_a64_ldst64_tprel_lo12 };
+ assert(MemSize <= 8 && "Invalid fixup for operation");
+ FixupKind = FixupsBySize[Log2_32(MemSize)];
+ break;
+ }
+ case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: {
+ unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_tprel_lo12_nc,
+ AArch64::fixup_a64_ldst16_tprel_lo12_nc,
+ AArch64::fixup_a64_ldst32_tprel_lo12_nc,
+ AArch64::fixup_a64_ldst64_tprel_lo12_nc };
+ assert(MemSize <= 8 && "Invalid fixup for operation");
+ FixupKind = FixupsBySize[Log2_32(MemSize)];
+ break;
+ }
+ case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
+ assert(MemSize == 8 && "Invalid fixup for operation");
+ FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc;
+ break;
+ }
+
+ return getAddressWithFixup(ImmOp, FixupKind, Fixups);
+}
+
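+// For example, "add x0, x1, #:lo12:var" carries a VK_AARCH64_LO12 expression,
+// so fixup_a64_add_lo12 is recorded and the returned immediate is zero.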
+unsigned
+AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (MO.isImm())
+ return static_cast<unsigned>(MO.getImm());
+
+ assert(MO.isExpr());
+
+ unsigned FixupKind = 0;
+ switch (cast<AArch64MCExpr>(MO.getExpr())->getKind()) {
+ default: llvm_unreachable("Invalid expression modifier");
+ case AArch64MCExpr::VK_AARCH64_LO12:
+ FixupKind = AArch64::fixup_a64_add_lo12; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_HI12:
+ FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_LO12:
+ FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC:
+ FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_HI12:
+ FixupKind = AArch64::fixup_a64_add_tprel_hi12; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_LO12:
+ FixupKind = AArch64::fixup_a64_add_tprel_lo12; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC:
+ FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break;
+ case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12:
+ FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break;
+ }
+
+ return getAddressWithFixup(MO, FixupKind, Fixups);
+}
+
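+// For example, a plain "adrp x0, var" takes fixup_a64_adr_prel_page, while
+// "adrp x0, :got:var" carries VK_AARCH64_GOT and takes
+// fixup_a64_adr_prel_got_page.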
+unsigned
+AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ if (MO.isImm())
+ return static_cast<unsigned>(MO.getImm());
+
+ assert(MO.isExpr());
+
+ unsigned Modifier = AArch64MCExpr::VK_AARCH64_None;
+ if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr()))
+ Modifier = Expr->getKind();
+
+ unsigned FixupKind = 0;
+ switch (Modifier) {
+ case AArch64MCExpr::VK_AARCH64_None:
+ FixupKind = AArch64::fixup_a64_adr_prel_page;
+ break;
+ case AArch64MCExpr::VK_AARCH64_GOT:
+ FixupKind = AArch64::fixup_a64_adr_prel_got_page;
+ break;
+ case AArch64MCExpr::VK_AARCH64_GOTTPREL:
+ FixupKind = AArch64::fixup_a64_adr_gottprel_page;
+ break;
+ case AArch64MCExpr::VK_AARCH64_TLSDESC:
+ FixupKind = AArch64::fixup_a64_tlsdesc_adr_page;
+ break;
+ default:
+ llvm_unreachable("Unknown symbol reference kind for ADRP instruction");
+ }
+
+ return getAddressWithFixup(MO, FixupKind, Fixups);
+}
+
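+// LSL is an alias of UBFM: "lsl w0, w1, #3" means "ubfm w0, w1, #29, #28",
+// i.e. immr = (32 - shift) mod 32 and imms = 31 - shift (similarly mod 64 and
+// 63 - shift for the 64-bit form). The two helpers below pack immr into the
+// low six bits and imms into bits 11:6 of the returned operand value.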
+unsigned
+AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Only immediate expected for shift");
+
+ return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6;
+}
+
+unsigned
+AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+
+ const MCOperand &MO = MI.getOperand(OpIdx);
+ assert(MO.isImm() && "Only immediate expected for shift");
+
+ return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6;
+}
+
+
+template<AArch64::Fixups fixupDesired> unsigned
+AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI,
+ unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ if (MO.isExpr())
+ return getAddressWithFixup(MO, fixupDesired, Fixups);
+
+ assert(MO.isImm());
+ return MO.getImm();
+}
+
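+// For example, "ldr x0, label" takes the generic fixup_a64_ld_prel, while the
+// TLS initial-exec literal form "ldr x0, :gottprel:var" takes
+// fixup_a64_ld_gottprel_prel19.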
+unsigned
+AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI,
+ unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &MO = MI.getOperand(OpIdx);
+
+ if (MO.isImm())
+ return MO.getImm();
+
+ assert(MO.isExpr());
+
+ unsigned FixupKind;
+ if (isa<AArch64MCExpr>(MO.getExpr())) {
+ assert(cast<AArch64MCExpr>(MO.getExpr())->getKind()
+ == AArch64MCExpr::VK_AARCH64_GOTTPREL
+ && "Invalid symbol modifier for literal load");
+ FixupKind = AArch64::fixup_a64_ld_gottprel_prel19;
+ } else {
+ FixupKind = AArch64::fixup_a64_ld_prel;
+ }
+
+ return getAddressWithFixup(MO, FixupKind, Fixups);
+}
+
+
+unsigned
+AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI,
+ const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MO.isReg()) {
+ return Ctx.getRegisterInfo().getEncodingValue(MO.getReg());
+ } else if (MO.isImm()) {
+ return static_cast<unsigned>(MO.getImm());
+ }
+
+ llvm_unreachable("Unable to encode MCOperand!");
+ return 0;
+}
+
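+// For example, "movz x0, #:abs_g1:var" records fixup_a64_movw_uabs_g1; the
+// imm16 bits of the returned value stay zero for the linker to fill, and only
+// the shift operand contributes (shifted into bits 16 and up).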
+unsigned
+AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ const MCOperand &UImm16MO = MI.getOperand(OpIdx);
+ const MCOperand &ShiftMO = MI.getOperand(OpIdx + 1);
+
+ unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16;
+
+ if (UImm16MO.isImm()) {
+ Result |= UImm16MO.getImm();
+ return Result;
+ }
+
+ const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
+ AArch64::Fixups requestedFixup;
+ switch (A64E->getKind()) {
+ default: llvm_unreachable("unexpected expression modifier");
+ case AArch64MCExpr::VK_AARCH64_ABS_G0:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break;
+ case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break;
+ case AArch64MCExpr::VK_AARCH64_ABS_G1:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break;
+ case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break;
+ case AArch64MCExpr::VK_AARCH64_ABS_G2:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break;
+ case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break;
+ case AArch64MCExpr::VK_AARCH64_ABS_G3:
+ requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break;
+ case AArch64MCExpr::VK_AARCH64_SABS_G0:
+ requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break;
+ case AArch64MCExpr::VK_AARCH64_SABS_G1:
+ requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break;
+ case AArch64MCExpr::VK_AARCH64_SABS_G2:
+ requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
+ requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
+ requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
+ requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
+ requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break;
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
+ requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break;
+ case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
+ requestedFixup = AArch64::fixup_a64_movw_gottprel_g1; break;
+ case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
+ requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_G2:
+ requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_G1:
+ requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
+ requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_G0:
+ requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break;
+ case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
+ requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break;
+ }
+
+ return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups);
+}
+
+unsigned AArch64MCCodeEmitter::fixFCMPImm(const MCInst &MI,
+ unsigned EncodedValue) const {
+ // For FCMP[E] Rn, #0.0 the Rm field is architecturally ignored; its
+ // canonical representation is all zeros, so clear those bits here.
+ EncodedValue &= ~0x1f0000u;
+
+ return EncodedValue;
+}
+
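+// For example, plain LDXR has neither a status register (Rs) nor a second
+// transfer register (Rt2), so both fields are forced to all-ones (0b11111,
+// i.e. register 31), as the architecture requires for the unused fields.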
+template<int hasRs, int hasRt2> unsigned
+AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
+ unsigned EncodedValue) const {
+ if (!hasRs) EncodedValue |= 0x001F0000;
+ if (!hasRt2) EncodedValue |= 0x00007C00;
+
+ return EncodedValue;
+}
+
+unsigned
+AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const {
+ // If one of the signed fixup kinds is applied to a MOVZ instruction, the
+ // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
+ // job to ensure that any bits possibly affected by this are 0. This means we
+ // must zero out bit 30 (essentially emitting a MOVN).
+ MCOperand UImm16MO = MI.getOperand(1);
+
+ // Nothing to do if there's no fixup.
+ if (UImm16MO.isImm())
+ return EncodedValue;
+
+ const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
+ switch (A64E->getKind()) {
+ case AArch64MCExpr::VK_AARCH64_SABS_G0:
+ case AArch64MCExpr::VK_AARCH64_SABS_G1:
+ case AArch64MCExpr::VK_AARCH64_SABS_G2:
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
+ case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
+ case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
+ case AArch64MCExpr::VK_AARCH64_TPREL_G2:
+ case AArch64MCExpr::VK_AARCH64_TPREL_G1:
+ case AArch64MCExpr::VK_AARCH64_TPREL_G0:
+ return EncodedValue & ~(1u << 30);
+ default:
+ // Nothing to do for an unsigned fixup.
+ return EncodedValue;
+ }
+
+ llvm_unreachable("Should have returned by now");
+}
+
+unsigned
+AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
+ unsigned EncodedValue) const {
+ // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
+ // (i.e. all bits 1) but is ignored by the processor.
+ EncodedValue |= 0x1f << 10;
+ return EncodedValue;
+}
+
+MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new AArch64MCCodeEmitter(MCII, STI, Ctx);
+}
+
+void AArch64MCCodeEmitter::
+EncodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups) const {
+ if (MI.getOpcode() == AArch64::TLSDESCCALL) {
+ // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
+ // following (BLR) instruction. It doesn't emit any code itself so it
+ // doesn't go through the normal TableGenerated channels.
+ MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call);
+ const MCExpr *Expr;
+ Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx);
+ Fixups.push_back(MCFixup::Create(0, Expr, Fixup));
+ return;
+ }
+
+ uint32_t Binary = getBinaryCodeForInstr(MI, Fixups);
+
+ EmitInstruction(Binary, OS);
+}
+
+
+#include "AArch64GenMCCodeEmitter.inc"
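For context, the factory above only becomes reachable once it is registered with LLVM's TargetRegistry. That registration is not part of this file; the sketch below shows the conventional shape it takes in a backend's MCTargetDesc setup. TheAArch64Target and the initializer name are assumptions based on LLVM convention, not quotes from this patch:

    #include "llvm/Support/TargetRegistry.h"

    namespace llvm {
      // Assumed, per convention: the Target singleton for this backend.
      extern Target TheAArch64Target;
    }
    using namespace llvm;

    extern "C" void LLVMInitializeAArch64TargetMC() {
      // Hook the emitter factory into the registry so that
      // Target::createMCCodeEmitter() can construct it on demand.
      TargetRegistry::RegisterMCCodeEmitter(TheAArch64Target,
                                            createAArch64MCCodeEmitter);
    }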