From 1415ca1781051bc823aa8e670dc1d0fad05bd3de Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 2 Feb 2014 07:08:01 +0000 Subject: Merge HasVEXPrefix/HasEVEXPrefix/HasXOPPrefix into a 2-bit 'encoding' field in TSFlags. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200624 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/MCTargetDesc/X86BaseInfo.h | 60 +++++++------ lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 70 ++++++++------- lib/Target/X86/X86CodeEmitter.cpp | 22 +++-- lib/Target/X86/X86InstrFormats.td | 108 +++++++++++++---------- lib/Target/X86/X86InstrInfo.td | 8 +- lib/Target/X86/X86InstrXOP.td | 46 +++++----- utils/TableGen/X86RecognizableInstr.cpp | 43 ++++----- utils/TableGen/X86RecognizableInstr.h | 14 ++- 8 files changed, 200 insertions(+), 171 deletions(-) diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h index 78ed12dc4c..da70eb0207 100644 --- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h +++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h @@ -445,40 +445,56 @@ namespace X86II { // 0 means normal, non-SSE instruction. SSEDomainShift = REPShift + 1, - OpcodeShift = SSEDomainShift + 2, + // Encoding + EncodingShift = SSEDomainShift + 2, + EncodingMask = 0x3 << EncodingShift, + + // VEX - encoding using 0xC4/0xC5 + VEX = 1, + + /// XOP - Opcode prefix used by XOP instructions. + XOP = 2, + + // VEX_EVEX - Specifies that this instruction use EVEX form which provides + // syntax support up to 32 512-bit register operands and up to 7 16-bit + // mask operands as well as source operand data swizzling/memory operand + // conversion, eviction hint, and rounding mode. + EVEX = 3, + + // Opcode + OpcodeShift = EncodingShift + 2, //===------------------------------------------------------------------===// /// VEX - The opcode prefix used by AVX instructions VEXShift = OpcodeShift + 8, - VEX = 1U << 0, /// VEX_W - Has a opcode specific functionality, but is used in the same /// way as REX_W is for regular SSE instructions. - VEX_W = 1U << 1, + VEX_W = 1U << 0, /// VEX_4V - Used to specify an additional AVX/SSE register. Several 2 /// address instructions in SSE are represented as 3 address ones in AVX /// and the additional register is encoded in VEX_VVVV prefix. - VEX_4V = 1U << 2, + VEX_4V = 1U << 1, /// VEX_4VOp3 - Similar to VEX_4V, but used on instructions that encode /// operand 3 with VEX.vvvv. - VEX_4VOp3 = 1U << 3, + VEX_4VOp3 = 1U << 2, /// VEX_I8IMM - Specifies that the last register used in a AVX instruction, /// must be encoded in the i8 immediate field. This usually happens in /// instructions with 4 operands. - VEX_I8IMM = 1U << 4, + VEX_I8IMM = 1U << 3, /// VEX_L - Stands for a bit in the VEX opcode prefix meaning the current /// instruction uses 256-bit wide registers. This is usually auto detected /// if a VR256 register is used, but some AVX instructions also have this /// field marked when using a f256 memory references. - VEX_L = 1U << 5, + VEX_L = 1U << 4, // VEX_LIG - Specifies that this instruction ignores the L-bit in the VEX // prefix. Usually used for scalar instructions. Needed by disassembler. - VEX_LIG = 1U << 6, + VEX_LIG = 1U << 5, // TODO: we should combine VEX_L and VEX_LIG together to form a 2-bit field // with following encoding: @@ -488,26 +504,20 @@ namespace X86II { // - 11 LIG (but, in insn encoding, leave VEX.L and EVEX.L in zeros. 
// this will save 1 tsflag bit - // VEX_EVEX - Specifies that this instruction use EVEX form which provides - // syntax support up to 32 512-bit register operands and up to 7 16-bit - // mask operands as well as source operand data swizzling/memory operand - // conversion, eviction hint, and rounding mode. - EVEX = 1U << 7, - // EVEX_K - Set if this instruction requires masking - EVEX_K = 1U << 8, + EVEX_K = 1U << 6, // EVEX_Z - Set if this instruction has EVEX.Z field set. - EVEX_Z = 1U << 9, + EVEX_Z = 1U << 7, // EVEX_L2 - Set if this instruction has EVEX.L' field set. - EVEX_L2 = 1U << 10, + EVEX_L2 = 1U << 8, // EVEX_B - Set if this instruction has EVEX.B field set. - EVEX_B = 1U << 11, + EVEX_B = 1U << 9, // EVEX_CD8E - compressed disp8 form, element-size - EVEX_CD8EShift = VEXShift + 12, + EVEX_CD8EShift = VEXShift + 10, EVEX_CD8EMask = 3, // EVEX_CD8V - compressed disp8 form, vector-width @@ -520,17 +530,14 @@ namespace X86II { /// storing a classifier in the imm8 field. To simplify our implementation, /// we handle this by storeing the classifier in the opcode field and using /// this flag to indicate that the encoder should do the wacky 3DNow! thing. - Has3DNow0F0FOpcode = 1U << 17, + Has3DNow0F0FOpcode = 1U << 15, /// MemOp4 - Used to indicate swapping of operand 3 and 4 to be encoded in /// ModRM or I8IMM. This is used for FMA4 and XOP instructions. - MemOp4 = 1U << 18, - - /// XOP - Opcode prefix used by XOP instructions. - XOP = 1U << 19, + MemOp4 = 1U << 16, /// Explicitly specified rounding control - EVEX_RC = 1U << 20 + EVEX_RC = 1U << 17 }; // getBaseOpcodeFor - This function returns the "base" X86 opcode for the @@ -651,8 +658,7 @@ namespace X86II { case X86II::MRMSrcMem: { bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4; - bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX; - bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); + bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); unsigned FirstMemOp = 1; if (HasVEX_4V) ++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV). diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp index 7affe800be..eceddeb192 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -604,8 +604,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand, const MCInst &MI, const MCInstrDesc &Desc, raw_ostream &OS) const { - bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX; - bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); + unsigned char Encoding = (TSFlags & X86II::EncodingMask) >> + X86II::EncodingShift; + bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3; bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4; @@ -638,9 +639,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // opcode extension, or ignored, depending on the opcode byte) unsigned char VEX_W = 0; - // XOP: Use XOP prefix byte 0x8f instead of VEX. 
- bool XOP = (TSFlags >> X86II::VEXShift) & X86II::XOP; - // VEX_5M (VEX m-mmmmm field): // // 0b00000: Reserved for future use @@ -698,13 +696,13 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L) VEX_L = 1; - if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2)) + if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_L2)) EVEX_L2 = 1; if (HasEVEX_K && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_Z)) EVEX_z = 1; - if (HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B)) + if (((TSFlags >> X86II::VEXShift) & X86II::EVEX_B)) EVEX_b = 1; switch (TSFlags & X86II::OpPrefixMask) { @@ -744,7 +742,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand + X86::AddrIndexReg).getReg())) VEX_X = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand + + if (X86II::is32ExtendedReg(MI.getOperand(MemOperand + X86::AddrIndexReg).getReg())) EVEX_V2 = 0x0; @@ -755,7 +753,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, CurOp); - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_V2 = 0x0; CurOp++; } @@ -764,7 +762,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (MO.isReg()) { if (X86II::isX86_64ExtendedReg(MO.getReg())) VEX_R = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MO.getReg())) + if (X86II::is32ExtendedReg(MO.getReg())) EVEX_R2 = 0x0; } break; @@ -781,7 +779,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M), if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_R2 = 0x0; CurOp++; @@ -790,7 +788,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, CurOp); - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_V2 = 0x0; CurOp++; } @@ -801,8 +799,8 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (X86II::isX86_64ExtendedReg( MI.getOperand(MemOperand+X86::AddrIndexReg).getReg())) VEX_X = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(MemOperand + - X86::AddrIndexReg).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(MemOperand + + X86::AddrIndexReg).getReg())) EVEX_V2 = 0x0; if (HasVEX_4VOp3) @@ -822,7 +820,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // src1(VEX_4V), MemAddr if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, CurOp); - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_V2 = 0x0; CurOp++; } @@ -849,7 +847,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M), if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_R2 = 0x0; CurOp++; @@ -858,7 
+856,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, CurOp); - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_V2 = 0x0; CurOp++; } @@ -868,7 +866,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_B = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_X = 0x0; CurOp++; if (HasVEX_4VOp3) @@ -889,7 +887,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // dst(ModR/M), src1(VEX_4V), src2(ModR/M) if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_B = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_X = 0x0; CurOp++; @@ -898,14 +896,14 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, CurOp); - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_V2 = 0x0; CurOp++; } if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_R2 = 0x0; if (EVEX_b) EncodeRC = true; @@ -918,21 +916,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // dst(VEX_4V), src(ModR/M), imm8 if (HasVEX_4V) { VEX_4V = getVEXRegisterEncoding(MI, CurOp); - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) EVEX_V2 = 0x0; CurOp++; - } + } if (HasEVEX_K) EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++); if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_B = 0x0; - if (HasEVEX && X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) + if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_X = 0x0; break; } - if (!HasEVEX) { + if (Encoding == X86II::VEX || Encoding == X86II::XOP) { // VEX opcode prefix can have 2 or 3 bytes // // 3 bytes: @@ -944,19 +942,25 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // | C5h | | R | vvvv | L | pp | // +-----+ +-------------------+ // + // XOP uses a similar prefix: + // +-----+ +--------------+ +-------------------+ + // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp | + // +-----+ +--------------+ +-------------------+ unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3); - if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix + // Can we use the 2 byte VEX prefix? + if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { EmitByte(0xC5, CurByte, OS); EmitByte(LastByte | (VEX_R << 7), CurByte, OS); return; } // 3 byte VEX prefix - EmitByte(XOP ? 0x8F : 0xC4, CurByte, OS); + EmitByte(Encoding == X86II::XOP ? 
0x8F : 0xC4, CurByte, OS); EmitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M, CurByte, OS); EmitByte(LastByte | (VEX_W << 7), CurByte, OS); } else { + assert(Encoding == X86II::EVEX && "unknown encoding!"); // EVEX opcode prefix can have 4 bytes // // +-----+ +--------------+ +-------------------+ +------------------------+ @@ -1187,8 +1191,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, // Keep track of the current byte being emitted. unsigned CurByte = 0; - // Is this instruction encoded using the AVX VEX prefix? - bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX; + // Encoding type for this instruction. + unsigned char Encoding = (TSFlags & X86II::EncodingMask) >> + X86II::EncodingShift; // It uses the VEX.VVVV field? bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; @@ -1197,9 +1202,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, const unsigned MemOp4_I8IMMOperand = 2; // It uses the EVEX.aaa field? - bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX; - bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); - bool HasEVEX_RC = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_RC); + bool HasEVEX_K = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); + bool HasEVEX_RC = ((TSFlags >> X86II::VEXShift) & X86II::EVEX_RC); // Determine where the memory operand starts, if present. int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode); @@ -1248,7 +1252,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, if (need_address_override) EmitByte(0x67, CurByte, OS); - if (!HasVEXPrefix) + if (Encoding == 0) EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS); else EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS); diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp index 5d36d936c5..8bd8d9df67 100644 --- a/lib/Target/X86/X86CodeEmitter.cpp +++ b/lib/Target/X86/X86CodeEmitter.cpp @@ -758,6 +758,8 @@ void Emitter::emitVEXOpcodePrefix(uint64_t TSFlags, int MemOperand, const MachineInstr &MI, const MCInstrDesc *Desc) const { + unsigned char Encoding = (TSFlags & X86II::EncodingMask) >> + X86II::EncodingShift; bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3; bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4; @@ -788,9 +790,6 @@ void Emitter::emitVEXOpcodePrefix(uint64_t TSFlags, // opcode extension, or ignored, depending on the opcode byte) unsigned char VEX_W = 0; - // XOP: Use XOP prefix byte 0x8f instead of VEX. - bool XOP = (TSFlags >> X86II::VEXShift) & X86II::XOP; - // VEX_5M (VEX m-mmmmm field): // // 0b00000: Reserved for future use @@ -995,16 +994,21 @@ void Emitter::emitVEXOpcodePrefix(uint64_t TSFlags, // | C5h | | R | vvvv | L | pp | // +-----+ +-------------------+ // + // XOP uses a similar prefix: + // +-----+ +--------------+ +-------------------+ + // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp | + // +-----+ +--------------+ +-------------------+ unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3); - if (VEX_B && VEX_X && !VEX_W && !XOP && (VEX_5M == 1)) { // 2 byte VEX prefix + // Can this use the 2 byte VEX prefix? + if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) { MCE.emitByte(0xC5); MCE.emitByte(LastByte | (VEX_R << 7)); return; } // 3 byte VEX prefix - MCE.emitByte(XOP ? 0x8F : 0xC4); + MCE.emitByte(Encoding == X86II::XOP ? 
0x8F : 0xC4); MCE.emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M); MCE.emitByte(LastByte | (VEX_W << 7)); } @@ -1054,8 +1058,10 @@ void Emitter::emitInstruction(MachineInstr &MI, uint64_t TSFlags = Desc->TSFlags; - // Is this instruction encoded using the AVX VEX prefix? - bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX; + // Encoding type for this instruction. + unsigned char Encoding = (TSFlags & X86II::EncodingMask) >> + X86II::EncodingShift; + // It uses the VEX.VVVV field? bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3; @@ -1094,7 +1100,7 @@ void Emitter::emitInstruction(MachineInstr &MI, if (need_address_override) MCE.emitByte(0x67); - if (!HasVEXPrefix) + if (Encoding == 0) emitOpcodePrefix(TSFlags, MemoryOperand, MI, Desc); else emitVEXOpcodePrefix(TSFlags, MemoryOperand, MI, Desc); diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index 5ef80bdc31..453a27ef49 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -143,6 +143,15 @@ def DF : Map<14>; def A6 : Map<15>; def A7 : Map<16>; +// Class specifying the encoding +class Encoding val> { + bits<2> Value = val; +} +def EncNormal : Encoding<0>; +def EncVEX : Encoding<1>; +def EncXOP : Encoding<2>; +def EncEVEX : Encoding<3>; + // Prefix byte classes which are used to indicate to the ad-hoc machine code // emitter that various prefix bytes are required. class OpSize { bit hasOpSizePrefix = 1; } @@ -175,15 +184,15 @@ class T8XD : T8 { Prefix OpPrefix = XD; } class T8XS : T8 { Prefix OpPrefix = XS; } class TAPD : TA { Prefix OpPrefix = PD; } class TAXD : TA { Prefix OpPrefix = XD; } -class VEX { bit hasVEXPrefix = 1; } +class VEX { Encoding OpEnc = EncVEX; } class VEX_W { bit hasVEX_WPrefix = 1; } -class VEX_4V : VEX { bit hasVEX_4VPrefix = 1; } -class VEX_4VOp3 : VEX { bit hasVEX_4VOp3Prefix = 1; } +class VEX_4V : VEX { bit hasVEX_4V = 1; } +class VEX_4VOp3 : VEX { bit hasVEX_4VOp3 = 1; } class VEX_I8IMM { bit hasVEX_i8ImmReg = 1; } class VEX_L { bit hasVEX_L = 1; } class VEX_LIG { bit ignoresVEX_L = 1; } -class EVEX : VEX { bit hasEVEXPrefix = 1; } -class EVEX_4V : VEX_4V { bit hasEVEXPrefix = 1; } +class EVEX : VEX { Encoding OpEnc = EncEVEX; } +class EVEX_4V : VEX_4V { Encoding OpEnc = EncEVEX; } class EVEX_K { bit hasEVEX_K = 1; } class EVEX_KZ : EVEX_K { bit hasEVEX_Z = 1; } class EVEX_B { bit hasEVEX_B = 1; } @@ -198,7 +207,10 @@ class EVEX_CD8 { } class Has3DNow0F0FOpcode { bit has3DNow0F0FOpcode = 1; } class MemOp4 { bit hasMemOp4Prefix = 1; } -class XOP { bit hasXOP_Prefix = 1; } +class XOP { Encoding OpEnc = EncXOP; } +class XOP_4V : XOP { bit hasVEX_4V = 1; } +class XOP_4VOp3 : XOP { bit hasVEX_4VOp3 = 1; } + class X86Inst opcod, Format f, ImmType i, dag outs, dag ins, string AsmStr, InstrItinClass itin, @@ -238,16 +250,15 @@ class X86Inst opcod, Format f, ImmType i, dag outs, dag ins, bit hasLockPrefix = 0; // Does this inst have a 0xF0 prefix? Domain ExeDomain = d; bit hasREPPrefix = 0; // Does this inst have a REP prefix? - bit hasVEXPrefix = 0; // Does this inst require a VEX prefix? + Encoding OpEnc = EncNormal; // Encoding used by this instruction bit hasVEX_WPrefix = 0; // Does this inst set the VEX_W field? - bit hasVEX_4VPrefix = 0; // Does this inst require the VEX.VVVV field? - bit hasVEX_4VOp3Prefix = 0; // Does this inst require the VEX.VVVV field to - // encode the third operand? 
+ bit hasVEX_4V = 0; // Does this inst require the VEX.VVVV field? + bit hasVEX_4VOp3 = 0; // Does this inst require the VEX.VVVV field to + // encode the third operand? bit hasVEX_i8ImmReg = 0; // Does this inst require the last source register // to be encoded in a immediate field? bit hasVEX_L = 0; // Does this inst use large (256-bit) registers? bit ignoresVEX_L = 0; // Does this instruction ignore the L-bit - bit hasEVEXPrefix = 0; // Does this inst require EVEX form? bit hasEVEX_K = 0; // Does this inst require masking? bit hasEVEX_Z = 0; // Does this inst set the EVEX_Z field? bit hasEVEX_L2 = 0; // Does this inst set the EVEX_L2 field? @@ -256,7 +267,6 @@ class X86Inst opcod, Format f, ImmType i, dag outs, dag ins, bits<3> EVEX_CD8V = 0; // Compressed disp8 form - vector-width. bit has3DNow0F0FOpcode =0;// Wacky 3dNow! encoding? bit hasMemOp4Prefix = 0; // Same bit as VEX_W, but used for swapping operands - bit hasXOP_Prefix = 0; // Does this inst require an XOP prefix? bit hasEVEX_RC = 0; // Explicitly specified rounding control in FP instruction. // TSFlags layout should be kept in sync with X86InstrInfo.h. @@ -272,15 +282,14 @@ class X86Inst opcod, Format f, ImmType i, dag outs, dag ins, let TSFlags{24} = hasLockPrefix; let TSFlags{25} = hasREPPrefix; let TSFlags{27-26} = ExeDomain.Value; - let TSFlags{35-28} = Opcode; - let TSFlags{36} = hasVEXPrefix; - let TSFlags{37} = hasVEX_WPrefix; - let TSFlags{38} = hasVEX_4VPrefix; - let TSFlags{39} = hasVEX_4VOp3Prefix; - let TSFlags{40} = hasVEX_i8ImmReg; - let TSFlags{41} = hasVEX_L; - let TSFlags{42} = ignoresVEX_L; - let TSFlags{43} = hasEVEXPrefix; + let TSFlags{29-28} = OpEnc.Value; + let TSFlags{37-30} = Opcode; + let TSFlags{38} = hasVEX_WPrefix; + let TSFlags{39} = hasVEX_4V; + let TSFlags{40} = hasVEX_4VOp3; + let TSFlags{41} = hasVEX_i8ImmReg; + let TSFlags{42} = hasVEX_L; + let TSFlags{43} = ignoresVEX_L; let TSFlags{44} = hasEVEX_K; let TSFlags{45} = hasEVEX_Z; let TSFlags{46} = hasEVEX_L2; @@ -289,8 +298,7 @@ class X86Inst opcod, Format f, ImmType i, dag outs, dag ins, let TSFlags{52-50} = EVEX_CD8V; let TSFlags{53} = has3DNow0F0FOpcode; let TSFlags{54} = hasMemOp4Prefix; - let TSFlags{55} = hasXOP_Prefix; - let TSFlags{56} = hasEVEX_RC; + let TSFlags{55} = hasEVEX_RC; } class PseudoI pattern> @@ -385,56 +393,58 @@ class Iseg32 o, Format f, dag outs, dag ins, string asm, let CodeSize = 3; } -def __xs : XS; -def __xd : XD; -def __pd : PD; - // SI - SSE 1 & 2 scalar instructions class SI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = NoItinerary> : I { - let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512], - !if(hasVEXPrefix /* VEX */, [UseAVX], - !if(!eq(OpPrefix.Value, __xs.OpPrefix.Value), [UseSSE1], - !if(!eq(OpPrefix.Value, __xd.OpPrefix.Value), [UseSSE2], - !if(!eq(OpPrefix.Value, __pd.OpPrefix.Value), [UseSSE2], + let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512], + !if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX], + !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1], + !if(!eq(OpPrefix.Value, XD.Value), [UseSSE2], + !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2], [UseSSE1]))))); // AVX instructions have a 'v' prefix in the mnemonic - let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); + let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm), + !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm), + asm)); } // SIi8 - SSE 1 & 2 scalar instructions class SIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = 
NoItinerary> : Ii8 { - let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512], - !if(hasVEXPrefix /* VEX */, [UseAVX], - !if(!eq(OpPrefix.Value, __xs.OpPrefix.Value), [UseSSE1], + let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512], + !if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX], + !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1], [UseSSE2]))); // AVX instructions have a 'v' prefix in the mnemonic - let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); + let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm), + !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm), + asm)); } // PI - SSE 1 & 2 packed instructions class PI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin, Domain d> : I { - let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512], - !if(hasVEXPrefix /* VEX */, [HasAVX], - !if(!eq(OpPrefix.Value, __pd.OpPrefix.Value), [UseSSE2], + let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512], + !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX], + !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2], [UseSSE1]))); // AVX instructions have a 'v' prefix in the mnemonic - let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); + let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm), + !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm), + asm)); } // MMXPI - SSE 1 & 2 packed instructions with MMX operands class MMXPI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin, Domain d> : I { - let Predicates = !if(!eq(OpPrefix.Value, __pd.OpPrefix.Value), [HasSSE2], + let Predicates = !if(!eq(OpPrefix.Value, PD.Value), [HasSSE2], [HasSSE1]); } @@ -442,13 +452,15 @@ class MMXPI o, Format F, dag outs, dag ins, string asm, list patter class PIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin, Domain d> : Ii8 { - let Predicates = !if(hasEVEXPrefix /* EVEX */, [HasAVX512], - !if(hasVEXPrefix /* VEX */, [HasAVX], - !if(!eq(OpPrefix.Value, __pd.OpPrefix.Value), [UseSSE2], + let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512], + !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX], + !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2], [UseSSE1]))); // AVX instructions have a 'v' prefix in the mnemonic - let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); + let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm), + !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm), + asm)); } // SSE1 Instruction Templates: @@ -761,13 +773,13 @@ class FMA4 o, Format F, dag outs, dag ins, string asm, class IXOP o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = NoItinerary> : I, - XOP, XOP9, Requires<[HasXOP]>; + XOP9, Requires<[HasXOP]>; // XOP 2, 3 and 4 Operand Instruction Templates with imm byte class IXOPi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = NoItinerary> : Ii8, - XOP, XOP8, Requires<[HasXOP]>; + XOP8, Requires<[HasXOP]>; // XOP 5 operand instruction (VEX encoding!) 
class IXOP5 o, Format F, dag outs, dag ins, string asm, diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 35709133a2..60e2f9b0b0 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -2089,13 +2089,13 @@ multiclass tbm_ternary_imm_intr opc, RegisterClass RC, string OpcodeStr, !strconcat(OpcodeStr, "\t{$cntl, $src1, $dst|$dst, $src1, $cntl}"), [(set RC:$dst, (Int RC:$src1, immoperator:$cntl))]>, - XOP, XOPA, VEX; + XOP, XOPA; def mi : Ii32, - XOP, XOPA, VEX; + XOP, XOPA; } defm BEXTRI32 : tbm_ternary_imm_intr<0x10, GR32, "bextr", i32mem, loadi32, @@ -2111,11 +2111,11 @@ multiclass tbm_binary_rm opc, Format FormReg, Format FormMem, let hasSideEffects = 0 in { def rr : I, XOP, XOP9, VEX_4V; + []>, XOP_4V, XOP9; let mayLoad = 1 in def rm : I, XOP, XOP9, VEX_4V; + []>, XOP_4V, XOP9; } } diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td index 2b6ee5c39e..45e2ff0952 100644 --- a/lib/Target/X86/X86InstrXOP.td +++ b/lib/Target/X86/X86InstrXOP.td @@ -14,10 +14,10 @@ multiclass xop2op opc, string OpcodeStr, Intrinsic Int, PatFrag memop> { def rr : IXOP, VEX; + [(set VR128:$dst, (Int VR128:$src))]>, XOP; def rm : IXOP, VEX; + [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP; } defm VPHSUBWD : xop2op<0xE2, "vphsubwd", int_x86_xop_vphsubwd, memopv2i64>; @@ -41,10 +41,10 @@ multiclass xop2opsld opc, string OpcodeStr, Intrinsic Int, Operand memop, ComplexPattern mem_cpat> { def rr : IXOP, VEX; + [(set VR128:$dst, (Int VR128:$src))]>, XOP; def rm : IXOP, VEX; + [(set VR128:$dst, (Int (bitconvert mem_cpat:$src)))]>, XOP; } defm VFRCZSS : xop2opsld<0x82, "vfrczss", int_x86_xop_vfrcz_ss, @@ -56,10 +56,10 @@ multiclass xop2op128 opc, string OpcodeStr, Intrinsic Int, PatFrag memop> { def rr : IXOP, VEX; + [(set VR128:$dst, (Int VR128:$src))]>, XOP; def rm : IXOP, VEX; + [(set VR128:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP; } defm VFRCZPS : xop2op128<0x80, "vfrczps", int_x86_xop_vfrcz_ps, memopv4f32>; @@ -69,10 +69,10 @@ multiclass xop2op256 opc, string OpcodeStr, Intrinsic Int, PatFrag memop> { def rrY : IXOP, VEX, VEX_L; + [(set VR256:$dst, (Int VR256:$src))]>, XOP, VEX_L; def rmY : IXOP, VEX, VEX_L; + [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, XOP, VEX_L; } defm VFRCZPS : xop2op256<0x80, "vfrczps", int_x86_xop_vfrcz_ps_256, memopv8f32>; @@ -82,19 +82,19 @@ multiclass xop3op opc, string OpcodeStr, Intrinsic Int> { def rr : IXOP, VEX_4VOp3; + [(set VR128:$dst, (Int VR128:$src1, VR128:$src2))]>, XOP_4VOp3; def rm : IXOP, - VEX_4V, VEX_W; + XOP_4V, VEX_W; def mr : IXOP, - VEX_4VOp3; + XOP_4VOp3; } defm VPSHLW : xop3op<0x95, "vpshlw", int_x86_xop_vpshlw>; @@ -114,12 +114,12 @@ multiclass xop3opimm opc, string OpcodeStr, Intrinsic Int> { def ri : IXOPi8, VEX; + [(set VR128:$dst, (Int VR128:$src1, imm:$src2))]>, XOP; def mi : IXOPi8, VEX; + (Int (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, XOP; } defm VPROTW : xop3opimm<0xC1, "vprotw", int_x86_xop_vprotwi>; @@ -134,14 +134,14 @@ multiclass xop4opm2 opc, string OpcodeStr, Intrinsic Int> { !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), [(set VR128:$dst, - (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, VEX_4V, VEX_I8IMM; + (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V, VEX_I8IMM; def rm : IXOPi8, VEX_4V, VEX_I8IMM; + VR128:$src3))]>, XOP_4V, VEX_I8IMM; } defm VPMADCSWD : xop4opm2<0xB6, "vpmadcswd", int_x86_xop_vpmadcswd>; @@ -164,14 +164,14 @@ multiclass xop4opimm opc, string OpcodeStr, 
Intrinsic Int> { !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, imm:$src3))]>, - VEX_4V; + XOP_4V; def mi : IXOPi8, VEX_4V; + imm:$src3))]>, XOP_4V; } defm VPCOMB : xop4opimm<0xCC, "vpcomb", int_x86_xop_vpcomb>; @@ -190,7 +190,7 @@ multiclass xop4op opc, string OpcodeStr, Intrinsic Int> { !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, - VEX_4V, VEX_I8IMM; + XOP_4V, VEX_I8IMM; def rm : IXOPi8 opc, string OpcodeStr, Intrinsic Int> { [(set VR128:$dst, (Int VR128:$src1, VR128:$src2, (bitconvert (memopv2i64 addr:$src3))))]>, - VEX_4V, VEX_I8IMM, VEX_W, MemOp4; + XOP_4V, VEX_I8IMM, VEX_W, MemOp4; def mr : IXOPi8 opc, string OpcodeStr, Intrinsic Int> { [(set VR128:$dst, (Int VR128:$src1, (bitconvert (memopv2i64 addr:$src2)), VR128:$src3))]>, - VEX_4V, VEX_I8IMM; + XOP_4V, VEX_I8IMM; } defm VPPERM : xop4op<0xA3, "vpperm", int_x86_xop_vpperm>; @@ -218,7 +218,7 @@ multiclass xop4op256 opc, string OpcodeStr, Intrinsic Int> { !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>, - VEX_4V, VEX_I8IMM, VEX_L; + XOP_4V, VEX_I8IMM, VEX_L; def rmY : IXOPi8 opc, string OpcodeStr, Intrinsic Int> { [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, (bitconvert (memopv4i64 addr:$src3))))]>, - VEX_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L; + XOP_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L; def mrY : IXOPi8 opc, string OpcodeStr, Intrinsic Int> { [(set VR256:$dst, (Int VR256:$src1, (bitconvert (memopv4i64 addr:$src2)), VR256:$src3))]>, - VEX_4V, VEX_I8IMM, VEX_L; + XOP_4V, VEX_I8IMM, VEX_L; } defm VPCMOV : xop4op256<0xA2, "vpcmov", int_x86_xop_vpcmov_256>; diff --git a/utils/TableGen/X86RecognizableInstr.cpp b/utils/TableGen/X86RecognizableInstr.cpp index 3c606225ba..e0306ac74f 100644 --- a/utils/TableGen/X86RecognizableInstr.cpp +++ b/utils/TableGen/X86RecognizableInstr.cpp @@ -85,6 +85,10 @@ namespace X86Local { enum { PD = 1, XS = 2, XD = 3 }; + + enum { + VEX = 1, XOP = 2, EVEX = 3 + }; } // If rows are added to the opcode extension tables, then corresponding entries @@ -228,18 +232,17 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables, OpMap = byteFromRec(Rec->getValueAsDef("OpMap"), "Value"); Opcode = byteFromRec(Rec, "Opcode"); Form = byteFromRec(Rec, "FormBits"); + Encoding = byteFromRec(Rec->getValueAsDef("OpEnc"), "Value"); HasOpSizePrefix = Rec->getValueAsBit("hasOpSizePrefix"); HasOpSize16Prefix = Rec->getValueAsBit("hasOpSize16Prefix"); HasAdSizePrefix = Rec->getValueAsBit("hasAdSizePrefix"); HasREX_WPrefix = Rec->getValueAsBit("hasREX_WPrefix"); - HasVEXPrefix = Rec->getValueAsBit("hasVEXPrefix"); - HasVEX_4VPrefix = Rec->getValueAsBit("hasVEX_4VPrefix"); - HasVEX_4VOp3Prefix = Rec->getValueAsBit("hasVEX_4VOp3Prefix"); + HasVEX_4V = Rec->getValueAsBit("hasVEX_4V"); + HasVEX_4VOp3 = Rec->getValueAsBit("hasVEX_4VOp3"); HasVEX_WPrefix = Rec->getValueAsBit("hasVEX_WPrefix"); HasMemOp4Prefix = Rec->getValueAsBit("hasMemOp4Prefix"); IgnoresVEX_L = Rec->getValueAsBit("ignoresVEX_L"); - HasEVEXPrefix = Rec->getValueAsBit("hasEVEXPrefix"); HasEVEX_L2Prefix = Rec->getValueAsBit("hasEVEX_L2"); HasEVEX_K = Rec->getValueAsBit("hasEVEX_K"); HasEVEX_KZ = Rec->getValueAsBit("hasEVEX_Z"); @@ -300,7 +303,7 @@ void RecognizableInstr::processInstr(DisassemblerTables &tables, InstructionContext RecognizableInstr::insnContext() const { 
InstructionContext insnContext; - if (HasEVEXPrefix) { + if (Encoding == X86Local::EVEX) { if (HasVEX_LPrefix && HasEVEX_L2Prefix) { errs() << "Don't support VEX.L if EVEX_L2 is enabled: " << Name << "\n"; llvm_unreachable("Don't support VEX.L if EVEX_L2 is enabled"); @@ -368,7 +371,7 @@ InstructionContext RecognizableInstr::insnContext() const { else insnContext = EVEX_KB(IC_EVEX); /// eof EVEX - } else if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix|| HasVEXPrefix) { + } else if (Encoding == X86Local::VEX || Encoding == X86Local::XOP) { if (HasVEX_LPrefix && HasVEX_WPrefix) { if (HasOpSizePrefix || OpPrefix == X86Local::PD) insnContext = IC_VEX_L_W_OPSIZE; @@ -624,7 +627,7 @@ void RecognizableInstr::emitInstructionSpecifier() { // Operand 2 is a register operand in the Reg/Opcode field. // - In AVX, there is a register operand in the VEX.vvvv field here - // Operand 3 (optional) is an immediate. - if (HasVEX_4VPrefix) + if (HasVEX_4V) assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 4 && "Unexpected number of operands for MRMDestRegFrm with VEX_4V"); else @@ -633,7 +636,7 @@ void RecognizableInstr::emitInstructionSpecifier() { HANDLE_OPERAND(rmRegister) - if (HasVEX_4VPrefix) + if (HasVEX_4V) // FIXME: In AVX, the register below becomes the one encoded // in ModRMVEX and the one above the one in the VEX.VVVV field HANDLE_OPERAND(vvvvRegister) @@ -646,7 +649,7 @@ void RecognizableInstr::emitInstructionSpecifier() { // Operand 2 is a register operand in the Reg/Opcode field. // - In AVX, there is a register operand in the VEX.vvvv field here - // Operand 3 (optional) is an immediate. - if (HasVEX_4VPrefix) + if (HasVEX_4V) assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 4 && "Unexpected number of operands for MRMDestMemFrm with VEX_4V"); else @@ -657,7 +660,7 @@ void RecognizableInstr::emitInstructionSpecifier() { if (HasEVEX_K) HANDLE_OPERAND(writemaskRegister) - if (HasVEX_4VPrefix) + if (HasVEX_4V) // FIXME: In AVX, the register below becomes the one encoded // in ModRMVEX and the one above the one in the VEX.VVVV field HANDLE_OPERAND(vvvvRegister) @@ -672,7 +675,7 @@ void RecognizableInstr::emitInstructionSpecifier() { // Operand 3 (optional) is an immediate. // Operand 4 (optional) is an immediate. - if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix) + if (HasVEX_4V || HasVEX_4VOp3) assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 && "Unexpected number of operands for MRMSrcRegFrm with VEX_4V"); else @@ -684,7 +687,7 @@ void RecognizableInstr::emitInstructionSpecifier() { if (HasEVEX_K) HANDLE_OPERAND(writemaskRegister) - if (HasVEX_4VPrefix) + if (HasVEX_4V) // FIXME: In AVX, the register below becomes the one encoded // in ModRMVEX and the one above the one in the VEX.VVVV field HANDLE_OPERAND(vvvvRegister) @@ -694,7 +697,7 @@ void RecognizableInstr::emitInstructionSpecifier() { HANDLE_OPERAND(rmRegister) - if (HasVEX_4VOp3Prefix) + if (HasVEX_4VOp3) HANDLE_OPERAND(vvvvRegister) if (!HasMemOp4Prefix) @@ -708,7 +711,7 @@ void RecognizableInstr::emitInstructionSpecifier() { // - In AVX, there is a register operand in the VEX.vvvv field here - // Operand 3 (optional) is an immediate. 
- if (HasVEX_4VPrefix || HasVEX_4VOp3Prefix) + if (HasVEX_4V || HasVEX_4VOp3) assert(numPhysicalOperands >= 3 && numPhysicalOperands <= 5 && "Unexpected number of operands for MRMSrcMemFrm with VEX_4V"); else @@ -720,7 +723,7 @@ void RecognizableInstr::emitInstructionSpecifier() { if (HasEVEX_K) HANDLE_OPERAND(writemaskRegister) - if (HasVEX_4VPrefix) + if (HasVEX_4V) // FIXME: In AVX, the register below becomes the one encoded // in ModRMVEX and the one above the one in the VEX.VVVV field HANDLE_OPERAND(vvvvRegister) @@ -730,7 +733,7 @@ void RecognizableInstr::emitInstructionSpecifier() { HANDLE_OPERAND(memory) - if (HasVEX_4VOp3Prefix) + if (HasVEX_4VOp3) HANDLE_OPERAND(vvvvRegister) if (!HasMemOp4Prefix) @@ -750,11 +753,11 @@ void RecognizableInstr::emitInstructionSpecifier() { // Operand 2 (optional) is an immediate or relocation. // Operand 3 (optional) is an immediate. unsigned kOp = (HasEVEX_K) ? 1:0; - unsigned Op4v = (HasVEX_4VPrefix) ? 1:0; + unsigned Op4v = (HasVEX_4V) ? 1:0; if (numPhysicalOperands > 3 + kOp + Op4v) llvm_unreachable("Unexpected number of operands for MRMnr"); } - if (HasVEX_4VPrefix) + if (HasVEX_4V) HANDLE_OPERAND(vvvvRegister) if (HasEVEX_K) @@ -775,12 +778,12 @@ void RecognizableInstr::emitInstructionSpecifier() { // Operand 1 is a memory operand (possibly SIB-extended) // Operand 2 (optional) is an immediate or relocation. unsigned kOp = (HasEVEX_K) ? 1:0; - unsigned Op4v = (HasVEX_4VPrefix) ? 1:0; + unsigned Op4v = (HasVEX_4V) ? 1:0; if (numPhysicalOperands < 1 + kOp + Op4v || numPhysicalOperands > 2 + kOp + Op4v) llvm_unreachable("Unexpected number of operands for MRMnm"); } - if (HasVEX_4VPrefix) + if (HasVEX_4V) HANDLE_OPERAND(vvvvRegister) if (HasEVEX_K) HANDLE_OPERAND(writemaskRegister) diff --git a/utils/TableGen/X86RecognizableInstr.h b/utils/TableGen/X86RecognizableInstr.h index edaa03515b..46e3078363 100644 --- a/utils/TableGen/X86RecognizableInstr.h +++ b/utils/TableGen/X86RecognizableInstr.h @@ -46,6 +46,8 @@ private: uint8_t Opcode; /// The form field from the record uint8_t Form; + // The encoding field from the record + uint8_t Encoding; /// The hasOpSizePrefix field from the record bool HasOpSizePrefix; /// The hasOpSize16Prefix field from the record @@ -54,12 +56,10 @@ private: bool HasAdSizePrefix; /// The hasREX_WPrefix field from the record bool HasREX_WPrefix; - /// The hasVEXPrefix field from the record - bool HasVEXPrefix; - /// The hasVEX_4VPrefix field from the record - bool HasVEX_4VPrefix; - /// The hasVEX_4VOp3Prefix field from the record - bool HasVEX_4VOp3Prefix; + /// The hasVEX_4V field from the record + bool HasVEX_4V; + /// The hasVEX_4VOp3 field from the record + bool HasVEX_4VOp3; /// The hasVEX_WPrefix field from the record bool HasVEX_WPrefix; /// Inferred from the operands; indicates whether the L bit in the VEX prefix is set @@ -68,8 +68,6 @@ private: bool HasMemOp4Prefix; /// The ignoreVEX_L field from the record bool IgnoresVEX_L; - /// The hasEVEXPrefix field from the record - bool HasEVEXPrefix; /// The hasEVEX_L2Prefix field from the record bool HasEVEX_L2Prefix; /// The hasEVEX_K field from the record -- cgit v1.2.3
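For readers skimming the flattened diff above, the heart of the change is that three single-bit TSFlags entries (VEX, EVEX and XOP, set via the hasVEXPrefix/hasEVEXPrefix/hasXOP_Prefix TableGen bits) are merged into one 2-bit encoding field occupying TSFlags bits 29-28. What follows is a minimal, self-contained C++ sketch of how such a field is packed and queried; the shift, mask and enumerator values mirror the patch, but getEncoding() and main() are illustrative helpers rather than LLVM API.

// Minimal standalone sketch (not LLVM code) of the 2-bit encoding field this
// patch introduces.  The shift, mask and enumerator values mirror
// X86BaseInfo.h / X86InstrFormats.td after the change (TSFlags bits 29-28);
// getEncoding() and main() are illustrative helpers only.
#include <cassert>
#include <cstdint>

namespace X86II {
  enum : uint64_t {
    EncodingShift = 28,                       // SSEDomainShift + 2 in the patch
    EncodingMask  = 0x3ULL << EncodingShift,
    VEX  = 1,                                 // 0xC4/0xC5-prefixed
    XOP  = 2,                                 // 0x8F-prefixed
    EVEX = 3                                  // AVX-512 EVEX form
  };
}

// Read the field the same way the updated emitters do:
//   (TSFlags & X86II::EncodingMask) >> X86II::EncodingShift
static unsigned getEncoding(uint64_t TSFlags) {
  return (TSFlags & X86II::EncodingMask) >> X86II::EncodingShift;
}

int main() {
  uint64_t TSFlags = uint64_t(X86II::EVEX) << X86II::EncodingShift;
  assert(getEncoding(TSFlags) == X86II::EVEX);
  assert(getEncoding(0) == 0);                // 0 == legacy (non-VEX) encoding
  return 0;
}

Folding the three booleans into a single field frees TSFlags bits (the flags above VEXShift are renumbered accordingly in the patch) and makes contradictory combinations such as VEX and EVEX being set at the same time unrepresentable.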
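The code emitters then branch on that field instead of on separate HasVEXPrefix/XOP/HasEVEX booleans when choosing the escape bytes: 0xC5 for the 2-byte VEX form, 0xC4 for 3-byte VEX, 0x8F for XOP, with EVEX handled on the other side of the branch. Below is a condensed sketch of that selection logic under those assumptions; it is not the actual emitter, and emitByte() plus the bit-field parameters merely stand in for the state threaded through EmitVEXOpcodePrefix().

// Condensed, illustrative version of the prefix-byte selection after this
// patch: the Encoding value, rather than separate HasVEXPrefix/HasXOPPrefix
// bits, chooses between the 2-byte VEX (0xC5), 3-byte VEX (0xC4) and XOP
// (0x8F) forms.  emitByte() and the bit-field parameters are placeholders.
#include <cstdint>
#include <cstdio>

enum Encoding { Normal = 0, VEX = 1, XOP = 2, EVEX = 3 };

static void emitByte(uint8_t B) { std::printf("%02X ", B); }

static void emitVexOrXopPrefix(Encoding Enc, uint8_t VEX_R, uint8_t VEX_X,
                               uint8_t VEX_B, uint8_t VEX_W, uint8_t VEX_5M,
                               uint8_t VEX_4V, uint8_t VEX_L, uint8_t VEX_PP) {
  uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);

  // The 2-byte form is only possible for plain VEX, when the (inverted) X and
  // B bits are still 1, W is clear, and the opcode map is 0F (m-mmmmm == 1).
  if (Enc == VEX && VEX_B && VEX_X && !VEX_W && VEX_5M == 1) {
    emitByte(0xC5);
    emitByte(LastByte | (VEX_R << 7));
    return;
  }

  // 3-byte VEX and XOP share the same layout: an RXB + map byte, then
  // W/vvvv/L/pp; only the escape byte differs (0xC4 vs 0x8F).
  emitByte(Enc == XOP ? 0x8F : 0xC4);
  emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M);
  emitByte(LastByte | (VEX_W << 7));
}

int main() {
  // Example: a VEX-encoded instruction in the 0F map that needs no extended
  // registers and no W bit gets the short 0xC5 form.
  emitVexOrXopPrefix(VEX, /*R*/1, /*X*/1, /*B*/1, /*W*/0, /*5M*/1,
                     /*4V*/0xF, /*L*/0, /*PP*/1);
  std::printf("\n");
  return 0;
}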