diff options
author | Elena Demikhovsky <elena.demikhovsky@intel.com> | 2014-01-01 15:12:34 +0000 |
---|---|---|
committer | Elena Demikhovsky <elena.demikhovsky@intel.com> | 2014-01-01 15:12:34 +0000 |
commit | 3062a311ac2d1bf053e15cba621e168572c83a07 (patch) | |
tree | 2f5f667852e1418991b5bae856584a9fad38ac00 /lib | |
parent | 979b2cd2bc97d4d6745f4959feb7e9706a9fb9f6 (diff) | |
download | llvm-3062a311ac2d1bf053e15cba621e168572c83a07.tar.gz llvm-3062a311ac2d1bf053e15cba621e168572c83a07.tar.bz2 llvm-3062a311ac2d1bf053e15cba621e168572c83a07.tar.xz |
AVX-512: Added intrinsics for vcvt, vcvtt, vrndscale, vcmp
Printing rounding control.
Encoding for EVEX_RC (rounding control).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@198277 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r-- | lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp | 12 | ||||
-rw-r--r-- | lib/Target/X86/InstPrinter/X86ATTInstPrinter.h | 1 | ||||
-rw-r--r-- | lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp | 13 | ||||
-rw-r--r-- | lib/Target/X86/InstPrinter/X86IntelInstPrinter.h | 1 | ||||
-rw-r--r-- | lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 37 | ||||
-rw-r--r-- | lib/Target/X86/X86InstrAVX512.td | 258 | ||||
-rw-r--r-- | lib/Target/X86/X86InstrFragmentsSIMD.td | 10 | ||||
-rw-r--r-- | lib/Target/X86/X86InstrInfo.td | 4 |
8 files changed, 273 insertions, 63 deletions
diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp index 44393115cc..e214e9b587 100644 --- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp +++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp @@ -123,6 +123,18 @@ void X86ATTInstPrinter::printAVXCC(const MCInst *MI, unsigned Op, } } +void X86ATTInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op, + raw_ostream &O) { + int64_t Imm = MI->getOperand(Op).getImm() & 0x1f; + switch (Imm) { + case 0: O << "{rn-sae}"; break; + case 1: O << "{rd-sae}"; break; + case 2: O << "{ru-sae}"; break; + case 3: O << "{rz-sae}"; break; + + default: llvm_unreachable("Invalid AVX-512 rounding control argument!"); + } +} /// printPCRelImm - This is used to print an immediate value that ends up /// being encoded as a pc-relative value (e.g. for jumps and calls). These /// print slightly differently than normal immediates. For example, a $ is not diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h index a8fab72bc0..4dc4fe6eab 100644 --- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h +++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h @@ -43,6 +43,7 @@ public: void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS); void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &OS); void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &OS); + void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS); void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp index e7e7b151c3..320ac5addb 100644 --- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp +++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp @@ -113,6 +113,19 @@ void X86IntelInstPrinter::printAVXCC(const MCInst *MI, unsigned Op, } } +void 
X86IntelInstPrinter::printRoundingControl(const MCInst *MI, unsigned Op, + raw_ostream &O) { + int64_t Imm = MI->getOperand(Op).getImm() & 0x1f; + switch (Imm) { + case 0: O << "{rn-sae}"; break; + case 1: O << "{rd-sae}"; break; + case 2: O << "{ru-sae}"; break; + case 3: O << "{rz-sae}"; break; + + default: llvm_unreachable("Invalid AVX-512 rounding control argument!"); + } +} + /// printPCRelImm - This is used to print an immediate value that ends up /// being encoded as a pc-relative value. void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h index 590bf68124..90d4e39784 100644 --- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h +++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h @@ -40,6 +40,7 @@ public: void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O); void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printMemOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printRoundingControl(const MCInst *MI, unsigned Op, raw_ostream &OS); void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { O << "opaque ptr "; diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp index a152f7e963..54a90f13a8 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -535,6 +535,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V; bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3; bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4; + bool HasEVEX_RC = false; // VEX_R: opcode externsion equivalent to REX.R in // 1's complement (inverted) form @@ -610,6 +611,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // 
EVEX_b unsigned char EVEX_b = 0; + // EVEX_rc + unsigned char EVEX_rc = 0; + // EVEX_aaa unsigned char EVEX_aaa = 0; @@ -676,6 +680,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, // Classify VEX_B, VEX_4V, VEX_R, VEX_X unsigned NumOps = Desc.getNumOperands(); + unsigned RcOperand = NumOps-1; unsigned CurOp = 0; if (NumOps > 1 && Desc.getOperandConstraint(1, MCOI::TIED_TO) == 0) ++CurOp; @@ -834,7 +839,12 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, VEX_X = 0x0; CurOp++; if (HasVEX_4VOp3) - VEX_4V = getVEXRegisterEncoding(MI, CurOp); + VEX_4V = getVEXRegisterEncoding(MI, CurOp++); + if (EVEX_b) { + assert(RcOperand >= CurOp); + EVEX_rc = MI.getOperand(RcOperand).getImm() & 0x3; + HasEVEX_RC = true; + } break; case X86II::MRMDestReg: // MRMDestReg instructions forms: @@ -934,12 +944,19 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, (VEX_4V << 3) | (EVEX_U << 2) | VEX_PP, CurByte, OS); - EmitByte((EVEX_z << 7) | - (EVEX_L2 << 6) | - (VEX_L << 5) | - (EVEX_b << 4) | - (EVEX_V2 << 3) | - EVEX_aaa, CurByte, OS); + if (HasEVEX_RC) + EmitByte((EVEX_z << 7) | + (EVEX_rc << 5) | + (EVEX_b << 4) | + (EVEX_V2 << 3) | + EVEX_aaa, CurByte, OS); + else + EmitByte((EVEX_z << 7) | + (EVEX_L2 << 6) | + (VEX_L << 5) | + (EVEX_b << 4) | + (EVEX_V2 << 3) | + EVEX_aaa, CurByte, OS); } } @@ -1206,7 +1223,8 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, // It uses the EVEX.aaa field? bool HasEVEX = (TSFlags >> X86II::VEXShift) & X86II::EVEX; bool HasEVEX_K = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_K); - + bool HasEVEX_B = HasEVEX && ((TSFlags >> X86II::VEXShift) & X86II::EVEX_B); + // Determine where the memory operand starts, if present. int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode); if (MemoryOperand != -1) MemoryOperand += CurOp; @@ -1302,6 +1320,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, CurOp = HasMemOp4 ? 
SrcRegNum : SrcRegNum + 1; if (HasVEX_4VOp3) ++CurOp; + // do not count the rounding control operand + if (HasEVEX_B) + NumOps--; break; case X86II::MRMSrcMem: { diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td index 5e5b6fbbc5..a12b298f5e 100644 --- a/lib/Target/X86/X86InstrAVX512.td +++ b/lib/Target/X86/X86InstrAVX512.td @@ -802,36 +802,42 @@ defm VPCMPUQZ : avx512_icmp_cc<0x1E, VK8, VR512, i512mem, memopv8i64, // avx512_cmp_packed - sse 1 & 2 compare packed instructions multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC, - X86MemOperand x86memop, Operand CC, - SDNode OpNode, ValueType vt, string asm, - string asm_alt, Domain d> { + X86MemOperand x86memop, ValueType vt, + string suffix, Domain d> { def rri : AVX512PIi8<0xC2, MRMSrcReg, - (outs KRC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm, - [(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>; + (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc), + !strconcat("vcmp${cc}", suffix, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>; + def rrib: AVX512PIi8<0xC2, MRMSrcReg, + (outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc, i32imm:$sae), + !strconcat("vcmp${cc}", suffix, + "\t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"), + [], d>, EVEX_B; def rmi : AVX512PIi8<0xC2, MRMSrcMem, - (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm, + (outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc), + !strconcat("vcmp", suffix, + "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [(set KRC:$dst, - (OpNode (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>; + (X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>; // Accept explicit immediate argument form instead of comparison code. 
let neverHasSideEffects = 1 in { def rri_alt : AVX512PIi8<0xC2, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc), - asm_alt, [], d>; + !strconcat("vcmp", suffix, + "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>; def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc), - asm_alt, [], d>; + !strconcat("vcmp", suffix, + "\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>; } } -defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, AVXCC, X86cmpm, v16f32, - "vcmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}", - "vcmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}", - SSEPackedSingle>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VF>; -defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, AVXCC, X86cmpm, v8f64, - "vcmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}", - "vcmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}", - SSEPackedDouble>, OpSize, EVEX_4V, VEX_W, EVEX_V512, +defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32, + "ps", SSEPackedSingle>, EVEX_4V, EVEX_V512, EVEX_CD8<32, CD8VF>; +defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64, + "pd", SSEPackedDouble>, OpSize, EVEX_4V, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>; def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)), @@ -849,7 +855,31 @@ def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)), (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)), (v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)), imm:$cc), VK8)>; - + +def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1), + (v16f32 VR512:$src2), imm:$cc, (i16 -1), + FROUND_NO_EXC)), + (COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2, + (I8Imm imm:$cc), (i32 0)), GR16)>; + +def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1), + (v8f64 VR512:$src2), imm:$cc, (i8 -1), + FROUND_NO_EXC)), + (COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2, + (I8Imm 
imm:$cc), (i32 0)), GR8)>; + +def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1), + (v16f32 VR512:$src2), imm:$cc, (i16 -1), + FROUND_CURRENT)), + (COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2, + (I8Imm imm:$cc)), GR16)>; + +def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1), + (v8f64 VR512:$src2), imm:$cc, (i8 -1), + FROUND_CURRENT)), + (COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2, + (I8Imm imm:$cc)), GR8)>; + // Mask register copy, including // - copy between mask registers // - load/store mask registers @@ -2704,6 +2734,9 @@ let neverHasSideEffects = 1 in { !strconcat(asm,"\t{$src, $dst|$dst, $src}"), [(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX; + def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc), + !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"), + [], d>, EVEX, EVEX_B; let mayLoad = 1 in def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), !strconcat(asm,"\t{$src, $dst|$dst, $src}"), @@ -2712,6 +2745,24 @@ let neverHasSideEffects = 1 in { } // neverHasSideEffects = 1 } +multiclass avx512_vcvtt_fp<bits<8> opc, string asm, RegisterClass SrcRC, + RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag, + X86MemOperand x86memop, ValueType OpVT, ValueType InVT, + Domain d> { +let neverHasSideEffects = 1 in { + def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [(set DstRC:$dst, + (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX; + let mayLoad = 1 in + def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [(set DstRC:$dst, + (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX; +} // neverHasSideEffects = 1 +} + + defm VCVTPD2PSZ : avx512_vcvt_fp<0x5A, "vcvtpd2ps", VR512, VR256X, fround, memopv8f64, f512mem, v8f32, v8f64, SSEPackedSingle>, EVEX_V512, VEX_W, OpSize, @@ -2736,26 +2787,36 @@ 
defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp, SSEPackedDouble>, EVEX_V512, XS, EVEX_CD8<32, CD8VH>; -defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint, +defm VCVTTPS2DQZ : avx512_vcvtt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint, memopv16f32, f512mem, v16i32, v16f32, SSEPackedSingle>, EVEX_V512, XS, EVEX_CD8<32, CD8VF>; -defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint, +defm VCVTTPD2DQZ : avx512_vcvtt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint, memopv8f64, f512mem, v8i32, v8f64, SSEPackedDouble>, EVEX_V512, OpSize, VEX_W, EVEX_CD8<64, CD8VF>; -defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint, +defm VCVTTPS2UDQZ : avx512_vcvtt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint, memopv16f32, f512mem, v16i32, v16f32, SSEPackedSingle>, EVEX_V512, EVEX_CD8<32, CD8VF>; -defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint, +// cvttps2udq (src, 0, mask-all-ones, sae-current) +def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src), + (v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)), + (VCVTTPS2UDQZrr VR512:$src)>; + +defm VCVTTPD2UDQZ : avx512_vcvtt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint, memopv8f64, f512mem, v8i32, v8f64, SSEPackedDouble>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>; +// cvttpd2udq (src, 0, mask-all-ones, sae-current) +def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src), + (v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)), + (VCVTTPD2UDQZrr VR512:$src)>; + defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp, memopv4i64, f256mem, v8f64, v8i32, SSEPackedDouble>, EVEX_V512, XS, @@ -2771,22 +2832,57 @@ def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))), (v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>; -def : Pat<(int_x86_avx512_cvtdq2_ps_512 VR512:$src), - (VCVTDQ2PSZrr VR512:$src)>; -def : 
Pat<(int_x86_avx512_cvtdq2_ps_512 (bitconvert (memopv8i64 addr:$src))), - (VCVTDQ2PSZrm addr:$src)>; +def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src), + (v16f32 immAllZerosV), (i16 -1), imm:$rc)), + (VCVTDQ2PSZrrb VR512:$src, imm:$rc)>; -def VCVTPS2DQZrr : AVX512BI<0x5B, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src), - "vcvtps2dq\t{$src, $dst|$dst, $src}", - [(set VR512:$dst, - (int_x86_avx512_cvt_ps2dq_512 VR512:$src))], - IIC_SSE_CVT_PS_RR>, EVEX, EVEX_V512; -def VCVTPS2DQZrm : AVX512BI<0x5B, MRMSrcMem, (outs VR512:$dst), (ins f512mem:$src), - "vcvtps2dq\t{$src, $dst|$dst, $src}", - [(set VR512:$dst, - (int_x86_avx512_cvt_ps2dq_512 (memopv16f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, EVEX, EVEX_V512, EVEX_CD8<32, CD8VF>; +multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC, + RegisterClass DstRC, PatFrag mem_frag, + X86MemOperand x86memop, Domain d> { +let neverHasSideEffects = 1 in { + def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [], d>, EVEX; + def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc), + !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"), + [], d>, EVEX, EVEX_B; + let mayLoad = 1 in + def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), + !strconcat(asm,"\t{$src, $dst|$dst, $src}"), + [], d>, EVEX; +} // neverHasSideEffects = 1 +} + +defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512, + memopv16f32, f512mem, SSEPackedSingle>, OpSize, + EVEX_V512, EVEX_CD8<32, CD8VF>; +defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X, + memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W, + EVEX_V512, EVEX_CD8<64, CD8VF>; + +def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src), + (v16i32 immAllZerosV), (i16 -1), imm:$rc)), + (VCVTPS2DQZrrb VR512:$src, imm:$rc)>; + +def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src), + 
(v8i32 immAllZerosV), (i8 -1), imm:$rc)), + (VCVTPD2DQZrrb VR512:$src, imm:$rc)>; + +defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512, + memopv16f32, f512mem, SSEPackedSingle>, + EVEX_V512, EVEX_CD8<32, CD8VF>; +defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X, + memopv8f64, f512mem, SSEPackedDouble>, VEX_W, + EVEX_V512, EVEX_CD8<64, CD8VF>; + +def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src), + (v16i32 immAllZerosV), (i16 -1), imm:$rc)), + (VCVTPS2UDQZrrb VR512:$src, imm:$rc)>; + +def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src), + (v8i32 immAllZerosV), (i8 -1), imm:$rc)), + (VCVTPD2UDQZrrb VR512:$src, imm:$rc)>; let Predicates = [HasAVX512] in { def : Pat<(v8f32 (fround (loadv8f64 addr:$src))), @@ -3251,18 +3347,70 @@ let ExeDomain = GenericDomain in { } // ExeDomain = GenericDomain } -let Predicates = [HasAVX512] in { - defm VRNDSCALE : avx512_fp_binop_rm<0x0A, 0x0B, "vrndscale", - int_x86_avx512_rndscale_ss, - int_x86_avx512_rndscale_sd>, EVEX_4V; +multiclass avx512_rndscale<bits<8> opc, string OpcodeStr, + X86MemOperand x86memop, RegisterClass RC, + PatFrag mem_frag, Domain d> { +let ExeDomain = d in { + // Intrinsic operation, reg. 
+ // Vector intrinsic operation, reg + def r : AVX512AIi8<opc, MRMSrcReg, + (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2), + !strconcat(OpcodeStr, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>, EVEX; - defm VRNDSCALEZ : avx512_fp_unop_rm<0x08, 0x09, "vrndscale", f256mem, VR512, - memopv16f32, memopv8f64, - int_x86_avx512_rndscale_ps_512, - int_x86_avx512_rndscale_pd_512, CD8VF>, - EVEX, EVEX_V512; + // Vector intrinsic operation, mem + def m : AVX512AIi8<opc, MRMSrcMem, + (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2), + !strconcat(OpcodeStr, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>, EVEX; +} // ExeDomain } + +defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512, + memopv16f32, SSEPackedSingle>, EVEX_V512, + EVEX_CD8<32, CD8VF>; + +def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1), + imm:$src2, (bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), + FROUND_CURRENT)), + (VRNDSCALEPSZr VR512:$src1, imm:$src2)>; + + +defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512, + memopv8f64, SSEPackedDouble>, EVEX_V512, + VEX_W, EVEX_CD8<64, CD8VF>; + +def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1), + imm:$src2, (bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), + FROUND_CURRENT)), + (VRNDSCALEPDZr VR512:$src1, imm:$src2)>; + +multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr, + Operand x86memop, RegisterClass RC, Domain d> { +let ExeDomain = d in { + def r : AVX512AIi8<opc, MRMSrcReg, + (outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3), + !strconcat(OpcodeStr, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>, EVEX_4V; + + def m : AVX512AIi8<opc, MRMSrcMem, + (outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3), + !strconcat(OpcodeStr, + "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>, EVEX_4V; +} // ExeDomain +} + +defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X, + SSEPackedSingle>, EVEX_CD8<32, CD8VT1>; + 
+defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X, + SSEPackedDouble>, EVEX_CD8<64, CD8VT1>; + def : Pat<(ffloor FR32X:$src), (VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>; def : Pat<(f64 (ffloor FR64X:$src)), @@ -3285,26 +3433,26 @@ def : Pat<(f64 (ftrunc FR64X:$src)), (VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>; def : Pat<(v16f32 (ffloor VR512:$src)), - (VRNDSCALEZPSr VR512:$src, (i32 0x1))>; + (VRNDSCALEPSZr VR512:$src, (i32 0x1))>; def : Pat<(v16f32 (fnearbyint VR512:$src)), - (VRNDSCALEZPSr VR512:$src, (i32 0xC))>; + (VRNDSCALEPSZr VR512:$src, (i32 0xC))>; def : Pat<(v16f32 (fceil VR512:$src)), - (VRNDSCALEZPSr VR512:$src, (i32 0x2))>; + (VRNDSCALEPSZr VR512:$src, (i32 0x2))>; def : Pat<(v16f32 (frint VR512:$src)), - (VRNDSCALEZPSr VR512:$src, (i32 0x4))>; + (VRNDSCALEPSZr VR512:$src, (i32 0x4))>; def : Pat<(v16f32 (ftrunc VR512:$src)), - (VRNDSCALEZPSr VR512:$src, (i32 0x3))>; + (VRNDSCALEPSZr VR512:$src, (i32 0x3))>; def : Pat<(v8f64 (ffloor VR512:$src)), - (VRNDSCALEZPDr VR512:$src, (i32 0x1))>; + (VRNDSCALEPDZr VR512:$src, (i32 0x1))>; def : Pat<(v8f64 (fnearbyint VR512:$src)), - (VRNDSCALEZPDr VR512:$src, (i32 0xC))>; + (VRNDSCALEPDZr VR512:$src, (i32 0xC))>; def : Pat<(v8f64 (fceil VR512:$src)), - (VRNDSCALEZPDr VR512:$src, (i32 0x2))>; + (VRNDSCALEPDZr VR512:$src, (i32 0x2))>; def : Pat<(v8f64 (frint VR512:$src)), - (VRNDSCALEZPDr VR512:$src, (i32 0x4))>; + (VRNDSCALEPDZr VR512:$src, (i32 0x4))>; def : Pat<(v8f64 (ftrunc VR512:$src)), - (VRNDSCALEZPDr VR512:$src, (i32 0x3))>; + (VRNDSCALEPDZr VR512:$src, (i32 0x3))>; //------------------------------------------------- // Integer truncate and extend operations diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td index 2157de28f1..28e2cd1f48 100644 --- a/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -470,6 +470,8 @@ def bc_v4i64 : PatFrag<(ops node:$in), (v4i64 
(bitconvert node:$in))>; // 512-bit bitconvert pattern fragments def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>; def bc_v8i64 : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>; +def bc_v8f64 : PatFrag<(ops node:$in), (v8f64 (bitconvert node:$in))>; +def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>; def vzmovl_v2i64 : PatFrag<(ops node:$src), (bitconvert (v2i64 (X86vzmovl @@ -486,6 +488,14 @@ def fp32imm0 : PatLeaf<(f32 fpimm), [{ return N->isExactlyValue(+0.0); }]>; +def I8Imm : SDNodeXForm<imm, [{ + // Transformation function: get the low 8 bits. + return getI8Imm((uint8_t)N->getZExtValue()); +}]>; + +def FROUND_NO_EXC : ImmLeaf<i32, [{ return Imm == 8; }]>; +def FROUND_CURRENT : ImmLeaf<i32, [{ return Imm == 4; }]>; + // BYTE_imm - Transform bit immediates into byte immediates. def BYTE_imm : SDNodeXForm<imm, [{ // Transformation function: imm >> 3 diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index f006b919f8..2fea6e1773 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -510,6 +510,10 @@ def GR32orGR64 : RegisterOperand<GR32> { let ParserMatchClass = X86GR32orGR64AsmOperand; } +def AVX512RC : Operand<i32> { + let PrintMethod = "printRoundingControl"; + let OperandType = "OPERAND_IMMEDIATE"; +} // Sign-extended immediate classes. We don't need to define the full lattice // here because there is no instruction with an ambiguity between ImmSExti64i32 // and ImmSExti32i8. |