author    Elena Demikhovsky <elena.demikhovsky@intel.com>  2013-07-24 11:02:47 +0000
committer Elena Demikhovsky <elena.demikhovsky@intel.com>  2013-07-24 11:02:47 +0000
commit    e3809eed34f000581a464689596eefde2a6d1f24 (patch)
tree      af76cefea15d1e003a6430ae5c335d36f748de68 /lib/Target/X86/X86CallingConv.td
parent    9b3b2862478e5baec967d479a2d00731758e1bb8 (diff)
I'm starting to commit the KNL backend; I'll push the patches one by one. This patch adds support for the extended register set: XMM16-31, YMM16-31, and ZMM0-31.
The full ISA is documented here: http://software.intel.com/en-us/intel-isa-extensions

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@187030 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/X86/X86CallingConv.td')
-rw-r--r--  lib/Target/X86/X86CallingConv.td  |  40
1 file changed, 37 insertions(+), 3 deletions(-)
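For reference, the 512-bit value types wired up throughout this patch (v16i32, v8i64, v16f32, v8f64) are the LLVM counterparts of the AVX-512 vector types from <immintrin.h>. A minimal sketch, assuming a compiler that accepts -mavx512f:

    #include <immintrin.h>

    // Each AVX-512 type is 64 bytes wide and occupies one ZMM register:
    //   __m512 <-> v16f32, __m512d <-> v8f64, __m512i <-> v16i32 / v8i64
    static_assert(sizeof(__m512)  == 64, "v16f32 is 512 bits");
    static_assert(sizeof(__m512d) == 64, "v8f64 is 512 bits");
    static_assert(sizeof(__m512i) == 64, "v16i32/v8i64 is 512 bits");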
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 40c5d91b60..38e25910b2 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -49,6 +49,12 @@ def RetCC_X86Common : CallingConv<[
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
+ // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
+ // can only be used by ABI non-compliant code. This vector type is only
+ // supported while using the AVX-512 target feature.
+ CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
+
// MMX vector types are always returned in MM0. If the target doesn't have
// MM0, it doesn't support these vector types.
CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,
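As a hedged illustration of the return rule added above (the function name is ours, not from the patch): compiled for x86-64 with AVX-512 enabled, a single 512-bit return value is assigned to ZMM0 by RetCC_X86Common.

    #include <immintrin.h>

    // One v16f32 return value: RetCC_X86Common assigns it to ZMM0.
    __m512 vadd(__m512 a, __m512 b) {
      return _mm512_add_ps(a, b);
    }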
@@ -99,6 +105,10 @@ def RetCC_Intel_OCL_BI : CallingConv<[
CCIfType<[v8f32, v4f64, v8i32, v4i64],
CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,
+ // 512-bit FP vectors
+ CCIfType<[v16f32, v8f64, v16i32, v8i64],
+ CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,
+
// i32, i64 in the standard way
CCDelegateTo<RetCC_X86Common>
]>;
@@ -213,10 +223,15 @@ def CC_X86_64_C : CallingConv<[
// fixed arguments to vararg functions are supposed to be passed in
// registers. Actually modeling that would be a lot of work, though.
CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCIfSubtarget<"hasAVX()",
+ CCIfSubtarget<"hasFp256()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
YMM4, YMM5, YMM6, YMM7]>>>>,
+ // The first 8 512-bit vector arguments are passed in ZMM registers.
+ CCIfNotVarArg<CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCIfSubtarget<"hasAVX512()",
+ CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,
+
// Integer/FP values get stored in stack slots that are 8 bytes in size and
// 8-byte aligned if there are no more registers to hold them.
CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,
@@ -230,7 +245,11 @@ def CC_X86_64_C : CallingConv<[
// 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCAssignToStack<32, 32>>
+ CCAssignToStack<32, 32>>,
+
+ // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
+ CCIfType<[v16i32, v8i64, v16f32, v8f64],
+ CCAssignToStack<64, 64>>
]>;
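Taken together, these two hunks mean that under CC_X86_64_C the first eight 512-bit arguments travel in ZMM0-ZMM7, and any further ones land in 64-byte stack slots aligned to 64 bytes. A sketch under those assumptions (names are illustrative):

    #include <immintrin.h>

    // v0..v7 get ZMM0..ZMM7; v8 overflows to a 64-byte-aligned stack slot.
    __m512 sum9(__m512 v0, __m512 v1, __m512 v2, __m512 v3,
                __m512 v4, __m512 v5, __m512 v6, __m512 v7,
                __m512 v8) {
      __m512 s = _mm512_add_ps(v0, v1);
      s = _mm512_add_ps(s, v2);
      s = _mm512_add_ps(s, v3);
      s = _mm512_add_ps(s, v4);
      s = _mm512_add_ps(s, v5);
      s = _mm512_add_ps(s, v6);
      s = _mm512_add_ps(s, v7);
      return _mm512_add_ps(s, v8);
    }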
// Calling convention used on Win64
@@ -251,6 +270,9 @@ def CC_X86_Win64_C : CallingConv<[
// 256 bit vectors are passed by pointer
CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,
+ // 512 bit vectors are passed by pointer
+ CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
+
// The first 4 MMX vector arguments are passed in GPRs.
CCIfType<[x86mmx], CCBitConvertToType<i64>>,
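On Win64 the same C-level signature lowers differently: per the hunk above, the caller materializes a 64-byte copy and passes its address as an i64, mirroring the existing 256-bit rule, so no ZMM argument registers are consumed at the call boundary. No source change is needed; a hedged example (the function name is ours):

    #include <immintrin.h>

    // Under CC_X86_Win64_C, 'v' arrives as a pointer to a 64-byte
    // temporary, passed in a GPR, rather than in a ZMM register.
    __m512d twice(__m512d v) {
      return _mm512_mul_pd(v, _mm512_set1_pd(2.0));
    }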
@@ -345,7 +367,7 @@ def CC_X86_32_Common : CallingConv<[
// The first 4 AVX 256-bit vector arguments are passed in YMM registers.
CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
- CCIfSubtarget<"hasAVX()",
+ CCIfSubtarget<"hasFp256()",
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,
// Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
@@ -469,6 +491,10 @@ def CC_Intel_OCL_BI : CallingConv<[
CCIfType<[v8f32, v4f64, v8i32, v4i64],
CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,
+ // The 512-bit vector arguments are passed in ZMM registers.
+ CCIfType<[v16f32, v8f64, v16i32, v8i64],
+ CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,
+
CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64_C>>,
CCDelegateTo<CC_X86_32_C>
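The Intel OpenCL built-in convention is reachable from source through Clang's intel_ocl_bicc attribute. A hedged sketch of a built-in whose 512-bit arguments would be assigned ZMM registers by the rule above (the declaration is illustrative, not from this patch):

    #include <immintrin.h>

    // Declaration only; CC_Intel_OCL_BI assigns a, b, c to ZMM0-ZMM2.
    __attribute__((intel_ocl_bicc))
    __m512 ocl_builtin_fma(__m512 a, __m512 b, __m512 c);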
@@ -535,6 +561,10 @@ def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
R13, R14, R15,
(sequence "YMM%u", 6, 15))>;
+def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
+ R12, R13, R14, R15,
+ (sequence "ZMM%u", 6, 21),
+ K4, K5, K6, K7)>;
//Standard C + XMM 8-15
def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
(sequence "XMM%u", 8, 15))>;
@@ -542,3 +572,7 @@ def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64,
//Standard C + YMM 8-15
def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64,
(sequence "YMM%u", 8, 15))>;
+
+def CSR_64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add CSR_64,
+ (sequence "ZMM%u", 16, 31),
+ K4, K5, K6, K7)>;
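Here (sequence "ZMM%u", 16, 31) expands to ZMM16 through ZMM31 (and the Win64 variant's (sequence "ZMM%u", 6, 21) to ZMM6 through ZMM21), so those registers plus the K4-K7 mask registers are preserved across calls under this convention. For each CalleeSavedRegs def, TableGen emits a save list and a call-preserved mask into X86GenRegisterInfo.inc, which X86RegisterInfo::getCalleeSavedRegs and getCallPreservedMask can return for CallingConv::Intel_OCL_BI functions on an AVX-512 subtarget. A hedged sketch of the generated symbols (names follow TableGen's CSR_<name>_SaveList / CSR_<name>_RegMask scheme; exact types are version-dependent):

    #include <cstdint>

    // Emitted by TableGen from the defs above (sketch, not verbatim):
    // the save list enumerates CSR_64's GPRs plus ZMM16-ZMM31 and K4-K7.
    extern const uint16_t CSR_64_Intel_OCL_BI_AVX512_SaveList[];
    extern const uint32_t CSR_64_Intel_OCL_BI_AVX512_RegMask[];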