summary refs log tree commit diff
diff options
context:
space:
mode:
authorEvan Cheng <evan.cheng@apple.com>2008-04-25 19:11:04 +0000
committerEvan Cheng <evan.cheng@apple.com>2008-04-25 19:11:04 +0000
commit10e864276bd94886f20fa067d28d8ab2c9399bf5 (patch)
treec8ec3d96aa6dc87dcd10768db3c452b11a6546c7
parent23ce502cb7d9d06a1786d276a10be42e65759eca (diff)
downloadllvm-10e864276bd94886f20fa067d28d8ab2c9399bf5.tar.gz
llvm-10e864276bd94886f20fa067d28d8ab2c9399bf5.tar.bz2
llvm-10e864276bd94886f20fa067d28d8ab2c9399bf5.tar.xz
Special handling for MMX values being passed in either GPR64 or lower 64-bits of XMM registers.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@50289 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp30
-rw-r--r--lib/Target/X86/X86InstrMMX.td4
-rw-r--r--test/CodeGen/X86/mmx-arg-passing2.ll25
3 files changed, 57 insertions, 2 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 38949601f6..21178ef78d 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -41,6 +41,9 @@
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
+// Forward declarations.
+static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG);
+
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
@@ -1547,8 +1550,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
SDOperand StackPtr;
bool containsTailCallByValArg = false;
SmallVector<std::pair<unsigned, unsigned>, 8> TailCallByValClobberedVRegs;
- SmallVector<MVT::ValueType, 8> TailCallByValClobberedVRegTypes;
-
+ SmallVector<MVT::ValueType, 8> TailCallByValClobberedVRegTypes;
// Walk the register/memloc assignments, inserting copies/loads. For tail
// calls, remember all arguments for later special lowering.
@@ -1574,6 +1576,30 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
}
if (VA.isRegLoc()) {
+ if (Is64Bit) {
+ MVT::ValueType RegVT = VA.getLocVT();
+ if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64)
+ switch (VA.getLocReg()) {
+ default:
+ break;
+ case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
+ case X86::R8: {
+ // Special case: passing MMX values in GPR registers.
+ Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
+ break;
+ }
+ case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
+ case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
+ // Special case: passing MMX values in XMM registers.
+ Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
+ Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Arg);
+ Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
+ DAG.getNode(ISD::UNDEF, MVT::v2i64), Arg,
+ getMOVLMask(2, DAG));
+ break;
+ }
+ }
+ }
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
if (!IsTailCall || (IsTailCall && isByVal)) {
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index 6d9c02ba88..600b1e6e76 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -637,3 +637,7 @@ def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8 immAllOnesV_bc))),
(load addr:$src2))),
(MMX_PANDNrm VR64:$src1, addr:$src2)>;
+
+// Move MMX to lower 64-bit of XMM
+def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src)))),
+ (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
diff --git a/test/CodeGen/X86/mmx-arg-passing2.ll b/test/CodeGen/X86/mmx-arg-passing2.ll
new file mode 100644
index 0000000000..c30ca1b08b
--- /dev/null
+++ b/test/CodeGen/X86/mmx-arg-passing2.ll
@@ -0,0 +1,25 @@
+; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq2dq
+; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movd | count 1
+; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq | count 4
+
+@g_v8qi = external global <8 x i8>
+
+define void @t1() nounwind {
+ %tmp3 = load <8 x i8>* @g_v8qi, align 8
+ %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
+ ret void
+}
+
+define void @t2(<8 x i8> %v1, <8 x i8> %v2) nounwind {
+ %tmp3 = add <8 x i8> %v1, %v2
+ %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
+ ret void
+}
+
+define void @t3() nounwind {
+ call void @pass_v1di( <1 x i64> zeroinitializer )
+ ret void
+}
+
+declare i32 @pass_v8qi(...)
+declare void @pass_v1di(<1 x i64>)