author     Dan Gohman <gohman@apple.com>   2010-07-07 18:32:53 +0000
committer  Dan Gohman <gohman@apple.com>   2010-07-07 18:32:53 +0000
commit     f423a69839c4810b890f8a8b09fb8cfbd6bf0139 (patch)
tree       645da31df650ccfb33454f4058865ee746a87398 /lib/Target
parent     ced9ec9baceb8fd89fc657cfeeb1697f27d2e6ec (diff)
Add X86FastISel support for return statements. This entails refactoring
a bunch of stuff, to allow the target-independent calling convention logic to
be employed.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@107800 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/X86/X86FastISel.cpp          | 74
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp      |  5
-rw-r--r--  lib/Target/X86/X86ISelLowering.h        |  3
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp  |  5
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.h    |  3
5 files changed, 78 insertions(+), 12 deletions(-)
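
The commit message's "target-independent calling convention logic" refers to
the GetReturnInfo/CCState machinery that SelectionDAG already uses. The
following is a rough hand-written sketch of that flow, not code from the
commit; it assumes the APIs of this era (GetReturnInfo from
llvm/CodeGen/Analysis.h, CCState, and the RetCC_X86 table) plus the usual F,
TLI, and TM references for the Function, TargetLowering, and TargetMachine:

    // Flatten the function's IR return type into one ISD::OutputArg per
    // legal register-sized piece, tagging each with sext/zext/inreg flags.
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
                  Outs, TLI);

    // Run the generic calling-convention engine: each OutputArg is matched
    // against the target's return-value table and receives a CCValAssign
    // naming the register (or stack slot) it should live in.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(F.getCallingConv(), F.isVarArg(), TM, ValLocs,
                   F.getContext());
    CCInfo.AnalyzeReturn(Outs, RetCC_X86);
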
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index cdcbee6bc1..594ca4b112 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -23,6 +23,7 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
@@ -84,6 +85,8 @@ private:
bool X86SelectStore(const Instruction *I);
+ bool X86SelectRet(const Instruction *I);
+
bool X86SelectCmp(const Instruction *I);
bool X86SelectZExt(const Instruction *I);
@@ -660,6 +663,67 @@ bool X86FastISel::X86SelectStore(const Instruction *I) {
return X86FastEmitStore(VT, I->getOperand(0), AM);
}
+/// X86SelectRet - Select and emit code to implement ret instructions.
+bool X86FastISel::X86SelectRet(const Instruction *I) {
+ const ReturnInst *Ret = cast<ReturnInst>(I);
+ const Function &F = *I->getParent()->getParent();
+
+ if (!FuncInfo.CanLowerReturn)
+ return false;
+
+ CallingConv::ID CC = F.getCallingConv();
+ if (CC != CallingConv::C &&
+ CC != CallingConv::Fast &&
+ CC != CallingConv::X86_FastCall)
+ return false;
+
+ if (Subtarget->isTargetWin64())
+ return false;
+
+ // fastcc with -tailcallopt is intended to provide a guaranteed
+ // tail call optimization. Fastisel doesn't know how to do that.
+ if (CC == CallingConv::Fast && GuaranteedTailCallOpt)
+ return false;
+
+ // Let SDISel handle vararg functions.
+ if (F.isVarArg())
+ return false;
+
+ SmallVector<ISD::OutputArg, 4> Outs;
+ GetReturnInfo(F.getReturnType(), F.getAttributes().getRetAttributes(),
+ Outs, TLI);
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ValLocs;
+ CCState CCInfo(CC, F.isVarArg(), TM, ValLocs, I->getContext());
+ CCInfo.AnalyzeReturn(Outs, RetCC_X86);
+
+ // Copy the return value into registers.
+ for (unsigned i = 0, e = ValLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ValLocs[i];
+
+ // Don't bother handling odd stuff for now.
+ if (VA.getLocInfo() != CCValAssign::Full)
+ return false;
+ if (!VA.isRegLoc())
+ return false;
+
+ const Value *RV = Ret->getOperand(VA.getValNo());
+ unsigned Reg = getRegForValue(RV);
+
+ TargetRegisterClass* RC = TLI.getRegClassFor(VA.getValVT());
+ bool Emitted = TII.copyRegToReg(*FuncInfo.MBB, FuncInfo.InsertPt,
+ VA.getLocReg(), Reg, RC, RC, DL);
+ assert(Emitted && "Failed to emit a copy instruction!"); Emitted=Emitted;
+
+ MRI.addLiveOut(VA.getLocReg());
+ }
+
+ // Now emit the RET.
+ BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
+ return true;
+}
+
/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
@@ -1194,14 +1258,18 @@ bool X86FastISel::X86SelectExtractValue(const Instruction *I) {
switch (CI->getIntrinsicID()) {
default: break;
case Intrinsic::sadd_with_overflow:
- case Intrinsic::uadd_with_overflow:
+ case Intrinsic::uadd_with_overflow: {
// Cheat a little. We know that the registers for "add" and "seto" are
// allocated sequentially. However, we only keep track of the register
// for "add" in the value map. Use extractvalue's index to get the
// correct register for "seto".
- UpdateValueMap(I, lookUpRegForValue(Agg) + *EI->idx_begin());
+ unsigned OpReg = getRegForValue(Agg);
+ if (OpReg == 0)
+ return false;
+ UpdateValueMap(I, OpReg + *EI->idx_begin());
return true;
}
+ }
}
return false;
@@ -1664,6 +1732,8 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) {
return X86SelectLoad(I);
case Instruction::Store:
return X86SelectStore(I);
+ case Instruction::Ret:
+ return X86SelectRet(I);
case Instruction::ICmp:
case Instruction::FCmp:
return X86SelectCmp(I);
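
For context on how the new Instruction::Ret case gets reached: the generic
FastISel driver in lib/CodeGen/SelectionDAG/FastISel.cpp tries the shared,
opcode-driven selection first and only then hands the instruction to the
target hook. A simplified sketch of that driver as assumed here (PHI and
debug-location bookkeeping omitted; not part of this patch):

    bool FastISel::SelectInstruction(const Instruction *I) {
      // Try the target-independent, opcode-driven path first.
      if (SelectOperator(I, I->getOpcode()))
        return true;
      // Otherwise defer to the target hook; X86FastISel now claims
      // Instruction::Ret there and emits the copies plus the RET itself.
      return TargetSelectInstruction(I);
    }
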
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 5092d75402..e68cc25682 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1218,13 +1218,12 @@ bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
bool
X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, Context);
- return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_X86);
+ return CCInfo.CheckReturn(Outs, RetCC_X86);
}
SDValue
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 39bbdac684..2d28e5cc2e 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -740,8 +740,7 @@ namespace llvm {
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const;
void ReplaceATOMIC_BINARY_64(SDNode *N, SmallVectorImpl<SDValue> &Results,
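
The narrowed signature pairs naturally with GetReturnInfo: a caller builds the
ISD::OutputArg vector once and hands it to the hook. A minimal sketch, under
the same assumptions as the sketch after the diffstat (the Outs vector is
built exactly as shown there), of how FuncInfo.CanLowerReturn, which
X86SelectRet tests before doing anything else, can be computed:

    // Decide once per function whether returns can be lowered in place;
    // FastISel punts back to SelectionDAG whenever this is false.
    bool CanLowerReturn =
      TLI.CanLowerReturn(F.getCallingConv(), F.isVarArg(), Outs,
                         F.getContext());
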
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 52d040a98e..1938df699d 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1135,13 +1135,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
RVLocs, Context);
- return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
+ return CCInfo.CheckReturn(Outs, RetCC_XCore);
}
SDValue
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index 46643014a0..febc198f4f 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -193,8 +193,7 @@ namespace llvm {
virtual bool
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<EVT> &OutTys,
- const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
+ const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const;
};
}
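
With X86 and XCore both converted, every target's override of the hook now
follows the same shape. A hedged template for a hypothetical target Foo
(FooTargetLowering and RetCC_Foo are placeholder names, not real LLVM
identifiers):

    bool FooTargetLowering::
    CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const {
      // Dry-run the return calling convention. If every OutputArg finds a
      // home, the return can be lowered directly; otherwise the lowering
      // code must demote the return value (e.g. to an sret argument).
      SmallVector<CCValAssign, 16> RVLocs;
      CCState CCInfo(CallConv, isVarArg, getTargetMachine(), RVLocs, Context);
      return CCInfo.CheckReturn(Outs, RetCC_Foo);
    }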