author     Justin Holewinski <jholewinski@nvidia.com>   2013-03-30 14:29:21 +0000
committer  Justin Holewinski <jholewinski@nvidia.com>   2013-03-30 14:29:21 +0000
commit     3639ce2575660a0e6938d2e84e8bd9a738fd7051 (patch)
tree       05b830d35c0d1865a2b18d7ff353cc4d6d572869 /lib/Target/NVPTX/NVPTXISelLowering.cpp
parent     a9f83517fccbf5f0daf82afdddff81a0e0aea389 (diff)
[NVPTX] Run clang-format on all NVPTX sources.
Hopefully this resolves any outstanding style issues and gives us an automated way of ensuring we conform to the style guidelines.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@178415 91177308-0d34-0410-b5e6-96231b3b80d8
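The exact invocation is not recorded in the commit message, but a plausible reproduction of this kind of sweep (assuming a checkout of LLVM trunk at this revision with clang-format built from the same tree) would be:

    # Hypothetical command for the formatting sweep; the LLVM style is
    # clang-format's default, so -style=LLVM is spelled out only for clarity.
    clang-format -i -style=LLVM lib/Target/NVPTX/*.cpp lib/Target/NVPTX/*.h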
Diffstat (limited to 'lib/Target/NVPTX/NVPTXISelLowering.cpp')
-rw-r--r--   lib/Target/NVPTX/NVPTXISelLowering.cpp   842
1 file changed, 423 insertions(+), 419 deletions(-)
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 987d34b2f3..6e01a5a820 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-
#include "NVPTXISelLowering.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
@@ -44,14 +43,14 @@ using namespace llvm;
static unsigned int uniqueCallSite = 0;
-static cl::opt<bool>
-sched4reg("nvptx-sched4reg",
- cl::desc("NVPTX Specific: schedule for register pressue"),
- cl::init(false));
+static cl::opt<bool> sched4reg(
+ "nvptx-sched4reg",
+ cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
static bool IsPTXVectorType(MVT VT) {
switch (VT.SimpleTy) {
- default: return false;
+ default:
+ return false;
case MVT::v2i8:
case MVT::v4i8:
case MVT::v2i16:
@@ -62,22 +61,21 @@ static bool IsPTXVectorType(MVT VT) {
case MVT::v2f32:
case MVT::v4f32:
case MVT::v2f64:
- return true;
+ return true;
}
}
// NVPTXTargetLowering Constructor.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
-: TargetLowering(TM, new NVPTXTargetObjectFile()),
- nvTM(&TM),
- nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+ : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
+ nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
// always lower memset, memcpy, and memmove intrinsics to load/store
// instructions, rather
// then generating calls to memset, mempcy or memmove.
- MaxStoresPerMemset = (unsigned)0xFFFFFFFF;
- MaxStoresPerMemcpy = (unsigned)0xFFFFFFFF;
- MaxStoresPerMemmove = (unsigned)0xFFFFFFFF;
+ MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
setBooleanContents(ZeroOrNegativeOneBooleanContent);
@@ -100,52 +98,50 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
// Operations not directly supported by NVPTX.
- setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);
- setOperationAction(ISD::BR_CC, MVT::f64, Expand);
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);
- setOperationAction(ISD::BR_CC, MVT::i8, Expand);
- setOperationAction(ISD::BR_CC, MVT::i16, Expand);
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i8, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
if (nvptxSubtarget.hasROT64()) {
- setOperationAction(ISD::ROTL , MVT::i64, Legal);
- setOperationAction(ISD::ROTR , MVT::i64, Legal);
- }
- else {
- setOperationAction(ISD::ROTL , MVT::i64, Expand);
- setOperationAction(ISD::ROTR , MVT::i64, Expand);
+ setOperationAction(ISD::ROTL, MVT::i64, Legal);
+ setOperationAction(ISD::ROTR, MVT::i64, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
}
if (nvptxSubtarget.hasROT32()) {
- setOperationAction(ISD::ROTL , MVT::i32, Legal);
- setOperationAction(ISD::ROTR , MVT::i32, Legal);
- }
- else {
- setOperationAction(ISD::ROTL , MVT::i32, Expand);
- setOperationAction(ISD::ROTR , MVT::i32, Expand);
+ setOperationAction(ISD::ROTL, MVT::i32, Legal);
+ setOperationAction(ISD::ROTR, MVT::i32, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
}
- setOperationAction(ISD::ROTL , MVT::i16, Expand);
- setOperationAction(ISD::ROTR , MVT::i16, Expand);
- setOperationAction(ISD::ROTL , MVT::i8, Expand);
- setOperationAction(ISD::ROTR , MVT::i8, Expand);
- setOperationAction(ISD::BSWAP , MVT::i16, Expand);
- setOperationAction(ISD::BSWAP , MVT::i32, Expand);
- setOperationAction(ISD::BSWAP , MVT::i64, Expand);
+ setOperationAction(ISD::ROTL, MVT::i16, Expand);
+ setOperationAction(ISD::ROTR, MVT::i16, Expand);
+ setOperationAction(ISD::ROTL, MVT::i8, Expand);
+ setOperationAction(ISD::ROTR, MVT::i8, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i16, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
// Indirect branch is not supported.
// This also disables Jump Table creation.
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);
- setOperationAction(ISD::BRIND, MVT::Other, Expand);
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
- setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
- setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
// We want to legalize constant related memmove and memcopy
// intrinsics.
@@ -168,16 +164,16 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
setTruncStoreAction(MVT::i8, MVT::i1, Expand);
// This is legal in NVPTX
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
// TRAP can be lowered to PTX trap
- setOperationAction(ISD::TRAP, MVT::Other, Legal);
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
// Register custom handling for vector loads/stores
- for (int i = MVT::FIRST_VECTOR_VALUETYPE;
- i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
- MVT VT = (MVT::SimpleValueType)i;
+ for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
+ ++i) {
+ MVT VT = (MVT::SimpleValueType) i;
if (IsPTXVectorType(VT)) {
setOperationAction(ISD::LOAD, VT, Custom);
setOperationAction(ISD::STORE, VT, Custom);
@@ -190,49 +186,86 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
computeRegisterProperties();
}
-
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
- default: return 0;
- case NVPTXISD::CALL: return "NVPTXISD::CALL";
- case NVPTXISD::RET_FLAG: return "NVPTXISD::RET_FLAG";
- case NVPTXISD::Wrapper: return "NVPTXISD::Wrapper";
- case NVPTXISD::NVBuiltin: return "NVPTXISD::NVBuiltin";
- case NVPTXISD::DeclareParam: return "NVPTXISD::DeclareParam";
+ default:
+ return 0;
+ case NVPTXISD::CALL:
+ return "NVPTXISD::CALL";
+ case NVPTXISD::RET_FLAG:
+ return "NVPTXISD::RET_FLAG";
+ case NVPTXISD::Wrapper:
+ return "NVPTXISD::Wrapper";
+ case NVPTXISD::NVBuiltin:
+ return "NVPTXISD::NVBuiltin";
+ case NVPTXISD::DeclareParam:
+ return "NVPTXISD::DeclareParam";
case NVPTXISD::DeclareScalarParam:
return "NVPTXISD::DeclareScalarParam";
- case NVPTXISD::DeclareRet: return "NVPTXISD::DeclareRet";
- case NVPTXISD::DeclareRetParam: return "NVPTXISD::DeclareRetParam";
- case NVPTXISD::PrintCall: return "NVPTXISD::PrintCall";
- case NVPTXISD::LoadParam: return "NVPTXISD::LoadParam";
- case NVPTXISD::StoreParam: return "NVPTXISD::StoreParam";
- case NVPTXISD::StoreParamS32: return "NVPTXISD::StoreParamS32";
- case NVPTXISD::StoreParamU32: return "NVPTXISD::StoreParamU32";
- case NVPTXISD::MoveToParam: return "NVPTXISD::MoveToParam";
- case NVPTXISD::CallArgBegin: return "NVPTXISD::CallArgBegin";
- case NVPTXISD::CallArg: return "NVPTXISD::CallArg";
- case NVPTXISD::LastCallArg: return "NVPTXISD::LastCallArg";
- case NVPTXISD::CallArgEnd: return "NVPTXISD::CallArgEnd";
- case NVPTXISD::CallVoid: return "NVPTXISD::CallVoid";
- case NVPTXISD::CallVal: return "NVPTXISD::CallVal";
- case NVPTXISD::CallSymbol: return "NVPTXISD::CallSymbol";
- case NVPTXISD::Prototype: return "NVPTXISD::Prototype";
- case NVPTXISD::MoveParam: return "NVPTXISD::MoveParam";
- case NVPTXISD::MoveRetval: return "NVPTXISD::MoveRetval";
- case NVPTXISD::MoveToRetval: return "NVPTXISD::MoveToRetval";
- case NVPTXISD::StoreRetval: return "NVPTXISD::StoreRetval";
- case NVPTXISD::PseudoUseParam: return "NVPTXISD::PseudoUseParam";
- case NVPTXISD::RETURN: return "NVPTXISD::RETURN";
- case NVPTXISD::CallSeqBegin: return "NVPTXISD::CallSeqBegin";
- case NVPTXISD::CallSeqEnd: return "NVPTXISD::CallSeqEnd";
- case NVPTXISD::LoadV2: return "NVPTXISD::LoadV2";
- case NVPTXISD::LoadV4: return "NVPTXISD::LoadV4";
- case NVPTXISD::LDGV2: return "NVPTXISD::LDGV2";
- case NVPTXISD::LDGV4: return "NVPTXISD::LDGV4";
- case NVPTXISD::LDUV2: return "NVPTXISD::LDUV2";
- case NVPTXISD::LDUV4: return "NVPTXISD::LDUV4";
- case NVPTXISD::StoreV2: return "NVPTXISD::StoreV2";
- case NVPTXISD::StoreV4: return "NVPTXISD::StoreV4";
+ case NVPTXISD::DeclareRet:
+ return "NVPTXISD::DeclareRet";
+ case NVPTXISD::DeclareRetParam:
+ return "NVPTXISD::DeclareRetParam";
+ case NVPTXISD::PrintCall:
+ return "NVPTXISD::PrintCall";
+ case NVPTXISD::LoadParam:
+ return "NVPTXISD::LoadParam";
+ case NVPTXISD::StoreParam:
+ return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamS32:
+ return "NVPTXISD::StoreParamS32";
+ case NVPTXISD::StoreParamU32:
+ return "NVPTXISD::StoreParamU32";
+ case NVPTXISD::MoveToParam:
+ return "NVPTXISD::MoveToParam";
+ case NVPTXISD::CallArgBegin:
+ return "NVPTXISD::CallArgBegin";
+ case NVPTXISD::CallArg:
+ return "NVPTXISD::CallArg";
+ case NVPTXISD::LastCallArg:
+ return "NVPTXISD::LastCallArg";
+ case NVPTXISD::CallArgEnd:
+ return "NVPTXISD::CallArgEnd";
+ case NVPTXISD::CallVoid:
+ return "NVPTXISD::CallVoid";
+ case NVPTXISD::CallVal:
+ return "NVPTXISD::CallVal";
+ case NVPTXISD::CallSymbol:
+ return "NVPTXISD::CallSymbol";
+ case NVPTXISD::Prototype:
+ return "NVPTXISD::Prototype";
+ case NVPTXISD::MoveParam:
+ return "NVPTXISD::MoveParam";
+ case NVPTXISD::MoveRetval:
+ return "NVPTXISD::MoveRetval";
+ case NVPTXISD::MoveToRetval:
+ return "NVPTXISD::MoveToRetval";
+ case NVPTXISD::StoreRetval:
+ return "NVPTXISD::StoreRetval";
+ case NVPTXISD::PseudoUseParam:
+ return "NVPTXISD::PseudoUseParam";
+ case NVPTXISD::RETURN:
+ return "NVPTXISD::RETURN";
+ case NVPTXISD::CallSeqBegin:
+ return "NVPTXISD::CallSeqBegin";
+ case NVPTXISD::CallSeqEnd:
+ return "NVPTXISD::CallSeqEnd";
+ case NVPTXISD::LoadV2:
+ return "NVPTXISD::LoadV2";
+ case NVPTXISD::LoadV4:
+ return "NVPTXISD::LoadV4";
+ case NVPTXISD::LDGV2:
+ return "NVPTXISD::LDGV2";
+ case NVPTXISD::LDGV4:
+ return "NVPTXISD::LDGV4";
+ case NVPTXISD::LDUV2:
+ return "NVPTXISD::LDUV2";
+ case NVPTXISD::LDUV4:
+ return "NVPTXISD::LDUV4";
+ case NVPTXISD::StoreV2:
+ return "NVPTXISD::StoreV2";
+ case NVPTXISD::StoreV4:
+ return "NVPTXISD::StoreV4";
}
}
@@ -248,10 +281,9 @@ NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
}
-std::string NVPTXTargetLowering::getPrototype(Type *retTy,
- const ArgListTy &Args,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- unsigned retAlignment) const {
+std::string NVPTXTargetLowering::getPrototype(
+ Type *retTy, const ArgListTy &Args,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment) const {
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
@@ -267,54 +299,47 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
unsigned size = 0;
if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
size = ITy->getBitWidth();
- if (size < 32) size = 32;
- }
- else {
+ if (size < 32)
+ size = 32;
+ } else {
assert(retTy->isFloatingPointTy() &&
"Floating point type expected here");
size = retTy->getPrimitiveSizeInBits();
}
O << ".param .b" << size << " _";
- }
- else if (isa<PointerType>(retTy))
- O << ".param .b" << getPointerTy().getSizeInBits()
- << " _";
+ } else if (isa<PointerType>(retTy))
+ O << ".param .b" << getPointerTy().getSizeInBits() << " _";
else {
if ((retTy->getTypeID() == Type::StructTyID) ||
isa<VectorType>(retTy)) {
SmallVector<EVT, 16> vtparts;
ComputeValueVTs(*this, retTy, vtparts);
unsigned totalsz = 0;
- for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
unsigned elems = 1;
EVT elemtype = vtparts[i];
if (vtparts[i].isVector()) {
elems = vtparts[i].getVectorNumElements();
elemtype = vtparts[i].getVectorElementType();
}
- for (unsigned j=0, je=elems; j!=je; ++j) {
+ for (unsigned j = 0, je = elems; j != je; ++j) {
unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8)) sz = 8;
- totalsz += sz/8;
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
+ totalsz += sz / 8;
}
}
- O << ".param .align "
- << retAlignment
- << " .b8 _["
- << totalsz << "]";
- }
- else {
- assert(false &&
- "Unknown return type");
+ O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
+ } else {
+ assert(false && "Unknown return type");
}
}
- }
- else {
+ } else {
SmallVector<EVT, 16> vtparts;
ComputeValueVTs(*this, retTy, vtparts);
unsigned idx = 0;
- for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
unsigned elems = 1;
EVT elemtype = vtparts[i];
if (vtparts[i].isVector()) {
@@ -322,14 +347,16 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
elemtype = vtparts[i].getVectorElementType();
}
- for (unsigned j=0, je=elems; j!=je; ++j) {
+ for (unsigned j = 0, je = elems; j != je; ++j) {
unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ if (elemtype.isInteger() && (sz < 32))
+ sz = 32;
O << ".reg .b" << sz << " _";
- if (j<je-1) O << ", ";
+ if (j < je - 1)
+ O << ", ";
++idx;
}
- if (i < e-1)
+ if (i < e - 1)
O << ", ";
}
}
@@ -340,7 +367,7 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
bool first = true;
MVT thePointerTy = getPointerTy();
- for (unsigned i=0,e=Args.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = Args.size(); i != e; ++i) {
const Type *Ty = Args[i].Ty;
if (!first) {
O << ", ";
@@ -351,9 +378,9 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
unsigned sz = 0;
if (isa<IntegerType>(Ty)) {
sz = cast<IntegerType>(Ty)->getBitWidth();
- if (sz < 32) sz = 32;
- }
- else if (isa<PointerType>(Ty))
+ if (sz < 32)
+ sz = 32;
+ } else if (isa<PointerType>(Ty))
sz = thePointerTy.getSizeInBits();
else
sz = Ty->getPrimitiveSizeInBits();
@@ -365,23 +392,20 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
continue;
}
const PointerType *PTy = dyn_cast<PointerType>(Ty);
- assert(PTy &&
- "Param with byval attribute should be a pointer type");
+ assert(PTy && "Param with byval attribute should be a pointer type");
Type *ETy = PTy->getElementType();
if (isABI) {
unsigned align = Outs[i].Flags.getByValAlign();
unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
- O << ".param .align " << align
- << " .b8 ";
+ O << ".param .align " << align << " .b8 ";
O << "_";
O << "[" << sz << "]";
continue;
- }
- else {
+ } else {
SmallVector<EVT, 16> vtparts;
ComputeValueVTs(*this, ETy, vtparts);
- for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
unsigned elems = 1;
EVT elemtype = vtparts[i];
if (vtparts[i].isVector()) {
@@ -389,14 +413,16 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
elemtype = vtparts[i].getVectorElementType();
}
- for (unsigned j=0,je=elems; j!=je; ++j) {
+ for (unsigned j = 0, je = elems; j != je; ++j) {
unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ if (elemtype.isInteger() && (sz < 32))
+ sz = 32;
O << ".reg .b" << sz << " ";
O << "_";
- if (j<je-1) O << ", ";
+ if (j < je - 1)
+ O << ", ";
}
- if (i<e-1)
+ if (i < e - 1)
O << ", ";
}
continue;
@@ -406,27 +432,25 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy,
return O.str();
}
-
-SDValue
-NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const {
- SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
+SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ DebugLoc &dl = CLI.DL;
SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
- SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
- SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
- SDValue Chain = CLI.Chain;
- SDValue Callee = CLI.Callee;
- bool &isTailCall = CLI.IsTailCall;
- ArgListTy &Args = CLI.Args;
- Type *retTy = CLI.RetTy;
- ImmutableCallSite *CS = CLI.CS;
+ SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
+ SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ ArgListTy &Args = CLI.Args;
+ Type *retTy = CLI.RetTy;
+ ImmutableCallSite *CS = CLI.CS;
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
SDValue tempChain = Chain;
- Chain = DAG.getCALLSEQ_START(Chain,
- DAG.getIntPtrConstant(uniqueCallSite, true));
+ Chain =
+ DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true));
SDValue InFlag = Chain.getValue(1);
assert((Outs.size() == Args.size()) &&
@@ -434,7 +458,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
unsigned paramCount = 0;
// Declare the .params or .reg need to pass values
// to the function
- for (unsigned i=0, e=Outs.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
EVT VT = Outs[i].VT;
if (Outs[i].Flags.isByVal() == false) {
@@ -445,19 +469,20 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (isABI)
isReg = 0;
unsigned sz = VT.getSizeInBits();
- if (VT.isInteger() && (sz < 32)) sz = 32;
+ if (VT.isInteger() && (sz < 32))
+ sz = 32;
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue DeclareParamOps[] = { Chain,
DAG.getConstant(paramCount, MVT::i32),
DAG.getConstant(sz, MVT::i32),
- DAG.getConstant(isReg, MVT::i32),
- InFlag };
+ DAG.getConstant(isReg, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
DeclareParamOps, 5);
InFlag = Chain.getValue(1);
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(0, MVT::i32), OutVals[i], InFlag };
+ DAG.getConstant(0, MVT::i32), OutVals[i],
+ InFlag };
unsigned opcode = NVPTXISD::StoreParam;
if (isReg)
@@ -477,8 +502,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// struct or vector
SmallVector<EVT, 16> vtparts;
const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
- assert(PTy &&
- "Type of a byval parameter should be pointer");
+ assert(PTy && "Type of a byval parameter should be pointer");
ComputeValueVTs(*this, PTy->getElementType(), vtparts);
if (isABI) {
@@ -488,40 +512,41 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// The ByValAlign in the Outs[i].Flags is alway set at this point, so we
// don't need to
// worry about natural alignment or not. See TargetLowering::LowerCallTo()
- SDValue DeclareParamOps[] = { Chain,
- DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32),
- DAG.getConstant(paramCount, MVT::i32),
- DAG.getConstant(sz, MVT::i32),
- InFlag };
+ SDValue DeclareParamOps[] = {
+ Chain, DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32),
+ DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
+ InFlag
+ };
Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
DeclareParamOps, 5);
InFlag = Chain.getValue(1);
unsigned curOffset = 0;
- for (unsigned j=0,je=vtparts.size(); j!=je; ++j) {
+ for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
unsigned elems = 1;
EVT elemtype = vtparts[j];
if (vtparts[j].isVector()) {
elems = vtparts[j].getVectorNumElements();
elemtype = vtparts[j].getVectorElementType();
}
- for (unsigned k=0,ke=elems; k!=ke; ++k) {
+ for (unsigned k = 0, ke = elems; k != ke; ++k) {
unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 8)) sz = 8;
- SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
- OutVals[i],
- DAG.getConstant(curOffset,
- getPointerTy()));
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), false, false, false, 0);
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
+ SDValue srcAddr =
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
+ DAG.getConstant(curOffset, getPointerTy()));
+ SDValue theVal =
+ DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), false, false, false, 0);
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount,
- MVT::i32),
- DAG.getConstant(curOffset, MVT::i32),
- theVal, InFlag };
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(curOffset, MVT::i32),
+ theVal, InFlag };
Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
CopyParamOps, 5);
InFlag = Chain.getValue(1);
- curOffset += sz/8;
+ curOffset += sz / 8;
}
}
++paramCount;
@@ -530,30 +555,31 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Non-abi, struct or vector
// Declare a bunch or .reg .b<size> .param<n>
unsigned curOffset = 0;
- for (unsigned j=0,je=vtparts.size(); j!=je; ++j) {
+ for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
unsigned elems = 1;
EVT elemtype = vtparts[j];
if (vtparts[j].isVector()) {
elems = vtparts[j].getVectorNumElements();
elemtype = vtparts[j].getVectorElementType();
}
- for (unsigned k=0,ke=elems; k!=ke; ++k) {
+ for (unsigned k = 0, ke = elems; k != ke; ++k) {
unsigned sz = elemtype.getSizeInBits();
- if (elemtype.isInteger() && (sz < 32)) sz = 32;
+ if (elemtype.isInteger() && (sz < 32))
+ sz = 32;
SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareParamOps[] = { Chain, DAG.getConstant(paramCount,
- MVT::i32),
- DAG.getConstant(sz, MVT::i32),
- DAG.getConstant(1, MVT::i32),
- InFlag };
+ SDValue DeclareParamOps[] = { Chain,
+ DAG.getConstant(paramCount, MVT::i32),
+ DAG.getConstant(sz, MVT::i32),
+ DAG.getConstant(1, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
DeclareParamOps, 5);
InFlag = Chain.getValue(1);
- SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
- DAG.getConstant(curOffset,
- getPointerTy()));
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
- MachinePointerInfo(), false, false, false, 0);
+ SDValue srcAddr =
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
+ DAG.getConstant(curOffset, getPointerTy()));
+ SDValue theVal =
+ DAG.getLoad(elemtype, dl, tempChain, srcAddr, MachinePointerInfo(),
+ false, false, false, 0);
SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
DAG.getConstant(0, MVT::i32), theVal,
@@ -578,20 +604,21 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Declare one .param .align 16 .b8 func_retval0[<size>] for ABI or
// individual .reg .b<size> func_retval<0..> for non ABI
unsigned resultsz = 0;
- for (unsigned i=0,e=resvtparts.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = resvtparts.size(); i != e; ++i) {
unsigned elems = 1;
EVT elemtype = resvtparts[i];
if (resvtparts[i].isVector()) {
elems = resvtparts[i].getVectorNumElements();
elemtype = resvtparts[i].getVectorElementType();
}
- for (unsigned j=0,je=elems; j!=je; ++j) {
+ for (unsigned j = 0, je = elems; j != je; ++j) {
unsigned sz = elemtype.getSizeInBits();
if (isABI == false) {
- if (elemtype.isInteger() && (sz < 32)) sz = 32;
- }
- else {
- if (elemtype.isInteger() && (sz < 8)) sz = 8;
+ if (elemtype.isInteger() && (sz < 32))
+ sz = 32;
+ } else {
+ if (elemtype.isInteger() && (sz < 8))
+ sz = 8;
}
if (isABI == false) {
SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
@@ -609,7 +636,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
if (isABI) {
if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
- retTy->isPointerTy() ) {
+ retTy->isPointerTy()) {
// Scalar needs to be at least 32bit wide
if (resultsz < 32)
resultsz = 32;
@@ -620,8 +647,7 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
DeclareRetOps, 5);
InFlag = Chain.getValue(1);
- }
- else {
+ } else {
if (Func) { // direct call
if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
retAlignment = getDataLayout()->getABITypeAlignment(retTy);
@@ -631,10 +657,10 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
retAlignment = getDataLayout()->getABITypeAlignment(retTy);
}
SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment,
- MVT::i32),
- DAG.getConstant(resultsz/8, MVT::i32),
- DAG.getConstant(0, MVT::i32), InFlag };
+ SDValue DeclareRetOps[] = { Chain,
+ DAG.getConstant(retAlignment, MVT::i32),
+ DAG.getConstant(resultsz / 8, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag };
Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
DeclareRetOps, 5);
InFlag = Chain.getValue(1);
@@ -652,24 +678,24 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// INLINEASM SDNode.
SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment);
- const char *asmstr = nvTM->getManagedStrPool()->
- getManagedString(proto_string.c_str())->c_str();
- SDValue InlineAsmOps[] = { Chain,
- DAG.getTargetExternalSymbol(asmstr,
- getPointerTy()),
- DAG.getMDNode(0),
- DAG.getTargetConstant(0, MVT::i32), InFlag };
+ const char *asmstr = nvTM->getManagedStrPool()
+ ->getManagedString(proto_string.c_str())->c_str();
+ SDValue InlineAsmOps[] = {
+ Chain, DAG.getTargetExternalSymbol(asmstr, getPointerTy()),
+ DAG.getMDNode(0), DAG.getTargetConstant(0, MVT::i32), InFlag
+ };
Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
InFlag = Chain.getValue(1);
}
// Op to just print "call"
SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrintCallOps[] = { Chain,
- DAG.getConstant(isABI ? ((Ins.size()==0) ? 0 : 1)
- : retCount, MVT::i32),
- InFlag };
- Chain = DAG.getNode(Func?(NVPTXISD::PrintCallUni):(NVPTXISD::PrintCall), dl,
- PrintCallVTs, PrintCallOps, 3);
+ SDValue PrintCallOps[] = {
+ Chain,
+ DAG.getConstant(isABI ? ((Ins.size() == 0) ? 0 : 1) : retCount, MVT::i32),
+ InFlag
+ };
+ Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
+ dl, PrintCallVTs, PrintCallOps, 3);
InFlag = Chain.getValue(1);
// Ops to print out the function name
@@ -685,31 +711,28 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
CallArgBeginOps, 2);
InFlag = Chain.getValue(1);
- for (unsigned i=0, e=paramCount; i!=e; ++i) {
+ for (unsigned i = 0, e = paramCount; i != e; ++i) {
unsigned opcode;
- if (i==(e-1))
+ if (i == (e - 1))
opcode = NVPTXISD::LastCallArg;
else
opcode = NVPTXISD::CallArg;
SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
- DAG.getConstant(i, MVT::i32),
- InFlag };
+ DAG.getConstant(i, MVT::i32), InFlag };
Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
InFlag = Chain.getValue(1);
}
SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue CallArgEndOps[] = { Chain,
- DAG.getConstant(Func ? 1 : 0, MVT::i32),
+ SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
InFlag };
- Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps,
- 3);
+ Chain =
+ DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, 3);
InFlag = Chain.getValue(1);
if (!Func) {
SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
- SDValue PrototypeOps[] = { Chain,
- DAG.getConstant(uniqueCallSite, MVT::i32),
+ SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
InFlag };
Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
InFlag = Chain.getValue(1);
@@ -719,32 +742,28 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (Ins.size() > 0) {
if (isABI) {
unsigned resoffset = 0;
- for (unsigned i=0,e=Ins.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
unsigned sz = Ins[i].VT.getSizeInBits();
- if (Ins[i].VT.isInteger() && (sz < 8)) sz = 8;
+ if (Ins[i].VT.isInteger() && (sz < 8))
+ sz = 8;
EVT LoadRetVTs[] = { Ins[i].VT, MVT::Other, MVT::Glue };
- SDValue LoadRetOps[] = {
- Chain,
- DAG.getConstant(1, MVT::i32),
- DAG.getConstant(resoffset, MVT::i32),
- InFlag
- };
+ SDValue LoadRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(resoffset, MVT::i32), InFlag };
SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs,
LoadRetOps, array_lengthof(LoadRetOps));
Chain = retval.getValue(1);
InFlag = retval.getValue(2);
InVals.push_back(retval);
- resoffset += sz/8;
+ resoffset += sz / 8;
}
- }
- else {
+ } else {
SmallVector<EVT, 16> resvtparts;
ComputeValueVTs(*this, retTy, resvtparts);
assert(Ins.size() == resvtparts.size() &&
"Unexpected number of return values in non-ABI case");
unsigned paramNum = 0;
- for (unsigned i=0,e=Ins.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
assert(EVT(Ins[i].VT) == resvtparts[i] &&
"Unexpected EVT type in non-ABI case");
unsigned numelems = 1;
@@ -754,14 +773,11 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
elemtype = Ins[i].VT.getVectorElementType();
}
std::vector<SDValue> tempRetVals;
- for (unsigned j=0; j<numelems; ++j) {
+ for (unsigned j = 0; j < numelems; ++j) {
EVT MoveRetVTs[] = { elemtype, MVT::Other, MVT::Glue };
- SDValue MoveRetOps[] = {
- Chain,
- DAG.getConstant(0, MVT::i32),
- DAG.getConstant(paramNum, MVT::i32),
- InFlag
- };
+ SDValue MoveRetOps[] = { Chain, DAG.getConstant(0, MVT::i32),
+ DAG.getConstant(paramNum, MVT::i32),
+ InFlag };
SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs,
MoveRetOps, array_lengthof(MoveRetOps));
Chain = retval.getValue(1);
@@ -777,9 +793,8 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
}
}
}
- Chain = DAG.getCALLSEQ_END(Chain,
- DAG.getIntPtrConstant(uniqueCallSite, true),
- DAG.getIntPtrConstant(uniqueCallSite+1, true),
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
+ DAG.getIntPtrConstant(uniqueCallSite + 1, true),
InFlag);
uniqueCallSite++;
@@ -792,45 +807,51 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
// (see LegalizeDAG.cpp). This is slow and uses local memory.
// We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
-SDValue NVPTXTargetLowering::
-LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+SDValue
+NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
SmallVector<SDValue, 8> Ops;
unsigned NumOperands = Node->getNumOperands();
- for (unsigned i=0; i < NumOperands; ++i) {
+ for (unsigned i = 0; i < NumOperands; ++i) {
SDValue SubOp = Node->getOperand(i);
EVT VVT = SubOp.getNode()->getValueType(0);
EVT EltVT = VVT.getVectorElementType();
unsigned NumSubElem = VVT.getVectorNumElements();
- for (unsigned j=0; j < NumSubElem; ++j) {
+ for (unsigned j = 0; j < NumSubElem; ++j) {
Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
DAG.getIntPtrConstant(j)));
}
}
- return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
- &Ops[0], Ops.size());
+ return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), &Ops[0],
+ Ops.size());
}
-SDValue NVPTXTargetLowering::
-LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+SDValue
+NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
- case ISD::RETURNADDR: return SDValue();
- case ISD::FRAMEADDR: return SDValue();
- case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
- case ISD::INTRINSIC_W_CHAIN: return Op;
+ case ISD::RETURNADDR:
+ return SDValue();
+ case ISD::FRAMEADDR:
+ return SDValue();
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN:
+ return Op;
case ISD::BUILD_VECTOR:
case ISD::EXTRACT_SUBVECTOR:
return Op;
- case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
- case ISD::STORE: return LowerSTORE(Op, DAG);
- case ISD::LOAD: return LowerLOAD(Op, DAG);
+ case ISD::CONCAT_VECTORS:
+ return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::STORE:
+ return LowerSTORE(Op, DAG);
+ case ISD::LOAD:
+ return LowerLOAD(Op, DAG);
default:
llvm_unreachable("Custom lowering not defined for operation");
}
}
-
SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if (Op.getValueType() == MVT::i1)
return LowerLOADi1(Op, DAG);
@@ -842,24 +863,22 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// =>
// v1 = ld i8* addr
// v = trunc v1 to i1
-SDValue NVPTXTargetLowering::
-LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
+SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
LoadSDNode *LD = cast<LoadSDNode>(Node);
DebugLoc dl = Node->getDebugLoc();
- assert(LD->getExtensionType() == ISD::NON_EXTLOAD) ;
+ assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
assert(Node->getValueType(0) == MVT::i1 &&
"Custom lowering for i1 load only");
- SDValue newLD = DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(),
- LD->getPointerInfo(),
- LD->isVolatile(), LD->isNonTemporal(),
- LD->isInvariant(),
- LD->getAlignment());
+ SDValue newLD =
+ DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
+ LD->isInvariant(), LD->getAlignment());
SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
// The legalizer (the caller) is expecting two values from the legalized
// load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
// in LegalizeDAG.cpp which also uses MergeValues.
- SDValue Ops[] = {result, LD->getChain()};
+ SDValue Ops[] = { result, LD->getChain() };
return DAG.getMergeValues(Ops, 2, dl);
}
@@ -887,7 +906,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
if (!ValVT.isSimple())
return SDValue();
switch (ValVT.getSimpleVT().SimpleTy) {
- default: return SDValue();
+ default:
+ return SDValue();
case MVT::v2i8:
case MVT::v2i16:
case MVT::v2i32:
@@ -914,7 +934,8 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
NeedExt = true;
switch (NumElts) {
- default: return SDValue();
+ default:
+ return SDValue();
case 2:
Opcode = NVPTXISD::StoreV2;
break;
@@ -947,11 +968,9 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
MemSDNode *MemSD = cast<MemSDNode>(N);
- SDValue NewSt = DAG.getMemIntrinsicNode(Opcode, DL,
- DAG.getVTList(MVT::Other), &Ops[0],
- Ops.size(), MemSD->getMemoryVT(),
- MemSD->getMemOperand());
-
+ SDValue NewSt = DAG.getMemIntrinsicNode(
+ Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(),
+ MemSD->getMemoryVT(), MemSD->getMemOperand());
//return DCI.CombineTo(N, NewSt, true);
return NewSt;
@@ -964,8 +983,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
// =>
// v1 = zxt v to i8
// st i8, addr
-SDValue NVPTXTargetLowering::
-LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
+SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
SDNode *Node = Op.getNode();
DebugLoc dl = Node->getDebugLoc();
StoreSDNode *ST = cast<StoreSDNode>(Node);
@@ -976,18 +994,14 @@ LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
bool isNonTemporal = ST->isNonTemporal();
- Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl,
- MVT::i8, Tmp3);
- SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
- ST->getPointerInfo(), isVolatile,
- isNonTemporal, Alignment);
+ Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Tmp3);
+ SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
+ isVolatile, isNonTemporal, Alignment);
return Result;
}
-
-SDValue
-NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx,
- EVT v) const {
+SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
+ int idx, EVT v) const {
std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
std::stringstream suffix;
suffix << idx;
@@ -1000,19 +1014,16 @@ NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
return getExtSymb(DAG, ".PARAM", idx, v);
}
-SDValue
-NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
+SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
return getExtSymb(DAG, ".HLPPARAM", idx);
}
// Check to see if the kernel argument is image*_t or sampler_t
bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
- static const char *const specialTypes[] = {
- "struct._image2d_t",
- "struct._image3d_t",
- "struct._sampler_t"
- };
+ static const char *const specialTypes[] = { "struct._image2d_t",
+ "struct._image3d_t",
+ "struct._sampler_t" };
const Type *Ty = arg->getType();
const PointerType *PTy = dyn_cast<PointerType>(Ty);
@@ -1033,12 +1044,10 @@ bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
return false;
}
-SDValue
-NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const {
+SDValue NVPTXTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, DebugLoc dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
const DataLayout *TD = getDataLayout();
@@ -1054,7 +1063,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
std::vector<Type *> argTypes;
std::vector<const Argument *> theArgs;
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
- I != E; ++I) {
+ I != E; ++I) {
theArgs.push_back(I);
argTypes.push_back(I->getType());
}
@@ -1062,7 +1071,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
// "Ins types and function types did not match");
int idx = 0;
- for (unsigned i=0, e=argTypes.size(); i!=e; ++i, ++idx) {
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i, ++idx) {
Type *Ty = argTypes[i];
EVT ObjectVT = getValueType(Ty);
//assert(ObjectVT == Ins[i].VT &&
@@ -1071,11 +1080,12 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
// If the kernel argument is image*_t or sampler_t, convert it to
// a i32 constant holding the parameter position. This can later
// matched in the AsmPrinter to output the correct mangled name.
- if (isImageOrSamplerVal(theArgs[i],
- (theArgs[i]->getParent() ?
- theArgs[i]->getParent()->getParent() : 0))) {
+ if (isImageOrSamplerVal(
+ theArgs[i],
+ (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
+ : 0))) {
assert(isKernel && "Only kernels can have image/sampler params");
- InVals.push_back(DAG.getConstant(i+1, MVT::i32));
+ InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
continue;
}
@@ -1097,7 +1107,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
// to newly created nodes. The SDNOdes for params have to
// appear in the same order as their order of appearance
// in the original function. "idx+1" holds that order.
- if (PAL.hasAttribute(i+1, Attribute::ByVal) == false) {
+ if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
if (ObjectVT.isVector()) {
unsigned NumElts = ObjectVT.getVectorNumElements();
EVT EltVT = ObjectVT.getVectorElementType();
@@ -1110,14 +1120,12 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
//DAG.getConstant(Offset, getPointerTy()));
A, B);
Value *SrcValue = Constant::getNullValue(PointerType::get(
- EltVT.getTypeForEVT(F->getContext()),
- llvm::ADDRESS_SPACE_PARAM));
- SDValue Ld = DAG.getLoad(EltVT, dl, Root, Addr,
- MachinePointerInfo(SrcValue),
- false, false, false,
- TD->getABITypeAlignment(EltVT.getTypeForEVT(
- F->getContext())));
- Offset += EltVT.getStoreSizeInBits()/8;
+ EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
+ SDValue Ld = DAG.getLoad(
+ EltVT, dl, Root, Addr, MachinePointerInfo(SrcValue), false, false,
+ false,
+ TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
+ Offset += EltVT.getStoreSizeInBits() / 8;
InVals.push_back(Ld);
}
continue;
@@ -1129,24 +1137,22 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue Arg = getParamSymbol(DAG, idx);
// Conjure up a value that we can get the address space from.
// FIXME: Using a constant here is a hack.
- Value *srcValue = Constant::getNullValue(PointerType::get(
- ObjectVT.getTypeForEVT(F->getContext()),
- llvm::ADDRESS_SPACE_PARAM));
- SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg,
- MachinePointerInfo(srcValue), false, false,
- false,
- TD->getABITypeAlignment(ObjectVT.getTypeForEVT(
- F->getContext())));
+ Value *srcValue = Constant::getNullValue(
+ PointerType::get(ObjectVT.getTypeForEVT(F->getContext()),
+ llvm::ADDRESS_SPACE_PARAM));
+ SDValue p = DAG.getLoad(
+ ObjectVT, dl, Root, Arg, MachinePointerInfo(srcValue), false, false,
+ false,
+ TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
if (p.getNode())
- DAG.AssignOrdering(p.getNode(), idx+1);
+ DAG.AssignOrdering(p.getNode(), idx + 1);
InVals.push_back(p);
- }
- else {
+ } else {
// If no ABI, just move the param symbol
SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
if (p.getNode())
- DAG.AssignOrdering(p.getNode(), idx+1);
+ DAG.AssignOrdering(p.getNode(), idx + 1);
InVals.push_back(p);
}
continue;
@@ -1163,47 +1169,49 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
if (p.getNode())
- DAG.AssignOrdering(p.getNode(), idx+1);
+ DAG.AssignOrdering(p.getNode(), idx + 1);
if (isKernel)
InVals.push_back(p);
else {
- SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
- DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32),
- p);
+ SDValue p2 = DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
+ DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
InVals.push_back(p2);
}
} else {
// Have to move a set of param symbols to registers and
// store them locally and return the local pointer in InVals
const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
- assert(elemPtrType &&
- "Byval parameter should be a pointer type");
+ assert(elemPtrType && "Byval parameter should be a pointer type");
Type *elemType = elemPtrType->getElementType();
// Compute the constituent parts
SmallVector<EVT, 16> vtparts;
SmallVector<uint64_t, 16> offsets;
ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
unsigned totalsize = 0;
- for (unsigned j=0, je=vtparts.size(); j!=je; ++j)
+ for (unsigned j = 0, je = vtparts.size(); j != je; ++j)
totalsize += vtparts[j].getStoreSizeInBits();
- SDValue localcopy = DAG.getFrameIndex(MF.getFrameInfo()->
- CreateStackObject(totalsize/8, 16, false),
- getPointerTy());
+ SDValue localcopy = DAG.getFrameIndex(
+ MF.getFrameInfo()->CreateStackObject(totalsize / 8, 16, false),
+ getPointerTy());
unsigned sizesofar = 0;
std::vector<SDValue> theChains;
- for (unsigned j=0, je=vtparts.size(); j!=je; ++j) {
+ for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
unsigned numElems = 1;
- if (vtparts[j].isVector()) numElems = vtparts[j].getVectorNumElements();
- for (unsigned k=0, ke=numElems; k!=ke; ++k) {
+ if (vtparts[j].isVector())
+ numElems = vtparts[j].getVectorNumElements();
+ for (unsigned k = 0, ke = numElems; k != ke; ++k) {
EVT tmpvt = vtparts[j];
- if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType();
+ if (tmpvt.isVector())
+ tmpvt = tmpvt.getVectorElementType();
SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
getParamSymbol(DAG, idx, tmpvt));
- SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
- DAG.getConstant(sizesofar, getPointerTy()));
- theChains.push_back(DAG.getStore(Chain, dl, arg, addr,
- MachinePointerInfo(), false, false, 0));
- sizesofar += tmpvt.getStoreSizeInBits()/8;
+ SDValue addr =
+ DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
+ DAG.getConstant(sizesofar, getPointerTy()));
+ theChains.push_back(DAG.getStore(
+ Chain, dl, arg, addr, MachinePointerInfo(), false, false, 0));
+ sizesofar += tmpvt.getStoreSizeInBits() / 8;
++idx;
}
}
@@ -1223,43 +1231,42 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
//}
if (!OutChains.empty())
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
- &OutChains[0], OutChains.size()));
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0],
+ OutChains.size()));
return Chain;
}
-SDValue
-NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+SDValue NVPTXTargetLowering::LowerReturn(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals, DebugLoc dl,
+ SelectionDAG &DAG) const {
bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
unsigned sizesofar = 0;
unsigned idx = 0;
- for (unsigned i=0, e=Outs.size(); i!=e; ++i) {
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
SDValue theVal = OutVals[i];
EVT theValType = theVal.getValueType();
unsigned numElems = 1;
- if (theValType.isVector()) numElems = theValType.getVectorNumElements();
- for (unsigned j=0,je=numElems; j!=je; ++j) {
+ if (theValType.isVector())
+ numElems = theValType.getVectorNumElements();
+ for (unsigned j = 0, je = numElems; j != je; ++j) {
SDValue tmpval = theVal;
if (theValType.isVector())
tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
- theValType.getVectorElementType(),
- tmpval, DAG.getIntPtrConstant(j));
- Chain = DAG.getNode(isABI ? NVPTXISD::StoreRetval :NVPTXISD::MoveToRetval,
- dl, MVT::Other,
- Chain,
- DAG.getConstant(isABI ? sizesofar : idx, MVT::i32),
+ theValType.getVectorElementType(), tmpval,
+ DAG.getIntPtrConstant(j));
+ Chain = DAG.getNode(
+ isABI ? NVPTXISD::StoreRetval : NVPTXISD::MoveToRetval, dl,
+ MVT::Other, Chain, DAG.getConstant(isABI ? sizesofar : idx, MVT::i32),
tmpval);
if (theValType.isVector())
- sizesofar += theValType.getVectorElementType().getStoreSizeInBits()/8;
+ sizesofar += theValType.getVectorElementType().getStoreSizeInBits() / 8;
else
- sizesofar += theValType.getStoreSizeInBits()/8;
+ sizesofar += theValType.getStoreSizeInBits() / 8;
++idx;
}
}
@@ -1267,12 +1274,9 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
}
-void
-NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const
-{
+void NVPTXTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
if (Constraint.length() > 1)
return;
else
@@ -1282,8 +1286,7 @@ NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// NVPTX suuport vector of legal types of any length in Intrinsics because the
// NVPTX specific type legalizer
// will legalize them to the PTX supported length.
-bool
-NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
+bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
if (isTypeLegal(VT))
return true;
if (VT.isVector()) {
@@ -1294,15 +1297,13 @@ NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
return false;
}
-
// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic
// because we need the information that is only available in the "Value" type
// of destination
// pointer. In particular, the address space information.
-bool
-NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I,
- unsigned Intrinsic) const {
+bool NVPTXTargetLowering::getTgtMemIntrinsic(
+ IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
switch (Intrinsic) {
default:
return false;
@@ -1358,9 +1359,8 @@ NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I,
/// Used to guide target specific optimizations, like loop strength reduction
/// (LoopStrengthReduce.cpp) and memory optimization for address mode
/// (CodeGenPrepare.cpp)
-bool
-NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- Type *Ty) const {
+bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
// AddrMode - This represents an addressing mode of:
// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
@@ -1378,10 +1378,10 @@ NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
}
switch (AM.Scale) {
- case 0: // "r", "r+i" or "i" is allowed
+ case 0: // "r", "r+i" or "i" is allowed
break;
case 1:
- if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
+ if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
return false;
// Otherwise we have r+i.
break;
@@ -1418,8 +1418,7 @@ NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
return TargetLowering::getConstraintType(Constraint);
}
-
-std::pair<unsigned, const TargetRegisterClass*>
+std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const {
if (Constraint.size() == 1) {
@@ -1442,8 +1441,6 @@ NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
-
-
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
return 4;
@@ -1451,7 +1448,7 @@ unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
- SmallVectorImpl<SDValue>& Results) {
+ SmallVectorImpl<SDValue> &Results) {
EVT ResVT = N->getValueType(0);
DebugLoc DL = N->getDebugLoc();
@@ -1462,7 +1459,8 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
// but I'm leaving that as a TODO for now.
assert(ResVT.isSimple() && "Can only handle simple types");
switch (ResVT.getSimpleVT().SimpleTy) {
- default: return;
+ default:
+ return;
case MVT::v2i8:
case MVT::v2i16:
case MVT::v2i32:
@@ -1493,7 +1491,8 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SDVTList LdResVTs;
switch (NumElts) {
- default: return;
+ default:
+ return;
case 2:
Opcode = NVPTXISD::LoadV2;
LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
@@ -1533,14 +1532,14 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
SDValue LoadChain = NewLD.getValue(NumElts);
- SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
+ SDValue BuildVec =
+ DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
}
-static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
- SelectionDAG &DAG,
+static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &Results) {
SDValue Chain = N->getOperand(0);
SDValue Intrin = N->getOperand(1);
@@ -1548,8 +1547,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
// Get the intrinsic ID
unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
- switch(IntrinNo) {
- default: return;
+ switch (IntrinNo) {
+ default:
+ return;
case Intrinsic::nvvm_ldg_global_i:
case Intrinsic::nvvm_ldg_global_f:
case Intrinsic::nvvm_ldg_global_p:
@@ -1577,10 +1577,12 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
SDVTList LdResVTs;
switch (NumElts) {
- default: return;
+ default:
+ return;
case 2:
- switch(IntrinNo) {
- default: return;
+ switch (IntrinNo) {
+ default:
+ return;
case Intrinsic::nvvm_ldg_global_i:
case Intrinsic::nvvm_ldg_global_f:
case Intrinsic::nvvm_ldg_global_p:
@@ -1595,8 +1597,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
break;
case 4: {
- switch(IntrinNo) {
- default: return;
+ switch (IntrinNo) {
+ default:
+ return;
case Intrinsic::nvvm_ldg_global_i:
case Intrinsic::nvvm_ldg_global_f:
case Intrinsic::nvvm_ldg_global_p:
@@ -1619,29 +1622,31 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
// Copy regular operands
OtherOps.push_back(Chain); // Chain
- // Skip operand 1 (intrinsic ID)
- // Others
+ // Skip operand 1 (intrinsic ID)
+ // Others
for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
OtherOps.push_back(N->getOperand(i));
MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
- OtherOps.size(), MemSD->getMemoryVT(),
- MemSD->getMemOperand());
+ SDValue NewLD = DAG.getMemIntrinsicNode(
+ Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
+ MemSD->getMemoryVT(), MemSD->getMemOperand());
SmallVector<SDValue, 4> ScalarRes;
for (unsigned i = 0; i < NumElts; ++i) {
SDValue Res = NewLD.getValue(i);
if (NeedTrunc)
- Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ Res =
+ DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
ScalarRes.push_back(Res);
}
SDValue LoadChain = NewLD.getValue(NumElts);
- SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
+ SDValue BuildVec =
+ DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
Results.push_back(BuildVec);
Results.push_back(LoadChain);
@@ -1662,10 +1667,9 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
// We make sure the memory type is i8, which will be used during isel
// to select the proper instruction.
- SDValue NewLD = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL,
- LdResVTs, &Ops[0],
- Ops.size(), MVT::i8,
- MemSD->getMemOperand());
+ SDValue NewLD =
+ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
+ Ops.size(), MVT::i8, MemSD->getMemOperand());
Results.push_back(NewLD.getValue(0));
Results.push_back(NewLD.getValue(1));
@@ -1674,11 +1678,11 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
}
}
-void NVPTXTargetLowering::ReplaceNodeResults(SDNode *N,
- SmallVectorImpl<SDValue> &Results,
- SelectionDAG &DAG) const {
+void NVPTXTargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
switch (N->getOpcode()) {
- default: report_fatal_error("Unhandled custom legalization");
+ default:
+ report_fatal_error("Unhandled custom legalization");
case ISD::LOAD:
ReplaceLoadVector(N, DAG, Results);
return;