author     Owen Anderson <resistor@mac.com>  2011-02-25 21:41:48 +0000
committer  Owen Anderson <resistor@mac.com>  2011-02-25 21:41:48 +0000
commit     95771afbfd604ad003fa3723cac66c9370fed55d (patch)
tree       f90cbbd64c88ae50228221c55738a62d2b2fead7  /lib/Target
parent     14a129a3cf8e609b9deb8546267e509088bf7dcd  (diff)
download   llvm-95771afbfd604ad003fa3723cac66c9370fed55d.tar.gz
           llvm-95771afbfd604ad003fa3723cac66c9370fed55d.tar.bz2
           llvm-95771afbfd604ad003fa3723cac66c9370fed55d.tar.xz
Allow targets to specify the type of the RHS of a shift, parameterized on the type of the LHS.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@126518 91177308-0d34-0410-b5e6-96231b3b80d8
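
In practice, each backend ports to the new hook by deleting its constructor-time setShiftAmountType(...) call and overriding getShiftAmountTy instead, as every ISelLowering header in this patch does. A minimal sketch of the target-side pattern, using a hypothetical FooTargetLowering (not part of this commit) against the API as of this revision:

// FooISelLowering.h -- hypothetical backend illustrating the new hook.
#include "llvm/Target/TargetLowering.h"

namespace llvm {
  class FooTargetLowering : public TargetLowering {
  public:
    explicit FooTargetLowering(TargetMachine &TM);

    // Previously: setShiftAmountType(MVT::i32) in the constructor.
    // The shift-amount type may now depend on the type of the shifted
    // (LHS) operand; this sketch ignores LHSTy and always returns i32.
    virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
  };
}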
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/Alpha/AlphaISelLowering.cpp          1
-rw-r--r--  lib/Target/Alpha/AlphaISelLowering.h           14
-rw-r--r--  lib/Target/Blackfin/BlackfinISelLowering.cpp    1
-rw-r--r--  lib/Target/Blackfin/BlackfinISelLowering.h      1
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.cpp          5
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.h            6
-rw-r--r--  lib/Target/MSP430/MSP430ISelLowering.cpp        6
-rw-r--r--  lib/Target/MSP430/MSP430ISelLowering.h          2
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp          1
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.h           94
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp      3
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.h        2
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp             20
-rw-r--r--  lib/Target/X86/X86ISelLowering.h               14
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.cpp        125
-rw-r--r--  lib/Target/XCore/XCoreISelLowering.h           23
16 files changed, 161 insertions, 157 deletions
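
Callers that previously queried a single target-wide shift type now pass the value type of the operand being shifted, as the X86 and CellSPU hunks below show. A hedged sketch of the updated call pattern (DAG, dl, Val, and TLI are assumed to already be in scope, as they typically are inside these lowering routines):

// Build "Val << 8" using the shift-amount type the target reports for
// Val's own value type rather than one global shift-amount type.
SDValue ShAmt = DAG.getConstant(8, TLI.getShiftAmountTy(Val.getValueType()));
SDValue Shifted = DAG.getNode(ISD::SHL, dl, Val.getValueType(), Val, ShAmt);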
diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp
index 9137d654ed..c4f43ab9e4 100644
--- a/lib/Target/Alpha/AlphaISelLowering.cpp
+++ b/lib/Target/Alpha/AlphaISelLowering.cpp
@@ -48,7 +48,6 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the TargetLowering object.
//I am having problems with shr n i8 1
- setShiftAmountType(MVT::i64);
setBooleanContents(ZeroOrOneBooleanContent);
addRegisterClass(MVT::i64, Alpha::GPRCRegisterClass);
diff --git a/lib/Target/Alpha/AlphaISelLowering.h b/lib/Target/Alpha/AlphaISelLowering.h
index b429e9fc13..cb98f921dd 100644
--- a/lib/Target/Alpha/AlphaISelLowering.h
+++ b/lib/Target/Alpha/AlphaISelLowering.h
@@ -31,25 +31,25 @@ namespace llvm {
/// GPRelHi/GPRelLo - These represent the high and low 16-bit
/// parts of a global address respectively.
- GPRelHi, GPRelLo,
+ GPRelHi, GPRelLo,
/// RetLit - Literal Relocation of a Global
RelLit,
/// GlobalRetAddr - used to restore the return address
GlobalRetAddr,
-
+
/// CALL - Normal call.
CALL,
/// DIVCALL - used for special library calls for div and rem
DivCall,
-
+
/// return flag operand
RET_FLAG,
/// CHAIN = COND_BRANCH CHAIN, OPC, (G|F)PRC, DESTBB [, INFLAG] - This
- /// corresponds to the COND_BRANCH pseudo instruction.
+ /// corresponds to the COND_BRANCH pseudo instruction.
/// *PRC is the input register to compare to zero,
/// OPC is the branch opcode to use (e.g. Alpha::BEQ),
/// DESTBB is the destination block to branch to, and INFLAG is
@@ -62,7 +62,9 @@ namespace llvm {
class AlphaTargetLowering : public TargetLowering {
public:
explicit AlphaTargetLowering(TargetMachine &TM);
-
+
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i64; }
+
/// getSetCCResultType - Get the SETCC result ValueType
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
@@ -92,7 +94,7 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
- std::vector<unsigned>
+ std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
diff --git a/lib/Target/Blackfin/BlackfinISelLowering.cpp b/lib/Target/Blackfin/BlackfinISelLowering.cpp
index dd27d0a0ff..7c80eec3ba 100644
--- a/lib/Target/Blackfin/BlackfinISelLowering.cpp
+++ b/lib/Target/Blackfin/BlackfinISelLowering.cpp
@@ -41,7 +41,6 @@ using namespace llvm;
BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
- setShiftAmountType(MVT::i16);
setBooleanContents(ZeroOrOneBooleanContent);
setStackPointerRegisterToSaveRestore(BF::SP);
setIntDivIsCheap(false);
diff --git a/lib/Target/Blackfin/BlackfinISelLowering.h b/lib/Target/Blackfin/BlackfinISelLowering.h
index 15a745fa87..102c830688 100644
--- a/lib/Target/Blackfin/BlackfinISelLowering.h
+++ b/lib/Target/Blackfin/BlackfinISelLowering.h
@@ -32,6 +32,7 @@ namespace llvm {
class BlackfinTargetLowering : public TargetLowering {
public:
BlackfinTargetLowering(TargetMachine &TM);
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i16; }
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
virtual void ReplaceNodeResults(SDNode *N,
diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index e218fb92d1..743a4d7a0f 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -435,7 +435,6 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
- setShiftAmountType(MVT::i32);
setBooleanContents(ZeroOrNegativeOneBooleanContent);
setStackPointerRegisterToSaveRestore(SPU::R1);
@@ -2190,7 +2189,7 @@ static SDValue LowerI8Math(SDValue Op, SelectionDAG &DAG, unsigned Opc,
{
SDValue N0 = Op.getOperand(0); // Everything has at least one operand
DebugLoc dl = Op.getDebugLoc();
- EVT ShiftVT = TLI.getShiftAmountTy();
+ EVT ShiftVT = TLI.getShiftAmountTy(N0.getValueType());
assert(Op.getValueType() == MVT::i8);
switch (Opc) {
@@ -3112,7 +3111,7 @@ SPUTargetLowering::getSingleConstraintMatchWeight(
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
- break;
+ break;
//FIXME: Seems like the supported constraint letters were just copied
// from PPC, as the following doesn't correspond to the GCC docs.
// I'm leaving it so until someone adds the corresponding lowering support.
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index 95d44afe37..dd48d7bafa 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -109,6 +109,8 @@ namespace llvm {
/// getSetCCResultType - Return the ValueType for ISD::SETCC
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+
//! Custom lowering hooks
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@@ -179,9 +181,9 @@ namespace llvm {
virtual bool isLegalICmpImmediate(int64_t Imm) const;
- virtual bool isLegalAddressingMode(const AddrMode &AM,
+ virtual bool isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const;
-
+
/// After allocating this many registers, the allocator should feel
/// register pressure. The value is a somewhat random guess, based on the
/// number of non callee saved registers in the C calling convention.
diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 30ef4f5da0..a95d59c057 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -77,10 +77,6 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
// Division is expensive
setIntDivIsCheap(false);
- // Even if we have only 1 bit shift here, we can perform
- // shifts of the whole bitwidth 1 bit per step.
- setShiftAmountType(MVT::i8);
-
setStackPointerRegisterToSaveRestore(MSP430::SPW);
setBooleanContents(ZeroOrOneBooleanContent);
setSchedulingPreference(Sched::Latency);
@@ -330,7 +326,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
switch (RegVT.getSimpleVT().SimpleTy) {
- default:
+ default:
{
#ifndef NDEBUG
errs() << "LowerFormalArguments Unhandled argument type: "
diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h
index 673c5433b9..19c9eac589 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.h
+++ b/lib/Target/MSP430/MSP430ISelLowering.h
@@ -73,6 +73,8 @@ namespace llvm {
public:
explicit MSP430TargetLowering(MSP430TargetMachine &TM);
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
+
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index f1cc437d61..70d00e4b5c 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -362,7 +362,6 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
}
- setShiftAmountType(MVT::i32);
setBooleanContents(ZeroOrOneBooleanContent);
if (TM.getSubtarget<PPCSubtarget>().isPPC64()) {
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 80cab75b96..33daae9b54 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -29,36 +29,36 @@ namespace llvm {
/// FSEL - Traditional three-operand fsel node.
///
FSEL,
-
+
/// FCFID - The FCFID instruction, taking an f64 operand and producing
/// and f64 value containing the FP representation of the integer that
/// was temporarily in the f64 operand.
FCFID,
-
- /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
+
+ /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
/// operand, producing an f64 value containing the integer representation
/// of that FP value.
FCTIDZ, FCTIWZ,
-
+
/// STFIWX - The STFIWX instruction. The first operand is an input token
/// chain, then an f64 value to store, then an address to store it to.
STFIWX,
-
+
// VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
// three v4f32 operands and producing a v4f32 result.
VMADDFP, VNMSUBFP,
-
+
/// VPERM - The PPC VPERM Instruction.
///
VPERM,
-
+
/// Hi/Lo - These represent the high and low 16-bit parts of a global
/// address respectively. These nodes have two operands, the first of
/// which must be a TargetGlobalAddress, and the second of which must be a
/// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
/// though these are usually folded into other nodes.
Hi, Lo,
-
+
TOC_ENTRY,
/// The following three target-specific nodes are used for calls through
@@ -80,37 +80,37 @@ namespace llvm {
/// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
/// compute an allocation on the stack.
DYNALLOC,
-
+
/// GlobalBaseReg - On Darwin, this node represents the result of the mflr
/// at function entry, used for PIC code.
GlobalBaseReg,
-
+
/// These nodes represent the 32-bit PPC shifts that operate on 6-bit
/// shift amounts. These nodes are generated by the multi-precision shift
/// code.
SRL, SRA, SHL,
-
+
/// EXTSW_32 - This is the EXTSW instruction for use with "32-bit"
/// registers.
EXTSW_32,
/// CALL - A direct function call.
CALL_Darwin, CALL_SVR4,
-
+
/// NOP - Special NOP which follows 64-bit SVR4 calls.
NOP,
/// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
/// MTCTR instruction.
MTCTR,
-
+
/// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
/// BCTRL instruction.
BCTRL_Darwin, BCTRL_SVR4,
-
+
/// Return with a flag operand, matched by 'blr'
RET_FLAG,
-
+
/// R32 = MFCR(CRREG, INFLAG) - Represents the MFCRpseud/MFOCRF
/// instructions. This copies the bits corresponding to the specified
/// CRREG into the resultant GPR. Bits corresponding to other CR regs
@@ -122,20 +122,20 @@ namespace llvm {
/// encoding for the OPC field to identify the compare. For example, 838
/// is VCMPGTSH.
VCMP,
-
+
/// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
- /// altivec VCMP*o instructions. For lack of better number, we use the
+ /// altivec VCMP*o instructions. For lack of better number, we use the
/// opcode number encoding for the OPC field to identify the compare. For
/// example, 838 is VCMPGTSH.
VCMPo,
-
+
/// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
/// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
/// condition register to branch on, OPC is the branch opcode to use (e.g.
/// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
/// an optional input flag argument.
COND_BRANCH,
-
+
// The following 5 instructions are used only as part of the
// long double-to-int conversion sequence.
@@ -150,7 +150,7 @@ namespace llvm {
MTFSB1,
/// F8RC, OUTFLAG = FADDRTZ F8RC, F8RC, INFLAG - This is an FADD done with
- /// rounding towards zero. It has flags added so it won't move past the
+ /// rounding towards zero. It has flags added so it won't move past the
/// FPSCR-setting instructions.
FADDRTZ,
@@ -174,14 +174,14 @@ namespace llvm {
/// STD_32 - This is the STD instruction for use with "32-bit" registers.
STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,
-
- /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
+
+ /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
/// byte-swapping store instruction. It byte-swaps the low "Type" bits of
/// the GPRC input, then stores it through Ptr. Type can be either i16 or
/// i32.
- STBRX,
-
- /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
+ STBRX,
+
+ /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
/// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
/// then puts it in the bottom bits of the GPRC. TYPE can be either i16
/// or i32.
@@ -194,7 +194,7 @@ namespace llvm {
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
-
+
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
@@ -208,16 +208,16 @@ namespace llvm {
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary);
-
+
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N, bool isUnary);
-
+
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
-
+
/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool isAllNegativeZeroVector(SDNode *N);
@@ -225,24 +225,26 @@ namespace llvm {
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize);
-
+
/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [124] -> [bhw].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
}
-
+
class PPCTargetLowering : public TargetLowering {
const PPCSubtarget &PPCSubTarget;
public:
explicit PPCTargetLowering(PPCTargetMachine &TM);
-
+
/// getTargetNodeName() - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
+
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
@@ -253,19 +255,19 @@ namespace llvm {
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const;
-
+
/// SelectAddressRegReg - Given the specified addressed, check to see if it
/// can be represented as an indexed [r+r] operation. Returns false if it
/// can be more efficiently represented with [r+imm].
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
SelectionDAG &DAG) const;
-
+
/// SelectAddressRegImm - Returns true if the address N can be represented
/// by a base register plus a signed 16-bit displacement [r+imm], and if it
/// is not better represented as reg+reg.
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
SelectionDAG &DAG) const;
-
+
/// SelectAddressRegRegOnly - Given the specified addressed, force it to be
/// represented as an indexed [r+r] operation.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
@@ -277,7 +279,7 @@ namespace llvm {
bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
SelectionDAG &DAG) const;
-
+
/// LowerOperation - Provide custom lowering hooks for some operations.
///
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@@ -289,10 +291,10 @@ namespace llvm {
SelectionDAG &DAG) const;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
-
+
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
@@ -300,13 +302,13 @@ namespace llvm {
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
- MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
+ MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
MachineBasicBlock *MBB, bool is64Bit,
unsigned BinOpcode) const;
- MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
- MachineBasicBlock *MBB,
+ MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr *MI,
+ MachineBasicBlock *MBB,
bool is8bit, unsigned Opcode) const;
-
+
ConstraintType getConstraintType(const std::string &Constraint) const;
/// Examine constraint string and operand type and determine a weight value.
@@ -314,7 +316,7 @@ namespace llvm {
ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
- std::pair<unsigned, const TargetRegisterClass*>
+ std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
@@ -329,11 +331,11 @@ namespace llvm {
char ConstraintLetter,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
-
+
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
-
+
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
@@ -344,7 +346,7 @@ namespace llvm {
virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
-
+
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. If DstAlign is zero that means it's safe to destination
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index d694f2e67e..90939c3120 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -59,9 +59,6 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
// Compute derived properties from the register classes
computeRegisterProperties();
- // Set shifts properties
- setShiftAmountType(MVT::i64);
-
// Provide all sorts of operation actions
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
diff --git a/lib/Target/SystemZ/SystemZISelLowering.h b/lib/Target/SystemZ/SystemZISelLowering.h
index 51d2df3a30..30192420dc 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/lib/Target/SystemZ/SystemZISelLowering.h
@@ -57,6 +57,8 @@ namespace llvm {
public:
explicit SystemZTargetLowering(SystemZTargetMachine &TM);
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i64; }
+
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index bbd3a230e3..2f49dbcebf 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -220,7 +220,6 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
static MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };
// X86 is weird, it always uses i8 for shift amounts and setcc results.
- setShiftAmountType(MVT::i8);
setBooleanContents(ZeroOrOneBooleanContent);
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(X86StackPtr);
@@ -4181,7 +4180,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp,
- DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
+ DAG.getConstant(NumBits,
+ TLI.getShiftAmountTy(SrcOp.getValueType()))));
}
SDValue
@@ -4330,15 +4330,15 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
// For AVX-length vectors, build the individual 128-bit pieces and
// use shuffles to put them in place.
- if (VT.getSizeInBits() > 256 &&
- Subtarget->hasAVX() &&
+ if (VT.getSizeInBits() > 256 &&
+ Subtarget->hasAVX() &&
!ISD::isBuildVectorAllZeros(Op.getNode())) {
SmallVector<SDValue, 8> V;
V.resize(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
V[i] = Op.getOperand(i);
}
-
+
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
// Build the lower subvector.
@@ -5046,7 +5046,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
DAG.getIntPtrConstant(Elt1 / 2));
if ((Elt1 & 1) == 0)
InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
- DAG.getConstant(8, TLI.getShiftAmountTy()));
+ DAG.getConstant(8,
+ TLI.getShiftAmountTy(InsElt.getValueType())));
else if (Elt0 >= 0)
InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
DAG.getConstant(0xFF00, MVT::i16));
@@ -5060,7 +5061,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
if ((Elt0 & 1) != 0)
InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
- DAG.getConstant(8, TLI.getShiftAmountTy()));
+ DAG.getConstant(8,
+ TLI.getShiftAmountTy(InsElt0.getValueType())));
else if (Elt1 >= 0)
InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
DAG.getConstant(0x00FF, MVT::i16));
@@ -5477,7 +5479,7 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
// Both of them can't be memory operations though.
if (MayFoldVectorLoad(V1) && MayFoldVectorLoad(V2))
CanFoldLoad = false;
-
+
if (CanFoldLoad) {
if (HasSSE2 && NumElems == 2)
return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
@@ -6090,7 +6092,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const {
SDValue ScaledN2 = N2;
if (Upper)
ScaledN2 = DAG.getNode(ISD::SUB, dl, N2.getValueType(), N2,
- DAG.getConstant(NumElems /
+ DAG.getConstant(NumElems /
(VT.getSizeInBits() / 128),
N2.getValueType()));
Op = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubN0.getValueType(), SubN0,
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index df78137ccd..6ec4a7de75 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -159,16 +159,16 @@ namespace llvm {
/// PSHUFB - Shuffle 16 8-bit values within a vector.
PSHUFB,
-
+
/// PANDN - and with not'd value.
PANDN,
-
+
/// PSIGNB/W/D - Copy integer sign.
- PSIGNB, PSIGNW, PSIGND,
-
+ PSIGNB, PSIGNW, PSIGND,
+
/// PBLENDVB - Variable blend
PBLENDVB,
-
+
/// FMAX, FMIN - Floating point max and min.
///
FMAX, FMIN,
@@ -212,7 +212,7 @@ namespace llvm {
// ADD, SUB, SMUL, etc. - Arithmetic operations with FLAGS results.
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
-
+
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
@@ -467,6 +467,8 @@ namespace llvm {
virtual unsigned getJumpTableEncoding() const;
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
+
virtual const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 828d6f92ca..4817787d75 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -42,9 +42,9 @@
using namespace llvm;
const char *XCoreTargetLowering::
-getTargetNodeName(unsigned Opcode) const
+getTargetNodeName(unsigned Opcode) const
{
- switch (Opcode)
+ switch (Opcode)
{
case XCoreISD::BL : return "XCoreISD::BL";
case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
@@ -77,7 +77,6 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Division is expensive
setIntDivIsCheap(false);
- setShiftAmountType(MVT::i32);
setStackPointerRegisterToSaveRestore(XCore::SP);
setSchedulingPreference(Sched::RegPressure);
@@ -95,7 +94,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Stop the combiner recombining select and set_cc
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
-
+
// 64bit
setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SUB, MVT::i64, Custom);
@@ -106,14 +105,14 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
-
+
// Bit Manipulation
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
-
+
setOperationAction(ISD::TRAP, MVT::Other, Legal);
-
+
// Jump tables.
setOperationAction(ISD::BR_JT, MVT::Other, Custom);
@@ -122,7 +121,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Thread Local Storage
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
-
+
// Conversion of i64 -> double produces constantpool nodes
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
@@ -143,7 +142,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
-
+
// Dynamic stack
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
@@ -163,7 +162,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
- switch (Op.getOpcode())
+ switch (Op.getOpcode())
{
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
@@ -414,7 +413,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = LD->getChain();
SDValue BasePtr = LD->getBasePtr();
DebugLoc DL = Op.getDebugLoc();
-
+
SDValue Base;
int64_t Offset;
if (!LD->isVolatile() &&
@@ -437,10 +436,10 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);
-
+
SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);
-
+
SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
LowAddr, MachinePointerInfo(), false, false, 0);
SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
@@ -453,7 +452,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, DL);
}
-
+
if (LD->getAlignment() == 2) {
SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
BasePtr, LD->getPointerInfo(), MVT::i16,
@@ -473,16 +472,16 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, DL);
}
-
+
// Lower to a call to __misaligned_load(BasePtr).
const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
-
+
Entry.Ty = IntPtrTy;
Entry.Node = BasePtr;
Args.push_back(Entry);
-
+
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, IntPtrTy, false, false,
false, false, 0, CallingConv::C, false,
@@ -515,7 +514,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
SDValue BasePtr = ST->getBasePtr();
SDValue Value = ST->getValue();
DebugLoc dl = Op.getDebugLoc();
-
+
if (ST->getAlignment() == 2) {
SDValue Low = Value;
SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
@@ -532,19 +531,19 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
ST->isNonTemporal(), 2);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
}
-
+
// Lower to a call to __misaligned_store(BasePtr, Value).
const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
-
+
Entry.Ty = IntPtrTy;
Entry.Node = BasePtr;
Args.push_back(Entry);
-
+
Entry.Node = Value;
Args.push_back(Entry);
-
+
std::pair<SDValue, SDValue> CallResult =
LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
false, false, 0, CallingConv::C, false,
@@ -722,7 +721,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
}
DebugLoc dl = N->getDebugLoc();
-
+
// Extract components
SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(0), DAG.getConstant(0, MVT::i32));
@@ -732,7 +731,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
N->getOperand(1), DAG.getConstant(0, MVT::i32));
SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(1), DAG.getConstant(1, MVT::i32));
-
+
// Expand
unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
XCoreISD::LSUB;
@@ -740,7 +739,7 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSL, RHSL, Zero);
SDValue Lo(Carry.getNode(), 1);
-
+
SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSH, RHSH, Carry);
SDValue Hi(Ignored.getNode(), 1);
@@ -761,8 +760,8 @@ LowerVAARG(SDValue Op, SelectionDAG &DAG) const
Node->getOperand(1), MachinePointerInfo(V),
false, false, 0);
// Increment the pointer, VAList, to the next vararg
- SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
- DAG.getConstant(VT.getSizeInBits(),
+ SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
+ DAG.getConstant(VT.getSizeInBits(),
getPointerTy()));
// Store the incremented VAList to the legalized pointer
Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
@@ -781,20 +780,20 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG) const
MachineFunction &MF = DAG.getMachineFunction();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
- return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
+ return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
MachinePointerInfo(), false, false, 0);
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const {
DebugLoc dl = Op.getDebugLoc();
- // Depths > 0 not supported yet!
+ // Depths > 0 not supported yet!
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
return SDValue();
-
+
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
- return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
+ return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
RegInfo->getFrameRegister(MF), MVT::i32);
}
@@ -919,7 +918,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
- Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
+ Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes,
getPointerTy(), true));
SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
@@ -944,8 +943,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break;
}
-
- // Arguments that can be passed on register must be kept at
+
+ // Arguments that can be passed on register must be kept at
// RegsToPass vector
if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
@@ -954,7 +953,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
int Offset = VA.getLocMemOffset();
- MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
+ MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
Chain, Arg,
DAG.getConstant(Offset/4, MVT::i32)));
}
@@ -963,16 +962,16 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because
// all store nodes are independent of each other.
if (!MemOpChains.empty())
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
- // Build a sequence of copy-to-reg nodes chained together with token
+ // Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers.
// The InFlag in necessary since all emited instructions must be
// stuck together.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
@@ -986,7 +985,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
// XCoreBranchLink = #chain, #target_address, #opt_in_flags...
- // = Chain, Callee, Reg#1, Reg#2, ...
+ // = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
@@ -994,7 +993,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Ops.push_back(Chain);
Ops.push_back(Callee);
- // Add argument registers to the end of the list so that they are
+ // Add argument registers to the end of the list so that they are
// known live into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
@@ -1098,11 +1097,11 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();
unsigned LRSaveSize = StackSlotSize;
-
+
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
-
+
if (VA.isRegLoc()) {
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
@@ -1139,12 +1138,12 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
+ InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
MachinePointerInfo::getFixedStack(FI),
false, false, 0));
}
}
-
+
if (isVarArg) {
/* Argument registers */
static const unsigned ArgRegs[] = {
@@ -1186,7 +1185,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
true));
}
}
-
+
return Chain;
}
@@ -1222,7 +1221,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
// Analize return values.
CCInfo.AnalyzeReturn(Outs, RetCC_XCore);
- // If this is the first return lowered for this function, add
+ // If this is the first return lowered for this function, add
// the regs to the liveout set for the function.
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
@@ -1237,7 +1236,7 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");
- Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag);
// guarantee that all emitted copies are
@@ -1265,7 +1264,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
DebugLoc dl = MI->getDebugLoc();
assert((MI->getOpcode() == XCore::SELECT_CC) &&
"Unexpected instr type to insert");
-
+
// To "insert" a SELECT_CC instruction, we actually have to insert the diamond
// control-flow pattern. The incoming instruction knows the destination vreg
// to set, the condition code register to branch on, the true/false values to
@@ -1273,7 +1272,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
const BasicBlock *LLVM_BB = BB->getBasicBlock();
MachineFunction::iterator It = BB;
++It;
-
+
// thisMBB:
// ...
// TrueVal = ...
@@ -1296,7 +1295,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// Next, add the true and fallthrough blocks as its successors.
BB->addSuccessor(copy0MBB);
BB->addSuccessor(sinkMBB);
-
+
BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
.addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
@@ -1304,10 +1303,10 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// %FalseValue = ...
// # fallthrough to sinkMBB
BB = copy0MBB;
-
+
// Update machine-CFG edges
BB->addSuccessor(sinkMBB);
-
+
// sinkMBB:
// %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
// ...
@@ -1316,7 +1315,7 @@ XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
TII.get(XCore::PHI), MI->getOperand(0).getReg())
.addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
.addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
-
+
MI->eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
@@ -1354,7 +1353,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
// fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
// low bit set
- if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
+ if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
@@ -1377,7 +1376,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
EVT VT = N0.getValueType();
// fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
- if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
+ if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
@@ -1393,7 +1392,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
// fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
// low bit set
- if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
+ if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
@@ -1557,7 +1556,7 @@ static inline bool isImmUs4(int64_t val)
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
-XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
if (Ty->getTypeID() == Type::VoidTyID)
return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);
@@ -1568,7 +1567,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
AM.BaseOffs%4 == 0;
}
-
+
switch (Size) {
case 1:
// reg + imm
@@ -1593,7 +1592,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
// reg + reg<<2
return AM.Scale == 4 && AM.BaseOffs == 0;
}
-
+
return false;
}
@@ -1603,7 +1602,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
std::vector<unsigned> XCoreTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- EVT VT) const
+ EVT VT) const
{
if (Constraint.size() != 1)
return std::vector<unsigned>();
@@ -1611,9 +1610,9 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
switch (Constraint[0]) {
default : break;
case 'r':
- return make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2,
- XCore::R3, XCore::R4, XCore::R5,
- XCore::R6, XCore::R7, XCore::R8,
+ return make_vector<unsigned>(XCore::R0, XCore::R1, XCore::R2,
+ XCore::R3, XCore::R4, XCore::R5,
+ XCore::R6, XCore::R7, XCore::R8,
XCore::R9, XCore::R10, XCore::R11, 0);
break;
}
diff --git a/lib/Target/XCore/XCoreISelLowering.h b/lib/Target/XCore/XCoreISelLowering.h
index 7e5dd2e8e5..bb3f2cc038 100644
--- a/lib/Target/XCore/XCoreISelLowering.h
+++ b/lib/Target/XCore/XCoreISelLowering.h
@@ -20,11 +20,11 @@
#include "XCore.h"
namespace llvm {
-
+
// Forward delcarations
class XCoreSubtarget;
class XCoreTargetMachine;
-
+
namespace XCoreISD {
enum NodeType {
// Start the numbering where the builtin ops and target ops leave off.
@@ -38,16 +38,16 @@ namespace llvm {
// dp relative address
DPRelativeWrapper,
-
+
// cp relative address
CPRelativeWrapper,
-
+
// Store word to stack
STWSP,
// Corresponds to retsp instruction
RETSP,
-
+
// Corresponds to LADD instruction
LADD,
@@ -74,13 +74,14 @@ namespace llvm {
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
- class XCoreTargetLowering : public TargetLowering
+ class XCoreTargetLowering : public TargetLowering
{
public:
explicit XCoreTargetLowering(XCoreTargetMachine &TM);
virtual unsigned getJumpTableEncoding() const;
+ virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; }
/// LowerOperation - Provide custom lowering hooks for some operations.
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
@@ -91,10 +92,10 @@ namespace llvm {
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const;
- /// getTargetNodeName - This method returns the name of a target specific
+ /// getTargetNodeName - This method returns the name of a target specific
// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
-
+
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
@@ -108,7 +109,7 @@ namespace llvm {
private:
const XCoreTargetMachine &TM;
const XCoreSubtarget &Subtarget;
-
+
// Lower Operand helpers
SDValue LowerCCCArguments(SDValue Chain,
CallingConv::ID CallConv,
@@ -148,12 +149,12 @@ namespace llvm {
SDValue LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
-
+
// Inline asm support
std::vector<unsigned>
getRegClassForInlineAsmConstraint(const std::string &Constraint,
EVT VT) const;
-
+
// Expand specifics
SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const;
SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG) const;