Diffstat (limited to 'lib/Target/ARM/ARMFastISel.cpp')
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp  76
1 files changed, 41 insertions, 35 deletions
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index fcf5b75679..c3a5e6472e 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -178,24 +178,24 @@ class ARMFastISel : public FastISel {
bool isLoadTypeLegal(Type *Ty, MVT &VT);
bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
bool isZExt);
- bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+ bool ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
unsigned Alignment = 0, bool isZExt = true,
bool allocReg = true);
- bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
+ bool ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
unsigned Alignment = 0);
bool ARMComputeAddress(const Value *Obj, Address &Addr);
void ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3);
bool ARMIsMemCpySmall(uint64_t Len);
bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
unsigned Alignment);
- unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
- unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
- unsigned ARMMaterializeInt(const Constant *C, MVT VT);
- unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
- unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
- unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
+ unsigned ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT, bool isZExt);
+ unsigned ARMMaterializeFP(const ConstantFP *CFP, EVT VT);
+ unsigned ARMMaterializeInt(const Constant *C, EVT VT);
+ unsigned ARMMaterializeGV(const GlobalValue *GV, EVT VT);
+ unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg);
+ unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg);
unsigned ARMSelectCallOp(bool UseReg);
- unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, MVT VT);
+ unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT);
// Call handling routines.
private:
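
The hunk above widens these helper signatures from MVT, which can only name machine-legal value types, to EVT, which can also describe extended IR types that have no machine equivalent; the function bodies later in the diff then guard with isSimple() before touching the underlying MVT. A standalone sketch of that distinction, assuming the LLVM 3.2-era ValueTypes API this file uses (demoSimpleCheck is an illustrative name, not part of the patch):

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// i32 maps onto a simple machine type; i17 does not, so isSimple() lets a
// fast-path selector reject it instead of asserting inside getSimpleVT().
static bool demoSimpleCheck(LLVMContext &Ctx) {
  EVT Legal = EVT::getIntegerVT(Ctx, 32);  // simple: wraps MVT::i32
  EVT Odd   = EVT::getIntegerVT(Ctx, 17);  // extended: no MVT equivalent
  return Legal.isSimple() && !Odd.isSimple();
}
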
@@ -487,7 +487,7 @@ unsigned ARMFastISel::FastEmitInst_extractsubreg(MVT RetVT,
// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
-unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
+unsigned ARMFastISel::ARMMoveToFPReg(EVT VT, unsigned SrcReg) {
if (VT == MVT::f64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
@@ -497,7 +497,7 @@ unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
return MoveReg;
}
-unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
+unsigned ARMFastISel::ARMMoveToIntReg(EVT VT, unsigned SrcReg) {
if (VT == MVT::i64) return 0;
unsigned MoveReg = createResultReg(TLI.getRegClassFor(VT));
@@ -510,7 +510,7 @@ unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
// For double width floating point we need to materialize two constants
// (the high and the low) into integer registers then use a move to get
// the combined constant into an FP reg.
-unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
+unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, EVT VT) {
const APFloat Val = CFP->getValueAPF();
bool is64bit = VT == MVT::f64;
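
The comment in this hunk describes building an f64 constant from its high and low 32-bit halves, materialized into integer registers and then combined into an FP register. As a plain-C++ illustration of just the splitting step (not code from the patch; the combine itself would be a VMOVDRR-style instruction):

#include <cstdint>
#include <cstring>

// Split a double's bit pattern into the two 32-bit words that would be
// materialized into integer registers before the FP-register combine.
static void splitF64(double D, uint32_t &Lo, uint32_t &Hi) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof Bits);     // reinterpret without UB
  Lo = static_cast<uint32_t>(Bits);        // low word
  Hi = static_cast<uint32_t>(Bits >> 32);  // high word
}
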
@@ -554,7 +554,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
return DestReg;
}
-unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
+unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, EVT VT) {
if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
return false;
@@ -616,7 +616,7 @@ unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
return DestReg;
}
-unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
+unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) {
// For now 32-bit only.
if (VT != MVT::i32) return 0;
@@ -719,7 +719,10 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
}
unsigned ARMFastISel::TargetMaterializeConstant(const Constant *C) {
- MVT VT = TLI.getSimpleValueType(C->getType(), true);
+ EVT VT = TLI.getValueType(C->getType(), true);
+
+ // Only handle simple types.
+ if (!VT.isSimple()) return 0;
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
return ARMMaterializeFP(CFP, VT);
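
The TargetMaterializeConstant change above shows the pattern the rest of the patch repeats: fetch the EVT, bail out if it is not simple, and only then dispatch on the underlying MVT. A minimal sketch of that guard in isolation (classifyIntWidth is a hypothetical helper, not part of the patch):

#include "llvm/CodeGen/ValueTypes.h"

using namespace llvm;

// Return the byte width for the scalar integer types the fast-isel paths
// handle, or 0 for anything they should reject, mirroring the
// "if (!VT.isSimple()) return 0;" checks added in this commit.
static unsigned classifyIntWidth(EVT VT) {
  if (!VT.isSimple())
    return 0;                       // extended/vector types: no MVT::SimpleTy
  switch (VT.getSimpleVT().SimpleTy) {
  default:       return 0;          // unsupported (NEON/vector, FP, ...)
  case MVT::i1:
  case MVT::i8:  return 1;
  case MVT::i16: return 2;
  case MVT::i32: return 4;
  }
}
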
@@ -1000,13 +1003,14 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
AddOptionalDefs(MIB);
}
-bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
+bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
unsigned Alignment, bool isZExt, bool allocReg) {
+ assert(VT.isSimple() && "Non-simple types are invalid here!");
unsigned Opc;
bool useAM3 = false;
bool needVMOV = false;
const TargetRegisterClass *RC;
- switch (VT.SimpleTy) {
+ switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1:
@@ -1123,11 +1127,11 @@ bool ARMFastISel::SelectLoad(const Instruction *I) {
return true;
}
-bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
+bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
unsigned Alignment) {
unsigned StrOpc;
bool useAM3 = false;
- switch (VT.SimpleTy) {
+ switch (VT.getSimpleVT().SimpleTy) {
// This is mostly going to be Neon/vector support.
default: return false;
case MVT::i1: {
@@ -1401,7 +1405,8 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
bool isZExt) {
Type *Ty = Src1Value->getType();
- MVT SrcVT = TLI.getSimpleValueType(Ty, true);
+ EVT SrcVT = TLI.getValueType(Ty, true);
+ if (!SrcVT.isSimple()) return false;
bool isFloat = (Ty->isFloatTy() || Ty->isDoubleTy());
if (isFloat && !Subtarget->hasVFP2())
@@ -1438,7 +1443,7 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
unsigned CmpOpc;
bool isICmp = true;
bool needsExt = false;
- switch (SrcVT.SimpleTy) {
+ switch (SrcVT.getSimpleVT().SimpleTy) {
default: return false;
// TODO: Verify compares.
case MVT::f32:
@@ -1590,7 +1595,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
return false;
Value *Src = I->getOperand(0);
- MVT SrcVT = TLI.getSimpleValueType(Src->getType(), true);
+ EVT SrcVT = TLI.getValueType(Src->getType(), true);
if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
return false;
@@ -1599,7 +1604,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
// Handle sign-extension.
if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
- MVT DestVT = MVT::i32;
+ EVT DestVT = MVT::i32;
SrcReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT,
/*isZExt*/!isSigned);
if (SrcReg == 0) return false;
@@ -1806,7 +1811,7 @@ bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
}
bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
- MVT VT = TLI.getSimpleValueType(I->getType(), true);
+ EVT VT = TLI.getValueType(I->getType(), true);
// We can get here in the case when we want to use NEON for our fp
// operations, but can't figure out how to. Just use the vfp instructions
@@ -2050,7 +2055,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
if (RVLocs.size() == 2 && RetVT == MVT::f64) {
// For this move we copy into two registers and then move into the
// double fp reg we want.
- MVT DestVT = RVLocs[0].getValVT();
+ EVT DestVT = RVLocs[0].getValVT();
const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
unsigned ResultReg = createResultReg(DstRC);
AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
@@ -2065,7 +2070,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
UpdateValueMap(I, ResultReg);
} else {
assert(RVLocs.size() == 1 &&"Can't handle non-double multi-reg retvals!");
- MVT CopyVT = RVLocs[0].getValVT();
+ EVT CopyVT = RVLocs[0].getValVT();
// Special handling for extended integers.
if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
@@ -2124,8 +2129,8 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
return false;
unsigned SrcReg = Reg + VA.getValNo();
- MVT RVVT = TLI.getSimpleValueType(RV->getType());
- MVT DestVT = VA.getValVT();
+ EVT RVVT = TLI.getValueType(RV->getType());
+ EVT DestVT = VA.getValVT();
// Special handling for extended integers.
if (RVVT != DestVT) {
if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
@@ -2170,7 +2175,7 @@ unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false,
GlobalValue::ExternalLinkage, 0, Name);
- return ARMMaterializeGV(GV, TLI.getSimpleValueType(GV->getType()));
+ return ARMMaterializeGV(GV, TLI.getValueType(GV->getType()));
}
// A quick function that will emit a call for a named libcall in F with the
@@ -2582,7 +2587,7 @@ bool ARMFastISel::SelectTrunc(const Instruction *I) {
return true;
}
-unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
+unsigned ARMFastISel::ARMEmitIntExt(EVT SrcVT, unsigned SrcReg, EVT DestVT,
bool isZExt) {
if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
return 0;
@@ -2590,7 +2595,8 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
unsigned Opc;
bool isBoolZext = false;
const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::i32);
- switch (SrcVT.SimpleTy) {
+ if (!SrcVT.isSimple()) return 0;
+ switch (SrcVT.getSimpleVT().SimpleTy) {
default: return 0;
case MVT::i16:
if (!Subtarget->hasV6Ops()) return 0;
@@ -2637,9 +2643,9 @@ bool ARMFastISel::SelectIntExt(const Instruction *I) {
Value *Src = I->getOperand(0);
Type *SrcTy = Src->getType();
- MVT SrcVT, DestVT;
- SrcVT = TLI.getSimpleValueType(SrcTy, true);
- DestVT = TLI.getSimpleValueType(DestTy, true);
+ EVT SrcVT, DestVT;
+ SrcVT = TLI.getValueType(SrcTy, true);
+ DestVT = TLI.getValueType(DestTy, true);
bool isZExt = isa<ZExtInst>(I);
unsigned SrcReg = getRegForValue(Src);
@@ -2824,7 +2830,7 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
}
unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV,
- unsigned Align, MVT VT) {
+ unsigned Align, EVT VT) {
bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility();
ARMConstantPoolConstant *CPV =
ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT);