author    David Goodwin <david_goodwin@apple.com>  2009-07-02 22:18:33 +0000
committer David Goodwin <david_goodwin@apple.com>  2009-07-02 22:18:33 +0000
commit  b50ea5c48f8b1ce259e034ca5c16dc14af1a582c (patch)
tree    b704d8e0e95662d47240c2ac0411b76a5329be58 /lib/Target/ARM/ARMInstrInfo.cpp
parent  b717fb0fe0d41629ae07800869157b6d178c545f (diff)
Checkpoint refactoring of ThumbInstrInfo and ThumbRegisterInfo into Thumb1InstrInfo, Thumb2InstrInfo, Thumb1RegisterInfo and Thumb2RegisterInfo. Move methods from ARMInstrInfo to ARMBaseInstrInfo to prepare for sharing with Thumb2.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@74731 91177308-0d34-0410-b5e6-96231b3b80d8
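For orientation, here is a rough C++ sketch of the class layout this checkpoint works toward: the routines below migrate from ARMInstrInfo into the shared ARMBaseInstrInfo base, which the new Thumb1InstrInfo and Thumb2InstrInfo classes can then also build on. This is an illustrative sketch condensed from this diff and the commit message, not the actual ARMInstrInfo.h / ARMBaseInstrInfo.h headers; the TargetInstrInfoImpl base class and the ARMRegisterInfo RI member are assumptions inferred from the surrounding code (the constructor in this file initializes RI(*this, STI)).

```cpp
// Illustrative sketch only -- simplified from the headers this patch edits.
// Shared ARM/Thumb logic is hoisted into ARMBaseInstrInfo; the per-mode
// subclasses keep their own register-info objects and mode-specific hooks.
class ARMBaseInstrInfo : public TargetInstrInfoImpl {
public:
  explicit ARMBaseInstrInfo(const ARMSubtarget &STI);

  // Moved up from ARMInstrInfo by this patch (see the diff below):
  virtual bool isMoveInstr(const MachineInstr &MI,
                           unsigned &SrcReg, unsigned &DstReg,
                           unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
                                       int &FrameIndex) const;
  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const;
  virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
};

class ARMInstrInfo : public ARMBaseInstrInfo {
  ARMRegisterInfo RI;   // ARM-mode register info (assumed member name).
public:
  explicit ARMInstrInfo(const ARMSubtarget &STI);
};

class Thumb1InstrInfo : public ARMBaseInstrInfo { /* Thumb1-specific hooks */ };
class Thumb2InstrInfo : public ARMBaseInstrInfo { /* Thumb2-specific hooks */ };
```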
Diffstat (limited to 'lib/Target/ARM/ARMInstrInfo.cpp')
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.cpp | 599
1 file changed, 306 insertions, 293 deletions
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index 482867e6ec..443fdc742e 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -47,91 +47,6 @@ ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
: ARMBaseInstrInfo(STI), RI(*this, STI) {
}
-/// Return true if the instruction is a register to register move and
-/// leave the source and dest operands in the passed parameters.
-///
-bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
- unsigned &SrcReg, unsigned &DstReg,
- unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
- SrcSubIdx = DstSubIdx = 0; // No sub-registers.
-
- unsigned oc = MI.getOpcode();
- switch (oc) {
- default:
- return false;
- case ARM::FCPYS:
- case ARM::FCPYD:
- case ARM::VMOVD:
- case ARM::VMOVQ:
- SrcReg = MI.getOperand(1).getReg();
- DstReg = MI.getOperand(0).getReg();
- return true;
- case ARM::MOVr:
- assert(MI.getDesc().getNumOperands() >= 2 &&
- MI.getOperand(0).isReg() &&
- MI.getOperand(1).isReg() &&
- "Invalid ARM MOV instruction");
- SrcReg = MI.getOperand(1).getReg();
- DstReg = MI.getOperand(0).getReg();
- return true;
- }
-}
-
-unsigned ARMInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case ARM::LDR:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::FLDD:
- case ARM::FLDS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
- return 0;
-}
-
-unsigned ARMInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
- int &FrameIndex) const {
- switch (MI->getOpcode()) {
- default: break;
- case ARM::STR:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isReg() &&
- MI->getOperand(3).isImm() &&
- MI->getOperand(2).getReg() == 0 &&
- MI->getOperand(3).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- case ARM::FSTD:
- case ARM::FSTS:
- if (MI->getOperand(1).isFI() &&
- MI->getOperand(2).isImm() &&
- MI->getOperand(2).getImm() == 0) {
- FrameIndex = MI->getOperand(1).getIndex();
- return MI->getOperand(0).getReg();
- }
- break;
- }
-
- return 0;
-}
-
void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg,
@@ -335,10 +250,10 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
// Branch analysis.
bool
- ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
- MachineBasicBlock *&FBB,
- SmallVectorImpl<MachineOperand> &Cond,
- bool AllowModify) const {
+ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
// If the block has no terminators, it just falls into the block after it.
MachineBasicBlock::iterator I = MBB.end();
if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
@@ -478,11 +393,288 @@ ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
return 2;
}
-bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator I,
- unsigned DestReg, unsigned SrcReg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC) const {
+bool
+ARMBaseInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
+ if (MBB.empty()) return false;
+
+ switch (MBB.back().getOpcode()) {
+ case ARM::BX_RET: // Return.
+ case ARM::LDM_RET:
+ case ARM::tBX_RET:
+ case ARM::tBX_RET_vararg:
+ case ARM::tPOP_RET:
+ case ARM::B:
+ case ARM::tB:
+ case ARM::t2B: // Uncond branch.
+ case ARM::tBR_JTr:
+ case ARM::t2BR_JTr:
+ case ARM::BR_JTr: // Jumptable branch.
+ case ARM::t2BR_JTm:
+ case ARM::BR_JTm: // Jumptable branch through mem.
+ case ARM::t2BR_JTadd:
+ case ARM::BR_JTadd: // Jumptable branch add to pc.
+ return true;
+ default: return false;
+ }
+}
+
+bool ARMBaseInstrInfo::
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
+ Cond[0].setImm(ARMCC::getOppositeCondition(CC));
+ return false;
+}
+
+bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
+ int PIdx = MI->findFirstPredOperandIdx();
+ return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
+}
+
+bool ARMBaseInstrInfo::
+PredicateInstruction(MachineInstr *MI,
+ const SmallVectorImpl<MachineOperand> &Pred) const {
+ unsigned Opc = MI->getOpcode();
+ if (Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B) {
+ MI->setDesc(get((Opc == ARM::B) ? ARM::Bcc :
+ ((Opc == ARM::tB) ? ARM::tBcc : ARM::t2Bcc)));
+ MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
+ MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
+ return true;
+ }
+
+ int PIdx = MI->findFirstPredOperandIdx();
+ if (PIdx != -1) {
+ MachineOperand &PMO = MI->getOperand(PIdx);
+ PMO.setImm(Pred[0].getImm());
+ MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
+ return true;
+ }
+ return false;
+}
+
+bool ARMBaseInstrInfo::
+SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
+ const SmallVectorImpl<MachineOperand> &Pred2) const {
+ if (Pred1.size() > 2 || Pred2.size() > 2)
+ return false;
+
+ ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
+ ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
+ if (CC1 == CC2)
+ return true;
+
+ switch (CC1) {
+ default:
+ return false;
+ case ARMCC::AL:
+ return true;
+ case ARMCC::HS:
+ return CC2 == ARMCC::HI;
+ case ARMCC::LS:
+ return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
+ case ARMCC::GE:
+ return CC2 == ARMCC::GT;
+ case ARMCC::LE:
+ return CC2 == ARMCC::LT;
+ }
+}
+
+bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const {
+ const TargetInstrDesc &TID = MI->getDesc();
+ if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
+ return false;
+
+ bool Found = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.getReg() == ARM::CPSR) {
+ Pred.push_back(MO);
+ Found = true;
+ }
+ }
+
+ return Found;
+}
+
+
+/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
+static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
+ unsigned JTI) DISABLE_INLINE;
+static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
+ unsigned JTI) {
+ return JT[JTI].MBBs.size();
+}
+
+/// GetInstSize - Return the size of the specified MachineInstr.
+///
+unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
+ const MachineBasicBlock &MBB = *MI->getParent();
+ const MachineFunction *MF = MBB.getParent();
+ const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
+
+ // Basic size info comes from the TSFlags field.
+ const TargetInstrDesc &TID = MI->getDesc();
+ unsigned TSFlags = TID.TSFlags;
+
+ switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
+ default: {
+ // If this machine instr is an inline asm, measure it.
+ if (MI->getOpcode() == ARM::INLINEASM)
+ return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
+ if (MI->isLabel())
+ return 0;
+ switch (MI->getOpcode()) {
+ default:
+ assert(0 && "Unknown or unset size field for instr!");
+ break;
+ case TargetInstrInfo::IMPLICIT_DEF:
+ case TargetInstrInfo::DECLARE:
+ case TargetInstrInfo::DBG_LABEL:
+ case TargetInstrInfo::EH_LABEL:
+ return 0;
+ }
+ break;
+ }
+ case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
+ case ARMII::Size4Bytes: return 4; // Arm instruction.
+ case ARMII::Size2Bytes: return 2; // Thumb instruction.
+ case ARMII::SizeSpecial: {
+ switch (MI->getOpcode()) {
+ case ARM::CONSTPOOL_ENTRY:
+ // If this machine instr is a constant pool entry, its size is recorded as
+ // operand #2.
+ return MI->getOperand(2).getImm();
+ case ARM::Int_eh_sjlj_setjmp: return 12;
+ case ARM::BR_JTr:
+ case ARM::BR_JTm:
+ case ARM::BR_JTadd:
+ case ARM::t2BR_JTr:
+ case ARM::t2BR_JTm:
+ case ARM::t2BR_JTadd:
+ case ARM::tBR_JTr: {
+ // These are jumptable branches, i.e. a branch followed by an inlined
+ // jumptable. The size is 4 + 4 * number of entries.
+ unsigned NumOps = TID.getNumOperands();
+ MachineOperand JTOP =
+ MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
+ unsigned JTI = JTOP.getIndex();
+ const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
+ const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
+ assert(JTI < JT.size());
+ // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
+ // 4 aligned. The assembler / linker may add 2 byte padding just before
+ // the JT entries. The size does not include this padding; the
+ // constant islands pass does separate bookkeeping for it.
+ // FIXME: If we know the size of the function is less than (1 << 16) *2
+ // bytes, we can use 16-bit entries instead. Then there won't be an
+ // alignment issue.
+ return getNumJTEntries(JT, JTI) * 4 +
+ ((MI->getOpcode()==ARM::tBR_JTr) ? 2 : 4);
+ }
+ default:
+ // Otherwise, pseudo-instruction sizes are zero.
+ return 0;
+ }
+ }
+ }
+ return 0; // Not reached
+}
+
+/// Return true if the instruction is a register to register move and
+/// leave the source and dest operands in the passed parameters.
+///
+bool
+ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
+ SrcSubIdx = DstSubIdx = 0; // No sub-registers.
+
+ unsigned oc = MI.getOpcode();
+ switch (oc) {
+ default:
+ return false;
+ case ARM::FCPYS:
+ case ARM::FCPYD:
+ case ARM::VMOVD:
+ case ARM::VMOVQ:
+ SrcReg = MI.getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ return true;
+ case ARM::MOVr:
+ assert(MI.getDesc().getNumOperands() >= 2 &&
+ MI.getOperand(0).isReg() &&
+ MI.getOperand(1).isReg() &&
+ "Invalid ARM MOV instruction");
+ SrcReg = MI.getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ return true;
+ }
+}
+
+unsigned
+ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ default: break;
+ case ARM::LDR:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isReg() &&
+ MI->getOperand(3).isImm() &&
+ MI->getOperand(2).getReg() == 0 &&
+ MI->getOperand(3).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::FLDD:
+ case ARM::FLDS:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isImm() &&
+ MI->getOperand(2).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ }
+ return 0;
+}
+
+unsigned
+ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ switch (MI->getOpcode()) {
+ default: break;
+ case ARM::STR:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isReg() &&
+ MI->getOperand(3).isImm() &&
+ MI->getOperand(2).getReg() == 0 &&
+ MI->getOperand(3).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ case ARM::FSTD:
+ case ARM::FSTS:
+ if (MI->getOperand(1).isFI() &&
+ MI->getOperand(2).isImm() &&
+ MI->getOperand(2).getImm() == 0) {
+ FrameIndex = MI->getOperand(1).getIndex();
+ return MI->getOperand(0).getReg();
+ }
+ break;
+ }
+
+ return 0;
+}
+
+bool
+ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ unsigned DestReg, unsigned SrcReg,
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC) const {
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
@@ -508,7 +700,7 @@ bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
return true;
}
-void ARMInstrInfo::
+void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC) const {
@@ -531,11 +723,12 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
-void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
- bool isKill,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
- SmallVectorImpl<MachineInstr*> &NewMIs) const{
+void
+ARMBaseInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
+ bool isKill,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineInstr*> &NewMIs) const{
DebugLoc DL = DebugLoc::getUnknownLoc();
unsigned Opc = 0;
if (RC == ARM::GPRRegisterClass) {
@@ -556,7 +749,7 @@ void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
return;
}
-void ARMInstrInfo::
+void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
unsigned DestReg, int FI,
const TargetRegisterClass *RC) const {
@@ -576,7 +769,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
}
}
-void ARMInstrInfo::
+void ARMBaseInstrInfo::
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
@@ -600,7 +793,7 @@ loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
return;
}
-MachineInstr *ARMInstrInfo::
+MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
const SmallVectorImpl<unsigned> &Ops, int FI) const {
if (Ops.size() != 1) return NULL;
@@ -688,9 +881,17 @@ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
return NewMI;
}
+MachineInstr*
+ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr* MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr* LoadMI) const {
+ return 0;
+}
+
bool
-ARMInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
- const SmallVectorImpl<unsigned> &Ops) const {
+ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
if (Ops.size() != 1) return false;
unsigned Opc = MI->getOpcode();
@@ -710,191 +911,3 @@ ARMInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
return false;
}
-
-bool
-ARMBaseInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
- if (MBB.empty()) return false;
-
- switch (MBB.back().getOpcode()) {
- case ARM::BX_RET: // Return.
- case ARM::LDM_RET:
- case ARM::tBX_RET:
- case ARM::tBX_RET_vararg:
- case ARM::tPOP_RET:
- case ARM::B:
- case ARM::tB:
- case ARM::t2B: // Uncond branch.
- case ARM::tBR_JTr:
- case ARM::t2BR_JTr:
- case ARM::BR_JTr: // Jumptable branch.
- case ARM::t2BR_JTm:
- case ARM::BR_JTm: // Jumptable branch through mem.
- case ARM::t2BR_JTadd:
- case ARM::BR_JTadd: // Jumptable branch add to pc.
- return true;
- default: return false;
- }
-}
-
-bool ARMBaseInstrInfo::
-ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
- ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
- Cond[0].setImm(ARMCC::getOppositeCondition(CC));
- return false;
-}
-
-bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
- int PIdx = MI->findFirstPredOperandIdx();
- return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
-}
-
-bool ARMBaseInstrInfo::
-PredicateInstruction(MachineInstr *MI,
- const SmallVectorImpl<MachineOperand> &Pred) const {
- unsigned Opc = MI->getOpcode();
- if (Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B) {
- MI->setDesc(get((Opc == ARM::B) ? ARM::Bcc :
- ((Opc == ARM::tB) ? ARM::tBcc : ARM::t2Bcc)));
- MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
- MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
- return true;
- }
-
- int PIdx = MI->findFirstPredOperandIdx();
- if (PIdx != -1) {
- MachineOperand &PMO = MI->getOperand(PIdx);
- PMO.setImm(Pred[0].getImm());
- MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
- return true;
- }
- return false;
-}
-
-bool ARMBaseInstrInfo::
-SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
- const SmallVectorImpl<MachineOperand> &Pred2) const {
- if (Pred1.size() > 2 || Pred2.size() > 2)
- return false;
-
- ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
- ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
- if (CC1 == CC2)
- return true;
-
- switch (CC1) {
- default:
- return false;
- case ARMCC::AL:
- return true;
- case ARMCC::HS:
- return CC2 == ARMCC::HI;
- case ARMCC::LS:
- return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
- case ARMCC::GE:
- return CC2 == ARMCC::GT;
- case ARMCC::LE:
- return CC2 == ARMCC::LT;
- }
-}
-
-bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
- std::vector<MachineOperand> &Pred) const {
- const TargetInstrDesc &TID = MI->getDesc();
- if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
- return false;
-
- bool Found = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- const MachineOperand &MO = MI->getOperand(i);
- if (MO.isReg() && MO.getReg() == ARM::CPSR) {
- Pred.push_back(MO);
- Found = true;
- }
- }
-
- return Found;
-}
-
-
-/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
-static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
- unsigned JTI) DISABLE_INLINE;
-static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
- unsigned JTI) {
- return JT[JTI].MBBs.size();
-}
-
-/// GetInstSize - Return the size of the specified MachineInstr.
-///
-unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
- const MachineBasicBlock &MBB = *MI->getParent();
- const MachineFunction *MF = MBB.getParent();
- const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
-
- // Basic size info comes from the TSFlags field.
- const TargetInstrDesc &TID = MI->getDesc();
- unsigned TSFlags = TID.TSFlags;
-
- switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
- default: {
- // If this machine instr is an inline asm, measure it.
- if (MI->getOpcode() == ARM::INLINEASM)
- return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
- if (MI->isLabel())
- return 0;
- switch (MI->getOpcode()) {
- default:
- assert(0 && "Unknown or unset size field for instr!");
- break;
- case TargetInstrInfo::IMPLICIT_DEF:
- case TargetInstrInfo::DECLARE:
- case TargetInstrInfo::DBG_LABEL:
- case TargetInstrInfo::EH_LABEL:
- return 0;
- }
- break;
- }
- case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
- case ARMII::Size4Bytes: return 4; // Arm instruction.
- case ARMII::Size2Bytes: return 2; // Thumb instruction.
- case ARMII::SizeSpecial: {
- switch (MI->getOpcode()) {
- case ARM::CONSTPOOL_ENTRY:
- // If this machine instr is a constant pool entry, its size is recorded as
- // operand #2.
- return MI->getOperand(2).getImm();
- case ARM::Int_eh_sjlj_setjmp: return 12;
- case ARM::BR_JTr:
- case ARM::BR_JTm:
- case ARM::BR_JTadd:
- case ARM::t2BR_JTr:
- case ARM::t2BR_JTm:
- case ARM::t2BR_JTadd:
- case ARM::tBR_JTr: {
- // These are jumptable branches, i.e. a branch followed by an inlined
- // jumptable. The size is 4 + 4 * number of entries.
- unsigned NumOps = TID.getNumOperands();
- MachineOperand JTOP =
- MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
- unsigned JTI = JTOP.getIndex();
- const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
- const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
- assert(JTI < JT.size());
- // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
- // 4 aligned. The assembler / linker may add 2 byte padding just before
- // the JT entries. The size does not include this padding; the
- // constant islands pass does separate bookkeeping for it.
- // FIXME: If we know the size of the function is less than (1 << 16) *2
- // bytes, we can use 16-bit entries instead. Then there won't be an
- // alignment issue.
- return getNumJTEntries(JT, JTI) * 4 +
- ((MI->getOpcode()==ARM::tBR_JTr) ? 2 : 4);
- }
- default:
- // Otherwise, pseudo-instruction sizes are zero.
- return 0;
- }
- }
- }
- return 0; // Not reached
-}