path: root/lib/CodeGen/ScheduleDAGInstrs.cpp
author    Evan Cheng <evan.cheng@apple.com>  2011-06-28 19:10:37 +0000
committer Evan Cheng <evan.cheng@apple.com>  2011-06-28 19:10:37 +0000
commit e837dead3c8dc3445ef6a0e2322179c57e264a13 (patch)
tree   57adf441e9376d2922b205181d6bad180c5dc80a /lib/CodeGen/ScheduleDAGInstrs.cpp
parent 9bbe4d6c004f25bc491e2583cce7bc91891f68c7 (diff)
- Rename TargetInstrDesc, TargetOperandInfo to MCInstrDesc and MCOperandInfo and
  sink them into the MC layer.
- Added MCInstrInfo, which captures the tablegen-generated static data. Change
  TargetInstrInfo so it's based off MCInstrInfo.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@134021 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/ScheduleDAGInstrs.cpp')
-rw-r--r--  lib/CodeGen/ScheduleDAGInstrs.cpp | 32
1 file changed, 16 insertions, 16 deletions
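
For readers unfamiliar with the rename described in the commit message, here is a
minimal sketch of the before/after usage pattern that the diff below applies
throughout BuildSchedGraph. It is an illustration only, not part of the commit;
the helper function name is hypothetical, and the header path reflects where this
commit sinks the descriptor class.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"  // new home; was llvm/Target/TargetInstrDesc.h

// Hypothetical helper, for illustration only.
static bool isMemOp(const llvm::MachineInstr *MI) {
  // Before this commit: const TargetInstrDesc &TID = MI->getDesc();
  // After this commit, the same accessor returns the MC-layer descriptor:
  const llvm::MCInstrDesc &MCID = MI->getDesc();
  return MCID.mayLoad() || MCID.mayStore();
}

Since getDesc() already returned the tablegen-generated descriptor, callers only
need the new type name (and variable renames like TID -> MCID); the call sites
themselves are otherwise unchanged, which is exactly what the hunks below show.
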
diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index 2363df429e..94941ecf23 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -236,13 +236,13 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
continue;
}
- const TargetInstrDesc &TID = MI->getDesc();
- assert(!TID.isTerminator() && !MI->isLabel() &&
+ const MCInstrDesc &MCID = MI->getDesc();
+ assert(!MCID.isTerminator() && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
// Create the SUnit for this MI.
SUnit *SU = NewSUnit(MI);
- SU->isCall = TID.isCall();
- SU->isCommutable = TID.isCommutable();
+ SU->isCall = MCID.isCall();
+ SU->isCommutable = MCID.isCommutable();
// Assign the Latency field of SU using target-provided information.
if (UnitLatencies)
@@ -309,13 +309,13 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
if (SpecialAddressLatency != 0 && !UnitLatencies &&
UseSU != &ExitSU) {
MachineInstr *UseMI = UseSU->getInstr();
- const TargetInstrDesc &UseTID = UseMI->getDesc();
+ const MCInstrDesc &UseMCID = UseMI->getDesc();
int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
if (RegUseIndex >= 0 &&
- (UseTID.mayLoad() || UseTID.mayStore()) &&
- (unsigned)RegUseIndex < UseTID.getNumOperands() &&
- UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
+ (UseMCID.mayLoad() || UseMCID.mayStore()) &&
+ (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
+ UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
LDataLatency += SpecialAddressLatency;
}
// Adjust the dependence latency using operand def/use
@@ -352,17 +352,17 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
unsigned Count = I->second.second;
const MachineInstr *UseMI = UseMO->getParent();
unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
- const TargetInstrDesc &UseTID = UseMI->getDesc();
+ const MCInstrDesc &UseMCID = UseMI->getDesc();
// TODO: If we knew the total depth of the region here, we could
// handle the case where the whole loop is inside the region but
// is large enough that the isScheduleHigh trick isn't needed.
- if (UseMOIdx < UseTID.getNumOperands()) {
+ if (UseMOIdx < UseMCID.getNumOperands()) {
// Currently, we only support scheduling regions consisting of
// single basic blocks. Check to see if the instruction is in
// the same region by checking to see if it has the same parent.
if (UseMI->getParent() != MI->getParent()) {
unsigned Latency = SU->Latency;
- if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
+ if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
Latency += SpecialAddressLatency;
// This is a wild guess as to the portion of the latency which
// will be overlapped by work done outside the current
@@ -374,7 +374,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
/*isMustAlias=*/false,
/*isArtificial=*/true));
} else if (SpecialAddressLatency > 0 &&
- UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
+ UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
// The entire loop body is within the current scheduling region
// and the latency of this operation is assumed to be greater
// than the latency of the loop.
@@ -417,9 +417,9 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
unsigned TrueMemOrderLatency = 0;
- if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
+ if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
(MI->hasVolatileMemoryRef() &&
- (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+ (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
// Be conservative with these and add dependencies on all memory
// references, even those that are known to not alias.
for (std::map<const Value *, SUnit *>::iterator I =
@@ -458,7 +458,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
PendingLoads.clear();
AliasMemDefs.clear();
AliasMemUses.clear();
- } else if (TID.mayStore()) {
+ } else if (MCID.mayStore()) {
bool MayAlias = true;
TrueMemOrderLatency = STORE_LOAD_LATENCY;
if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
@@ -514,7 +514,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
/*Reg=*/0, /*isNormalMemory=*/false,
/*isMustAlias=*/false,
/*isArtificial=*/true));
- } else if (TID.mayLoad()) {
+ } else if (MCID.mayLoad()) {
bool MayAlias = true;
TrueMemOrderLatency = 0;
if (MI->isInvariantLoad(AA)) {