author     Andrew Trick <atrick@apple.com>  2013-12-07 05:59:44 +0000
committer  Andrew Trick <atrick@apple.com>  2013-12-07 05:59:44 +0000
commit     dcddd7146d9d990d4a551bc3192f8c585bbdc1af
tree       7a77bf95086475e66319a7bc2a78e959260a9562
parent     a49701db7d54967aea8a511743fedcfb9b056eea
Factor out the SchedRemainder/SchedBoundary from GenericScheduler strategy.
These helper classes take care of the book-keeping that drives the
GenericScheduler heuristics. It is likely that developers writing
target-specific schedulers that work similarly to GenericScheduler will want
to use these helpers too. The immediate goal is to develop a
GenericPostScheduler that can run in place of the old PostRAScheduler, but
will use the new machine model.

No functionality change intended.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196643 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  include/llvm/CodeGen/MachineScheduler.h  |  338
-rw-r--r--  include/llvm/CodeGen/TargetSchedule.h     |    8
-rw-r--r--  lib/CodeGen/MachineScheduler.cpp          | 1056
3 files changed, 729 insertions, 673 deletions
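The point of the patch is that SchedRemainder and SchedBoundary become
reusable outside GenericScheduler. To make the intended reuse concrete, here
is a minimal sketch of a top-down-only strategy built on the factored-out
helpers. It is not part of the patch: the class name PostRASketch is made up,
the tie-break rule is deliberately trivial, and only calls declared in the
header diff below are assumed.

    // Hypothetical sketch (not in this patch): a bare-bones top-down
    // MachineSchedStrategy that reuses SchedBoundary/SchedRemainder.
    class PostRASketch : public MachineSchedStrategy {
      SchedRemainder Rem;
      SchedBoundary Top;
    public:
      PostRASketch(): Top(SchedBoundary::TopQID, "SketchTopQ") {}

      virtual void initialize(ScheduleDAGMI *DAG) {
        Rem.init(DAG, DAG->getSchedModel());
        Top.init(DAG, DAG->getSchedModel(), &Rem);
        // Mirror GenericScheduler::initialize: the boundary owns a hazard
        // recognizer, created lazily from the target.
        if (!Top.HazardRec) {
          const InstrItineraryData *Itin =
            DAG->getSchedModel()->getInstrItineraries();
          Top.HazardRec = DAG->MF.getTarget().getInstrInfo()->
            CreateTargetMIHazardRecognizer(Itin, DAG);
        }
      }
      virtual SUnit *pickNode(bool &IsTopNode) {
        IsTopNode = true;
        if (Top.Available.empty() && Top.Pending.empty())
          return NULL; // region fully scheduled
        // pickOnlyChoice() defers hazards and advances the cycle; it returns
        // NULL only when several candidates are ready in the same cycle.
        SUnit *SU = Top.pickOnlyChoice();
        if (!SU)
          SU = *Top.Available.begin(); // trivial (hypothetical) tie-breaker
        Top.removeReady(SU);
        return SU;
      }
      virtual void schedNode(SUnit *SU, bool IsTopNode) {
        Top.bumpNode(SU); // update cycle, micro-op, and resource book-keeping
      }
      virtual void releaseTopNode(SUnit *SU) { Top.releaseTopNode(SU); }
      virtual void releaseBottomNode(SUnit *SU) {} // top-down only
    };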
diff --git a/include/llvm/CodeGen/MachineScheduler.h b/include/llvm/CodeGen/MachineScheduler.h
index 7782895334..920abff28c 100644
--- a/include/llvm/CodeGen/MachineScheduler.h
+++ b/include/llvm/CodeGen/MachineScheduler.h
@@ -93,6 +93,7 @@ class MachineLoopInfo;
class RegisterClassInfo;
class ScheduleDAGInstrs;
class SchedDFSResult;
+class ScheduleHazardRecognizer;
/// MachineSchedContext provides enough context from the MachineScheduler pass
/// for the target to instantiate a scheduler.
@@ -204,63 +205,6 @@ public:
virtual void releaseBottomNode(SUnit *SU) = 0;
};
-/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
-/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
-/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
-///
-/// This is a convenience class that may be used by implementations of
-/// MachineSchedStrategy.
-class ReadyQueue {
- unsigned ID;
- std::string Name;
- std::vector<SUnit*> Queue;
-
-public:
- ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
-
- unsigned getID() const { return ID; }
-
- StringRef getName() const { return Name; }
-
-  // SU is in this queue if its NodeQueueId is a superset of this ID.
- bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
-
- bool empty() const { return Queue.empty(); }
-
- void clear() { Queue.clear(); }
-
- unsigned size() const { return Queue.size(); }
-
- typedef std::vector<SUnit*>::iterator iterator;
-
- iterator begin() { return Queue.begin(); }
-
- iterator end() { return Queue.end(); }
-
- ArrayRef<SUnit*> elements() { return Queue; }
-
- iterator find(SUnit *SU) {
- return std::find(Queue.begin(), Queue.end(), SU);
- }
-
- void push(SUnit *SU) {
- Queue.push_back(SU);
- SU->NodeQueueId |= ID;
- }
-
- iterator remove(iterator I) {
- (*I)->NodeQueueId &= ~ID;
- *I = Queue.back();
- unsigned idx = I - Queue.begin();
- Queue.pop_back();
- return Queue.begin() + idx;
- }
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
- void dump();
-#endif
-};
-
/// Mutate the DAG as a postpass after normal DAG building.
class ScheduleDAGMutation {
virtual void anchor();
@@ -470,6 +414,286 @@ protected:
void releasePredecessors(SUnit *SU);
};
+//===----------------------------------------------------------------------===//
+///
+/// Helpers for implementing custom MachineSchedStrategy classes. These take
+/// care of the book-keeping associated with list scheduling heuristics.
+///
+//===----------------------------------------------------------------------===//
+
+/// ReadyQueue encapsulates a vector of "ready" SUnits with basic convenience
+/// methods for pushing and removing nodes. ReadyQueues are uniquely identified
+/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
+///
+/// This is a convenience class that may be used by implementations of
+/// MachineSchedStrategy.
+class ReadyQueue {
+ unsigned ID;
+ std::string Name;
+ std::vector<SUnit*> Queue;
+
+public:
+ ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
+
+ unsigned getID() const { return ID; }
+
+ StringRef getName() const { return Name; }
+
+  // SU is in this queue if its NodeQueueId is a superset of this ID.
+ bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
+
+ bool empty() const { return Queue.empty(); }
+
+ void clear() { Queue.clear(); }
+
+ unsigned size() const { return Queue.size(); }
+
+ typedef std::vector<SUnit*>::iterator iterator;
+
+ iterator begin() { return Queue.begin(); }
+
+ iterator end() { return Queue.end(); }
+
+ ArrayRef<SUnit*> elements() { return Queue; }
+
+ iterator find(SUnit *SU) {
+ return std::find(Queue.begin(), Queue.end(), SU);
+ }
+
+ void push(SUnit *SU) {
+ Queue.push_back(SU);
+ SU->NodeQueueId |= ID;
+ }
+
+ iterator remove(iterator I) {
+ (*I)->NodeQueueId &= ~ID;
+ *I = Queue.back();
+ unsigned idx = I - Queue.begin();
+ Queue.pop_back();
+ return Queue.begin() + idx;
+ }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump();
+#endif
+};
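As a usage illustration (not part of the patch): push() sets the queue's ID
bit in SUnit::NodeQueueId, and remove() clears it and fills the hole by
swapping the last element in, so removal is constant-time but does not
preserve queue order.

    // Illustrative only; SU stands for some SUnit* that is already ready.
    ReadyQueue Q(/*id=*/1, "TopQ.A");
    Q.push(SU);                       // SU->NodeQueueId |= 1
    assert(Q.isInQueue(SU));
    ReadyQueue::iterator I = Q.find(SU);
    I = Q.remove(I);                  // SU->NodeQueueId &= ~1; swap-with-back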
+
+/// Summarize the unscheduled region.
+struct SchedRemainder {
+ // Critical path through the DAG in expected latency.
+ unsigned CriticalPath;
+ unsigned CyclicCritPath;
+
+ // Scaled count of micro-ops left to schedule.
+ unsigned RemIssueCount;
+
+ bool IsAcyclicLatencyLimited;
+
+ // Unscheduled resources
+ SmallVector<unsigned, 16> RemainingCounts;
+
+ void reset() {
+ CriticalPath = 0;
+ CyclicCritPath = 0;
+ RemIssueCount = 0;
+ IsAcyclicLatencyLimited = false;
+ RemainingCounts.clear();
+ }
+
+ SchedRemainder() { reset(); }
+
+ void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
+};
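The "scaled" counts here and in SchedBoundary are in the common units produced
by TargetSchedModel's resource factors. A worked illustration, assuming the
LCM-based factors that TargetSchedModel derives from the machine model (the
resource names ALU and Div are made up):

    IssueWidth = 4, NumUnits(ALU) = 2, NumUnits(Div) = 1
    LCM           = lcm(4, 2, 1)         = 4
    MicroOpFactor = LCM / IssueWidth     = 1
    Factor(ALU)   = LCM / NumUnits(ALU)  = 2
    Factor(Div)   = LCM / NumUnits(Div)  = 4
    LatencyFactor = LCM                  = 4

On this scale, one fully issued cycle (4 micro-ops x factor 1) and one cycle
of the saturated Div unit (1 cycle x factor 4) both count 4 units, which is
what makes RemIssueCount and RemainingCounts directly comparable.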
+
+/// Each scheduling boundary is associated with ready queues. It tracks the
+/// current cycle in the direction of movement, and maintains the state
+/// of "hazards" and other interlocks at the current cycle.
+class SchedBoundary {
+public:
+ /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+ enum {
+ TopQID = 1,
+ BotQID = 2,
+ LogMaxQID = 2
+ };
+
+ ScheduleDAGMI *DAG;
+ const TargetSchedModel *SchedModel;
+ SchedRemainder *Rem;
+
+ ReadyQueue Available;
+ ReadyQueue Pending;
+
+ ScheduleHazardRecognizer *HazardRec;
+
+private:
+ /// True if the pending Q should be checked/updated before scheduling another
+ /// instruction.
+ bool CheckPending;
+
+ // For heuristics, keep a list of the nodes that immediately depend on the
+ // most recently scheduled node.
+ SmallPtrSet<const SUnit*, 8> NextSUs;
+
+ /// Number of cycles it takes to issue the instructions scheduled in this
+ /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
+ /// See getStalls().
+ unsigned CurrCycle;
+
+ /// Micro-ops issued in the current cycle
+ unsigned CurrMOps;
+
+ /// MinReadyCycle - Cycle of the soonest available instruction.
+ unsigned MinReadyCycle;
+
+ // The expected latency of the critical path in this scheduled zone.
+ unsigned ExpectedLatency;
+
+ // The latency of dependence chains leading into this zone.
+  // For each node scheduled bottom-up: DLat = max(DLat, N.Depth).
+ // For each cycle scheduled: DLat -= 1.
+ unsigned DependentLatency;
+
+ /// Count the scheduled (issued) micro-ops that can be retired by
+ /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
+ unsigned RetiredMOps;
+
+ // Count scheduled resources that have been executed. Resources are
+ // considered executed if they become ready in the time that it takes to
+ // saturate any resource including the one in question. Counts are scaled
+ // for direct comparison with other resources. Counts can be compared with
+ // MOps * getMicroOpFactor and Latency * getLatencyFactor.
+ SmallVector<unsigned, 16> ExecutedResCounts;
+
+ /// Cache the max count for a single resource.
+ unsigned MaxExecutedResCount;
+
+  // Cache the ID of the critical resource in this scheduled zone.
+ unsigned ZoneCritResIdx;
+
+ // Is the scheduled region resource limited vs. latency limited.
+ bool IsResourceLimited;
+
+ // Record the highest cycle at which each resource has been reserved by a
+ // scheduled instruction.
+ SmallVector<unsigned, 16> ReservedCycles;
+
+#ifndef NDEBUG
+ // Remember the greatest operand latency as an upper bound on the number of
+ // times we should retry the pending queue because of a hazard.
+ unsigned MaxObservedLatency;
+#endif
+
+public:
+ /// Pending queues extend the ready queues with the same ID and the
+ /// PendingFlag set.
+ SchedBoundary(unsigned ID, const Twine &Name):
+ DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
+ Pending(ID << LogMaxQID, Name+".P"),
+ HazardRec(0) {
+ reset();
+ }
+
+ ~SchedBoundary();
+
+ void reset();
+
+ void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
+ SchedRemainder *rem);
+
+ bool isTop() const {
+ return Available.getID() == TopQID;
+ }
+
+ /// Number of cycles to issue the instructions scheduled in this zone.
+ unsigned getCurrCycle() const { return CurrCycle; }
+
+ /// Micro-ops issued in the current cycle
+ unsigned getCurrMOps() const { return CurrMOps; }
+
+ /// Return true if the given SU is used by the most recently scheduled
+ /// instruction.
+ bool isNextSU(const SUnit *SU) const { return NextSUs.count(SU); }
+
+ // The latency of dependence chains leading into this zone.
+ unsigned getDependentLatency() const { return DependentLatency; }
+
+ /// Get the number of latency cycles "covered" by the scheduled
+ /// instructions. This is the larger of the critical path within the zone
+ /// and the number of cycles required to issue the instructions.
+ unsigned getScheduledLatency() const {
+ return std::max(ExpectedLatency, CurrCycle);
+ }
+
+ unsigned getUnscheduledLatency(SUnit *SU) const {
+ return isTop() ? SU->getHeight() : SU->getDepth();
+ }
+
+ unsigned getResourceCount(unsigned ResIdx) const {
+ return ExecutedResCounts[ResIdx];
+ }
+
+ /// Get the scaled count of scheduled micro-ops and resources, including
+ /// executed resources.
+ unsigned getCriticalCount() const {
+ if (!ZoneCritResIdx)
+ return RetiredMOps * SchedModel->getMicroOpFactor();
+ return getResourceCount(ZoneCritResIdx);
+ }
+
+ /// Get a scaled count for the minimum execution time of the scheduled
+ /// micro-ops that are ready to execute by getExecutedCount. Notice the
+ /// feedback loop.
+ unsigned getExecutedCount() const {
+ return std::max(CurrCycle * SchedModel->getLatencyFactor(),
+ MaxExecutedResCount);
+ }
+
+ unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
+
+ // Is the scheduled region resource limited vs. latency limited.
+ bool isResourceLimited() const { return IsResourceLimited; }
+
+ /// Get the difference between the given SUnit's ready time and the current
+ /// cycle.
+ unsigned getLatencyStallCycles(SUnit *SU);
+
+ unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);
+
+ bool checkHazard(SUnit *SU);
+
+ unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
+
+ unsigned getOtherResourceCount(unsigned &OtherCritIdx);
+
+ void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+ void releaseTopNode(SUnit *SU);
+
+ void releaseBottomNode(SUnit *SU);
+
+ void bumpCycle(unsigned NextCycle);
+
+ void incExecutedResources(unsigned PIdx, unsigned Count);
+
+ unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
+
+ void bumpNode(SUnit *SU);
+
+ void releasePending();
+
+ void removeReady(SUnit *SU);
+
+ /// Call this before applying any other heuristics to the Available queue.
+ /// Updates the Available/Pending Q's if necessary and returns the single
+ /// available instruction, or NULL if there are multiple candidates.
+ SUnit *pickOnlyChoice();
+
+#ifndef NDEBUG
+ void dumpScheduledState();
+#endif
+};
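Because each Pending queue is constructed with the Available queue's ID
shifted left by LogMaxQID (see the constructor above), the four queues of a
bidirectional strategy occupy disjoint bits of SUnit::NodeQueueId:

    Top.Available: TopQID              = 1  (bit 0)
    Bot.Available: BotQID              = 2  (bit 1)
    Top.Pending:   TopQID << LogMaxQID = 4  (bit 2)
    Bot.Pending:   BotQID << LogMaxQID = 8  (bit 3)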
+
} // namespace llvm
#endif
diff --git a/include/llvm/CodeGen/TargetSchedule.h b/include/llvm/CodeGen/TargetSchedule.h
index 8ef26b7ca5..19a172beea 100644
--- a/include/llvm/CodeGen/TargetSchedule.h
+++ b/include/llvm/CodeGen/TargetSchedule.h
@@ -98,6 +98,14 @@ public:
return SchedModel.getProcResource(PIdx);
}
+#ifndef NDEBUG
+ const char *getResourceName(unsigned PIdx) const {
+ if (!PIdx)
+ return "MOps";
+ return SchedModel.getProcResource(PIdx)->Name;
+ }
+#endif
+
typedef const MCWriteProcResEntry *ProcResIter;
// \brief Get an iterator into the processor resources consumed by this
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index de1968f48b..c228de2b5f 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -1319,378 +1319,45 @@ void CopyConstrain::apply(ScheduleDAGMI *DAG) {
}
//===----------------------------------------------------------------------===//
-// GenericScheduler - Implementation of the generic MachineSchedStrategy.
-//===----------------------------------------------------------------------===//
+// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
+// and possibly other custom schedulers.
+//===----------------------------------------------------------------------===//
static const unsigned InvalidCycle = ~0U;
-namespace {
-/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
-/// the schedule.
-class GenericScheduler : public MachineSchedStrategy {
-public:
- /// Represent the type of SchedCandidate found within a single queue.
- /// pickNodeBidirectional depends on these listed by decreasing priority.
- enum CandReason {
- NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax,
- ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
- TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
-
-#ifndef NDEBUG
- static const char *getReasonStr(GenericScheduler::CandReason Reason);
-#endif
-
- /// Policy for scheduling the next instruction in the candidate's zone.
- struct CandPolicy {
- bool ReduceLatency;
- unsigned ReduceResIdx;
- unsigned DemandResIdx;
-
- CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
- };
-
- /// Status of an instruction's critical resource consumption.
- struct SchedResourceDelta {
- // Count critical resources in the scheduled region required by SU.
- unsigned CritResources;
-
- // Count critical resources from another region consumed by SU.
- unsigned DemandedResources;
-
- SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
-
- bool operator==(const SchedResourceDelta &RHS) const {
- return CritResources == RHS.CritResources
- && DemandedResources == RHS.DemandedResources;
- }
- bool operator!=(const SchedResourceDelta &RHS) const {
- return !operator==(RHS);
- }
- };
-
- /// Store the state used by GenericScheduler heuristics, required for the
- /// lifetime of one invocation of pickNode().
- struct SchedCandidate {
- CandPolicy Policy;
-
- // The best SUnit candidate.
- SUnit *SU;
-
- // The reason for this candidate.
- CandReason Reason;
-
- // Set of reasons that apply to multiple candidates.
- uint32_t RepeatReasonSet;
-
- // Register pressure values for the best candidate.
- RegPressureDelta RPDelta;
-
- // Critical resource consumption of the best candidate.
- SchedResourceDelta ResDelta;
-
- SchedCandidate(const CandPolicy &policy)
- : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}
-
- bool isValid() const { return SU; }
-
- // Copy the status of another candidate without changing policy.
- void setBest(SchedCandidate &Best) {
- assert(Best.Reason != NoCand && "uninitialized Sched candidate");
- SU = Best.SU;
- Reason = Best.Reason;
- RPDelta = Best.RPDelta;
- ResDelta = Best.ResDelta;
- }
-
- bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
- void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
-
- void initResourceDelta(const ScheduleDAGMI *DAG,
- const TargetSchedModel *SchedModel);
- };
-
- /// Summarize the unscheduled region.
- struct SchedRemainder {
- // Critical path through the DAG in expected latency.
- unsigned CriticalPath;
- unsigned CyclicCritPath;
-
- // Scaled count of micro-ops left to schedule.
- unsigned RemIssueCount;
-
- bool IsAcyclicLatencyLimited;
-
- // Unscheduled resources
- SmallVector<unsigned, 16> RemainingCounts;
-
- void reset() {
- CriticalPath = 0;
- CyclicCritPath = 0;
- RemIssueCount = 0;
- IsAcyclicLatencyLimited = false;
- RemainingCounts.clear();
- }
-
- SchedRemainder() { reset(); }
-
- void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
- };
-
-  /// Each scheduling boundary is associated with ready queues. It tracks the
- /// current cycle in the direction of movement, and maintains the state
- /// of "hazards" and other interlocks at the current cycle.
- struct SchedBoundary {
- ScheduleDAGMI *DAG;
- const TargetSchedModel *SchedModel;
- SchedRemainder *Rem;
-
- ReadyQueue Available;
- ReadyQueue Pending;
- bool CheckPending;
-
- // For heuristics, keep a list of the nodes that immediately depend on the
- // most recently scheduled node.
- SmallPtrSet<const SUnit*, 8> NextSUs;
-
- ScheduleHazardRecognizer *HazardRec;
-
- /// Number of cycles it takes to issue the instructions scheduled in this
- /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
- /// See getStalls().
- unsigned CurrCycle;
-
- /// Micro-ops issued in the current cycle
- unsigned CurrMOps;
-
- /// MinReadyCycle - Cycle of the soonest available instruction.
- unsigned MinReadyCycle;
-
- // The expected latency of the critical path in this scheduled zone.
- unsigned ExpectedLatency;
-
- // The latency of dependence chains leading into this zone.
-    // For each node scheduled bottom-up: DLat = max(DLat, N.Depth).
- // For each cycle scheduled: DLat -= 1.
- unsigned DependentLatency;
-
- /// Count the scheduled (issued) micro-ops that can be retired by
- /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
- unsigned RetiredMOps;
-
- // Count scheduled resources that have been executed. Resources are
- // considered executed if they become ready in the time that it takes to
- // saturate any resource including the one in question. Counts are scaled
- // for direct comparison with other resources. Counts can be compared with
- // MOps * getMicroOpFactor and Latency * getLatencyFactor.
- SmallVector<unsigned, 16> ExecutedResCounts;
-
- /// Cache the max count for a single resource.
- unsigned MaxExecutedResCount;
-
-    // Cache the ID of the critical resource in this scheduled zone.
- unsigned ZoneCritResIdx;
-
- // Is the scheduled region resource limited vs. latency limited.
- bool IsResourceLimited;
-
- // Record the highest cycle at which each resource has been reserved by a
- // scheduled instruction.
- SmallVector<unsigned, 16> ReservedCycles;
-
-#ifndef NDEBUG
- // Remember the greatest operand latency as an upper bound on the number of
- // times we should retry the pending queue because of a hazard.
- unsigned MaxObservedLatency;
-#endif
-
- void reset() {
- // A new HazardRec is created for each DAG and owned by SchedBoundary.
- // Destroying and reconstructing it is very expensive though. So keep
- // invalid, placeholder HazardRecs.
- if (HazardRec && HazardRec->isEnabled()) {
- delete HazardRec;
- HazardRec = 0;
- }
- Available.clear();
- Pending.clear();
- CheckPending = false;
- NextSUs.clear();
- CurrCycle = 0;
- CurrMOps = 0;
- MinReadyCycle = UINT_MAX;
- ExpectedLatency = 0;
- DependentLatency = 0;
- RetiredMOps = 0;
- MaxExecutedResCount = 0;
- ZoneCritResIdx = 0;
- IsResourceLimited = false;
- ReservedCycles.clear();
-#ifndef NDEBUG
- MaxObservedLatency = 0;
-#endif
- // Reserve a zero-count for invalid CritResIdx.
- ExecutedResCounts.resize(1);
- assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
- }
-
- /// Pending queues extend the ready queues with the same ID and the
- /// PendingFlag set.
- SchedBoundary(unsigned ID, const Twine &Name):
- DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
- Pending(ID << GenericScheduler::LogMaxQID, Name+".P"),
- HazardRec(0) {
- reset();
- }
-
- ~SchedBoundary() { delete HazardRec; }
-
- void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
- SchedRemainder *rem);
-
- bool isTop() const {
- return Available.getID() == GenericScheduler::TopQID;
- }
-
-#ifndef NDEBUG
- const char *getResourceName(unsigned PIdx) {
- if (!PIdx)
- return "MOps";
- return SchedModel->getProcResource(PIdx)->Name;
- }
-#endif
-
- /// Get the number of latency cycles "covered" by the scheduled
- /// instructions. This is the larger of the critical path within the zone
- /// and the number of cycles required to issue the instructions.
- unsigned getScheduledLatency() const {
- return std::max(ExpectedLatency, CurrCycle);
- }
-
- unsigned getUnscheduledLatency(SUnit *SU) const {
- return isTop() ? SU->getHeight() : SU->getDepth();
- }
-
- unsigned getResourceCount(unsigned ResIdx) const {
- return ExecutedResCounts[ResIdx];
- }
-
- /// Get the scaled count of scheduled micro-ops and resources, including
- /// executed resources.
- unsigned getCriticalCount() const {
- if (!ZoneCritResIdx)
- return RetiredMOps * SchedModel->getMicroOpFactor();
- return getResourceCount(ZoneCritResIdx);
- }
-
- /// Get a scaled count for the minimum execution time of the scheduled
- /// micro-ops that are ready to execute by getExecutedCount. Notice the
- /// feedback loop.
- unsigned getExecutedCount() const {
- return std::max(CurrCycle * SchedModel->getLatencyFactor(),
- MaxExecutedResCount);
- }
-
- /// Get the difference between the given SUnit's ready time and the current
- /// cycle.
- unsigned getLatencyStallCycles(SUnit *SU);
-
- unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);
-
- bool checkHazard(SUnit *SU);
-
- unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
-
- unsigned getOtherResourceCount(unsigned &OtherCritIdx);
-
- void setPolicy(CandPolicy &Policy, SchedBoundary &OtherZone);
-
- void releaseNode(SUnit *SU, unsigned ReadyCycle);
-
- void bumpCycle(unsigned NextCycle);
-
- void incExecutedResources(unsigned PIdx, unsigned Count);
-
- unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
-
- void bumpNode(SUnit *SU);
-
- void releasePending();
-
- void removeReady(SUnit *SU);
-
- SUnit *pickOnlyChoice();
-
-#ifndef NDEBUG
- void dumpScheduledState();
-#endif
- };
-
-private:
- const MachineSchedContext *Context;
- ScheduleDAGMI *DAG;
- const TargetSchedModel *SchedModel;
- const TargetRegisterInfo *TRI;
-
- // State of the top and bottom scheduled instruction boundaries.
- SchedRemainder Rem;
- SchedBoundary Top;
- SchedBoundary Bot;
-
- MachineSchedPolicy RegionPolicy;
-public:
- /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
- enum {
- TopQID = 1,
- BotQID = 2,
- LogMaxQID = 2
- };
-
- GenericScheduler(const MachineSchedContext *C):
- Context(C), DAG(0), SchedModel(0), TRI(0),
- Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {}
-
- virtual void initPolicy(MachineBasicBlock::iterator Begin,
- MachineBasicBlock::iterator End,
- unsigned NumRegionInstrs);
-
- bool shouldTrackPressure() const { return RegionPolicy.ShouldTrackPressure; }
-
- virtual void initialize(ScheduleDAGMI *dag);
-
- virtual SUnit *pickNode(bool &IsTopNode);
-
- virtual void schedNode(SUnit *SU, bool IsTopNode);
-
- virtual void releaseTopNode(SUnit *SU);
-
- virtual void releaseBottomNode(SUnit *SU);
-
- virtual void registerRoots();
-
-protected:
- void checkAcyclicLatency();
-
- void tryCandidate(SchedCandidate &Cand,
- SchedCandidate &TryCand,
- SchedBoundary &Zone,
- const RegPressureTracker &RPTracker,
- RegPressureTracker &TempTracker);
-
- SUnit *pickNodeBidirectional(bool &IsTopNode);
-
- void pickNodeFromQueue(SchedBoundary &Zone,
- const RegPressureTracker &RPTracker,
- SchedCandidate &Candidate);
-
- void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+SchedBoundary::~SchedBoundary() { delete HazardRec; }
+void SchedBoundary::reset() {
+ // A new HazardRec is created for each DAG and owned by SchedBoundary.
+ // Destroying and reconstructing it is very expensive though. So keep
+ // invalid, placeholder HazardRecs.
+ if (HazardRec && HazardRec->isEnabled()) {
+ delete HazardRec;
+ HazardRec = 0;
+ }
+ Available.clear();
+ Pending.clear();
+ CheckPending = false;
+ NextSUs.clear();
+ CurrCycle = 0;
+ CurrMOps = 0;
+ MinReadyCycle = UINT_MAX;
+ ExpectedLatency = 0;
+ DependentLatency = 0;
+ RetiredMOps = 0;
+ MaxExecutedResCount = 0;
+ ZoneCritResIdx = 0;
+ IsResourceLimited = false;
+ ReservedCycles.clear();
#ifndef NDEBUG
- void traceCandidate(const SchedCandidate &Cand);
+ MaxObservedLatency = 0;
#endif
-};
-} // namespace
+ // Reserve a zero-count for invalid CritResIdx.
+ ExecutedResCounts.resize(1);
+ assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
+}
-void GenericScheduler::SchedRemainder::
+void SchedRemainder::
init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
reset();
if (!SchedModel->hasInstrSchedModel())
@@ -1711,7 +1378,7 @@ init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
}
}
-void GenericScheduler::SchedBoundary::
+void SchedBoundary::
init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
reset();
DAG = dag;
@@ -1723,167 +1390,6 @@ init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
}
}
-/// Initialize the per-region scheduling policy.
-void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
- MachineBasicBlock::iterator End,
- unsigned NumRegionInstrs) {
- const TargetMachine &TM = Context->MF->getTarget();
-
- // Avoid setting up the register pressure tracker for small regions to save
- // compile time. As a rough heuristic, only track pressure when the number of
- // schedulable instructions exceeds half the integer register file.
- unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
- TM.getTargetLowering()->getRegClassFor(MVT::i32));
-
- RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
-
- // For generic targets, we default to bottom-up, because it's simpler and more
- // compile-time optimizations have been implemented in that direction.
- RegionPolicy.OnlyBottomUp = true;
-
- // Allow the subtarget to override default policy.
- const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
- ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
-
- // After subtarget overrides, apply command line options.
- if (!EnableRegPressure)
- RegionPolicy.ShouldTrackPressure = false;
-
-  // Check whether -misched-topdown/bottomup can force or unforce the
-  // scheduling direction.
- // e.g. -misched-bottomup=false allows scheduling in both directions.
- assert((!ForceTopDown || !ForceBottomUp) &&
- "-misched-topdown incompatible with -misched-bottomup");
- if (ForceBottomUp.getNumOccurrences() > 0) {
- RegionPolicy.OnlyBottomUp = ForceBottomUp;
- if (RegionPolicy.OnlyBottomUp)
- RegionPolicy.OnlyTopDown = false;
- }
- if (ForceTopDown.getNumOccurrences() > 0) {
- RegionPolicy.OnlyTopDown = ForceTopDown;
- if (RegionPolicy.OnlyTopDown)
- RegionPolicy.OnlyBottomUp = false;
- }
-}
-
-void GenericScheduler::initialize(ScheduleDAGMI *dag) {
- DAG = dag;
- SchedModel = DAG->getSchedModel();
- TRI = DAG->TRI;
-
- Rem.init(DAG, SchedModel);
- Top.init(DAG, SchedModel, &Rem);
- Bot.init(DAG, SchedModel, &Rem);
-
- // Initialize resource counts.
-
- // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
- // are disabled, then these HazardRecs will be disabled.
- const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
- const TargetMachine &TM = DAG->MF.getTarget();
- if (!Top.HazardRec) {
- Top.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
- }
- if (!Bot.HazardRec) {
- Bot.HazardRec =
- TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
- }
-}
-
-void GenericScheduler::releaseTopNode(SUnit *SU) {
- if (SU->isScheduled)
- return;
-
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isWeak())
- continue;
- unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
- unsigned Latency = I->getLatency();
-#ifndef NDEBUG
- Top.MaxObservedLatency = std::max(Latency, Top.MaxObservedLatency);
-#endif
- if (SU->TopReadyCycle < PredReadyCycle + Latency)
- SU->TopReadyCycle = PredReadyCycle + Latency;
- }
- Top.releaseNode(SU, SU->TopReadyCycle);
-}
-
-void GenericScheduler::releaseBottomNode(SUnit *SU) {
- if (SU->isScheduled)
- return;
-
- assert(SU->getInstr() && "Scheduled SUnit must have instr");
-
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isWeak())
- continue;
- unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
- unsigned Latency = I->getLatency();
-#ifndef NDEBUG
- Bot.MaxObservedLatency = std::max(Latency, Bot.MaxObservedLatency);
-#endif
- if (SU->BotReadyCycle < SuccReadyCycle + Latency)
- SU->BotReadyCycle = SuccReadyCycle + Latency;
- }
- Bot.releaseNode(SU, SU->BotReadyCycle);
-}
-
-/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
-/// critical path by more cycles than it takes to drain the instruction buffer.
-/// We estimate an upper bound on in-flight instructions as:
-///
-/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
-/// InFlightIterations = AcyclicPath / CyclesPerIteration
-/// InFlightResources = InFlightIterations * LoopResources
-///
-/// TODO: Check execution resources in addition to IssueCount.
-void GenericScheduler::checkAcyclicLatency() {
- if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
- return;
-
- // Scaled number of cycles per loop iteration.
- unsigned IterCount =
- std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
- Rem.RemIssueCount);
- // Scaled acyclic critical path.
- unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
- // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
- unsigned InFlightCount =
- (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
- unsigned BufferLimit =
- SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
-
- Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
-
- DEBUG(dbgs() << "IssueCycles="
- << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
- << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
- << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
- << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
- << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
- if (Rem.IsAcyclicLatencyLimited)
- dbgs() << " ACYCLIC LATENCY LIMIT\n");
-}
-
-void GenericScheduler::registerRoots() {
- Rem.CriticalPath = DAG->ExitSU.getDepth();
-
- // Some roots may not feed into ExitSU. Check all of them in case.
- for (std::vector<SUnit*>::const_iterator
- I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
- if ((*I)->getDepth() > Rem.CriticalPath)
- Rem.CriticalPath = (*I)->getDepth();
- }
- DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
-
- if (EnableCyclicPath) {
- Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
- checkAcyclicLatency();
- }
-}
-
/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
/// these "soft stalls" differently than the hard stall cycles based on CPU
/// resources and computed by checkHazard(). A fully in-order model
@@ -1891,7 +1397,7 @@ void GenericScheduler::registerRoots() {
/// available for scheduling until they are ready. However, a weaker in-order
/// model may use this for heuristics. For example, if a processor has in-order
/// behavior when reading certain resources, this may come into play.
-unsigned GenericScheduler::SchedBoundary::getLatencyStallCycles(SUnit *SU) {
+unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
if (!SU->isUnbuffered)
return 0;
@@ -1903,7 +1409,7 @@ unsigned GenericScheduler::SchedBoundary::getLatencyStallCycles(SUnit *SU) {
/// Compute the next cycle at which the given processor resource can be
/// scheduled.
-unsigned GenericScheduler::SchedBoundary::
+unsigned SchedBoundary::
getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
unsigned NextUnreserved = ReservedCycles[PIdx];
// If this resource has never been used, always return cycle zero.
@@ -1928,7 +1434,7 @@ getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
/// can dispatch per cycle.
///
/// TODO: Also check whether the SU must start a new group.
-bool GenericScheduler::SchedBoundary::checkHazard(SUnit *SU) {
+bool SchedBoundary::checkHazard(SUnit *SU) {
if (HazardRec->isEnabled())
return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
@@ -1951,7 +1457,7 @@ bool GenericScheduler::SchedBoundary::checkHazard(SUnit *SU) {
}
// Find the unscheduled node in ReadySUs with the highest latency.
-unsigned GenericScheduler::SchedBoundary::
+unsigned SchedBoundary::
findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
SUnit *LateSU = 0;
unsigned RemLatency = 0;
@@ -1973,7 +1479,7 @@ findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
// resource index, or zero if the zone is issue limited.
-unsigned GenericScheduler::SchedBoundary::
+unsigned SchedBoundary::
getOtherResourceCount(unsigned &OtherCritIdx) {
OtherCritIdx = 0;
if (!SchedModel->hasInstrSchedModel())
@@ -1994,74 +1500,12 @@ getOtherResourceCount(unsigned &OtherCritIdx) {
if (OtherCritIdx) {
DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
<< OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
- << " " << getResourceName(OtherCritIdx) << "\n");
+ << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
}
return OtherCritCount;
}
-/// Set the CandPolicy for this zone given the current resources and latencies
-/// inside and outside the zone.
-void GenericScheduler::SchedBoundary::setPolicy(CandPolicy &Policy,
- SchedBoundary &OtherZone) {
-  // Apply preemptive heuristics based on the total latency and resources
- // inside and outside this zone. Potential stalls should be considered before
- // following this policy.
-
- // Compute remaining latency. We need this both to determine whether the
- // overall schedule has become latency-limited and whether the instructions
- // outside this zone are resource or latency limited.
- //
- // The "dependent" latency is updated incrementally during scheduling as the
- // max height/depth of scheduled nodes minus the cycles since it was
- // scheduled:
-  //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
- //
- // The "independent" latency is the max ready queue depth:
- // ILat = max N.depth for N in Available|Pending
- //
- // RemainingLatency is the greater of independent and dependent latency.
- unsigned RemLatency = DependentLatency;
- RemLatency = std::max(RemLatency, findMaxLatency(Available.elements()));
- RemLatency = std::max(RemLatency, findMaxLatency(Pending.elements()));
-
- // Compute the critical resource outside the zone.
- unsigned OtherCritIdx;
- unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
-
- bool OtherResLimited = false;
- if (SchedModel->hasInstrSchedModel()) {
- unsigned LFactor = SchedModel->getLatencyFactor();
- OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
- }
- if (!OtherResLimited && (RemLatency + CurrCycle > Rem->CriticalPath)) {
- Policy.ReduceLatency |= true;
- DEBUG(dbgs() << " " << Available.getName() << " RemainingLatency "
- << RemLatency << " + " << CurrCycle << "c > CritPath "
- << Rem->CriticalPath << "\n");
- }
- // If the same resource is limiting inside and outside the zone, do nothing.
- if (ZoneCritResIdx == OtherCritIdx)
- return;
-
- DEBUG(
- if (IsResourceLimited) {
- dbgs() << " " << Available.getName() << " ResourceLimited: "
- << getResourceName(ZoneCritResIdx) << "\n";
- }
- if (OtherResLimited)
- dbgs() << " RemainingLimit: " << getResourceName(OtherCritIdx) << "\n";
- if (!IsResourceLimited && !OtherResLimited)
- dbgs() << " Latency limited both directions.\n");
-
- if (IsResourceLimited && !Policy.ReduceResIdx)
- Policy.ReduceResIdx = ZoneCritResIdx;
-
- if (OtherResLimited)
- Policy.DemandResIdx = OtherCritIdx;
-}
-
-void GenericScheduler::SchedBoundary::releaseNode(SUnit *SU,
- unsigned ReadyCycle) {
+void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
if (ReadyCycle < MinReadyCycle)
MinReadyCycle = ReadyCycle;
@@ -2077,8 +1521,48 @@ void GenericScheduler::SchedBoundary::releaseNode(SUnit *SU,
NextSUs.insert(SU);
}
+void SchedBoundary::releaseTopNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isWeak())
+ continue;
+ unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle;
+ unsigned Latency = I->getLatency();
+#ifndef NDEBUG
+ MaxObservedLatency = std::max(Latency, MaxObservedLatency);
+#endif
+ if (SU->TopReadyCycle < PredReadyCycle + Latency)
+ SU->TopReadyCycle = PredReadyCycle + Latency;
+ }
+ releaseNode(SU, SU->TopReadyCycle);
+}
+
+void SchedBoundary::releaseBottomNode(SUnit *SU) {
+ if (SU->isScheduled)
+ return;
+
+ assert(SU->getInstr() && "Scheduled SUnit must have instr");
+
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->isWeak())
+ continue;
+ unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
+ unsigned Latency = I->getLatency();
+#ifndef NDEBUG
+ MaxObservedLatency = std::max(Latency, MaxObservedLatency);
+#endif
+ if (SU->BotReadyCycle < SuccReadyCycle + Latency)
+ SU->BotReadyCycle = SuccReadyCycle + Latency;
+ }
+ releaseNode(SU, SU->BotReadyCycle);
+}
+
/// Move the boundary of scheduled code by one cycle.
-void GenericScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
+void SchedBoundary::bumpCycle(unsigned NextCycle) {
if (SchedModel->getMicroOpBufferSize() == 0) {
assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
if (MinReadyCycle > NextCycle)
@@ -2116,8 +1600,7 @@ void GenericScheduler::SchedBoundary::bumpCycle(unsigned NextCycle) {
DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
}
-void GenericScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
- unsigned Count) {
+void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
ExecutedResCounts[PIdx] += Count;
if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
MaxExecutedResCount = ExecutedResCounts[PIdx];
@@ -2130,11 +1613,11 @@ void GenericScheduler::SchedBoundary::incExecutedResources(unsigned PIdx,
///
/// \return the next cycle at which the instruction may execute without
/// oversubscribing resources.
-unsigned GenericScheduler::SchedBoundary::
+unsigned SchedBoundary::
countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
unsigned Factor = SchedModel->getResourceFactor(PIdx);
unsigned Count = Factor * Cycles;
- DEBUG(dbgs() << " " << getResourceName(PIdx)
+ DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
<< " +" << Cycles << "x" << Factor << "u\n");
// Update Executed resources counts.
@@ -2147,7 +1630,7 @@ countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
ZoneCritResIdx = PIdx;
DEBUG(dbgs() << " *** Critical resource "
- << getResourceName(PIdx) << ": "
+ << SchedModel->getResourceName(PIdx) << ": "
<< getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
}
// For reserved resources, record the highest cycle using the resource.
@@ -2161,7 +1644,7 @@ countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
}
/// Move the boundary of scheduled code by one SUnit.
-void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
+void SchedBoundary::bumpNode(SUnit *SU) {
// Update the reservation table.
if (HazardRec->isEnabled()) {
if (!isTop() && SU->isCall) {
@@ -2285,7 +1768,7 @@ void GenericScheduler::SchedBoundary::bumpNode(SUnit *SU) {
/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
-void GenericScheduler::SchedBoundary::releasePending() {
+void SchedBoundary::releasePending() {
// If the available queue is empty, it is safe to reset MinReadyCycle.
if (Available.empty())
MinReadyCycle = UINT_MAX;
@@ -2315,7 +1798,7 @@ void GenericScheduler::SchedBoundary::releasePending() {
}
/// Remove SU from the ready set for this boundary.
-void GenericScheduler::SchedBoundary::removeReady(SUnit *SU) {
+void SchedBoundary::removeReady(SUnit *SU) {
if (Available.isInQueue(SU))
Available.remove(Available.find(SU));
else {
@@ -2327,7 +1810,7 @@ void GenericScheduler::SchedBoundary::removeReady(SUnit *SU) {
/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return NULL.
-SUnit *GenericScheduler::SchedBoundary::pickOnlyChoice() {
+SUnit *SchedBoundary::pickOnlyChoice() {
if (CheckPending)
releasePending();
@@ -2356,7 +1839,7 @@ SUnit *GenericScheduler::SchedBoundary::pickOnlyChoice() {
#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
-void GenericScheduler::SchedBoundary::dumpScheduledState() {
+void SchedBoundary::dumpScheduledState() {
unsigned ResFactor;
unsigned ResCount;
if (ZoneCritResIdx) {
@@ -2372,13 +1855,351 @@ void GenericScheduler::SchedBoundary::dumpScheduledState() {
<< " Retired: " << RetiredMOps;
dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
- << ResCount / ResFactor << " " << getResourceName(ZoneCritResIdx)
+ << ResCount / ResFactor << " "
+ << SchedModel->getResourceName(ZoneCritResIdx)
<< "\n ExpectedLatency: " << ExpectedLatency << "c\n"
<< (IsResourceLimited ? " - Resource" : " - Latency")
<< " limited.\n";
}
#endif
+//===----------------------------------------------------------------------===//
+// GenericScheduler - Implementation of the generic MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
+/// the schedule.
+class GenericScheduler : public MachineSchedStrategy {
+public:
+ /// Represent the type of SchedCandidate found within a single queue.
+ /// pickNodeBidirectional depends on these listed by decreasing priority.
+ enum CandReason {
+ NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax,
+ ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
+ TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
+
+#ifndef NDEBUG
+ static const char *getReasonStr(GenericScheduler::CandReason Reason);
+#endif
+
+ /// Policy for scheduling the next instruction in the candidate's zone.
+ struct CandPolicy {
+ bool ReduceLatency;
+ unsigned ReduceResIdx;
+ unsigned DemandResIdx;
+
+ CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
+ };
+
+ /// Status of an instruction's critical resource consumption.
+ struct SchedResourceDelta {
+ // Count critical resources in the scheduled region required by SU.
+ unsigned CritResources;
+
+ // Count critical resources from another region consumed by SU.
+ unsigned DemandedResources;
+
+ SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
+
+ bool operator==(const SchedResourceDelta &RHS) const {
+ return CritResources == RHS.CritResources
+ && DemandedResources == RHS.DemandedResources;
+ }
+ bool operator!=(const SchedResourceDelta &RHS) const {
+ return !operator==(RHS);
+ }
+ };
+
+ /// Store the state used by GenericScheduler heuristics, required for the
+ /// lifetime of one invocation of pickNode().
+ struct SchedCandidate {
+ CandPolicy Policy;
+
+ // The best SUnit candidate.
+ SUnit *SU;
+
+ // The reason for this candidate.
+ CandReason Reason;
+
+ // Set of reasons that apply to multiple candidates.
+ uint32_t RepeatReasonSet;
+
+ // Register pressure values for the best candidate.
+ RegPressureDelta RPDelta;
+
+ // Critical resource consumption of the best candidate.
+ SchedResourceDelta ResDelta;
+
+ SchedCandidate(const CandPolicy &policy)
+ : Policy(policy), SU(NULL), Reason(NoCand), RepeatReasonSet(0) {}
+
+ bool isValid() const { return SU; }
+
+ // Copy the status of another candidate without changing policy.
+ void setBest(SchedCandidate &Best) {
+ assert(Best.Reason != NoCand && "uninitialized Sched candidate");
+ SU = Best.SU;
+ Reason = Best.Reason;
+ RPDelta = Best.RPDelta;
+ ResDelta = Best.ResDelta;
+ }
+
+ bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
+ void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
+
+ void initResourceDelta(const ScheduleDAGMI *DAG,
+ const TargetSchedModel *SchedModel);
+ };
+
+private:
+ const MachineSchedContext *Context;
+ ScheduleDAGMI *DAG;
+ const TargetSchedModel *SchedModel;
+ const TargetRegisterInfo *TRI;
+
+ // State of the top and bottom scheduled instruction boundaries.
+ SchedRemainder Rem;
+ SchedBoundary Top;
+ SchedBoundary Bot;
+
+ MachineSchedPolicy RegionPolicy;
+public:
+ GenericScheduler(const MachineSchedContext *C):
+ Context(C), DAG(0), SchedModel(0), TRI(0),
+ Top(SchedBoundary::TopQID, "TopQ"), Bot(SchedBoundary::BotQID, "BotQ") {}
+
+ virtual void initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs);
+
+ bool shouldTrackPressure() const { return RegionPolicy.ShouldTrackPressure; }
+
+ virtual void initialize(ScheduleDAGMI *dag);
+
+ virtual SUnit *pickNode(bool &IsTopNode);
+
+ virtual void schedNode(SUnit *SU, bool IsTopNode);
+
+ virtual void releaseTopNode(SUnit *SU) { Top.releaseTopNode(SU); }
+
+ virtual void releaseBottomNode(SUnit *SU) { Bot.releaseBottomNode(SU); }
+
+ virtual void registerRoots();
+
+protected:
+ void checkAcyclicLatency();
+
+ void setPolicy(CandPolicy &Policy, SchedBoundary &CurrZone,
+ SchedBoundary &OtherZone);
+
+ void tryCandidate(SchedCandidate &Cand,
+ SchedCandidate &TryCand,
+ SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ RegPressureTracker &TempTracker);
+
+ SUnit *pickNodeBidirectional(bool &IsTopNode);
+
+ void pickNodeFromQueue(SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate);
+
+ void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+
+#ifndef NDEBUG
+ void traceCandidate(const SchedCandidate &Cand);
+#endif
+};
+} // namespace
+
+void GenericScheduler::initialize(ScheduleDAGMI *dag) {
+ DAG = dag;
+ SchedModel = DAG->getSchedModel();
+ TRI = DAG->TRI;
+
+ Rem.init(DAG, SchedModel);
+ Top.init(DAG, SchedModel, &Rem);
+ Bot.init(DAG, SchedModel, &Rem);
+
+ // Initialize resource counts.
+
+ // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
+ // are disabled, then these HazardRecs will be disabled.
+ const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
+ const TargetMachine &TM = DAG->MF.getTarget();
+ if (!Top.HazardRec) {
+ Top.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
+ if (!Bot.HazardRec) {
+ Bot.HazardRec =
+ TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG);
+ }
+}
+
+/// Initialize the per-region scheduling policy.
+void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs) {
+ const TargetMachine &TM = Context->MF->getTarget();
+
+ // Avoid setting up the register pressure tracker for small regions to save
+ // compile time. As a rough heuristic, only track pressure when the number of
+ // schedulable instructions exceeds half the integer register file.
+ unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
+ TM.getTargetLowering()->getRegClassFor(MVT::i32));
+
+ RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
+
+ // For generic targets, we default to bottom-up, because it's simpler and more
+ // compile-time optimizations have been implemented in that direction.
+ RegionPolicy.OnlyBottomUp = true;
+
+ // Allow the subtarget to override default policy.
+ const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+ ST.overrideSchedPolicy(RegionPolicy, Begin, End, NumRegionInstrs);
+
+ // After subtarget overrides, apply command line options.
+ if (!EnableRegPressure)
+ RegionPolicy.ShouldTrackPressure = false;
+
+  // Check whether -misched-topdown/bottomup can force or unforce the
+  // scheduling direction.
+ // e.g. -misched-bottomup=false allows scheduling in both directions.
+ assert((!ForceTopDown || !ForceBottomUp) &&
+ "-misched-topdown incompatible with -misched-bottomup");
+ if (ForceBottomUp.getNumOccurrences() > 0) {
+ RegionPolicy.OnlyBottomUp = ForceBottomUp;
+ if (RegionPolicy.OnlyBottomUp)
+ RegionPolicy.OnlyTopDown = false;
+ }
+ if (ForceTopDown.getNumOccurrences() > 0) {
+ RegionPolicy.OnlyTopDown = ForceTopDown;
+ if (RegionPolicy.OnlyTopDown)
+ RegionPolicy.OnlyBottomUp = false;
+ }
+}
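For example (made-up numbers): on a target with 32 allocatable i32 registers,
NIntRegs / 2 = 16, so only regions of more than 16 schedulable instructions
pay the compile-time cost of register pressure tracking.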
+
+/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
+/// critical path by more cycles than it takes to drain the instruction buffer.
+/// We estimate an upper bound on in-flight instructions as:
+///
+/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
+/// InFlightIterations = AcyclicPath / CyclesPerIteration
+/// InFlightResources = InFlightIterations * LoopResources
+///
+/// TODO: Check execution resources in addition to IssueCount.
+void GenericScheduler::checkAcyclicLatency() {
+ if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
+ return;
+
+ // Scaled number of cycles per loop iteration.
+ unsigned IterCount =
+ std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
+ Rem.RemIssueCount);
+ // Scaled acyclic critical path.
+ unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
+ // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
+ unsigned InFlightCount =
+ (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
+ unsigned BufferLimit =
+ SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
+
+ Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
+
+ DEBUG(dbgs() << "IssueCycles="
+ << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
+ << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
+ << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
+ << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
+ << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
+ if (Rem.IsAcyclicLatencyLimited)
+ dbgs() << " ACYCLIC LATENCY LIMIT\n");
+}
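A worked illustration with made-up numbers, taking LatencyFactor =
MicroOpFactor = 1 for readability:

    CyclicCritPath = 2c, CriticalPath = 40c,
    RemIssueCount = 8u, MicroOpBufferSize = 32

    IterCount     = max(2 * 1, 8)   = 8
    AcyclicCount  = 40 * 1          = 40
    InFlightCount = (40*8 + 7) / 8  = 40   (round-up division)
    BufferLimit   = 32 * 1          = 32

InFlightCount (40) exceeds BufferLimit (32), so IsAcyclicLatencyLimited is
set and the strategy will aggressively schedule for latency (see tryCandidate
below).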
+
+void GenericScheduler::registerRoots() {
+ Rem.CriticalPath = DAG->ExitSU.getDepth();
+
+ // Some roots may not feed into ExitSU. Check all of them in case.
+ for (std::vector<SUnit*>::const_iterator
+ I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
+ if ((*I)->getDepth() > Rem.CriticalPath)
+ Rem.CriticalPath = (*I)->getDepth();
+ }
+ DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n');
+
+ if (EnableCyclicPath) {
+ Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
+ checkAcyclicLatency();
+ }
+}
+
+/// Set the CandPolicy for a scheduling zone given the current resources and
+/// latencies inside and outside the zone.
+void GenericScheduler::setPolicy(CandPolicy &Policy, SchedBoundary &CurrZone,
+ SchedBoundary &OtherZone) {
+  // Apply preemptive heuristics based on the total latency and resources
+ // inside and outside this zone. Potential stalls should be considered before
+ // following this policy.
+
+ // Compute remaining latency. We need this both to determine whether the
+ // overall schedule has become latency-limited and whether the instructions
+ // outside this zone are resource or latency limited.
+ //
+ // The "dependent" latency is updated incrementally during scheduling as the
+ // max height/depth of scheduled nodes minus the cycles since it was
+ // scheduled:
+  //   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
+ //
+ // The "independent" latency is the max ready queue depth:
+ // ILat = max N.depth for N in Available|Pending
+ //
+ // RemainingLatency is the greater of independent and dependent latency.
+ unsigned RemLatency = CurrZone.getDependentLatency();
+ RemLatency = std::max(RemLatency,
+ CurrZone.findMaxLatency(CurrZone.Available.elements()));
+ RemLatency = std::max(RemLatency,
+ CurrZone.findMaxLatency(CurrZone.Pending.elements()));
+
+ // Compute the critical resource outside the zone.
+ unsigned OtherCritIdx;
+ unsigned OtherCount = OtherZone.getOtherResourceCount(OtherCritIdx);
+
+ bool OtherResLimited = false;
+ if (SchedModel->hasInstrSchedModel()) {
+ unsigned LFactor = SchedModel->getLatencyFactor();
+ OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
+ }
+ if (!OtherResLimited
+ && (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
+ Policy.ReduceLatency |= true;
+ DEBUG(dbgs() << " " << CurrZone.Available.getName() << " RemainingLatency "
+ << RemLatency << " + " << CurrZone.getCurrCycle() << "c > CritPath "
+ << Rem.CriticalPath << "\n");
+ }
+ // If the same resource is limiting inside and outside the zone, do nothing.
+ if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
+ return;
+
+ DEBUG(
+ if (CurrZone.isResourceLimited()) {
+ dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
+ << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
+ << "\n";
+ }
+ if (OtherResLimited)
+ dbgs() << " RemainingLimit: "
+ << SchedModel->getResourceName(OtherCritIdx) << "\n";
+ if (!CurrZone.isResourceLimited() && !OtherResLimited)
+ dbgs() << " Latency limited both directions.\n");
+
+ if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
+ Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
+
+ if (OtherResLimited)
+ Policy.DemandResIdx = OtherCritIdx;
+}
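The OtherResLimited test above is easier to read with numbers (made up for
illustration): with LFactor = 2, OtherCount = 30 units, and RemLatency = 12
cycles,

    OtherCount - RemLatency * LFactor = 30 - 24 = 6 > LFactor (2)

so the instructions outside the zone have more resource work queued than
their remaining latency alone can cover, and Policy.DemandResIdx is set to
steer picks toward that critical resource.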
+
void GenericScheduler::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
const TargetSchedModel *SchedModel) {
@@ -2396,7 +2217,6 @@ initResourceDelta(const ScheduleDAGMI *DAG,
}
}
-
/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
GenericScheduler::SchedCandidate &TryCand,
@@ -2490,7 +2310,7 @@ static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
static bool tryLatency(GenericScheduler::SchedCandidate &TryCand,
GenericScheduler::SchedCandidate &Cand,
- GenericScheduler::SchedBoundary &Zone) {
+ SchedBoundary &Zone) {
if (Zone.isTop()) {
if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
@@ -2591,7 +2411,7 @@ void GenericScheduler::tryCandidate(SchedCandidate &Cand,
// For loops that are acyclic path limited, aggressively schedule for latency.
// This can result in very long dependence chains scheduled in sequence, so
// once every cycle (when CurrMOps == 0), switch to normal heuristics.
- if (Rem.IsAcyclicLatencyLimited && !Zone.CurrMOps
+ if (Rem.IsAcyclicLatencyLimited && !Zone.getCurrMOps()
&& tryLatency(TryCand, Cand, Zone))
return;
@@ -2644,7 +2464,7 @@ void GenericScheduler::tryCandidate(SchedCandidate &Cand,
// Prefer immediate defs/users of the last scheduled instruction. This is a
// local pressure avoidance strategy that also makes the machine code
// readable.
- if (tryGreater(Zone.NextSUs.count(TryCand.SU), Zone.NextSUs.count(Cand.SU),
+ if (tryGreater(Zone.isNextSU(TryCand.SU), Zone.isNextSU(Cand.SU),
TryCand, Cand, NextDefUse))
return;
@@ -2785,8 +2605,12 @@ SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
CandPolicy NoPolicy;
SchedCandidate BotCand(NoPolicy);
SchedCandidate TopCand(NoPolicy);
- Bot.setPolicy(BotCand.Policy, Top);
- Top.setPolicy(TopCand.Policy, Bot);
+ // Set the bottom-up policy based on the state of the current bottom zone and
+ // the instructions outside the zone, including the top zone.
+ setPolicy(BotCand.Policy, Bot, Top);
+ // Set the top-down policy based on the state of the current top zone and
+ // the instructions outside the zone, including the bottom zone.
+ setPolicy(TopCand.Policy, Top, Bot);
// Prefer bottom scheduling when heuristics are silent.
pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
@@ -2903,13 +2727,13 @@ void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
- SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.CurrCycle);
+ SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
reschedulePhysRegCopies(SU, true);
}
else {
- SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.CurrCycle);
+ SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
reschedulePhysRegCopies(SU, false);