author     Owen Anderson <resistor@mac.com>    2008-07-22 22:46:49 +0000
committer  Owen Anderson <resistor@mac.com>    2008-07-22 22:46:49 +0000
commit     a1566f2e12ce87a5bca30bc0189a0cdbb40136a4 (patch)
tree       fecdd6cd2ded8963a4015584d9de5f7f4329a765 /lib
parent     38bcec13e89b33fd6b0553ec47667744c54fbb7b (diff)
Change the heuristics used in the coalescer, the register allocator, and live
intervals itself to use an instruction count approximation that is not affected
by inserting empty indices.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@53937 91177308-0d34-0410-b5e6-96231b3b80d8
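The new helper is declared in the LiveIntervals header, which falls outside this lib-limited diffstat, so its body does not appear below. As a minimal free-standing sketch of the idea (every name here except FunctionSize and InstrSlots::NUM is invented for illustration): both an interval's slot-index size and the function's total index range grow when empty indices are inserted, so their ratio stays roughly stable, and scaling that ratio by the real instruction count gives the kind of estimate the log message describes.

  // Hypothetical sketch only, not the code added by this commit.
  // IntervalSlotSize:  the interval's size in index slots (what getSize() returns)
  // FunctionSlotRange: the function's total index range, including empty indices
  // FunctionSize:      the number of real MachineInstrs, counted in computeNumbering()
  static unsigned approximateInstructionCount(unsigned IntervalSlotSize,
                                              unsigned FunctionSlotRange,
                                              unsigned FunctionSize) {
    if (FunctionSlotRange == 0)
      return 0;
    // The interval's share of the index space, which is largely insensitive
    // to empty indices because they inflate numerator and denominator alike.
    double Fraction = (double)IntervalSlotSize / (double)FunctionSlotRange;
    return (unsigned)(Fraction * FunctionSize);
  }

Contrast this with the expression it replaces at the call sites below, getSize() / InstrSlots::NUM, which counts index slots directly and therefore drifts whenever empty indices are inserted into the numbering.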
Diffstat (limited to 'lib')
-rw-r--r--   lib/CodeGen/LiveIntervalAnalysis.cpp     |  6
-rw-r--r--   lib/CodeGen/RegAllocLinearScan.cpp       |  3
-rw-r--r--   lib/CodeGen/SimpleRegisterCoalescing.cpp | 16
-rw-r--r--   lib/CodeGen/SimpleRegisterCoalescing.h   |  2
4 files changed, 16 insertions, 11 deletions
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index f57cd2b7d1..295a1615b6 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -87,6 +87,8 @@ void LiveIntervals::computeNumbering() {
mi2iMap_.clear();
i2miMap_.clear();
+ FunctionSize = 0;
+
// Number MachineInstrs and MachineBasicBlocks.
// Initialize MBB indexes to a sentinal.
MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
@@ -102,6 +104,8 @@ void LiveIntervals::computeNumbering() {
assert(inserted && "multiple MachineInstr -> index mappings");
i2miMap_.push_back(I);
MIIndex += InstrSlots::NUM;
+
+ FunctionSize++;
}
if (StartIdx == MIIndex) {
@@ -1789,7 +1793,7 @@ addIntervalsForSpills(const LiveInterval &li,
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
- LI->weight /= LI->getSize();
+ LI->weight /= getApproximateInstructionCount(*LI);
if (!AddedKill.count(LI)) {
LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
unsigned LastUseIdx = getBaseIndex(LR->end);
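The spill-weight normalization in this hunk is the same heuristic as before, just expressed in new units. For illustration only (numbers invented, and assuming four index slots per instruction):

  old:  weight 400 / getSize() of 80 slots           = 5.0
  old:  after 8 empty slots appear in the interval,
        weight 400 / 88 slots                        = ~4.55   (changed by renumbering alone)
  new:  weight 400 / ~20 instructions                = 20.0    (stable across renumbering)

The absolute values differ, but the instruction-based figure no longer moves when empty indices are inserted.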
diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp
index a26924be62..4df172d40c 100644
--- a/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/lib/CodeGen/RegAllocLinearScan.cpp
@@ -851,7 +851,8 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
if (minWeight == HUGE_VALF) {
// All registers must have inf weight. Just grab one!
minReg = BestPhysReg ? BestPhysReg : *RC->allocation_order_begin(*mf_);
- if (cur->weight == HUGE_VALF || cur->getSize() == 1)
+ if (cur->weight == HUGE_VALF ||
+ li_->getApproximateInstructionCount(*cur) == 1)
// Spill a physical register around defs and uses.
li_->spillPhysRegAroundRegDefsUses(*cur, minReg, *vrm_);
}
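An illustrative restatement of the guard above, with the condition given a name (same logic, nothing new):

  // When every candidate register already carries infinite weight, the
  // allocator grabs one anyway; it only spills that physical register
  // around cur's defs and uses if cur itself is unspillable or covers
  // roughly a single instruction.
  bool SpillPhysRegAround =
      cur->weight == HUGE_VALF ||
      li_->getApproximateInstructionCount(*cur) == 1;

Measuring "covers a single instruction" in instructions rather than raw index slots keeps this corner case stable under renumbering.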
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp
index 5b7f55dfc9..29e634b477 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -851,8 +851,8 @@ SimpleRegisterCoalescing::isProfitableToCoalesceToSubRC(unsigned SrcReg,
// Then make sure the intervals are *short*.
LiveInterval &SrcInt = li_->getInterval(SrcReg);
LiveInterval &DstInt = li_->getInterval(DstReg);
- unsigned SrcSize = SrcInt.getSize() / InstrSlots::NUM;
- unsigned DstSize = DstInt.getSize() / InstrSlots::NUM;
+ unsigned SrcSize = li_->getApproximateInstructionCount(SrcInt);
+ unsigned DstSize = li_->getApproximateInstructionCount(DstInt);
const TargetRegisterClass *RC = mri_->getRegClass(DstReg);
unsigned Threshold = allocatableRCRegs_[RC].count() * 2;
return (SrcSize + DstSize) <= Threshold;
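A worked example with invented numbers: if the destination register class has 8 allocatable registers, Threshold is 16, so coalescing into the sub-register class is judged profitable only when the two intervals together span at most 16 instructions. Before this change the same comparison divided slot-index sizes by InstrSlots::NUM, a figure that creeps upward as empty indices are inserted, so a pair of intervals could stop (or start) looking "short" without any real code change.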
@@ -1011,10 +1011,10 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
if (SubIdx) {
unsigned LargeReg = isExtSubReg ? SrcReg : DstReg;
unsigned SmallReg = isExtSubReg ? DstReg : SrcReg;
- unsigned LargeRegSize =
- li_->getInterval(LargeReg).getSize() / InstrSlots::NUM;
- unsigned SmallRegSize =
- li_->getInterval(SmallReg).getSize() / InstrSlots::NUM;
+ unsigned LargeRegSize =
+ li_->getApproximateInstructionCount(li_->getInterval(LargeReg));
+ unsigned SmallRegSize =
+ li_->getApproximateInstructionCount(li_->getInterval(SmallReg));
const TargetRegisterClass *RC = mri_->getRegClass(SmallReg);
unsigned Threshold = allocatableRCRegs_[RC].count();
// Be conservative. If both sides are virtual registers, do not coalesce
@@ -1081,7 +1081,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// If the virtual register live interval is long but it has low use desity,
// do not join them, instead mark the physical register as its allocation
// preference.
- unsigned Length = JoinVInt.getSize() / InstrSlots::NUM;
+ unsigned Length = li_->getApproximateInstructionCount(JoinVInt);
if (Length > Threshold &&
(((float)std::distance(mri_->use_begin(JoinVReg),
mri_->use_end()) / Length) < (1.0 / Threshold))) {
@@ -2196,7 +2196,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
// Divide the weight of the interval by its size. This encourages
// spilling of intervals that are large and have few uses, and
// discourages spilling of small intervals with many uses.
- LI.weight /= LI.getSize();
+ LI.weight /= li_->getApproximateInstructionCount(LI);
}
}
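For instance (illustrative numbers): two intervals with the same raw weight of 300, one covering about 150 instructions with a handful of uses and one covering 3 instructions, end up with spill weights of 2 and 100 respectively, so the long, sparsely used interval is the cheaper spill candidate, exactly as the comment in this hunk intends. Dividing by the instruction-count approximation keeps that ordering independent of how many empty indices the numbering has accumulated.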
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.h b/lib/CodeGen/SimpleRegisterCoalescing.h
index 3a65a9f4dd..de16fa1535 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.h
+++ b/lib/CodeGen/SimpleRegisterCoalescing.h
@@ -126,7 +126,7 @@ namespace llvm {
unsigned getRepIntervalSize(unsigned Reg) {
if (!li_->hasInterval(Reg))
return 0;
- return li_->getInterval(Reg).getSize();
+ return li_->getApproximateInstructionCount(li_->getInterval(Reg));
}
/// print - Implement the dump method.