author    Lang Hames <lhames@gmail.com>  2009-11-03 23:52:08 +0000
committer Lang Hames <lhames@gmail.com>  2009-11-03 23:52:08 +0000
commit    233a60ec40b41027ff429e2f2c27fa2be762f2e9 (patch)
tree      85451aa736c6b83933b5646d0b81dac7f8145a8c /lib
parent    888acc35a3e271d092f9b1efc7c32b94ff17fbf7 (diff)
The Indexes Patch.
This introduces a new pass, SlotIndexes, which is responsible for numbering instructions for register allocation (and other clients). SlotIndexes numbering is designed to match the existing scheme, so this patch should not cause any changes in the generated code.

For consistency, and to avoid naming confusion, LiveIndex has been renamed SlotIndex.

The processImplicitDefs method of the LiveIntervals analysis has been moved into its own pass so that it can be run prior to SlotIndexes. This was necessary to match the existing numbering scheme.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@85979 91177308-0d34-0410-b5e6-96231b3b80d8
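The renaming is largely mechanical: index arithmetic that previously went through LiveIntervals helpers (getDefIndex, getUseIndex, getLoadIndex, getStoreIndex, getBaseIndex, getNextIndex) now lives on the index type itself. Below is a minimal client-side sketch, not part of the commit: the helper liveAtUseSlot and the header path llvm/CodeGen/SlotIndexes.h are assumptions, and the load/use/def/store sub-slot meanings are inferred from the substitutions in the diff.

#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/SlotIndexes.h"

using namespace llvm;

// Hypothetical helper: query the sub-slots of an instruction's index and
// test liveness of a virtual register at its use slot. Every call used here
// (getInstructionIndex, getLoadIndex, getUseIndex, getDefIndex,
// getStoreIndex, getInterval, liveAt) appears in the diff below.
static bool liveAtUseSlot(LiveIntervals &LIS, MachineInstr *MI, unsigned VReg) {
  SlotIndex Idx   = LIS.getInstructionIndex(MI); // base index of MI
  SlotIndex Load  = Idx.getLoadIndex();   // reload slot, before operands are read
  SlotIndex Use   = Idx.getUseIndex();    // operands are read here
  SlotIndex Def   = Idx.getDefIndex();    // results are written here
  SlotIndex Store = Idx.getStoreIndex();  // spill slot, after the def
  (void)Load; (void)Def; (void)Store;     // only Use is needed below

  LiveInterval &LI = LIS.getInterval(VReg);
  return LI.liveAt(Use);
}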
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/LiveInterval.cpp               89
-rw-r--r--  lib/CodeGen/LiveIntervalAnalysis.cpp      793
-rw-r--r--  lib/CodeGen/LiveStackAnalysis.cpp           9
-rw-r--r--  lib/CodeGen/PreAllocSplitting.cpp         198
-rw-r--r--  lib/CodeGen/ProcessImplicitDefs.cpp       231
-rw-r--r--  lib/CodeGen/RegAllocLinearScan.cpp         55
-rw-r--r--  lib/CodeGen/RegAllocPBQP.cpp                6
-rw-r--r--  lib/CodeGen/SimpleRegisterCoalescing.cpp  168
-rw-r--r--  lib/CodeGen/SimpleRegisterCoalescing.h      9
-rw-r--r--  lib/CodeGen/SlotIndexes.cpp               189
-rw-r--r--  lib/CodeGen/Spiller.cpp                    68
-rw-r--r--  lib/CodeGen/StackSlotColoring.cpp           2
-rw-r--r--  lib/CodeGen/StrongPHIElimination.cpp       55
-rw-r--r--  lib/CodeGen/VirtRegMap.cpp                  2
-rw-r--r--  lib/CodeGen/VirtRegMap.h                   10
15 files changed, 919 insertions, 965 deletions
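Clients pick up the new numbering by requiring the SlotIndexes analysis, exactly as the LiveIntervalAnalysis.cpp hunks below wire it into LiveIntervals (addRequiredTransitive<SlotIndexes>() in getAnalysisUsage, then getAnalysis<SlotIndexes>() in runOnMachineFunction). A minimal sketch of such a client follows; the pass name UsesSlotIndexes is invented, and the registration style is assumed to match other machine passes of this era.

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"

using namespace llvm;

namespace {
  // Hypothetical client pass: declares its dependency on SlotIndexes and
  // fetches the analysis, mirroring the changes to LiveIntervals below.
  struct UsesSlotIndexes : public MachineFunctionPass {
    static char ID;
    UsesSlotIndexes() : MachineFunctionPass(&ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequiredTransitive<SlotIndexes>();
      AU.addPreserved<SlotIndexes>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      SlotIndexes *Indexes = &getAnalysis<SlotIndexes>();
      // Indexes->getNextNonNullIndex(...) and Indexes->getTerminatorGap(...)
      // are the queries the rewritten LiveIntervals code below relies on.
      (void)Indexes;
      return false;
    }
  };

  char UsesSlotIndexes::ID = 0;
}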
diff --git a/lib/CodeGen/LiveInterval.cpp b/lib/CodeGen/LiveInterval.cpp
index a02a4a6c83..8d632cb5ca 100644
--- a/lib/CodeGen/LiveInterval.cpp
+++ b/lib/CodeGen/LiveInterval.cpp
@@ -19,6 +19,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
@@ -28,11 +29,6 @@
#include <algorithm>
using namespace llvm;
-// Print a LiveIndex to a raw_ostream.
-void LiveIndex::print(raw_ostream &os) const {
- os << (index & ~PHI_BIT);
-}
-
// An example for liveAt():
//
// this = [1,4), liveAt(0) will return false. The instruction defining this
@@ -40,7 +36,7 @@ void LiveIndex::print(raw_ostream &os) const {
// variable it represents. This is because slot 1 is used (def slot) and spans
// up to slot 3 (store slot).
//
-bool LiveInterval::liveAt(LiveIndex I) const {
+bool LiveInterval::liveAt(SlotIndex I) const {
Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);
if (r == ranges.begin())
@@ -53,7 +49,7 @@ bool LiveInterval::liveAt(LiveIndex I) const {
// liveBeforeAndAt - Check if the interval is live at the index and the index
// just before it. If index is liveAt, check if it starts a new live range.
// If it does, then check if the previous live range ends at index-1.
-bool LiveInterval::liveBeforeAndAt(LiveIndex I) const {
+bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);
if (r == ranges.begin())
@@ -131,7 +127,7 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other,
/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
-bool LiveInterval::overlaps(LiveIndex Start, LiveIndex End) const {
+bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
assert(Start < End && "Invalid range");
const_iterator I = begin();
const_iterator E = end();
@@ -149,10 +145,10 @@ bool LiveInterval::overlaps(LiveIndex Start, LiveIndex End) const {
/// specified by I to end at the specified endpoint. To do this, we should
/// merge and eliminate all ranges that this will overlap with. The iterator is
/// not invalidated.
-void LiveInterval::extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd) {
+void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
assert(I != ranges.end() && "Not a valid interval!");
VNInfo *ValNo = I->valno;
- LiveIndex OldEnd = I->end;
+ SlotIndex OldEnd = I->end;
// Search for the first interval that we can't merge with.
Ranges::iterator MergeTo = next(I);
@@ -167,7 +163,7 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd) {
ranges.erase(next(I), MergeTo);
// Update kill info.
- ValNo->removeKills(OldEnd, I->end.prevSlot_());
+ ValNo->removeKills(OldEnd, I->end.getPrevSlot());
// If the newly formed range now touches the range after it and if they have
// the same value number, merge the two ranges into one range.
@@ -183,7 +179,7 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd) {
/// specified by I to start at the specified endpoint. To do this, we should
/// merge and eliminate all ranges that this will overlap with.
LiveInterval::Ranges::iterator
-LiveInterval::extendIntervalStartTo(Ranges::iterator I, LiveIndex NewStart) {
+LiveInterval::extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStart) {
assert(I != ranges.end() && "Not a valid interval!");
VNInfo *ValNo = I->valno;
@@ -216,7 +212,7 @@ LiveInterval::extendIntervalStartTo(Ranges::iterator I, LiveIndex NewStart) {
LiveInterval::iterator
LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
- LiveIndex Start = LR.start, End = LR.end;
+ SlotIndex Start = LR.start, End = LR.end;
iterator it = std::upper_bound(From, ranges.end(), Start);
// If the inserted interval starts in the middle or right at the end of
@@ -268,7 +264,7 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
/// isInOneLiveRange - Return true if the range specified is entirely in
/// a single LiveRange of the live interval.
-bool LiveInterval::isInOneLiveRange(LiveIndex Start, LiveIndex End) {
+bool LiveInterval::isInOneLiveRange(SlotIndex Start, SlotIndex End) {
Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
if (I == ranges.begin())
return false;
@@ -279,7 +275,7 @@ bool LiveInterval::isInOneLiveRange(LiveIndex Start, LiveIndex End) {
/// removeRange - Remove the specified range from this interval. Note that
/// the range must be in a single LiveRange in its entirety.
-void LiveInterval::removeRange(LiveIndex Start, LiveIndex End,
+void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
bool RemoveDeadValNo) {
// Find the LiveRange containing this span.
Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
@@ -331,7 +327,7 @@ void LiveInterval::removeRange(LiveIndex Start, LiveIndex End,
}
// Otherwise, we are splitting the LiveRange into two pieces.
- LiveIndex OldEnd = I->end;
+ SlotIndex OldEnd = I->end;
I->end = Start; // Trim the old interval.
// Insert the new one.
@@ -362,36 +358,11 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {
ValNo->setIsUnused(true);
}
}
-
-/// scaleNumbering - Renumber VNI and ranges to provide gaps for new
-/// instructions.
-
-void LiveInterval::scaleNumbering(unsigned factor) {
- // Scale ranges.
- for (iterator RI = begin(), RE = end(); RI != RE; ++RI) {
- RI->start = RI->start.scale(factor);
- RI->end = RI->end.scale(factor);
- }
-
- // Scale VNI info.
- for (vni_iterator VNI = vni_begin(), VNIE = vni_end(); VNI != VNIE; ++VNI) {
- VNInfo *vni = *VNI;
-
- if (vni->isDefAccurate())
- vni->def = vni->def.scale(factor);
-
- for (unsigned i = 0; i < vni->kills.size(); ++i) {
- if (!vni->kills[i].isPHIIndex())
- vni->kills[i] = vni->kills[i].scale(factor);
- }
- }
-}
-
/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
LiveInterval::const_iterator
-LiveInterval::FindLiveRangeContaining(LiveIndex Idx) const {
+LiveInterval::FindLiveRangeContaining(SlotIndex Idx) const {
const_iterator It = std::upper_bound(begin(), end(), Idx);
if (It != ranges.begin()) {
--It;
@@ -403,7 +374,7 @@ LiveInterval::FindLiveRangeContaining(LiveIndex Idx) const {
}
LiveInterval::iterator
-LiveInterval::FindLiveRangeContaining(LiveIndex Idx) {
+LiveInterval::FindLiveRangeContaining(SlotIndex Idx) {
iterator It = std::upper_bound(begin(), end(), Idx);
if (It != begin()) {
--It;
@@ -416,7 +387,7 @@ LiveInterval::FindLiveRangeContaining(LiveIndex Idx) {
/// findDefinedVNInfo - Find the VNInfo defined by the specified
/// index (register interval).
-VNInfo *LiveInterval::findDefinedVNInfoForRegInt(LiveIndex Idx) const {
+VNInfo *LiveInterval::findDefinedVNInfoForRegInt(SlotIndex Idx) const {
for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
i != e; ++i) {
if ((*i)->def == Idx)
@@ -440,7 +411,8 @@ VNInfo *LiveInterval::findDefinedVNInfoForStackInt(unsigned reg) const {
/// join - Join two live intervals (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
/// the intervals are not joinable, this aborts.
-void LiveInterval::join(LiveInterval &Other, const int *LHSValNoAssignments,
+void LiveInterval::join(LiveInterval &Other,
+ const int *LHSValNoAssignments,
const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo,
MachineRegisterInfo *MRI) {
@@ -554,14 +526,15 @@ void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS,
/// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
/// current interval, it will replace the value numbers of the overlaped
/// live ranges with the specified value number.
-void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
- const VNInfo *RHSValNo, VNInfo *LHSValNo) {
+void LiveInterval::MergeValueInAsValue(
+ const LiveInterval &RHS,
+ const VNInfo *RHSValNo, VNInfo *LHSValNo) {
SmallVector<VNInfo*, 4> ReplacedValNos;
iterator IP = begin();
for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
if (I->valno != RHSValNo)
continue;
- LiveIndex Start = I->start, End = I->end;
+ SlotIndex Start = I->start, End = I->end;
IP = std::upper_bound(IP, end(), Start);
// If the start of this range overlaps with an existing liverange, trim it.
if (IP != begin() && IP[-1].end > Start) {
@@ -621,7 +594,8 @@ void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
/// MergeInClobberRanges - For any live ranges that are not defined in the
/// current interval, but are defined in the Clobbers interval, mark them
/// used with an unknown definition value.
-void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
+void LiveInterval::MergeInClobberRanges(LiveIntervals &li_,
+ const LiveInterval &Clobbers,
BumpPtrAllocator &VNInfoAllocator) {
if (Clobbers.empty()) return;
@@ -638,20 +612,20 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
ClobberValNo = UnusedValNo;
else {
UnusedValNo = ClobberValNo =
- getNextValue(LiveIndex(), 0, false, VNInfoAllocator);
+ getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo));
}
bool Done = false;
- LiveIndex Start = I->start, End = I->end;
+ SlotIndex Start = I->start, End = I->end;
// If a clobber range starts before an existing range and ends after
// it, the clobber range will need to be split into multiple ranges.
// Loop until the entire clobber range is handled.
while (!Done) {
Done = true;
IP = std::upper_bound(IP, end(), Start);
- LiveIndex SubRangeStart = Start;
- LiveIndex SubRangeEnd = End;
+ SlotIndex SubRangeStart = Start;
+ SlotIndex SubRangeEnd = End;
// If the start of this range overlaps with an existing liverange, trim it.
if (IP != begin() && IP[-1].end > SubRangeStart) {
@@ -687,13 +661,14 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
}
}
-void LiveInterval::MergeInClobberRange(LiveIndex Start,
- LiveIndex End,
+void LiveInterval::MergeInClobberRange(LiveIntervals &li_,
+ SlotIndex Start,
+ SlotIndex End,
BumpPtrAllocator &VNInfoAllocator) {
// Find a value # to use for the clobber ranges. If there is already a value#
// for unknown values, use it.
VNInfo *ClobberValNo =
- getNextValue(LiveIndex(), 0, false, VNInfoAllocator);
+ getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
iterator IP = begin();
IP = std::upper_bound(IP, end(), Start);
@@ -881,8 +856,6 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
OS << "-(";
for (unsigned j = 0; j != ee; ++j) {
OS << vni->kills[j];
- if (vni->kills[j].isPHIIndex())
- OS << "*";
if (j != ee-1)
OS << " ";
}
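For reference, the LiveInterval query API after the hunks above takes SlotIndex wherever LiveIndex appeared before. A short usage sketch, not part of the commit (the helper name describeRange is invented):

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"

using namespace llvm;

// Invented helper: exercises the LiveInterval queries whose signatures the
// hunks above switch from LiveIndex to SlotIndex.
static bool describeRange(const LiveInterval &LI, SlotIndex Start, SlotIndex End) {
  if (!LI.overlaps(Start, End))              // [Start, End) misses LI entirely
    return false;
  LiveInterval::const_iterator R = LI.FindLiveRangeContaining(Start);
  if (R != LI.end()) {
    SlotIndex DefIdx = R->valno->def;        // defining index of the value live here
    (void)DefIdx;
  }
  return LI.liveAt(Start);
}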
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index 79f46f3395..2a93a35b3f 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -28,6 +28,7 @@
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
@@ -80,6 +81,10 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
}
AU.addRequiredID(TwoAddressInstructionPassID);
+ AU.addPreserved<ProcessImplicitDefs>();
+ AU.addRequired<ProcessImplicitDefs>();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -89,12 +94,7 @@ void LiveIntervals::releaseMemory() {
E = r2iMap_.end(); I != E; ++I)
delete I->second;
- MBB2IdxMap.clear();
- Idx2MBBMap.clear();
- mi2iMap_.clear();
- i2miMap_.clear();
r2iMap_.clear();
- terminatorGaps.clear();
phiJoinCopies.clear();
// Release VNInfo memroy regions after all VNInfo objects are dtor'd.
@@ -106,422 +106,6 @@ void LiveIntervals::releaseMemory() {
}
}
-static bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
- unsigned OpIdx, const TargetInstrInfo *tii_){
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- Reg == SrcReg)
- return true;
-
- if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
- return true;
- if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
- return true;
- return false;
-}
-
-/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
-/// there is one implicit_def for each use. Add isUndef marker to
-/// implicit_def defs and their uses.
-void LiveIntervals::processImplicitDefs() {
- SmallSet<unsigned, 8> ImpDefRegs;
- SmallVector<MachineInstr*, 8> ImpDefMIs;
- MachineBasicBlock *Entry = mf_->begin();
- SmallPtrSet<MachineBasicBlock*,16> Visited;
- for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
- DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
- DFI != E; ++DFI) {
- MachineBasicBlock *MBB = *DFI;
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ) {
- MachineInstr *MI = &*I;
- ++I;
- if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
- unsigned Reg = MI->getOperand(0).getReg();
- ImpDefRegs.insert(Reg);
- if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
- for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
- ImpDefRegs.insert(*SS);
- }
- ImpDefMIs.push_back(MI);
- continue;
- }
-
- if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
- MachineOperand &MO = MI->getOperand(2);
- if (ImpDefRegs.count(MO.getReg())) {
- // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
- // This is an identity copy, eliminate it now.
- if (MO.isKill()) {
- LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
- vi.removeKill(MI);
- }
- MI->eraseFromParent();
- continue;
- }
- }
-
- bool ChangedToImpDef = false;
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isUse() || MO.isUndef())
- continue;
- unsigned Reg = MO.getReg();
- if (!Reg)
- continue;
- if (!ImpDefRegs.count(Reg))
- continue;
- // Use is a copy, just turn it into an implicit_def.
- if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
- bool isKill = MO.isKill();
- MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
- for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
- MI->RemoveOperand(j);
- if (isKill) {
- ImpDefRegs.erase(Reg);
- LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
- vi.removeKill(MI);
- }
- ChangedToImpDef = true;
- break;
- }
-
- MO.setIsUndef();
- if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
- // Make sure other uses of
- for (unsigned j = i+1; j != e; ++j) {
- MachineOperand &MOJ = MI->getOperand(j);
- if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
- MOJ.setIsUndef();
- }
- ImpDefRegs.erase(Reg);
- }
- }
-
- if (ChangedToImpDef) {
- // Backtrack to process this new implicit_def.
- --I;
- } else {
- for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
- MachineOperand& MO = MI->getOperand(i);
- if (!MO.isReg() || !MO.isDef())
- continue;
- ImpDefRegs.erase(MO.getReg());
- }
- }
- }
-
- // Any outstanding liveout implicit_def's?
- for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
- MachineInstr *MI = ImpDefMIs[i];
- unsigned Reg = MI->getOperand(0).getReg();
- if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
- !ImpDefRegs.count(Reg)) {
- // Delete all "local" implicit_def's. That include those which define
- // physical registers since they cannot be liveout.
- MI->eraseFromParent();
- continue;
- }
-
- // If there are multiple defs of the same register and at least one
- // is not an implicit_def, do not insert implicit_def's before the
- // uses.
- bool Skip = false;
- for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
- DE = mri_->def_end(); DI != DE; ++DI) {
- if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
- Skip = true;
- break;
- }
- }
- if (Skip)
- continue;
-
- // The only implicit_def which we want to keep are those that are live
- // out of its block.
- MI->eraseFromParent();
-
- for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
- UE = mri_->use_end(); UI != UE; ) {
- MachineOperand &RMO = UI.getOperand();
- MachineInstr *RMI = &*UI;
- ++UI;
- MachineBasicBlock *RMBB = RMI->getParent();
- if (RMBB == MBB)
- continue;
-
- // Turn a copy use into an implicit_def.
- unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
- if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
- Reg == SrcReg) {
- RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
- for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
- RMI->RemoveOperand(j);
- continue;
- }
-
- const TargetRegisterClass* RC = mri_->getRegClass(Reg);
- unsigned NewVReg = mri_->createVirtualRegister(RC);
- RMO.setReg(NewVReg);
- RMO.setIsUndef();
- RMO.setIsKill();
- }
- }
- ImpDefRegs.clear();
- ImpDefMIs.clear();
- }
-}
-
-
-void LiveIntervals::computeNumbering() {
- Index2MiMap OldI2MI = i2miMap_;
- std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;
-
- Idx2MBBMap.clear();
- MBB2IdxMap.clear();
- mi2iMap_.clear();
- i2miMap_.clear();
- terminatorGaps.clear();
- phiJoinCopies.clear();
-
- FunctionSize = 0;
-
- // Number MachineInstrs and MachineBasicBlocks.
- // Initialize MBB indexes to a sentinal.
- MBB2IdxMap.resize(mf_->getNumBlockIDs(),
- std::make_pair(LiveIndex(),LiveIndex()));
-
- LiveIndex MIIndex;
- for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
- MBB != E; ++MBB) {
- LiveIndex StartIdx = MIIndex;
-
- // Insert an empty slot at the beginning of each block.
- MIIndex = getNextIndex(MIIndex);
- i2miMap_.push_back(0);
-
- for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
- I != E; ++I) {
-
- if (I == MBB->getFirstTerminator()) {
- // Leave a gap for before terminators, this is where we will point
- // PHI kills.
- LiveIndex tGap(true, MIIndex);
- bool inserted =
- terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
- assert(inserted &&
- "Multiple 'first' terminators encountered during numbering.");
- inserted = inserted; // Avoid compiler warning if assertions turned off.
- i2miMap_.push_back(0);
-
- MIIndex = getNextIndex(MIIndex);
- }
-
- bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
- assert(inserted && "multiple MachineInstr -> index mappings");
- inserted = true;
- i2miMap_.push_back(I);
- MIIndex = getNextIndex(MIIndex);
- FunctionSize++;
-
- // Insert max(1, numdefs) empty slots after every instruction.
- unsigned Slots = I->getDesc().getNumDefs();
- if (Slots == 0)
- Slots = 1;
- while (Slots--) {
- MIIndex = getNextIndex(MIIndex);
- i2miMap_.push_back(0);
- }
-
- }
-
- if (MBB->getFirstTerminator() == MBB->end()) {
- // Leave a gap for before terminators, this is where we will point
- // PHI kills.
- LiveIndex tGap(true, MIIndex);
- bool inserted =
- terminatorGaps.insert(std::make_pair(&*MBB, tGap)).second;
- assert(inserted &&
- "Multiple 'first' terminators encountered during numbering.");
- inserted = inserted; // Avoid compiler warning if assertions turned off.
- i2miMap_.push_back(0);
-
- MIIndex = getNextIndex(MIIndex);
- }
-
- // Set the MBB2IdxMap entry for this MBB.
- MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, getPrevSlot(MIIndex));
- Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
- }
-
- std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
-
- if (!OldI2MI.empty())
- for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
- for (LiveInterval::iterator LI = OI->second->begin(),
- LE = OI->second->end(); LI != LE; ++LI) {
-
- // Remap the start index of the live range to the corresponding new
- // number, or our best guess at what it _should_ correspond to if the
- // original instruction has been erased. This is either the following
- // instruction or its predecessor.
- unsigned index = LI->start.getVecIndex();
- LiveIndex::Slot offset = LI->start.getSlot();
- if (LI->start.isLoad()) {
- std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
- // Take the pair containing the index
- std::vector<IdxMBBPair>::const_iterator J =
- (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
-
- LI->start = getMBBStartIdx(J->second);
- } else {
- LI->start = LiveIndex(
- LiveIndex(mi2iMap_[OldI2MI[index]]),
- (LiveIndex::Slot)offset);
- }
-
- // Remap the ending index in the same way that we remapped the start,
- // except for the final step where we always map to the immediately
- // following instruction.
- index = (getPrevSlot(LI->end)).getVecIndex();
- offset = LI->end.getSlot();
- if (LI->end.isLoad()) {
- // VReg dies at end of block.
- std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
- --I;
-
- LI->end = getNextSlot(getMBBEndIdx(I->second));
- } else {
- unsigned idx = index;
- while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
-
- if (index != OldI2MI.size())
- LI->end =
- LiveIndex(mi2iMap_[OldI2MI[index]],
- (idx == index ? offset : LiveIndex::LOAD));
- else
- LI->end =
- LiveIndex(LiveIndex::NUM * i2miMap_.size());
- }
- }
-
- for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
- VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
- VNInfo* vni = *VNI;
-
- // Remap the VNInfo def index, which works the same as the
- // start indices above. VN's with special sentinel defs
- // don't need to be remapped.
- if (vni->isDefAccurate() && !vni->isUnused()) {
- unsigned index = vni->def.getVecIndex();
- LiveIndex::Slot offset = vni->def.getSlot();
- if (vni->def.isLoad()) {
- std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
- // Take the pair containing the index
- std::vector<IdxMBBPair>::const_iterator J =
- (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;
-
- vni->def = getMBBStartIdx(J->second);
- } else {
- vni->def = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
- }
- }
-
- // Remap the VNInfo kill indices, which works the same as
- // the end indices above.
- for (size_t i = 0; i < vni->kills.size(); ++i) {
- unsigned index = getPrevSlot(vni->kills[i]).getVecIndex();
- LiveIndex::Slot offset = vni->kills[i].getSlot();
-
- if (vni->kills[i].isLoad()) {
- assert("Value killed at a load slot.");
- /*std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
- --I;
-
- vni->kills[i] = getMBBEndIdx(I->second);*/
- } else {
- if (vni->kills[i].isPHIIndex()) {
- std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
- --I;
- vni->kills[i] = terminatorGaps[I->second];
- } else {
- assert(OldI2MI[index] != 0 &&
- "Kill refers to instruction not present in index maps.");
- vni->kills[i] = LiveIndex(mi2iMap_[OldI2MI[index]], offset);
- }
-
- /*
- unsigned idx = index;
- while (index < OldI2MI.size() && !OldI2MI[index]) ++index;
-
- if (index != OldI2MI.size())
- vni->kills[i] = mi2iMap_[OldI2MI[index]] +
- (idx == index ? offset : 0);
- else
- vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
- */
- }
- }
- }
- }
-}
-
-void LiveIntervals::scaleNumbering(int factor) {
- // Need to
- // * scale MBB begin and end points
- // * scale all ranges.
- // * Update VNI structures.
- // * Scale instruction numberings
-
- // Scale the MBB indices.
- Idx2MBBMap.clear();
- for (MachineFunction::iterator MBB = mf_->begin(), MBBE = mf_->end();
- MBB != MBBE; ++MBB) {
- std::pair<LiveIndex, LiveIndex> &mbbIndices = MBB2IdxMap[MBB->getNumber()];
- mbbIndices.first = mbbIndices.first.scale(factor);
- mbbIndices.second = mbbIndices.second.scale(factor);
- Idx2MBBMap.push_back(std::make_pair(mbbIndices.first, MBB));
- }
- std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
-
- // Scale terminator gaps.
- for (DenseMap<MachineBasicBlock*, LiveIndex>::iterator
- TGI = terminatorGaps.begin(), TGE = terminatorGaps.end();
- TGI != TGE; ++TGI) {
- terminatorGaps[TGI->first] = TGI->second.scale(factor);
- }
-
- // Scale the intervals.
- for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
- LI->second->scaleNumbering(factor);
- }
-
- // Scale MachineInstrs.
- Mi2IndexMap oldmi2iMap = mi2iMap_;
- LiveIndex highestSlot;
- for (Mi2IndexMap::iterator MI = oldmi2iMap.begin(), ME = oldmi2iMap.end();
- MI != ME; ++MI) {
- LiveIndex newSlot = MI->second.scale(factor);
- mi2iMap_[MI->first] = newSlot;
- highestSlot = std::max(highestSlot, newSlot);
- }
-
- unsigned highestVIndex = highestSlot.getVecIndex();
- i2miMap_.clear();
- i2miMap_.resize(highestVIndex + 1);
- for (Mi2IndexMap::iterator MI = mi2iMap_.begin(), ME = mi2iMap_.end();
- MI != ME; ++MI) {
- i2miMap_[MI->second.getVecIndex()] = const_cast<MachineInstr *>(MI->first);
- }
-
-}
-
-
/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
@@ -532,10 +116,9 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
tii_ = tm_->getInstrInfo();
aa_ = &getAnalysis<AliasAnalysis>();
lv_ = &getAnalysis<LiveVariables>();
+ indexes_ = &getAnalysis<SlotIndexes>();
allocatableRegs_ = tri_->getAllocatableSet(fn);
- processImplicitDefs();
- computeNumbering();
computeIntervals();
performEarlyCoalescing();
@@ -579,12 +162,13 @@ bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
VirtRegMap &vrm, unsigned reg) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- for (LiveIndex index = getBaseIndex(I->start),
- end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
- index = getNextIndex(index)) {
+ for (SlotIndex index = I->start.getBaseIndex(),
+ end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
+ index != end;
+ index = index.getNextIndex()) {
// skip deleted instructions
while (index != end && !getInstructionFromIndex(index))
- index = getNextIndex(index);
+ index = index.getNextIndex();
if (index == end) break;
MachineInstr *MI = getInstructionFromIndex(index);
@@ -620,16 +204,17 @@ bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
for (LiveInterval::Ranges::const_iterator
I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- for (LiveIndex index = getBaseIndex(I->start),
- end = getNextIndex(getBaseIndex(getPrevSlot(I->end))); index != end;
- index = getNextIndex(index)) {
+ for (SlotIndex index = I->start.getBaseIndex(),
+ end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
+ index != end;
+ index = index.getNextIndex()) {
// Skip deleted instructions.
MachineInstr *MI = 0;
while (index != end) {
MI = getInstructionFromIndex(index);
if (MI)
break;
- index = getNextIndex(index);
+ index = index.getNextIndex();
}
if (index == end) break;
@@ -664,7 +249,7 @@ static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
MachineBasicBlock::iterator mi,
- LiveIndex MIIdx,
+ SlotIndex MIIdx,
MachineOperand& MO,
unsigned MOIdx,
LiveInterval &interval) {
@@ -680,11 +265,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
if (interval.empty()) {
// Get the Idx of the defining instructions.
- LiveIndex defIndex = getDefIndex(MIIdx);
+ SlotIndex defIndex = MIIdx.getDefIndex();
// Earlyclobbers move back one, so that they overlap the live range
// of inputs.
if (MO.isEarlyClobber())
- defIndex = getUseIndex(MIIdx);
+ defIndex = MIIdx.getUseIndex();
VNInfo *ValNo;
MachineInstr *CopyMI = NULL;
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
@@ -704,16 +289,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// will be a single kill, in MBB, which comes after the definition.
if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
// FIXME: what about dead vars?
- LiveIndex killIdx;
+ SlotIndex killIdx;
if (vi.Kills[0] != mi)
- killIdx = getNextSlot(getUseIndex(getInstructionIndex(vi.Kills[0])));
- else if (MO.isEarlyClobber())
- // Earlyclobbers that die in this instruction move up one extra, to
- // compensate for having the starting point moved back one. This
- // gets them to overlap the live range of other outputs.
- killIdx = getNextSlot(getNextSlot(defIndex));
+ killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
else
- killIdx = getNextSlot(defIndex);
+ killIdx = defIndex.getStoreIndex();
// If the kill happens after the definition, we have an intra-block
// live range.
@@ -732,7 +312,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// of the defining block, potentially live across some blocks, then is
// live into some number of blocks, but gets killed. Start by adding a
// range that goes from this definition to the end of the defining block.
- LiveRange NewLR(defIndex, getNextSlot(getMBBEndIdx(mbb)), ValNo);
+ LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
+ ValNo);
DEBUG(errs() << " +" << NewLR);
interval.addRange(NewLR);
@@ -741,9 +322,10 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// live interval.
for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
E = vi.AliveBlocks.end(); I != E; ++I) {
- LiveRange LR(getMBBStartIdx(*I),
- getNextSlot(getMBBEndIdx(*I)), // MBB ends at -1.
- ValNo);
+ LiveRange LR(
+ getMBBStartIdx(mf_->getBlockNumbered(*I)),
+ getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
+ ValNo);
interval.addRange(LR);
DEBUG(errs() << " +" << LR);
}
@@ -752,8 +334,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// block to the 'use' slot of the killing instruction.
for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
MachineInstr *Kill = vi.Kills[i];
- LiveIndex killIdx =
- getNextSlot(getUseIndex(getInstructionIndex(Kill)));
+ SlotIndex killIdx =
+ getInstructionIndex(Kill).getDefIndex();
LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
interval.addRange(LR);
ValNo->addKill(killIdx);
@@ -772,13 +354,13 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// need to take the LiveRegion that defines this register and split it
// into two values.
assert(interval.containsOneValue());
- LiveIndex DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
- LiveIndex RedefIndex = getDefIndex(MIIdx);
+ SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
+ SlotIndex RedefIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
- RedefIndex = getUseIndex(MIIdx);
+ RedefIndex = MIIdx.getUseIndex();
const LiveRange *OldLR =
- interval.getLiveRangeContaining(getPrevSlot(RedefIndex));
+ interval.getLiveRangeContaining(RedefIndex.getUseIndex());
VNInfo *OldValNo = OldLR->valno;
// Delete the initial value, which should be short and continuous,
@@ -811,10 +393,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// If this redefinition is dead, we need to add a dummy unit live
// range covering the def slot.
if (MO.isDead())
- interval.addRange(
- LiveRange(RedefIndex, MO.isEarlyClobber() ?
- getNextSlot(getNextSlot(RedefIndex)) :
- getNextSlot(RedefIndex), OldValNo));
+ interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
+ OldValNo));
DEBUG({
errs() << " RESULT: ";
@@ -829,9 +409,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
VNInfo *VNI = interval.getValNumInfo(0);
MachineInstr *Killer = vi.Kills[0];
phiJoinCopies.push_back(Killer);
- LiveIndex Start = getMBBStartIdx(Killer->getParent());
- LiveIndex End =
- getNextSlot(getUseIndex(getInstructionIndex(Killer)));
+ SlotIndex Start = getMBBStartIdx(Killer->getParent());
+ SlotIndex End = getInstructionIndex(Killer).getDefIndex();
DEBUG({
errs() << " Removing [" << Start << "," << End << "] from: ";
interval.print(errs(), tri_);
@@ -841,7 +420,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
assert(interval.ranges.size() == 1 &&
"Newly discovered PHI interval has >1 ranges.");
MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
- VNI->addKill(terminatorGaps[killMBB]);
+ VNI->addKill(indexes_->getTerminatorGap(killMBB));
VNI->setHasPHIKill(true);
DEBUG({
errs() << " RESULT: ";
@@ -851,8 +430,8 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// Replace the interval with one of a NEW value number. Note that this
// value number isn't actually defined by an instruction, weird huh? :)
LiveRange LR(Start, End,
- interval.getNextValue(LiveIndex(mbb->getNumber()),
- 0, false, VNInfoAllocator));
+ interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
+ 0, false, VNInfoAllocator));
LR.valno->setIsPHIDef(true);
DEBUG(errs() << " replace range with " << LR);
interval.addRange(LR);
@@ -866,9 +445,9 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// In the case of PHI elimination, each variable definition is only
// live until the end of the block. We've already taken care of the
// rest of the live range.
- LiveIndex defIndex = getDefIndex(MIIdx);
+ SlotIndex defIndex = MIIdx.getDefIndex();
if (MO.isEarlyClobber())
- defIndex = getUseIndex(MIIdx);
+ defIndex = MIIdx.getUseIndex();
VNInfo *ValNo;
MachineInstr *CopyMI = NULL;
@@ -880,10 +459,10 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
CopyMI = mi;
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
- LiveIndex killIndex = getNextSlot(getMBBEndIdx(mbb));
+ SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
- ValNo->addKill(terminatorGaps[mbb]);
+ ValNo->addKill(indexes_->getTerminatorGap(mbb));
ValNo->setHasPHIKill(true);
DEBUG(errs() << " +" << LR);
}
@@ -894,7 +473,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator mi,
- LiveIndex MIIdx,
+ SlotIndex MIIdx,
MachineOperand& MO,
LiveInterval &interval,
MachineInstr *CopyMI) {
@@ -905,12 +484,12 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
printRegName(interval.reg, tri_);
});
- LiveIndex baseIndex = MIIdx;
- LiveIndex start = getDefIndex(baseIndex);
+ SlotIndex baseIndex = MIIdx;
+ SlotIndex start = baseIndex.getDefIndex();
// Earlyclobbers move back one.
if (MO.isEarlyClobber())
- start = getUseIndex(MIIdx);
- LiveIndex end = start;
+ start = MIIdx.getUseIndex();
+ SlotIndex end = start;
// If it is not used after definition, it is considered dead at
// the instruction defining it. Hence its interval is:
@@ -919,53 +498,51 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
// advance below compensates.
if (MO.isDead()) {
DEBUG(errs() << " dead");
- if (MO.isEarlyClobber())
- end = getNextSlot(getNextSlot(start));
- else
- end = getNextSlot(start);
+ end = start.getStoreIndex();
goto exit;
}
// If it is not dead on definition, it must be killed by a
// subsequent instruction. Hence its interval is:
// [defSlot(def), useSlot(kill)+1)
- baseIndex = getNextIndex(baseIndex);
+ baseIndex = baseIndex.getNextIndex();
while (++mi != MBB->end()) {
- while (baseIndex.getVecIndex() < i2miMap_.size() &&
- getInstructionFromIndex(baseIndex) == 0)
- baseIndex = getNextIndex(baseIndex);
+
+ if (getInstructionFromIndex(baseIndex) == 0)
+ baseIndex = indexes_->getNextNonNullIndex(baseIndex);
+
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(errs() << " killed");
- end = getNextSlot(getUseIndex(baseIndex));
+ end = baseIndex.getDefIndex();
goto exit;
} else {
int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
if (DefIdx != -1) {
if (mi->isRegTiedToUseOperand(DefIdx)) {
// Two-address instruction.
- end = getDefIndex(baseIndex);
- if (mi->getOperand(DefIdx).isEarlyClobber())
- end = getUseIndex(baseIndex);
+ end = baseIndex.getDefIndex();
+ assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
+ "Two address instruction is an early clobber?");
} else {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that defines
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(errs() << " dead");
- end = getNextSlot(start);
+ end = start.getStoreIndex();
}
goto exit;
}
}
- baseIndex = getNextIndex(baseIndex);
+ baseIndex = baseIndex.getNextIndex();
}
// The only case we should have a dead physreg here without a killing or
// instruction where we know it's dead is if it is live-in to the function
// and never used. Another possible case is the implicit use of the
// physical register has been deleted by two-address pass.
- end = getNextSlot(start);
+ end = start.getStoreIndex();
exit:
assert(start < end && "did not find end of interval?");
@@ -985,7 +562,7 @@ exit:
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI,
- LiveIndex MIIdx,
+ SlotIndex MIIdx,
MachineOperand& MO,
unsigned MOIdx) {
if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
@@ -1012,7 +589,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
}
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
- LiveIndex MIIdx,
+ SlotIndex MIIdx,
LiveInterval &interval, bool isAlias) {
DEBUG({
errs() << "\t\tlivein register: ";
@@ -1022,18 +599,18 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
// Look for kills, if it reaches a def before it's killed, then it shouldn't
// be considered a livein.
MachineBasicBlock::iterator mi = MBB->begin();
- LiveIndex baseIndex = MIIdx;
- LiveIndex start = baseIndex;
- while (baseIndex.getVecIndex() < i2miMap_.size() &&
- getInstructionFromIndex(baseIndex) == 0)
- baseIndex = getNextIndex(baseIndex);
- LiveIndex end = baseIndex;
+ SlotIndex baseIndex = MIIdx;
+ SlotIndex start = baseIndex;
+ if (getInstructionFromIndex(baseIndex) == 0)
+ baseIndex = indexes_->getNextNonNullIndex(baseIndex);
+
+ SlotIndex end = baseIndex;
bool SeenDefUse = false;
while (mi != MBB->end()) {
if (mi->killsRegister(interval.reg, tri_)) {
DEBUG(errs() << " killed");
- end = getNextSlot(getUseIndex(baseIndex));
+ end = baseIndex.getDefIndex();
SeenDefUse = true;
break;
} else if (mi->modifiesRegister(interval.reg, tri_)) {
@@ -1042,17 +619,14 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
// it. Hence its interval is:
// [defSlot(def), defSlot(def)+1)
DEBUG(errs() << " dead");
- end = getNextSlot(getDefIndex(start));
+ end = start.getStoreIndex();
SeenDefUse = true;
break;
}
- baseIndex = getNextIndex(baseIndex);
++mi;
if (mi != MBB->end()) {
- while (baseIndex.getVecIndex() < i2miMap_.size() &&
- getInstructionFromIndex(baseIndex) == 0)
- baseIndex = getNextIndex(baseIndex);
+ baseIndex = indexes_->getNextNonNullIndex(baseIndex);
}
}
@@ -1060,7 +634,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
if (!SeenDefUse) {
if (isAlias) {
DEBUG(errs() << " dead");
- end = getNextSlot(getDefIndex(MIIdx));
+ end = MIIdx.getStoreIndex();
} else {
DEBUG(errs() << " live through");
end = baseIndex;
@@ -1068,7 +642,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
}
VNInfo *vni =
- interval.getNextValue(LiveIndex(MBB->getNumber()),
+ interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
0, false, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);
@@ -1139,11 +713,11 @@ void LiveIntervals::performEarlyCoalescing() {
MachineInstr *PHICopy = OtherCopies[i];
DEBUG(errs() << "Moving: " << *PHICopy);
- LiveIndex MIIndex = getInstructionIndex(PHICopy);
- LiveIndex DefIndex = getDefIndex(MIIndex);
+ SlotIndex MIIndex = getInstructionIndex(PHICopy);
+ SlotIndex DefIndex = MIIndex.getDefIndex();
LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
- LiveIndex StartIndex = SLR->start;
- LiveIndex EndIndex = SLR->end;
+ SlotIndex StartIndex = SLR->start;
+ SlotIndex EndIndex = SLR->end;
// Delete val# defined by the now identity copy and add the range from
// beginning of the mbb to the end of the range.
@@ -1169,11 +743,11 @@ void LiveIntervals::performEarlyCoalescing() {
MachineInstr *PHICopy = IdentCopies[i];
DEBUG(errs() << "Coalescing: " << *PHICopy);
- LiveIndex MIIndex = getInstructionIndex(PHICopy);
- LiveIndex DefIndex = getDefIndex(MIIndex);
+ SlotIndex MIIndex = getInstructionIndex(PHICopy);
+ SlotIndex DefIndex = MIIndex.getDefIndex();
LiveRange *SLR = SrcInt.getLiveRangeContaining(DefIndex);
- LiveIndex StartIndex = SLR->start;
- LiveIndex EndIndex = SLR->end;
+ SlotIndex StartIndex = SLR->start;
+ SlotIndex EndIndex = SLR->end;
// Delete val# defined by the now identity copy and add the range from
// beginning of the mbb to the end of the range.
@@ -1186,9 +760,9 @@ void LiveIntervals::performEarlyCoalescing() {
}
// Remove the phi join and update the phi block liveness.
- LiveIndex MIIndex = getInstructionIndex(Join);
- LiveIndex UseIndex = getUseIndex(MIIndex);
- LiveIndex DefIndex = getDefIndex(MIIndex);
+ SlotIndex MIIndex = getInstructionIndex(Join);
+ SlotIndex UseIndex = MIIndex.getUseIndex();
+ SlotIndex DefIndex = MIIndex.getDefIndex();
LiveRange *SLR = SrcInt.getLiveRangeContaining(UseIndex);
LiveRange *DLR = DstInt.getLiveRangeContaining(DefIndex);
DLR->valno->setCopy(0);
@@ -1218,7 +792,7 @@ void LiveIntervals::computeIntervals() {
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
// Track the index of the current machine instr.
- LiveIndex MIIndex = getMBBStartIdx(MBB);
+ SlotIndex MIIndex = getMBBStartIdx(MBB);
DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");
MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
@@ -1235,9 +809,8 @@ void LiveIntervals::computeIntervals() {
}
// Skip over empty initial indices.
- while (MIIndex.getVecIndex() < i2miMap_.size() &&
- getInstructionFromIndex(MIIndex) == 0)
- MIIndex = getNextIndex(MIIndex);
+ if (getInstructionFromIndex(MIIndex) == 0)
+ MIIndex = indexes_->getNextNonNullIndex(MIIndex);
for (; MI != miEnd; ++MI) {
DEBUG(errs() << MIIndex << "\t" << *MI);
@@ -1254,19 +827,9 @@ void LiveIntervals::computeIntervals() {
else if (MO.isUndef())
UndefUses.push_back(MO.getReg());
}
-
- // Skip over the empty slots after each instruction.
- unsigned Slots = MI->getDesc().getNumDefs();
- if (Slots == 0)
- Slots = 1;
-
- while (Slots--)
- MIIndex = getNextIndex(MIIndex);
- // Skip over empty indices.
- while (MIIndex.getVecIndex() < i2miMap_.size() &&
- getInstructionFromIndex(MIIndex) == 0)
- MIIndex = getNextIndex(MIIndex);
+ // Move to the next instr slot.
+ MIIndex = indexes_->getNextNonNullIndex(MIIndex);
}
}
@@ -1279,45 +842,6 @@ void LiveIntervals::computeIntervals() {
}
}
-bool LiveIntervals::findLiveInMBBs(
- LiveIndex Start, LiveIndex End,
- SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
- std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
-
- bool ResVal = false;
- while (I != Idx2MBBMap.end()) {
- if (I->first >= End)
- break;
- MBBs.push_back(I->second);
- ResVal = true;
- ++I;
- }
- return ResVal;
-}
-
-bool LiveIntervals::findReachableMBBs(
- LiveIndex Start, LiveIndex End,
- SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
- std::vector<IdxMBBPair>::const_iterator I =
- std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), Start);
-
- bool ResVal = false;
- while (I != Idx2MBBMap.end()) {
- if (I->first > End)
- break;
- MachineBasicBlock *MBB = I->second;
- if (getMBBEndIdx(MBB) > End)
- break;
- for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
- SE = MBB->succ_end(); SI != SE; ++SI)
- MBBs.push_back(*SI);
- ResVal = true;
- ++I;
- }
- return ResVal;
-}
-
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
return new LiveInterval(reg, Weight);
@@ -1389,8 +913,8 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
- LiveIndex UseIdx) const {
- LiveIndex Index = getInstructionIndex(MI);
+ SlotIndex UseIdx) const {
+ SlotIndex Index = getInstructionIndex(MI);
VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
return UI != li.end() && UI->valno == ValNo;
@@ -1417,7 +941,7 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
re = mri_->use_end(); ri != re; ++ri) {
MachineInstr *UseMI = &*ri;
- LiveIndex UseIdx = getInstructionIndex(UseMI);
+ SlotIndex UseIdx = getInstructionIndex(UseMI);
if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
continue;
if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
@@ -1502,7 +1026,7 @@ static bool FilterFoldedOps(MachineInstr *MI,
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
VirtRegMap &vrm, MachineInstr *DefMI,
- LiveIndex InstrIdx,
+ SlotIndex InstrIdx,
SmallVector<unsigned, 2> &Ops,
bool isSS, int Slot, unsigned Reg) {
// If it is an implicit def instruction, just delete it.
@@ -1540,9 +1064,7 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
vrm.transferEmergencySpills(MI, fmi);
- mi2iMap_.erase(MI);
- i2miMap_[InstrIdx.getVecIndex()] = fmi;
- mi2iMap_[fmi] = InstrIdx;
+ ReplaceMachineInstrInMaps(MI, fmi);
MI = MBB.insert(MBB.erase(MI), fmi);
++numFolds;
return true;
@@ -1570,19 +1092,21 @@ bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
}
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
- SmallPtrSet<MachineBasicBlock*, 4> MBBs;
- for (LiveInterval::Ranges::const_iterator
- I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
- std::vector<IdxMBBPair>::const_iterator II =
- std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
- if (II == Idx2MBBMap.end())
- continue;
- if (I->end > II->first) // crossing a MBB.
- return false;
- MBBs.insert(II->second);
- if (MBBs.size() > 1)
+ LiveInterval::Ranges::const_iterator itr = li.ranges.begin();
+
+ MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);
+
+ if (mbb == 0)
+ return false;
+
+ for (++itr; itr != li.ranges.end(); ++itr) {
+ MachineBasicBlock *mbb2 =
+ indexes_->getMBBCoveringRange(itr->start, itr->end);
+
+ if (mbb2 != mbb)
return false;
}
+
return true;
}
@@ -1614,7 +1138,7 @@ void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
- bool TrySplit, LiveIndex index, LiveIndex end,
+ bool TrySplit, SlotIndex index, SlotIndex end,
MachineInstr *MI,
MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
unsigned Slot, int LdSlot,
@@ -1791,14 +1315,13 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (HasUse) {
if (CreatedNewVReg) {
- LiveRange LR(getLoadIndex(index), getNextSlot(getUseIndex(index)),
- nI.getNextValue(LiveIndex(), 0, false,
- VNInfoAllocator));
+ LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
+ nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
} else {
// Extend the split live interval to this def / use.
- LiveIndex End = getNextSlot(getUseIndex(index));
+ SlotIndex End = index.getDefIndex();
LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
nI.getValNumInfo(nI.getNumValNums()-1));
DEBUG(errs() << " +" << LR);
@@ -1806,9 +1329,8 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
}
}
if (HasDef) {
- LiveRange LR(getDefIndex(index), getStoreIndex(index),
- nI.getNextValue(LiveIndex(), 0, false,
- VNInfoAllocator));
+ LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
+ nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
}
@@ -1824,13 +1346,13 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
const VNInfo *VNI,
MachineBasicBlock *MBB,
- LiveIndex Idx) const {
- LiveIndex End = getMBBEndIdx(MBB);
+ SlotIndex Idx) const {
+ SlotIndex End = getMBBEndIdx(MBB);
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
- if (VNI->kills[j].isPHIIndex())
+ if (VNI->kills[j].isPHI())
continue;
- LiveIndex KillIdx = VNI->kills[j];
+ SlotIndex KillIdx = VNI->kills[j];
if (KillIdx > Idx && KillIdx < End)
return true;
}
@@ -1841,11 +1363,11 @@ bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
/// during spilling.
namespace {
struct RewriteInfo {
- LiveIndex Index;
+ SlotIndex Index;
MachineInstr *MI;
bool HasUse;
bool HasDef;
- RewriteInfo(LiveIndex i, MachineInstr *mi, bool u, bool d)
+ RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
: Index(i), MI(mi), HasUse(u), HasDef(d) {}
};
@@ -1874,8 +1396,8 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
std::vector<LiveInterval*> &NewLIs) {
bool AllCanFold = true;
unsigned NewVReg = 0;
- LiveIndex start = getBaseIndex(I->start);
- LiveIndex end = getNextIndex(getBaseIndex(getPrevSlot(I->end)));
+ SlotIndex start = I->start.getBaseIndex();
+ SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
// First collect all the def / use in this live range that will be rewritten.
// Make sure they are sorted according to instruction index.
@@ -1886,7 +1408,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
MachineOperand &O = ri.getOperand();
++ri;
assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
- LiveIndex index = getInstructionIndex(MI);
+ SlotIndex index = getInstructionIndex(MI);
if (index < start || index >= end)
continue;
@@ -1910,7 +1432,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
RewriteInfo &rwi = RewriteMIs[i];
++i;
- LiveIndex index = rwi.Index;
+ SlotIndex index = rwi.Index;
bool MIHasUse = rwi.HasUse;
bool MIHasDef = rwi.HasDef;
MachineInstr *MI = rwi.MI;
@@ -1993,12 +1515,12 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
if (MI != ReMatOrigDefMI || !CanDelete) {
bool HasKill = false;
if (!HasUse)
- HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
+ HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
else {
// If this is a two-address code, then this index starts a new VNInfo.
- const VNInfo *VNI = li.findDefinedVNInfoForRegInt(getDefIndex(index));
+ const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
if (VNI)
- HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
+ HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
}
DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
SpillIdxes.find(MBBId);
@@ -2071,7 +1593,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
}
}
-bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
+bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
unsigned vr, BitVector &RestoreMBBs,
DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
if (!RestoreMBBs[Id])
@@ -2085,7 +1607,7 @@ bool LiveIntervals::alsoFoldARestore(int Id, LiveIndex index,
return false;
}
-void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
+void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
unsigned vr, BitVector &RestoreMBBs,
DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
if (!RestoreMBBs[Id])
@@ -2093,7 +1615,7 @@ void LiveIntervals::eraseRestoreInfo(int Id, LiveIndex index,
std::vector<SRInfo> &Restores = RestoreIdxes[Id];
for (unsigned i = 0, e = Restores.size(); i != e; ++i)
if (Restores[i].index == index && Restores[i].vreg)
- Restores[i].index = LiveIndex();
+ Restores[i].index = SlotIndex();
}
/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
@@ -2192,18 +1714,18 @@ addIntervalsForSpillsFast(const LiveInterval &li,
}
// Fill in the new live interval.
- LiveIndex index = getInstructionIndex(MI);
+ SlotIndex index = getInstructionIndex(MI);
if (HasUse) {
- LiveRange LR(getLoadIndex(index), getUseIndex(index),
- nI.getNextValue(LiveIndex(), 0, false,
+ LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
+ nI.getNextValue(SlotIndex(), 0, false,
getVNInfoAllocator()));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
vrm.addRestorePoint(NewVReg, MI);
}
if (HasDef) {
- LiveRange LR(getDefIndex(index), getStoreIndex(index),
- nI.getNextValue(LiveIndex(), 0, false,
+ LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
+ nI.getNextValue(SlotIndex(), 0, false,
getVNInfoAllocator()));
DEBUG(errs() << " +" << LR);
nI.addRange(LR);
@@ -2267,8 +1789,8 @@ addIntervalsForSpills(const LiveInterval &li,
if (vrm.getPreSplitReg(li.reg)) {
vrm.setIsSplitFromReg(li.reg, 0);
// Unset the split kill marker on the last use.
- LiveIndex KillIdx = vrm.getKillPoint(li.reg);
- if (KillIdx != LiveIndex()) {
+ SlotIndex KillIdx = vrm.getKillPoint(li.reg);
+ if (KillIdx != SlotIndex()) {
MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
assert(KillMI && "Last use disappeared?");
int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
@@ -2394,7 +1916,7 @@ addIntervalsForSpills(const LiveInterval &li,
while (Id != -1) {
std::vector<SRInfo> &spills = SpillIdxes[Id];
for (unsigned i = 0, e = spills.size(); i != e; ++i) {
- LiveIndex index = spills[i].index;
+ SlotIndex index = spills[i].index;
unsigned VReg = spills[i].vreg;
LiveInterval &nI = getOrCreateInterval(VReg);
bool isReMat = vrm.isReMaterialized(VReg);
@@ -2432,16 +1954,16 @@ addIntervalsForSpills(const LiveInterval &li,
if (FoundUse) {
// Also folded uses, do not issue a load.
eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
- nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
+ nI.removeRange(index.getLoadIndex(), index.getDefIndex());
}
- nI.removeRange(getDefIndex(index), getStoreIndex(index));
+ nI.removeRange(index.getDefIndex(), index.getStoreIndex());
}
}
// Otherwise tell the spiller to issue a spill.
if (!Folded) {
LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
- bool isKill = LR->end == getStoreIndex(index);
+ bool isKill = LR->end == index.getStoreIndex();
if (!MI->registerDefIsDead(nI.reg))
// No need to spill a dead def.
vrm.addSpillPoint(VReg, isKill, MI);
@@ -2457,8 +1979,8 @@ addIntervalsForSpills(const LiveInterval &li,
while (Id != -1) {
std::vector<SRInfo> &restores = RestoreIdxes[Id];
for (unsigned i = 0, e = restores.size(); i != e; ++i) {
- LiveIndex index = restores[i].index;
- if (index == LiveIndex())
+ SlotIndex index = restores[i].index;
+ if (index == SlotIndex())
continue;
unsigned VReg = restores[i].vreg;
LiveInterval &nI = getOrCreateInterval(VReg);
@@ -2513,7 +2035,7 @@ addIntervalsForSpills(const LiveInterval &li,
// If folding is not possible / failed, then tell the spiller to issue a
// load / rematerialization for us.
if (Folded)
- nI.removeRange(getLoadIndex(index), getNextSlot(getUseIndex(index)));
+ nI.removeRange(index.getLoadIndex(), index.getDefIndex());
else
vrm.addRestorePoint(VReg, MI);
}
@@ -2526,10 +2048,10 @@ addIntervalsForSpills(const LiveInterval &li,
for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
LiveInterval *LI = NewLIs[i];
if (!LI->empty()) {
- LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
+ LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
if (!AddedKill.count(LI)) {
LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
- LiveIndex LastUseIdx = getBaseIndex(LR->end);
+ SlotIndex LastUseIdx = LR->end.getBaseIndex();
MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
assert(UseIdx != -1);
@@ -2580,7 +2102,7 @@ unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
E = mri_->reg_end(); I != E; ++I) {
MachineOperand &O = I.getOperand();
MachineInstr *MI = O.getParent();
- LiveIndex Index = getInstructionIndex(MI);
+ SlotIndex Index = getInstructionIndex(MI);
if (pli.liveAt(Index))
++NumConflicts;
}
@@ -2623,15 +2145,15 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
if (SeenMIs.count(MI))
continue;
SeenMIs.insert(MI);
- LiveIndex Index = getInstructionIndex(MI);
+ SlotIndex Index = getInstructionIndex(MI);
for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
unsigned PReg = PRegs[i];
LiveInterval &pli = getInterval(PReg);
if (!pli.liveAt(Index))
continue;
vrm.addEmergencySpill(PReg, MI);
- LiveIndex StartIdx = getLoadIndex(Index);
- LiveIndex EndIdx = getNextSlot(getStoreIndex(Index));
+ SlotIndex StartIdx = Index.getLoadIndex();
+ SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
pli.removeRange(StartIdx, EndIdx);
Cut = true;
@@ -2651,7 +2173,8 @@ bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
continue;
LiveInterval &spli = getInterval(*AS);
if (spli.liveAt(Index))
- spli.removeRange(getLoadIndex(Index), getNextSlot(getStoreIndex(Index)));
+ spli.removeRange(Index.getLoadIndex(),
+ Index.getNextIndex().getBaseIndex());
}
}
}
@@ -2662,13 +2185,13 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
MachineInstr* startInst) {
LiveInterval& Interval = getOrCreateInterval(reg);
VNInfo* VN = Interval.getNextValue(
- LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
+ SlotIndex(getInstructionIndex(startInst).getDefIndex()),
startInst, true, getVNInfoAllocator());
VN->setHasPHIKill(true);
- VN->kills.push_back(terminatorGaps[startInst->getParent()]);
+ VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
LiveRange LR(
- LiveIndex(getInstructionIndex(startInst), LiveIndex::DEF),
- getNextSlot(getMBBEndIdx(startInst->getParent())), VN);
+ SlotIndex(getInstructionIndex(startInst).getDefIndex()),
+ getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
Interval.addRange(LR);
return LR;
diff --git a/lib/CodeGen/LiveStackAnalysis.cpp b/lib/CodeGen/LiveStackAnalysis.cpp
index a7bea1fd4f..d2f3775288 100644
--- a/lib/CodeGen/LiveStackAnalysis.cpp
+++ b/lib/CodeGen/LiveStackAnalysis.cpp
@@ -27,15 +27,10 @@ using namespace llvm;
char LiveStacks::ID = 0;
static RegisterPass<LiveStacks> X("livestacks", "Live Stack Slot Analysis");
-void LiveStacks::scaleNumbering(int factor) {
- // Scale the intervals.
- for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
- LI->second.scaleNumbering(factor);
- }
-}
-
void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
+ AU.addPreserved<SlotIndexes>();
+ AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
}
diff --git a/lib/CodeGen/PreAllocSplitting.cpp b/lib/CodeGen/PreAllocSplitting.cpp
index b2a7312fd2..cce5ae817a 100644
--- a/lib/CodeGen/PreAllocSplitting.cpp
+++ b/lib/CodeGen/PreAllocSplitting.cpp
@@ -57,6 +57,7 @@ namespace {
const TargetRegisterInfo* TRI;
MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
+ SlotIndexes *SIs;
LiveIntervals *LIs;
LiveStacks *LSs;
VirtRegMap *VRM;
@@ -68,7 +69,7 @@ namespace {
MachineBasicBlock *BarrierMBB;
// Barrier - Current barrier index.
- LiveIndex BarrierIdx;
+ SlotIndex BarrierIdx;
// CurrLI - Current live interval being split.
LiveInterval *CurrLI;
@@ -83,16 +84,19 @@ namespace {
DenseMap<unsigned, int> IntervalSSMap;
// Def2SpillMap - A map from a def instruction index to spill index.
- DenseMap<LiveIndex, LiveIndex> Def2SpillMap;
+ DenseMap<SlotIndex, SlotIndex> Def2SpillMap;
public:
static char ID;
- PreAllocSplitting() : MachineFunctionPass(&ID) {}
+ PreAllocSplitting()
+ : MachineFunctionPass(&ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
AU.addRequired<LiveStacks>();
@@ -129,23 +133,23 @@ namespace {
private:
MachineBasicBlock::iterator
findNextEmptySlot(MachineBasicBlock*, MachineInstr*,
- LiveIndex&);
+ SlotIndex&);
MachineBasicBlock::iterator
findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
- SmallPtrSet<MachineInstr*, 4>&, LiveIndex&);
+ SmallPtrSet<MachineInstr*, 4>&, SlotIndex&);
MachineBasicBlock::iterator
- findRestorePoint(MachineBasicBlock*, MachineInstr*, LiveIndex,
- SmallPtrSet<MachineInstr*, 4>&, LiveIndex&);
+ findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
+ SmallPtrSet<MachineInstr*, 4>&, SlotIndex&);
int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);
bool IsAvailableInStack(MachineBasicBlock*, unsigned,
- LiveIndex, LiveIndex,
- LiveIndex&, int&) const;
+ SlotIndex, SlotIndex,
+ SlotIndex&, int&) const;
- void UpdateSpillSlotInterval(VNInfo*, LiveIndex, LiveIndex);
+ void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);
bool SplitRegLiveInterval(LiveInterval*);
@@ -157,7 +161,7 @@ namespace {
bool Rematerialize(unsigned vreg, VNInfo* ValNo,
MachineInstr* DefMI,
MachineBasicBlock::iterator RestorePt,
- LiveIndex RestoreIdx,
+ SlotIndex RestoreIdx,
SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
MachineInstr* DefMI,
@@ -209,12 +213,12 @@ const PassInfo *const llvm::PreAllocSplittingID = &X;
/// instruction index map. If there isn't one, return end().
MachineBasicBlock::iterator
PreAllocSplitting::findNextEmptySlot(MachineBasicBlock *MBB, MachineInstr *MI,
- LiveIndex &SpotIndex) {
+ SlotIndex &SpotIndex) {
MachineBasicBlock::iterator MII = MI;
if (++MII != MBB->end()) {
- LiveIndex Index =
+ SlotIndex Index =
LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
- if (Index != LiveIndex()) {
+ if (Index != SlotIndex()) {
SpotIndex = Index;
return MII;
}
@@ -230,7 +234,7 @@ MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
MachineInstr *DefMI,
SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
- LiveIndex &SpillIndex) {
+ SlotIndex &SpillIndex) {
MachineBasicBlock::iterator Pt = MBB->begin();
MachineBasicBlock::iterator MII = MI;
@@ -243,7 +247,7 @@ PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
while (MII != EndPt && !RefsInMBB.count(MII)) {
- LiveIndex Index = LIs->getInstructionIndex(MII);
+ SlotIndex Index = LIs->getInstructionIndex(MII);
// We can't insert the spill between the barrier (a call) and its
// corresponding call frame setup.
@@ -276,9 +280,9 @@ PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
- LiveIndex LastIdx,
+ SlotIndex LastIdx,
SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
- LiveIndex &RestoreIndex) {
+ SlotIndex &RestoreIndex) {
// FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
// begin index accordingly.
MachineBasicBlock::iterator Pt = MBB->end();
@@ -299,10 +303,10 @@ PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
// FIXME: Limit the number of instructions to examine to reduce
// compile time?
while (MII != EndPt) {
- LiveIndex Index = LIs->getInstructionIndex(MII);
+ SlotIndex Index = LIs->getInstructionIndex(MII);
if (Index > LastIdx)
break;
- LiveIndex Gap = LIs->findGapBeforeInstr(Index);
+ SlotIndex Gap = LIs->findGapBeforeInstr(Index);
// We can't insert a restore between the barrier (a call) and its
// corresponding call frame teardown.
@@ -311,7 +315,7 @@ PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
++MII;
} while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
- } else if (Gap != LiveIndex()) {
+ } else if (Gap != SlotIndex()) {
Pt = MII;
RestoreIndex = Gap;
}
@@ -344,7 +348,7 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
- CurrSValNo = CurrSLI->getNextValue(LiveIndex(), 0, false,
+ CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
LSs->getVNInfoAllocator());
return SS;
}
@@ -353,9 +357,9 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
- unsigned Reg, LiveIndex DefIndex,
- LiveIndex RestoreIndex,
- LiveIndex &SpillIndex,
+ unsigned Reg, SlotIndex DefIndex,
+ SlotIndex RestoreIndex,
+ SlotIndex &SpillIndex,
int& SS) const {
if (!DefMBB)
return false;
@@ -363,7 +367,7 @@ PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
if (I == IntervalSSMap.end())
return false;
- DenseMap<LiveIndex, LiveIndex>::iterator
+ DenseMap<SlotIndex, SlotIndex>::iterator
II = Def2SpillMap.find(DefIndex);
if (II == Def2SpillMap.end())
return false;
@@ -384,8 +388,8 @@ PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
/// interval being split, and the spill and restore indices, update the live
/// interval of the spill stack slot.
void
-PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
- LiveIndex RestoreIndex) {
+PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
+ SlotIndex RestoreIndex) {
assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
"Expect restore in the barrier mbb");
@@ -398,8 +402,8 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
}
SmallPtrSet<MachineBasicBlock*, 4> Processed;
- LiveIndex EndIdx = LIs->getMBBEndIdx(MBB);
- LiveRange SLR(SpillIndex, LIs->getNextSlot(EndIdx), CurrSValNo);
+ SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
+ LiveRange SLR(SpillIndex, EndIdx.getNextSlot(), CurrSValNo);
CurrSLI->addRange(SLR);
Processed.insert(MBB);
@@ -418,7 +422,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
WorkList.pop_back();
if (Processed.count(MBB))
continue;
- LiveIndex Idx = LIs->getMBBStartIdx(MBB);
+ SlotIndex Idx = LIs->getMBBStartIdx(MBB);
LR = CurrLI->getLiveRangeContaining(Idx);
if (LR && LR->valno == ValNo) {
EndIdx = LIs->getMBBEndIdx(MBB);
@@ -428,7 +432,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
CurrSLI->addRange(SLR);
} else if (LR->end > EndIdx) {
// Live range extends beyond end of mbb, process successors.
- LiveRange SLR(Idx, LIs->getNextIndex(EndIdx), CurrSValNo);
+ LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
CurrSLI->addRange(SLR);
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI)
@@ -491,12 +495,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
}
// Once we've found it, extend its VNInfo to our instruction.
- LiveIndex DefIndex = LIs->getInstructionIndex(Walker);
- DefIndex = LIs->getDefIndex(DefIndex);
- LiveIndex EndIndex = LIs->getMBBEndIdx(MBB);
+ SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
+ DefIndex = DefIndex.getDefIndex();
+ SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);
RetVNI = NewVNs[Walker];
- LI->addRange(LiveRange(DefIndex, LIs->getNextSlot(EndIndex), RetVNI));
+ LI->addRange(LiveRange(DefIndex, EndIndex.getNextSlot(), RetVNI));
} else if (!ContainsDefs && ContainsUses) {
SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
@@ -528,12 +532,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
IsTopLevel, IsIntraBlock);
}
- LiveIndex UseIndex = LIs->getInstructionIndex(Walker);
- UseIndex = LIs->getUseIndex(UseIndex);
- LiveIndex EndIndex;
+ SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
+ UseIndex = UseIndex.getUseIndex();
+ SlotIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
- EndIndex = LIs->getUseIndex(EndIndex);
+ EndIndex = EndIndex.getUseIndex();
} else
EndIndex = LIs->getMBBEndIdx(MBB);
@@ -542,7 +546,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
NewVNs, LiveOut, Phis, false, true);
- LI->addRange(LiveRange(UseIndex, LIs->getNextSlot(EndIndex), RetVNI));
+ LI->addRange(LiveRange(UseIndex, EndIndex.getNextSlot(), RetVNI));
// FIXME: Need to set kills properly for inter-block stuff.
if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
@@ -588,13 +592,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
IsTopLevel, IsIntraBlock);
}
- LiveIndex StartIndex = LIs->getInstructionIndex(Walker);
- StartIndex = foundDef ? LIs->getDefIndex(StartIndex) :
- LIs->getUseIndex(StartIndex);
- LiveIndex EndIndex;
+ SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
+ StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
+ SlotIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
- EndIndex = LIs->getUseIndex(EndIndex);
+ EndIndex = EndIndex.getUseIndex();
} else
EndIndex = LIs->getMBBEndIdx(MBB);
@@ -604,7 +607,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
NewVNs, LiveOut, Phis, false, true);
- LI->addRange(LiveRange(StartIndex, LIs->getNextSlot(EndIndex), RetVNI));
+ LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI));
if (foundUse && RetVNI->isKill(StartIndex))
RetVNI->removeKill(StartIndex);
@@ -640,9 +643,9 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
// assume that we are not intrablock here.
if (Phis.count(MBB)) return Phis[MBB];
- LiveIndex StartIndex = LIs->getMBBStartIdx(MBB);
+ SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
VNInfo *RetVNI = Phis[MBB] =
- LI->getNextValue(LiveIndex(), /*FIXME*/ 0, false,
+ LI->getNextValue(SlotIndex(), /*FIXME*/ 0, false,
LIs->getVNInfoAllocator());
if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
@@ -685,19 +688,19 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
I->second->setHasPHIKill(true);
- LiveIndex KillIndex = LIs->getMBBEndIdx(I->first);
+ SlotIndex KillIndex = LIs->getMBBEndIdx(I->first);
if (!I->second->isKill(KillIndex))
I->second->addKill(KillIndex);
}
}
- LiveIndex EndIndex;
+ SlotIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
- EndIndex = LIs->getUseIndex(EndIndex);
+ EndIndex = EndIndex.getUseIndex();
} else
EndIndex = LIs->getMBBEndIdx(MBB);
- LI->addRange(LiveRange(StartIndex, LIs->getNextSlot(EndIndex), RetVNI));
+ LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI));
if (IsIntraBlock)
RetVNI->addKill(EndIndex);
@@ -733,8 +736,8 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
DE = MRI->def_end(); DI != DE; ++DI) {
Defs[(*DI).getParent()].insert(&*DI);
- LiveIndex DefIdx = LIs->getInstructionIndex(&*DI);
- DefIdx = LIs->getDefIndex(DefIdx);
+ SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
+ DefIdx = DefIdx.getDefIndex();
assert(DI->getOpcode() != TargetInstrInfo::PHI &&
"Following NewVN isPHIDef flag incorrect. Fix me!");
@@ -769,13 +772,13 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
// Add ranges for dead defs
for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
DE = MRI->def_end(); DI != DE; ++DI) {
- LiveIndex DefIdx = LIs->getInstructionIndex(&*DI);
- DefIdx = LIs->getDefIndex(DefIdx);
+ SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
+ DefIdx = DefIdx.getDefIndex();
if (LI->liveAt(DefIdx)) continue;
VNInfo* DeadVN = NewVNs[&*DI];
- LI->addRange(LiveRange(DefIdx, LIs->getNextSlot(DefIdx), DeadVN));
+ LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
DeadVN->addKill(DefIdx);
}
@@ -784,8 +787,8 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
VI != VE; ++VI) {
VNInfo* VNI = *VI;
for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
- LiveIndex KillIdx = VNI->kills[i];
- if (KillIdx.isPHIIndex())
+ SlotIndex KillIdx = VNI->kills[i];
+ if (KillIdx.isPHI())
continue;
MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
if (KillMI) {
@@ -826,14 +829,14 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
// Locate two-address redefinitions
for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
KE = OldVN->kills.end(); KI != KE; ++KI) {
- assert(!KI->isPHIIndex() &&
+ assert(!KI->isPHI() &&
"VN previously reported having no PHI kills.");
MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
if (DefIdx == ~0U) continue;
if (MI->isRegTiedToUseOperand(DefIdx)) {
VNInfo* NextVN =
- CurrLI->findDefinedVNInfoForRegInt(LIs->getDefIndex(*KI));
+ CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
if (NextVN == OldVN) continue;
Stack.push_back(NextVN);
}
@@ -865,10 +868,10 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
E = MRI->reg_end(); I != E; ++I) {
MachineOperand& MO = I.getOperand();
- LiveIndex InstrIdx = LIs->getInstructionIndex(&*I);
+ SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);
- if ((MO.isUse() && NewLI.liveAt(LIs->getUseIndex(InstrIdx))) ||
- (MO.isDef() && NewLI.liveAt(LIs->getDefIndex(InstrIdx))))
+ if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
+ (MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
}
@@ -893,12 +896,12 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
MachineInstr* DefMI,
MachineBasicBlock::iterator RestorePt,
- LiveIndex RestoreIdx,
+ SlotIndex RestoreIdx,
SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
MachineBasicBlock& MBB = *RestorePt->getParent();
MachineBasicBlock::iterator KillPt = BarrierMBB->end();
- LiveIndex KillIdx;
+ SlotIndex KillIdx;
if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
else
@@ -911,8 +914,8 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
LIs->InsertMachineInstrInMaps(prior(RestorePt), RestoreIdx);
ReconstructLiveInterval(CurrLI);
- LiveIndex RematIdx = LIs->getInstructionIndex(prior(RestorePt));
- RematIdx = LIs->getDefIndex(RematIdx);
+ SlotIndex RematIdx = LIs->getInstructionIndex(prior(RestorePt));
+ RematIdx = RematIdx.getDefIndex();
RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));
++NumSplits;
@@ -968,7 +971,7 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
- CurrSValNo = CurrSLI->getNextValue(LiveIndex(), 0, false,
+ CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
LSs->getVNInfoAllocator());
}
@@ -1052,11 +1055,14 @@ MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
/// so it would not cross the barrier that's being processed. Shrink wrap
/// (minimize) the live interval to the last uses.
bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
+ DEBUG(errs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
+ << " result: ");
+
CurrLI = LI;
// Find the live range where the current interval crosses the barrier.
LiveInterval::iterator LR =
- CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx));
+ CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
VNInfo *ValNo = LR->valno;
assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
@@ -1065,8 +1071,10 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
? LIs->getInstructionFromIndex(ValNo->def) : NULL;
// If this would create a new join point, do not split.
- if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent()))
+ if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
+ DEBUG(errs() << "FAILED (would create a new join point).\n");
return false;
+ }
// Find all references in the barrier mbb.
SmallPtrSet<MachineInstr*, 4> RefsInMBB;
@@ -1078,21 +1086,25 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
}
// Find a point to restore the value after the barrier.
- LiveIndex RestoreIndex;
+ SlotIndex RestoreIndex;
MachineBasicBlock::iterator RestorePt =
findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB, RestoreIndex);
- if (RestorePt == BarrierMBB->end())
+ if (RestorePt == BarrierMBB->end()) {
+ DEBUG(errs() << "FAILED (could not find a suitable restore point).\n");
return false;
+ }
if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt,
- RestoreIndex, RefsInMBB))
- return true;
+ RestoreIndex, RefsInMBB)) {
+ DEBUG(errs() << "success (remat).\n");
+ return true;
+ }
// Add a spill either before the barrier or after the definition.
MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
- LiveIndex SpillIndex;
+ SlotIndex SpillIndex;
MachineInstr *SpillMI = NULL;
int SS = -1;
if (!ValNo->isDefAccurate()) {
@@ -1103,8 +1115,10 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
} else {
MachineBasicBlock::iterator SpillPt =
findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, SpillIndex);
- if (SpillPt == BarrierMBB->begin())
+ if (SpillPt == BarrierMBB->begin()) {
+ DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
return false; // No gap to insert spill.
+ }
// Add spill.
SS = CreateSpillStackSlot(CurrLI->reg, RC);
@@ -1116,8 +1130,10 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
RestoreIndex, SpillIndex, SS)) {
// If it's already split, just restore the value. There is no need to spill
// the def again.
- if (!DefMI)
+ if (!DefMI) {
+ DEBUG(errs() << "FAILED (def is dead).\n");
return false; // Def is dead. Do nothing.
+ }
if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
BarrierMBB, SS, RefsInMBB))) {
@@ -1129,12 +1145,16 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
// Add spill after the def and the last use before the barrier.
SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
RefsInMBB, SpillIndex);
- if (SpillPt == DefMBB->begin())
+ if (SpillPt == DefMBB->begin()) {
+ DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
return false; // No gap to insert spill.
+ }
} else {
SpillPt = findNextEmptySlot(DefMBB, DefMI, SpillIndex);
- if (SpillPt == DefMBB->end())
+ if (SpillPt == DefMBB->end()) {
+ DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
return false; // No gap to insert spill.
+ }
}
// Add spill.
SS = CreateSpillStackSlot(CurrLI->reg, RC);
@@ -1162,18 +1182,19 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
}
// Update spill stack slot live interval.
- UpdateSpillSlotInterval(ValNo, LIs->getNextSlot(LIs->getUseIndex(SpillIndex)),
- LIs->getDefIndex(RestoreIndex));
+ UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
+ RestoreIndex.getDefIndex());
ReconstructLiveInterval(CurrLI);
if (!FoldedRestore) {
- LiveIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
- RestoreIdx = LIs->getDefIndex(RestoreIdx);
+ SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
+ RestoreIdx = RestoreIdx.getDefIndex();
RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
}
++NumSplits;
+ DEBUG(errs() << "success.\n");
return true;
}
@@ -1254,8 +1275,8 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
// reaching definition (VNInfo).
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
UE = MRI->use_end(); UI != UE; ++UI) {
- LiveIndex index = LIs->getInstructionIndex(&*UI);
- index = LIs->getUseIndex(index);
+ SlotIndex index = LIs->getInstructionIndex(&*UI);
+ index = index.getUseIndex();
const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
VNUseCount[LR->valno].insert(&*UI);
@@ -1404,7 +1425,7 @@ bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
if (LR->valno->hasPHIKill())
return false;
- LiveIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
+ SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
if (LR->end < MBBEnd)
return false;
@@ -1467,6 +1488,7 @@ bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
TII = TM->getInstrInfo();
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
+ SIs = &getAnalysis<SlotIndexes>();
LIs = &getAnalysis<LiveIntervals>();
LSs = &getAnalysis<LiveStacks>();
VRM = &getAnalysis<VirtRegMap>();
diff --git a/lib/CodeGen/ProcessImplicitDefs.cpp b/lib/CodeGen/ProcessImplicitDefs.cpp
new file mode 100644
index 0000000000..48567a0fc7
--- /dev/null
+++ b/lib/CodeGen/ProcessImplicitDefs.cpp
@@ -0,0 +1,231 @@
+//===---------------------- ProcessImplicitDefs.cpp -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "processimplicitdefs"
+
+#include "llvm/CodeGen/ProcessImplicitDefs.h"
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+
+using namespace llvm;
+
+char ProcessImplicitDefs::ID = 0;
+static RegisterPass<ProcessImplicitDefs> X("processimpdefs",
+ "Process Implicit Definitions.");
+
+void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addPreserved<AliasAnalysis>();
+ AU.addPreserved<LiveVariables>();
+ AU.addRequired<LiveVariables>();
+ AU.addPreservedID(MachineLoopInfoID);
+ AU.addPreservedID(MachineDominatorsID);
+ AU.addPreservedID(TwoAddressInstructionPassID);
+ AU.addPreservedID(PHIEliminationID);
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+bool ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
+ unsigned Reg, unsigned OpIdx,
+ const TargetInstrInfo *tii_) {
+ unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
+ if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
+ Reg == SrcReg)
+ return true;
+
+ if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
+ return true;
+ if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
+ return true;
+ return false;
+}
+
+/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
+/// there is one implicit_def for each use. Add isUndef marker to
+/// implicit_def defs and their uses.
+bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
+
+ DEBUG(errs() << "********** PROCESS IMPLICIT DEFS **********\n"
+ << "********** Function: "
+ << ((Value*)fn.getFunction())->getName() << '\n');
+
+ bool Changed = false;
+
+ const TargetInstrInfo *tii_ = fn.getTarget().getInstrInfo();
+ const TargetRegisterInfo *tri_ = fn.getTarget().getRegisterInfo();
+ MachineRegisterInfo *mri_ = &fn.getRegInfo();
+
+ LiveVariables *lv_ = &getAnalysis<LiveVariables>();
+
+ SmallSet<unsigned, 8> ImpDefRegs;
+ SmallVector<MachineInstr*, 8> ImpDefMIs;
+ MachineBasicBlock *Entry = fn.begin();
+ SmallPtrSet<MachineBasicBlock*,16> Visited;
+
+ for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
+ DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
+ DFI != E; ++DFI) {
+ MachineBasicBlock *MBB = *DFI;
+ for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
+ I != E; ) {
+ MachineInstr *MI = &*I;
+ ++I;
+ if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
+ unsigned Reg = MI->getOperand(0).getReg();
+ ImpDefRegs.insert(Reg);
+ if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
+ for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
+ ImpDefRegs.insert(*SS);
+ }
+ ImpDefMIs.push_back(MI);
+ continue;
+ }
+
+ if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
+ MachineOperand &MO = MI->getOperand(2);
+ if (ImpDefRegs.count(MO.getReg())) {
+ // %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
+ // This is an identity copy, eliminate it now.
+ if (MO.isKill()) {
+ LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
+ vi.removeKill(MI);
+ }
+ MI->eraseFromParent();
+ Changed = true;
+ continue;
+ }
+ }
+
+ bool ChangedToImpDef = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand& MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.isUndef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ if (!ImpDefRegs.count(Reg))
+ continue;
+ // Use is a copy, just turn it into an implicit_def.
+ if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
+ bool isKill = MO.isKill();
+ MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
+ for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
+ MI->RemoveOperand(j);
+ if (isKill) {
+ ImpDefRegs.erase(Reg);
+ LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
+ vi.removeKill(MI);
+ }
+ ChangedToImpDef = true;
+ Changed = true;
+ break;
+ }
+
+ Changed = true;
+ MO.setIsUndef();
+ if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
+ // Make sure other uses of the same register in this instruction are also
+ // marked undef.
+ for (unsigned j = i+1; j != e; ++j) {
+ MachineOperand &MOJ = MI->getOperand(j);
+ if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
+ MOJ.setIsUndef();
+ }
+ ImpDefRegs.erase(Reg);
+ }
+ }
+
+ if (ChangedToImpDef) {
+ // Backtrack to process this new implicit_def.
+ --I;
+ } else {
+ for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
+ MachineOperand& MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isDef())
+ continue;
+ ImpDefRegs.erase(MO.getReg());
+ }
+ }
+ }
+
+ // Any outstanding liveout implicit_def's?
+ for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
+ MachineInstr *MI = ImpDefMIs[i];
+ unsigned Reg = MI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
+ !ImpDefRegs.count(Reg)) {
+ // Delete all "local" implicit_def's. That includes those which define
+ // physical registers since they cannot be liveout.
+ MI->eraseFromParent();
+ Changed = true;
+ continue;
+ }
+
+ // If there are multiple defs of the same register and at least one
+ // is not an implicit_def, do not insert implicit_def's before the
+ // uses.
+ bool Skip = false;
+ for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
+ DE = mri_->def_end(); DI != DE; ++DI) {
+ if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
+ Skip = true;
+ break;
+ }
+ }
+ if (Skip)
+ continue;
+
+ // The only implicit_def's we want to keep are those that are live
+ // out of their blocks.
+ MI->eraseFromParent();
+ Changed = true;
+
+ for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
+ UE = mri_->use_end(); UI != UE; ) {
+ MachineOperand &RMO = UI.getOperand();
+ MachineInstr *RMI = &*UI;
+ ++UI;
+ MachineBasicBlock *RMBB = RMI->getParent();
+ if (RMBB == MBB)
+ continue;
+
+ // Turn a copy use into an implicit_def.
+ unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
+ if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
+ Reg == SrcReg) {
+ RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
+ for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
+ RMI->RemoveOperand(j);
+ continue;
+ }
+
+ const TargetRegisterClass* RC = mri_->getRegClass(Reg);
+ unsigned NewVReg = mri_->createVirtualRegister(RC);
+ RMO.setReg(NewVReg);
+ RMO.setIsUndef();
+ RMO.setIsKill();
+ }
+ }
+ ImpDefRegs.clear();
+ ImpDefMIs.clear();
+ }
+
+ return Changed;
+}
+
diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp
index 836a3f02aa..6930abf87b 100644
--- a/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/lib/CodeGen/RegAllocLinearScan.cpp
@@ -145,6 +145,7 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
if (StrongPHIElim)
AU.addRequiredID(StrongPHIEliminationID);
// Make sure PassManager knows which analyses to make available
@@ -175,11 +176,11 @@ namespace {
/// processActiveIntervals - expire old intervals and move non-overlapping
/// ones to the inactive list.
- void processActiveIntervals(LiveIndex CurPoint);
+ void processActiveIntervals(SlotIndex CurPoint);
/// processInactiveIntervals - expire old intervals and move overlapping
/// ones to the active list.
- void processInactiveIntervals(LiveIndex CurPoint);
+ void processInactiveIntervals(SlotIndex CurPoint);
/// hasNextReloadInterval - Return the next liveinterval that's being
/// defined by a reload from the same SS as the specified one.
@@ -365,7 +366,7 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
return Reg;
VNInfo *vni = cur.begin()->valno;
- if ((vni->def == LiveIndex()) ||
+ if ((vni->def == SlotIndex()) ||
vni->isUnused() || !vni->isDefAccurate())
return Reg;
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
@@ -402,7 +403,7 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
if (!O.isKill())
continue;
MachineInstr *MI = &*I;
- if (SrcLI.liveAt(li_->getDefIndex(li_->getInstructionIndex(MI))))
+ if (SrcLI.liveAt(li_->getInstructionIndex(MI).getDefIndex()))
O.setIsKill(false);
}
}
@@ -479,10 +480,17 @@ void RALinScan::initIntervalSets()
for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
if (TargetRegisterInfo::isPhysicalRegister(i->second->reg)) {
- mri_->setPhysRegUsed(i->second->reg);
- fixed_.push_back(std::make_pair(i->second, i->second->begin()));
- } else
- unhandled_.push(i->second);
+ if (!i->second->empty()) {
+ mri_->setPhysRegUsed(i->second->reg);
+ fixed_.push_back(std::make_pair(i->second, i->second->begin()));
+ }
+ } else {
+ if (i->second->empty()) {
+ assignRegOrStackSlotAtInterval(i->second);
+ }
+ else
+ unhandled_.push(i->second);
+ }
}
}
@@ -502,13 +510,13 @@ void RALinScan::linearScan() {
++NumIters;
DEBUG(errs() << "\n*** CURRENT ***: " << *cur << '\n');
- if (!cur->empty()) {
- processActiveIntervals(cur->beginIndex());
- processInactiveIntervals(cur->beginIndex());
+ assert(!cur->empty() && "Empty interval in unhandled set.");
- assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
- "Can only allocate virtual registers!");
- }
+ processActiveIntervals(cur->beginIndex());
+ processInactiveIntervals(cur->beginIndex());
+
+ assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
+ "Can only allocate virtual registers!");
// Allocating a virtual register. Try to find a free
// physical register or spill an interval (possibly this one) in order to
@@ -585,7 +593,7 @@ void RALinScan::linearScan() {
/// processActiveIntervals - expire old intervals and move non-overlapping ones
/// to the inactive list.
-void RALinScan::processActiveIntervals(LiveIndex CurPoint)
+void RALinScan::processActiveIntervals(SlotIndex CurPoint)
{
DEBUG(errs() << "\tprocessing active intervals:\n");
@@ -631,7 +639,7 @@ void RALinScan::processActiveIntervals(LiveIndex CurPoint)
/// processInactiveIntervals - expire old intervals and move overlapping
/// ones to the active list.
-void RALinScan::processInactiveIntervals(LiveIndex CurPoint)
+void RALinScan::processInactiveIntervals(SlotIndex CurPoint)
{
DEBUG(errs() << "\tprocessing inactive intervals:\n");
@@ -712,7 +720,7 @@ FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
return IP.end();
}
-static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, LiveIndex Point){
+static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, SlotIndex Point){
for (unsigned i = 0, e = V.size(); i != e; ++i) {
RALinScan::IntervalPtr &IP = V[i];
LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
@@ -738,7 +746,7 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
if (SI.hasAtLeastOneValue())
VNI = SI.getValNumInfo(0);
else
- VNI = SI.getNextValue(LiveIndex(), 0, false,
+ VNI = SI.getNextValue(SlotIndex(), 0, false,
ls_->getVNInfoAllocator());
LiveInterval &RI = li_->getInterval(cur->reg);
@@ -906,7 +914,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
backUpRegUses();
std::vector<std::pair<unsigned, float> > SpillWeightsToAdd;
- LiveIndex StartPosition = cur->beginIndex();
+ SlotIndex StartPosition = cur->beginIndex();
const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
// If start of this live interval is defined by a move instruction and its
@@ -916,7 +924,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
// one, e.g. X86::mov32to32_. These move instructions are not coalescable.
if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
VNInfo *vni = cur->begin()->valno;
- if ((vni->def != LiveIndex()) && !vni->isUnused() &&
+ if ((vni->def != SlotIndex()) && !vni->isUnused() &&
vni->isDefAccurate()) {
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
@@ -1118,6 +1126,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
DowngradedRegs.clear();
assignRegOrStackSlotAtInterval(cur);
} else {
+ assert(false && "Ran out of registers during register allocation!");
llvm_report_error("Ran out of registers during register allocation!");
}
return;
@@ -1172,7 +1181,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
LiveInterval *ReloadLi = added[i];
if (ReloadLi->weight == HUGE_VALF &&
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
- LiveIndex ReloadIdx = ReloadLi->beginIndex();
+ SlotIndex ReloadIdx = ReloadLi->beginIndex();
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
@@ -1242,7 +1251,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
spilled.insert(sli->reg);
}
- LiveIndex earliestStart = earliestStartInterval->beginIndex();
+ SlotIndex earliestStart = earliestStartInterval->beginIndex();
DEBUG(errs() << "\t\trolling back to: " << earliestStart << '\n');
@@ -1323,7 +1332,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
LiveInterval *ReloadLi = added[i];
if (ReloadLi->weight == HUGE_VALF &&
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
- LiveIndex ReloadIdx = ReloadLi->beginIndex();
+ SlotIndex ReloadIdx = ReloadLi->beginIndex();
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp
index a6e33fa073..5757e47554 100644
--- a/lib/CodeGen/RegAllocPBQP.cpp
+++ b/lib/CodeGen/RegAllocPBQP.cpp
@@ -85,6 +85,8 @@ namespace {
/// PBQP analysis usage.
virtual void getAnalysisUsage(AnalysisUsage &au) const {
+ au.addRequired<SlotIndexes>();
+ au.addPreserved<SlotIndexes>();
au.addRequired<LiveIntervals>();
//au.addRequiredID(SplitCriticalEdgesID);
au.addRequired<RegisterCoalescer>();
@@ -684,7 +686,7 @@ void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled,
vni = stackInterval.getValNumInfo(0);
else
vni = stackInterval.getNextValue(
- LiveIndex(), 0, false, lss->getVNInfoAllocator());
+ SlotIndex(), 0, false, lss->getVNInfoAllocator());
LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
stackInterval.MergeRangesInAsValue(rhsInterval, vni);
@@ -832,7 +834,7 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
tm = &mf->getTarget();
tri = tm->getRegisterInfo();
tii = tm->getInstrInfo();
- mri = &mf->getRegInfo();
+ mri = &mf->getRegInfo();
lis = &getAnalysis<LiveIntervals>();
lss = &getAnalysis<LiveStacks>();
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp
index 2aa6307502..e711a06be7 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -76,6 +76,7 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
+ AU.addPreserved<SlotIndexes>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
AU.addPreservedID(MachineDominatorsID);
@@ -105,7 +106,7 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
LiveInterval &IntB,
MachineInstr *CopyMI) {
- LiveIndex CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
+ SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
// the example above.
@@ -120,7 +121,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
// AValNo is the value number in A that defines the copy, A3 in the example.
- LiveIndex CopyUseIdx = li_->getUseIndex(CopyIdx);
+ SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
assert(ALR != IntA.end() && "Live range not found!");
VNInfo *AValNo = ALR->valno;
@@ -158,13 +159,13 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// Get the LiveRange in IntB that this value number starts with.
LiveInterval::iterator ValLR =
- IntB.FindLiveRangeContaining(li_->getPrevSlot(AValNo->def));
+ IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
assert(ValLR != IntB.end() && "Live range not found!");
// Make sure that the end of the live range is inside the same block as
// CopyMI.
MachineInstr *ValLREndInst =
- li_->getInstructionFromIndex(li_->getPrevSlot(ValLR->end));
+ li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
if (!ValLREndInst ||
ValLREndInst->getParent() != CopyMI->getParent()) return false;
@@ -193,7 +194,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
IntB.print(errs(), tri_);
});
- LiveIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
+ SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
// We are about to delete CopyMI, so need to remove it as the 'instruction
// that defines this value #'. Update the valnum with the new defining
// instruction #.
@@ -306,8 +307,8 @@ TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
LiveInterval &IntB,
MachineInstr *CopyMI) {
- LiveIndex CopyIdx =
- li_->getDefIndex(li_->getInstructionIndex(CopyMI));
+ SlotIndex CopyIdx =
+ li_->getInstructionIndex(CopyMI).getDefIndex();
// FIXME: For now, only eliminate the copy by commuting its def when the
// source register is a virtual register. We want to guard against cases
@@ -330,7 +331,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// AValNo is the value number in A that defines the copy, A3 in the example.
LiveInterval::iterator ALR =
- IntA.FindLiveRangeContaining(li_->getPrevSlot(CopyIdx));
+ IntA.FindLiveRangeContaining(CopyIdx.getUseIndex());
assert(ALR != IntA.end() && "Live range not found!");
VNInfo *AValNo = ALR->valno;
@@ -376,7 +377,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
UE = mri_->use_end(); UI != UE; ++UI) {
MachineInstr *UseMI = &*UI;
- LiveIndex UseIdx = li_->getInstructionIndex(UseMI);
+ SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end())
continue;
@@ -401,7 +402,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
bool BHasPHIKill = BValNo->hasPHIKill();
SmallVector<VNInfo*, 4> BDeadValNos;
VNInfo::KillSet BKills;
- std::map<LiveIndex, LiveIndex> BExtend;
+ std::map<SlotIndex, SlotIndex> BExtend;
// If ALR and BLR overlap and the end of BLR extends beyond the end of ALR, e.g.
// A = or A, B
@@ -428,7 +429,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
++UI;
if (JoinedCopies.count(UseMI))
continue;
- LiveIndex UseIdx= li_->getUseIndex(li_->getInstructionIndex(UseMI));
+ SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
if (ULR == IntA.end() || ULR->valno != AValNo)
continue;
@@ -439,7 +440,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
if (Extended)
UseMO.setIsKill(false);
else
- BKills.push_back(li_->getNextSlot(UseIdx));
+ BKills.push_back(UseIdx.getDefIndex());
}
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
@@ -448,7 +449,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// This copy will become a noop. If it's defining a new val#,
// remove that val# as well. However this live range is being
// extended to the end of the existing live range defined by the copy.
- LiveIndex DefIdx = li_->getDefIndex(UseIdx);
+ SlotIndex DefIdx = UseIdx.getDefIndex();
const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
BHasPHIKill |= DLR->valno->hasPHIKill();
assert(DLR->valno->def == DefIdx);
@@ -495,8 +496,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
AI != AE; ++AI) {
if (AI->valno != AValNo) continue;
- LiveIndex End = AI->end;
- std::map<LiveIndex, LiveIndex>::iterator
+ SlotIndex End = AI->end;
+ std::map<SlotIndex, SlotIndex>::iterator
EI = BExtend.find(End);
if (EI != BExtend.end())
End = EI->second;
@@ -507,7 +508,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
if (BHasSubRegs) {
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
LiveInterval &SRLI = li_->getInterval(*SR);
- SRLI.MergeInClobberRange(AI->start, End, li_->getVNInfoAllocator());
+ SRLI.MergeInClobberRange(*li_, AI->start, End, li_->getVNInfoAllocator());
}
}
}
@@ -551,7 +552,7 @@ static bool isSameOrFallThroughBB(MachineBasicBlock *MBB,
/// from a physical register live interval as well as from the live intervals
/// of its sub-registers.
static void removeRange(LiveInterval &li,
- LiveIndex Start, LiveIndex End,
+ SlotIndex Start, SlotIndex End,
LiveIntervals *li_, const TargetRegisterInfo *tri_) {
li.removeRange(Start, End, true);
if (TargetRegisterInfo::isPhysicalRegister(li.reg)) {
@@ -559,8 +560,9 @@ static void removeRange(LiveInterval &li,
if (!li_->hasInterval(*SR))
continue;
LiveInterval &sli = li_->getInterval(*SR);
- LiveIndex RemoveStart = Start;
- LiveIndex RemoveEnd = Start;
+ SlotIndex RemoveStart = Start;
+ SlotIndex RemoveEnd = Start;
+
while (RemoveEnd != End) {
LiveInterval::iterator LR = sli.FindLiveRangeContaining(RemoveStart);
if (LR == sli.end())
@@ -577,14 +579,14 @@ static void removeRange(LiveInterval &li,
/// as the copy instruction, trim the live interval to the last use and return
/// true.
bool
-SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
+SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
MachineBasicBlock *CopyMBB,
LiveInterval &li,
const LiveRange *LR) {
- LiveIndex MBBStart = li_->getMBBStartIdx(CopyMBB);
- LiveIndex LastUseIdx;
+ SlotIndex MBBStart = li_->getMBBStartIdx(CopyMBB);
+ SlotIndex LastUseIdx;
MachineOperand *LastUse =
- lastRegisterUse(LR->start, li_->getPrevSlot(CopyIdx), li.reg, LastUseIdx);
+ lastRegisterUse(LR->start, CopyIdx.getPrevSlot(), li.reg, LastUseIdx);
if (LastUse) {
MachineInstr *LastUseMI = LastUse->getParent();
if (!isSameOrFallThroughBB(LastUseMI->getParent(), CopyMBB, tii_)) {
@@ -603,8 +605,8 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
// There are uses before the copy, just shorten the live range to the end
// of last use.
LastUse->setIsKill();
- removeRange(li, li_->getDefIndex(LastUseIdx), LR->end, li_, tri_);
- LR->valno->addKill(li_->getNextSlot(LastUseIdx));
+ removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
+ LR->valno->addKill(LastUseIdx.getDefIndex());
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
DstReg == li.reg) {
@@ -617,7 +619,7 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
// Is it livein?
if (LR->start <= MBBStart && LR->end > MBBStart) {
- if (LR->start == LiveIndex()) {
+ if (LR->start == li_->getZeroIndex()) {
assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
// Live-in to the function but dead. Remove it from entry live-in set.
mf_->begin()->removeLiveIn(li.reg);
@@ -634,7 +636,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
unsigned DstReg,
unsigned DstSubIdx,
MachineInstr *CopyMI) {
- LiveIndex CopyIdx = li_->getUseIndex(li_->getInstructionIndex(CopyMI));
+ SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
assert(SrcLR != SrcInt.end() && "Live range not found!");
VNInfo *ValNo = SrcLR->valno;
@@ -683,7 +685,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
return false;
}
- LiveIndex DefIdx = li_->getDefIndex(CopyIdx);
+ SlotIndex DefIdx = CopyIdx.getDefIndex();
const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
DLR->valno->setCopy(0);
// Don't forget to update sub-register intervals.
@@ -716,7 +718,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
// should mark it dead:
if (DefMI->getParent() == MBB) {
DefMI->addRegisterDead(SrcInt.reg, tri_);
- SrcLR->end = li_->getNextSlot(SrcLR->start);
+ SrcLR->end = SrcLR->start.getNextSlot();
}
}
@@ -815,8 +817,8 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
(TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
allocatableRegs_[CopyDstReg])) {
LiveInterval &LI = li_->getInterval(CopyDstReg);
- LiveIndex DefIdx =
- li_->getDefIndex(li_->getInstructionIndex(UseMI));
+ SlotIndex DefIdx =
+ li_->getInstructionIndex(UseMI).getDefIndex();
if (const LiveRange *DLR = LI.getLiveRangeContaining(DefIdx)) {
if (DLR->valno->def == DefIdx)
DLR->valno->setCopy(UseMI);
@@ -835,12 +837,12 @@ void SimpleRegisterCoalescing::RemoveUnnecessaryKills(unsigned Reg,
if (!UseMO.isKill())
continue;
MachineInstr *UseMI = UseMO.getParent();
- LiveIndex UseIdx =
- li_->getUseIndex(li_->getInstructionIndex(UseMI));
+ SlotIndex UseIdx =
+ li_->getInstructionIndex(UseMI).getUseIndex();
const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
if (!LR ||
- (!LR->valno->isKill(li_->getNextSlot(UseIdx)) &&
- LR->valno->def != li_->getNextSlot(UseIdx))) {
+ (!LR->valno->isKill(UseIdx.getDefIndex()) &&
+ LR->valno->def != UseIdx.getDefIndex())) {
// Interesting problem. After coalescing reg1027's def and kill are both
// at the same point: %reg1027,0.000000e+00 = [56,814:0) 0@70-(814)
//
@@ -881,16 +883,16 @@ static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
/// Return true if live interval is removed.
bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
MachineInstr *CopyMI) {
- LiveIndex CopyIdx = li_->getInstructionIndex(CopyMI);
+ SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI);
LiveInterval::iterator MLR =
- li.FindLiveRangeContaining(li_->getDefIndex(CopyIdx));
+ li.FindLiveRangeContaining(CopyIdx.getDefIndex());
if (MLR == li.end())
return false; // Already removed by ShortenDeadCopySrcLiveRange.
- LiveIndex RemoveStart = MLR->start;
- LiveIndex RemoveEnd = MLR->end;
- LiveIndex DefIdx = li_->getDefIndex(CopyIdx);
+ SlotIndex RemoveStart = MLR->start;
+ SlotIndex RemoveEnd = MLR->end;
+ SlotIndex DefIdx = CopyIdx.getDefIndex();
// Remove the liverange that's defined by this.
- if (RemoveStart == DefIdx && RemoveEnd == li_->getNextSlot(DefIdx)) {
+ if (RemoveStart == DefIdx && RemoveEnd == DefIdx.getStoreIndex()) {
removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
return removeIntervalIfEmpty(li, li_, tri_);
}
@@ -901,7 +903,7 @@ bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
/// the val# it defines. If the live interval becomes empty, remove it as well.
bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
MachineInstr *DefMI) {
- LiveIndex DefIdx = li_->getDefIndex(li_->getInstructionIndex(DefMI));
+ SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
if (DefIdx != MLR->valno->def)
return false;
@@ -912,10 +914,10 @@ bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
/// PropagateDeadness - Propagate the dead marker to the instruction which
/// defines the val#.
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
- LiveIndex &LRStart, LiveIntervals *li_,
+ SlotIndex &LRStart, LiveIntervals *li_,
const TargetRegisterInfo* tri_) {
MachineInstr *DefMI =
- li_->getInstructionFromIndex(li_->getDefIndex(LRStart));
+ li_->getInstructionFromIndex(LRStart.getDefIndex());
if (DefMI && DefMI != CopyMI) {
int DeadIdx = DefMI->findRegisterDefOperandIdx(li.reg, false);
if (DeadIdx != -1)
@@ -923,7 +925,7 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
else
DefMI->addOperand(MachineOperand::CreateReg(li.reg,
/*def*/true, /*implicit*/true, /*kill*/false, /*dead*/true));
- LRStart = li_->getNextSlot(LRStart);
+ LRStart = LRStart.getNextSlot();
}
}
@@ -934,8 +936,8 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
bool
SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
MachineInstr *CopyMI) {
- LiveIndex CopyIdx = li_->getInstructionIndex(CopyMI);
- if (CopyIdx == LiveIndex()) {
+ SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI);
+ if (CopyIdx == SlotIndex()) {
// FIXME: special case: function live in. It can be a general case if the
// first instruction index starts at > 0 value.
assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
@@ -948,13 +950,13 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
}
LiveInterval::iterator LR =
- li.FindLiveRangeContaining(li_->getPrevSlot(CopyIdx));
+ li.FindLiveRangeContaining(CopyIdx.getPrevIndex().getStoreIndex());
if (LR == li.end())
// Livein but defined by a phi.
return false;
- LiveIndex RemoveStart = LR->start;
- LiveIndex RemoveEnd = li_->getNextSlot(li_->getDefIndex(CopyIdx));
+ SlotIndex RemoveStart = LR->start;
+ SlotIndex RemoveEnd = CopyIdx.getStoreIndex();
if (LR->end > RemoveEnd)
// More uses past this copy? Nothing to do.
return false;
@@ -974,7 +976,7 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
// If the live range starts in another mbb and the copy mbb is not a fall
// through mbb, then we can only cut the range from the beginning of the
// copy mbb.
- RemoveStart = li_->getNextSlot(li_->getMBBStartIdx(CopyMBB));
+ RemoveStart = li_->getMBBStartIdx(CopyMBB).getNextIndex().getBaseIndex();
if (LR->valno->def == RemoveStart) {
// If the def MI defines the val# and this copy is the only kill of the
@@ -1030,14 +1032,14 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
// If the virtual register live interval extends into a loop, turn down
// aggressiveness.
- LiveIndex CopyIdx =
- li_->getDefIndex(li_->getInstructionIndex(CopyMI));
+ SlotIndex CopyIdx =
+ li_->getInstructionIndex(CopyMI).getDefIndex();
const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
if (!L) {
// Let's see if the virtual register live interval extends into the loop.
LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
assert(DLR != DstInt.end() && "Live range not found!");
- DLR = DstInt.FindLiveRangeContaining(li_->getNextSlot(DLR->end));
+ DLR = DstInt.FindLiveRangeContaining(DLR->end.getNextSlot());
if (DLR != DstInt.end()) {
CopyMBB = li_->getMBBFromIndex(DLR->start);
L = loopInfo->getLoopFor(CopyMBB);
@@ -1047,7 +1049,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
if (!L || Length <= Threshold)
return true;
- LiveIndex UseIdx = li_->getUseIndex(CopyIdx);
+ SlotIndex UseIdx = CopyIdx.getUseIndex();
LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
if (loopInfo->getLoopFor(SMBB) != L) {
@@ -1060,7 +1062,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
if (SuccMBB == CopyMBB)
continue;
if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
- li_->getNextSlot(li_->getMBBEndIdx(SuccMBB))))
+ li_->getMBBEndIdx(SuccMBB).getNextIndex().getBaseIndex()))
return false;
}
}
@@ -1091,12 +1093,12 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
  // If the virtual register live interval is defined in or crosses a loop, turn
// down aggressiveness.
- LiveIndex CopyIdx =
- li_->getDefIndex(li_->getInstructionIndex(CopyMI));
- LiveIndex UseIdx = li_->getUseIndex(CopyIdx);
+ SlotIndex CopyIdx =
+ li_->getInstructionIndex(CopyMI).getDefIndex();
+ SlotIndex UseIdx = CopyIdx.getUseIndex();
LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
assert(SLR != SrcInt.end() && "Live range not found!");
- SLR = SrcInt.FindLiveRangeContaining(li_->getPrevSlot(SLR->start));
+ SLR = SrcInt.FindLiveRangeContaining(SLR->start.getPrevSlot());
if (SLR == SrcInt.end())
return true;
MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
@@ -1116,7 +1118,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
if (PredMBB == SMBB)
continue;
if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
- li_->getNextSlot(li_->getMBBEndIdx(PredMBB))))
+ li_->getMBBEndIdx(PredMBB).getNextIndex().getBaseIndex()))
return false;
}
}
@@ -1705,7 +1707,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// Update the liveintervals of sub-registers.
for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
- li_->getOrCreateInterval(*AS).MergeInClobberRanges(*ResSrcInt,
+ li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, *ResSrcInt,
li_->getVNInfoAllocator());
}
@@ -1867,7 +1869,7 @@ bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
/// is live at the given point.
bool SimpleRegisterCoalescing::ValueLiveAt(LiveInterval::iterator LRItr,
LiveInterval::iterator LREnd,
- LiveIndex defPoint) const {
+ SlotIndex defPoint) const {
for (const VNInfo *valno = LRItr->valno;
(LRItr != LREnd) && (LRItr->valno == valno); ++LRItr) {
if (LRItr->contains(defPoint))
@@ -2047,7 +2049,7 @@ bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
// Update the liveintervals of sub-registers.
if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
for (const unsigned *AS = tri_->getSubRegisters(LHS.reg); *AS; ++AS)
- li_->getOrCreateInterval(*AS).MergeInClobberRanges(LHS,
+ li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, LHS,
li_->getVNInfoAllocator());
return true;
@@ -2148,7 +2150,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
} else {
// It was defined as a copy from the LHS, find out what value # it is.
RHSValNoInfo =
- LHS.getLiveRangeContaining(li_->getPrevSlot(RHSValNoInfo0->def))->valno;
+ LHS.getLiveRangeContaining(RHSValNoInfo0->def.getPrevSlot())->valno;
RHSValID = RHSValNoInfo->id;
RHSVal0DefinedFromLHS = RHSValID;
}
@@ -2212,7 +2214,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
// Figure out the value # from the RHS.
LHSValsDefinedFromRHS[VNI]=
- RHS.getLiveRangeContaining(li_->getPrevSlot(VNI->def))->valno;
+ RHS.getLiveRangeContaining(VNI->def.getPrevSlot())->valno;
}
// Loop over the value numbers of the RHS, seeing if any are defined from
@@ -2230,7 +2232,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
// Figure out the value # from the LHS.
RHSValsDefinedFromLHS[VNI]=
- LHS.getLiveRangeContaining(li_->getPrevSlot(VNI->def))->valno;
+ LHS.getLiveRangeContaining(VNI->def.getPrevSlot())->valno;
}
LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
@@ -2494,11 +2496,11 @@ SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
MachineOperand *
-SimpleRegisterCoalescing::lastRegisterUse(LiveIndex Start,
- LiveIndex End,
+SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
+ SlotIndex End,
unsigned Reg,
- LiveIndex &UseIdx) const{
- UseIdx = LiveIndex();
+ SlotIndex &UseIdx) const{
+ UseIdx = SlotIndex();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
MachineOperand *LastUse = NULL;
for (MachineRegisterInfo::use_iterator I = mri_->use_begin(Reg),
@@ -2510,22 +2512,24 @@ SimpleRegisterCoalescing::lastRegisterUse(LiveIndex Start,
SrcReg == DstReg)
// Ignore identity copies.
continue;
- LiveIndex Idx = li_->getInstructionIndex(UseMI);
+ SlotIndex Idx = li_->getInstructionIndex(UseMI);
+ // FIXME: Should this be Idx != UseIdx? SlotIndex() will return something
+ // that compares higher than any other interval.
if (Idx >= Start && Idx < End && Idx >= UseIdx) {
LastUse = &Use;
- UseIdx = li_->getUseIndex(Idx);
+ UseIdx = Idx.getUseIndex();
}
}
return LastUse;
}
- LiveIndex s = Start;
- LiveIndex e = li_->getBaseIndex(li_->getPrevSlot(End));
+ SlotIndex s = Start;
+ SlotIndex e = End.getPrevSlot().getBaseIndex();
while (e >= s) {
// Skip deleted instructions
MachineInstr *MI = li_->getInstructionFromIndex(e);
- while (e != LiveIndex() && li_->getPrevIndex(e) >= s && !MI) {
- e = li_->getPrevIndex(e);
+ while (e != SlotIndex() && e.getPrevIndex() >= s && !MI) {
+ e = e.getPrevIndex();
MI = li_->getInstructionFromIndex(e);
}
if (e < s || MI == NULL)
@@ -2539,12 +2543,12 @@ SimpleRegisterCoalescing::lastRegisterUse(LiveIndex Start,
MachineOperand &Use = MI->getOperand(i);
if (Use.isReg() && Use.isUse() && Use.getReg() &&
tri_->regsOverlap(Use.getReg(), Reg)) {
- UseIdx = li_->getUseIndex(e);
+ UseIdx = e.getUseIndex();
return &Use;
}
}
- e = li_->getPrevIndex(e);
+ e = e.getPrevIndex();
}
return NULL;
@@ -2568,7 +2572,7 @@ void SimpleRegisterCoalescing::releaseMemory() {
static bool isZeroLengthInterval(LiveInterval *li, LiveIntervals *li_) {
for (LiveInterval::Ranges::const_iterator
i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
- if (li_->getPrevIndex(i->end) > i->start)
+ if (i->end.getPrevIndex() > i->start)
return false;
return true;
}
@@ -2579,7 +2583,7 @@ void SimpleRegisterCoalescing::CalculateSpillWeights() {
for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
mbbi != mbbe; ++mbbi) {
MachineBasicBlock* MBB = mbbi;
- LiveIndex MBBEnd = li_->getMBBEndIdx(MBB);
+ SlotIndex MBBEnd = li_->getMBBEndIdx(MBB);
MachineLoop* loop = loopInfo->getLoopFor(MBB);
unsigned loopDepth = loop ? loop->getLoopDepth() : 0;
bool isExiting = loop ? loop->isLoopExiting(MBB) : false;
@@ -2621,7 +2625,7 @@ void SimpleRegisterCoalescing::CalculateSpillWeights() {
float Weight = li_->getSpillWeight(HasDef, HasUse, loopDepth);
if (HasDef && isExiting) {
// Looks like this is a loop count variable update.
- LiveIndex DefIdx = li_->getDefIndex(li_->getInstructionIndex(MI));
+ SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
const LiveRange *DLR =
li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
if (DLR->end > MBBEnd)
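The SimpleRegisterCoalescing changes above are almost entirely mechanical: index arithmetic that previously went through LiveIntervals helpers now hangs off the SlotIndex value itself. A minimal before/after sketch of the recurring pattern (illustrative only, not part of the patch; CopyMI and li_ are the members used in the hunks above):

    // Pre-patch: indices were plain LiveIndex values, adjusted via LiveIntervals.
    //   LiveIndex DefIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
    //   LiveIndex UseIdx = li_->getUseIndex(DefIdx);
    // Post-patch: a SlotIndex knows its own sub-slot, so the queries chain off it.
    SlotIndex DefIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
    SlotIndex UseIdx = DefIdx.getUseIndex();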
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.h b/lib/CodeGen/SimpleRegisterCoalescing.h
index 7f282a43dd..78f8a9a563 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.h
+++ b/lib/CodeGen/SimpleRegisterCoalescing.h
@@ -146,7 +146,7 @@ namespace llvm {
/// TrimLiveIntervalToLastUse - If there is a last use in the same basic
    /// block as the copy instruction, trim the live interval to the last use
/// and return true.
- bool TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
+ bool TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
MachineBasicBlock *CopyMBB,
LiveInterval &li, const LiveRange *LR);
@@ -205,7 +205,7 @@ namespace llvm {
/// iterator, or any subsequent range with the same value number,
/// is live at the given point.
bool ValueLiveAt(LiveInterval::iterator LRItr, LiveInterval::iterator LREnd,
- LiveIndex defPoint) const;
+ SlotIndex defPoint) const;
/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
/// the specified live interval is defined by a copy from the specified
@@ -241,9 +241,8 @@ namespace llvm {
/// lastRegisterUse - Returns the last use of the specific register between
/// cycles Start and End or NULL if there are no uses.
- MachineOperand *lastRegisterUse(LiveIndex Start,
- LiveIndex End, unsigned Reg,
- LiveIndex &LastUseIdx) const;
+ MachineOperand *lastRegisterUse(SlotIndex Start, SlotIndex End,
+ unsigned Reg, SlotIndex &LastUseIdx) const;
/// CalculateSpillWeights - Compute spill weights for all virtual register
/// live intervals.
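For reference, a hypothetical call site for the updated lastRegisterUse signature (Start, End and Reg are assumed to be in scope; this is not code from the patch):

    SlotIndex LastUseIdx;
    if (MachineOperand *LastUse = lastRegisterUse(Start, End, Reg, LastUseIdx))
      LastUse->setIsKill();   // e.g. mark the last use in the range as a kill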
diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp
new file mode 100644
index 0000000000..6b04029fba
--- /dev/null
+++ b/lib/CodeGen/SlotIndexes.cpp
@@ -0,0 +1,189 @@
+//===-- SlotIndexes.cpp - Slot Indexes Pass ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "slotindexes"
+
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+std::auto_ptr<IndexListEntry> SlotIndex::emptyKeyPtr(0),
+ SlotIndex::tombstoneKeyPtr(0);
+
+char SlotIndexes::ID = 0;
+static RegisterPass<SlotIndexes> X("slotindexes", "Slot index numbering");
+
+void SlotIndexes::getAnalysisUsage(AnalysisUsage &au) const {
+ au.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(au);
+}
+
+void SlotIndexes::releaseMemory() {
+ mi2iMap.clear();
+ mbb2IdxMap.clear();
+ idx2MBBMap.clear();
+ terminatorGaps.clear();
+ clearList();
+}
+
+bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
+
+ // Compute numbering as follows:
+ // Grab an iterator to the start of the index list.
+ // Iterate over all MBBs, and within each MBB all MIs, keeping the MI
+ // iterator in lock-step (though skipping it over indexes which have
+ // null pointers in the instruction field).
+ // At each iteration assert that the instruction pointed to in the index
+  //   is the same one pointed to by the MI iterator.
+
+ // FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should
+ // only need to be set up once after the first numbering is computed.
+
+ mf = &fn;
+ initList();
+
+ const unsigned gap = 1;
+
+  // Check that the list contains only the sentinel.
+ assert(indexListHead->getNext() == 0 &&
+ "Index list non-empty at initial numbering?");
+ assert(idx2MBBMap.empty() &&
+ "Index -> MBB mapping non-empty at initial numbering?");
+ assert(mbb2IdxMap.empty() &&
+ "MBB -> Index mapping non-empty at initial numbering?");
+ assert(mi2iMap.empty() &&
+ "MachineInstr -> Index mapping non-empty at initial numbering?");
+
+ functionSize = 0;
+ /*
+ for (unsigned s = 0; s < SlotIndex::NUM; ++s) {
+ indexList.push_back(createEntry(0, s));
+ }
+
+ unsigned index = gap * SlotIndex::NUM;
+ */
+
+ unsigned index = 0;
+
+  // Iterate over the function.
+ for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end();
+ mbbItr != mbbEnd; ++mbbItr) {
+ MachineBasicBlock *mbb = &*mbbItr;
+
+ // Insert an index for the MBB start.
+ push_back(createEntry(0, index));
+ SlotIndex blockStartIndex(back(), SlotIndex::LOAD);
+
+ index += gap * SlotIndex::NUM;
+
+ for (MachineBasicBlock::iterator miItr = mbb->begin(), miEnd = mbb->end();
+ miItr != miEnd; ++miItr) {
+ MachineInstr *mi = &*miItr;
+
+ if (miItr == mbb->getFirstTerminator()) {
+ push_back(createEntry(0, index));
+ terminatorGaps.insert(
+ std::make_pair(mbb, SlotIndex(back(), SlotIndex::PHI_BIT)));
+ index += gap * SlotIndex::NUM;
+ }
+
+ // Insert a store index for the instr.
+ push_back(createEntry(mi, index));
+
+ // Save this base index in the maps.
+ mi2iMap.insert(
+ std::make_pair(mi, SlotIndex(back(), SlotIndex::LOAD)));
+
+ ++functionSize;
+
+ unsigned Slots = mi->getDesc().getNumDefs();
+ if (Slots == 0)
+ Slots = 1;
+
+ index += (Slots + 1) * gap * SlotIndex::NUM;
+ }
+
+ if (mbb->getFirstTerminator() == mbb->end()) {
+ push_back(createEntry(0, index));
+ terminatorGaps.insert(
+ std::make_pair(mbb, SlotIndex(back(), SlotIndex::PHI_BIT)));
+ index += gap * SlotIndex::NUM;
+ }
+
+ SlotIndex blockEndIndex(back(), SlotIndex::STORE);
+ mbb2IdxMap.insert(
+ std::make_pair(mbb, std::make_pair(blockStartIndex, blockEndIndex)));
+
+ idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb));
+ }
+
+ // One blank instruction at the end.
+ push_back(createEntry(0, index));
+
+ // Sort the Idx2MBBMap
+ std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+
+ DEBUG(dump());
+
+ // And we're done!
+ return false;
+}
+
+void SlotIndexes::renumber() {
+  assert(false && "SlotIndexes::renumber is not fully implemented yet.");
+
+ // Compute numbering as follows:
+ // Grab an iterator to the start of the index list.
+ // Iterate over all MBBs, and within each MBB all MIs, keeping the MI
+ // iterator in lock-step (though skipping it over indexes which have
+ // null pointers in the instruction field).
+ // At each iteration assert that the instruction pointed to in the index
+  //   is the same one pointed to by the MI iterator.
+
+ // FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should
+ // only need to be set up once - when the first numbering is computed.
+
+ assert(false && "Renumbering not supported yet.");
+}
+
+void SlotIndexes::dump() const {
+ for (const IndexListEntry *itr = front(); itr != getTail();
+ itr = itr->getNext()) {
+ errs() << itr->getIndex() << " ";
+
+ if (itr->getInstr() != 0) {
+ errs() << *itr->getInstr();
+ } else {
+ errs() << "\n";
+ }
+ }
+
+  for (MBB2IdxMap::const_iterator itr = mbb2IdxMap.begin();
+ itr != mbb2IdxMap.end(); ++itr) {
+ errs() << "MBB " << itr->first->getNumber() << " (" << itr->first << ") - ["
+ << itr->second.first << ", " << itr->second.second << "]\n";
+ }
+}
+
+// Print a SlotIndex to a raw_ostream.
+void SlotIndex::print(raw_ostream &os) const {
+ os << getIndex();
+ if (isPHI())
+ os << "*";
+}
+
+// Dump a SlotIndex to stderr.
+void SlotIndex::dump() const {
+ print(errs());
+ errs() << "\n";
+}
+
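The numbering loop above reserves a block of SlotIndex::NUM slots per instruction, plus spare gaps at terminator boundaries and at the end of each block, so later passes can insert instructions without a full renumbering. A standalone sketch of how the resulting indices line up, assuming the usual four sub-slots (load, use, def, store) per instruction; the constants mirror gap and SlotIndex::NUM above, but the program is a model, not the implementation:

    #include <cstdio>

    int main() {
      const unsigned NUM = 4;                  // assumed sub-slots: load, use, def, store
      const unsigned gap = 1;                  // matches the 'gap' constant above
      const char *slot[4] = {"load", "use", "def", "store"};
      unsigned index = gap * NUM;              // first instruction block follows the MBB start entry
      for (unsigned i = 0; i < 2; ++i) {       // two dummy single-def instructions
        for (unsigned s = 0; s < NUM; ++s)
          std::printf("instr %u  %-5s slot -> index %u\n", i, slot[s], index + s);
        index += (1 + 1) * gap * NUM;          // one def => (Slots + 1) groups, as above
      }
      return 0;
    }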
diff --git a/lib/CodeGen/Spiller.cpp b/lib/CodeGen/Spiller.cpp
index 0277d64cdd..95e85be5b8 100644
--- a/lib/CodeGen/Spiller.cpp
+++ b/lib/CodeGen/Spiller.cpp
@@ -51,13 +51,15 @@ protected:
/// Ensures there is space before the given machine instruction, returns the
/// instruction's new number.
- LiveIndex makeSpaceBefore(MachineInstr *mi) {
+ SlotIndex makeSpaceBefore(MachineInstr *mi) {
if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
- lis->scaleNumbering(2);
- ls->scaleNumbering(2);
+ // FIXME: Should be updated to use rewrite-in-place methods when they're
+ // introduced. Currently broken.
+ //lis->scaleNumbering(2);
+ //ls->scaleNumbering(2);
}
- LiveIndex miIdx = lis->getInstructionIndex(mi);
+ SlotIndex miIdx = lis->getInstructionIndex(mi);
assert(lis->hasGapBeforeInstr(miIdx));
@@ -66,13 +68,15 @@ protected:
/// Ensure there is space after the given machine instruction, returns the
/// instruction's new number.
- LiveIndex makeSpaceAfter(MachineInstr *mi) {
+ SlotIndex makeSpaceAfter(MachineInstr *mi) {
if (!lis->hasGapAfterInstr(lis->getInstructionIndex(mi))) {
- lis->scaleNumbering(2);
- ls->scaleNumbering(2);
+ // FIXME: Should be updated to use rewrite-in-place methods when they're
+ // introduced. Currently broken.
+ // lis->scaleNumbering(2);
+ // ls->scaleNumbering(2);
}
- LiveIndex miIdx = lis->getInstructionIndex(mi);
+ SlotIndex miIdx = lis->getInstructionIndex(mi);
assert(lis->hasGapAfterInstr(miIdx));
@@ -83,19 +87,19 @@ protected:
/// after the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
- LiveIndex insertStoreAfter(MachineInstr *mi, unsigned ss,
+ SlotIndex insertStoreAfter(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineBasicBlock::iterator nextInstItr(next(mi));
- LiveIndex miIdx = makeSpaceAfter(mi);
+ SlotIndex miIdx = makeSpaceAfter(mi);
tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, vreg,
true, ss, trc);
MachineBasicBlock::iterator storeInstItr(next(mi));
MachineInstr *storeInst = &*storeInstItr;
- LiveIndex storeInstIdx = lis->getNextIndex(miIdx);
+ SlotIndex storeInstIdx = miIdx.getNextIndex();
assert(lis->getInstructionFromIndex(storeInstIdx) == 0 &&
"Store inst index already in use.");
@@ -108,15 +112,15 @@ protected:
/// Insert a store of the given vreg to the given stack slot immediately
  /// before the given instruction. Returns the base index of the inserted
  /// instruction.
- LiveIndex insertStoreBefore(MachineInstr *mi, unsigned ss,
+ SlotIndex insertStoreBefore(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
- LiveIndex miIdx = makeSpaceBefore(mi);
+ SlotIndex miIdx = makeSpaceBefore(mi);
tii->storeRegToStackSlot(*mi->getParent(), mi, vreg, true, ss, trc);
MachineBasicBlock::iterator storeInstItr(prior(mi));
MachineInstr *storeInst = &*storeInstItr;
- LiveIndex storeInstIdx = lis->getPrevIndex(miIdx);
+ SlotIndex storeInstIdx = miIdx.getPrevIndex();
assert(lis->getInstructionFromIndex(storeInstIdx) == 0 &&
"Store inst index already in use.");
@@ -131,9 +135,9 @@ protected:
unsigned vreg,
const TargetRegisterClass *trc) {
- LiveIndex storeInstIdx = insertStoreAfter(mi, ss, vreg, trc);
- LiveIndex start = lis->getDefIndex(lis->getInstructionIndex(mi)),
- end = lis->getUseIndex(storeInstIdx);
+ SlotIndex storeInstIdx = insertStoreAfter(mi, ss, vreg, trc);
+ SlotIndex start = lis->getInstructionIndex(mi).getDefIndex(),
+ end = storeInstIdx.getUseIndex();
VNInfo *vni =
li->getNextValue(storeInstIdx, 0, true, lis->getVNInfoAllocator());
@@ -149,18 +153,18 @@ protected:
/// after the given instruction. Returns the base index of the inserted
  /// instruction. The caller is responsible for adding/removing an appropriate
  /// range to/from the vreg's LiveInterval.
- LiveIndex insertLoadAfter(MachineInstr *mi, unsigned ss,
+ SlotIndex insertLoadAfter(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineBasicBlock::iterator nextInstItr(next(mi));
- LiveIndex miIdx = makeSpaceAfter(mi);
+ SlotIndex miIdx = makeSpaceAfter(mi);
tii->loadRegFromStackSlot(*mi->getParent(), nextInstItr, vreg, ss, trc);
MachineBasicBlock::iterator loadInstItr(next(mi));
MachineInstr *loadInst = &*loadInstItr;
- LiveIndex loadInstIdx = lis->getNextIndex(miIdx);
+ SlotIndex loadInstIdx = miIdx.getNextIndex();
assert(lis->getInstructionFromIndex(loadInstIdx) == 0 &&
"Store inst index already in use.");
@@ -174,15 +178,15 @@ protected:
/// before the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
- LiveIndex insertLoadBefore(MachineInstr *mi, unsigned ss,
+ SlotIndex insertLoadBefore(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
- LiveIndex miIdx = makeSpaceBefore(mi);
+ SlotIndex miIdx = makeSpaceBefore(mi);
tii->loadRegFromStackSlot(*mi->getParent(), mi, vreg, ss, trc);
MachineBasicBlock::iterator loadInstItr(prior(mi));
MachineInstr *loadInst = &*loadInstItr;
- LiveIndex loadInstIdx = lis->getPrevIndex(miIdx);
+ SlotIndex loadInstIdx = miIdx.getPrevIndex();
assert(lis->getInstructionFromIndex(loadInstIdx) == 0 &&
"Load inst index already in use.");
@@ -197,9 +201,9 @@ protected:
unsigned vreg,
const TargetRegisterClass *trc) {
- LiveIndex loadInstIdx = insertLoadBefore(mi, ss, vreg, trc);
- LiveIndex start = lis->getDefIndex(loadInstIdx),
- end = lis->getUseIndex(lis->getInstructionIndex(mi));
+ SlotIndex loadInstIdx = insertLoadBefore(mi, ss, vreg, trc);
+ SlotIndex start = loadInstIdx.getDefIndex(),
+ end = lis->getInstructionIndex(mi).getUseIndex();
VNInfo *vni =
li->getNextValue(loadInstIdx, 0, true, lis->getVNInfoAllocator());
@@ -321,21 +325,21 @@ public:
vrm->assignVirt2StackSlot(li->reg, ss);
MachineInstr *mi = 0;
- LiveIndex storeIdx = LiveIndex();
+ SlotIndex storeIdx = SlotIndex();
if (valno->isDefAccurate()) {
// If we have an accurate def we can just grab an iterator to the instr
// after the def.
mi = lis->getInstructionFromIndex(valno->def);
- storeIdx = lis->getDefIndex(insertStoreAfter(mi, ss, li->reg, trc));
+ storeIdx = insertStoreAfter(mi, ss, li->reg, trc).getDefIndex();
} else {
// if we get here we have a PHI def.
mi = &lis->getMBBFromIndex(valno->def)->front();
- storeIdx = lis->getDefIndex(insertStoreBefore(mi, ss, li->reg, trc));
+ storeIdx = insertStoreBefore(mi, ss, li->reg, trc).getDefIndex();
}
MachineBasicBlock *defBlock = mi->getParent();
- LiveIndex loadIdx = LiveIndex();
+ SlotIndex loadIdx = SlotIndex();
// Now we need to find the load...
MachineBasicBlock::iterator useItr(mi);
@@ -343,11 +347,11 @@ public:
if (useItr != defBlock->end()) {
MachineInstr *loadInst = useItr;
- loadIdx = lis->getUseIndex(insertLoadBefore(loadInst, ss, li->reg, trc));
+ loadIdx = insertLoadBefore(loadInst, ss, li->reg, trc).getUseIndex();
}
else {
MachineInstr *loadInst = &defBlock->back();
- loadIdx = lis->getUseIndex(insertLoadAfter(loadInst, ss, li->reg, trc));
+ loadIdx = insertLoadAfter(loadInst, ss, li->reg, trc).getUseIndex();
}
li->removeRange(storeIdx, loadIdx, true);
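The trivial spiller above now takes its range endpoints straight from the SlotIndex values returned by the insert helpers. A condensed sketch of the flow (illustrative only; mi, ss, li and trc are the members defined earlier in this file, and useInst stands for the first user found after the def):

    SlotIndex storeIdx = insertStoreAfter(mi, ss, li->reg, trc).getDefIndex();
    SlotIndex loadIdx  = insertLoadBefore(useInst, ss, li->reg, trc).getUseIndex();
    li->removeRange(storeIdx, loadIdx, true);   // the value lives in the stack slot in between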
diff --git a/lib/CodeGen/StackSlotColoring.cpp b/lib/CodeGen/StackSlotColoring.cpp
index eed2daa0a9..c299192b22 100644
--- a/lib/CodeGen/StackSlotColoring.cpp
+++ b/lib/CodeGen/StackSlotColoring.cpp
@@ -98,6 +98,8 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveStacks>();
AU.addRequired<VirtRegMap>();
AU.addPreserved<VirtRegMap>();
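Passes that consume the numbering now declare the dependency explicitly, as in this hunk and the StrongPHIElimination hunk below. A sketch of the boilerplate a client pass needs (MyPass is a hypothetical pass name):

    void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<SlotIndexes>();    // the numbering must be computed before this pass
      AU.addPreserved<SlotIndexes>();   // and this pass keeps it valid
      AU.addRequired<LiveIntervals>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }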
diff --git a/lib/CodeGen/StrongPHIElimination.cpp b/lib/CodeGen/StrongPHIElimination.cpp
index 5658a65abc..3c139068e6 100644
--- a/lib/CodeGen/StrongPHIElimination.cpp
+++ b/lib/CodeGen/StrongPHIElimination.cpp
@@ -72,6 +72,8 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
+ AU.addRequired<SlotIndexes>();
+ AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveIntervals>();
// TODO: Actually make this true.
@@ -294,7 +296,7 @@ StrongPHIElimination::computeDomForest(
static bool isLiveIn(unsigned r, MachineBasicBlock* MBB,
LiveIntervals& LI) {
LiveInterval& I = LI.getOrCreateInterval(r);
- LiveIndex idx = LI.getMBBStartIdx(MBB);
+ SlotIndex idx = LI.getMBBStartIdx(MBB);
return I.liveAt(idx);
}
@@ -427,7 +429,7 @@ void StrongPHIElimination::processBlock(MachineBasicBlock* MBB) {
}
LiveInterval& PI = LI.getOrCreateInterval(DestReg);
- LiveIndex pIdx = LI.getDefIndex(LI.getInstructionIndex(P));
+ SlotIndex pIdx = LI.getInstructionIndex(P).getDefIndex();
VNInfo* PVN = PI.getLiveRangeContaining(pIdx)->valno;
PhiValueNumber.insert(std::make_pair(DestReg, PVN->id));
@@ -747,7 +749,7 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
LiveInterval& I = LI.getInterval(curr.second);
MachineBasicBlock::iterator term = MBB->getFirstTerminator();
- LiveIndex endIdx = LiveIndex();
+ SlotIndex endIdx = SlotIndex();
if (term != MBB->end())
endIdx = LI.getInstructionIndex(term);
else
@@ -771,7 +773,7 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
// Renumber the instructions so that we can perform the index computations
// needed to create new live intervals.
- LI.computeNumbering();
+ LI.renumber();
// For copies that we inserted at the ends of predecessors, we construct
// live intervals. This is pretty easy, since we know that the destination
@@ -783,15 +785,15 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
InsertedPHIDests.begin(), E = InsertedPHIDests.end(); I != E; ++I) {
if (RegHandled.insert(I->first).second) {
LiveInterval& Int = LI.getOrCreateInterval(I->first);
- LiveIndex instrIdx = LI.getInstructionIndex(I->second);
- if (Int.liveAt(LI.getDefIndex(instrIdx)))
- Int.removeRange(LI.getDefIndex(instrIdx),
- LI.getNextSlot(LI.getMBBEndIdx(I->second->getParent())),
+ SlotIndex instrIdx = LI.getInstructionIndex(I->second);
+ if (Int.liveAt(instrIdx.getDefIndex()))
+ Int.removeRange(instrIdx.getDefIndex(),
+ LI.getMBBEndIdx(I->second->getParent()).getNextSlot(),
true);
LiveRange R = LI.addLiveRangeToEndOfBlock(I->first, I->second);
R.valno->setCopy(I->second);
- R.valno->def = LI.getDefIndex(LI.getInstructionIndex(I->second));
+ R.valno->def = LI.getInstructionIndex(I->second).getDefIndex();
}
}
}
@@ -816,8 +818,8 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
Stacks[I->getOperand(i).getReg()].size()) {
// Remove the live range for the old vreg.
LiveInterval& OldInt = LI.getInterval(I->getOperand(i).getReg());
- LiveInterval::iterator OldLR = OldInt.FindLiveRangeContaining(
- LI.getUseIndex(LI.getInstructionIndex(I)));
+ LiveInterval::iterator OldLR =
+ OldInt.FindLiveRangeContaining(LI.getInstructionIndex(I).getUseIndex());
if (OldLR != OldInt.end())
OldInt.removeRange(*OldLR, true);
@@ -829,11 +831,10 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
VNInfo* FirstVN = *Int.vni_begin();
FirstVN->setHasPHIKill(false);
if (I->getOperand(i).isKill())
- FirstVN->addKill(
- LI.getUseIndex(LI.getInstructionIndex(I)));
+ FirstVN->addKill(LI.getInstructionIndex(I).getUseIndex());
LiveRange LR (LI.getMBBStartIdx(I->getParent()),
- LI.getNextSlot(LI.getUseIndex(LI.getInstructionIndex(I))),
+ LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
FirstVN);
Int.addRange(LR);
@@ -862,14 +863,14 @@ bool StrongPHIElimination::mergeLiveIntervals(unsigned primary,
LiveInterval& LHS = LI.getOrCreateInterval(primary);
LiveInterval& RHS = LI.getOrCreateInterval(secondary);
- LI.computeNumbering();
+ LI.renumber();
DenseMap<VNInfo*, VNInfo*> VNMap;
for (LiveInterval::iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
LiveRange R = *I;
- LiveIndex Start = R.start;
- LiveIndex End = R.end;
+ SlotIndex Start = R.start;
+ SlotIndex End = R.end;
if (LHS.getLiveRangeContaining(Start))
return false;
@@ -963,19 +964,19 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
TII->copyRegToReg(*SI->second, SI->second->getFirstTerminator(),
I->first, SI->first, RC, RC);
- LI.computeNumbering();
+ LI.renumber();
LiveInterval& Int = LI.getOrCreateInterval(I->first);
- LiveIndex instrIdx =
+ SlotIndex instrIdx =
LI.getInstructionIndex(--SI->second->getFirstTerminator());
- if (Int.liveAt(LI.getDefIndex(instrIdx)))
- Int.removeRange(LI.getDefIndex(instrIdx),
- LI.getNextSlot(LI.getMBBEndIdx(SI->second)), true);
+ if (Int.liveAt(instrIdx.getDefIndex()))
+ Int.removeRange(instrIdx.getDefIndex(),
+ LI.getMBBEndIdx(SI->second).getNextSlot(), true);
LiveRange R = LI.addLiveRangeToEndOfBlock(I->first,
--SI->second->getFirstTerminator());
R.valno->setCopy(--SI->second->getFirstTerminator());
- R.valno->def = LI.getDefIndex(instrIdx);
+ R.valno->def = instrIdx.getDefIndex();
DEBUG(errs() << "Renaming failed: " << SI->first << " -> "
<< I->first << "\n");
@@ -1010,7 +1011,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
if (PI.containsOneValue()) {
LI.removeInterval(DestReg);
} else {
- LiveIndex idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
+ SlotIndex idx = LI.getInstructionIndex(PInstr).getDefIndex();
PI.removeRange(*PI.getLiveRangeContaining(idx), true);
}
} else {
@@ -1024,7 +1025,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
LiveInterval& InputI = LI.getInterval(reg);
if (MBB != PInstr->getParent() &&
InputI.liveAt(LI.getMBBStartIdx(PInstr->getParent())) &&
- InputI.expiredAt(LI.getNextIndex(LI.getInstructionIndex(PInstr))))
+ InputI.expiredAt(LI.getInstructionIndex(PInstr).getNextIndex()))
InputI.removeRange(LI.getMBBStartIdx(PInstr->getParent()),
LI.getInstructionIndex(PInstr),
true);
@@ -1032,7 +1033,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
// If the PHI is not dead, then the valno defined by the PHI
// now has an unknown def.
- LiveIndex idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
+ SlotIndex idx = LI.getInstructionIndex(PInstr).getDefIndex();
const LiveRange* PLR = PI.getLiveRangeContaining(idx);
PLR->valno->setIsPHIDef(true);
LiveRange R (LI.getMBBStartIdx(PInstr->getParent()),
@@ -1044,7 +1045,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
PInstr->eraseFromParent();
}
- LI.computeNumbering();
+ LI.renumber();
return true;
}
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index cac098bace..ce3eed17c7 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -56,7 +56,7 @@ bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
TII = mf.getTarget().getInstrInfo();
TRI = mf.getTarget().getRegisterInfo();
MF = &mf;
-
+
ReMatId = MAX_STACK_SLOT+1;
LowSpillSlot = HighSpillSlot = NO_STACK_SLOT;
diff --git a/lib/CodeGen/VirtRegMap.h b/lib/CodeGen/VirtRegMap.h
index bdc2d1f712..a5599f68b6 100644
--- a/lib/CodeGen/VirtRegMap.h
+++ b/lib/CodeGen/VirtRegMap.h
@@ -80,7 +80,7 @@ namespace llvm {
    /// Virt2SplitKillMap - This maps a split virtual register to its last use
    /// (kill) index.
- IndexedMap<LiveIndex> Virt2SplitKillMap;
+ IndexedMap<SlotIndex> Virt2SplitKillMap;
/// ReMatMap - This is virtual register to re-materialized instruction
/// mapping. Each virtual register whose definition is going to be
@@ -142,7 +142,7 @@ namespace llvm {
VirtRegMap() : MachineFunctionPass(&ID), Virt2PhysMap(NO_PHYS_REG),
Virt2StackSlotMap(NO_STACK_SLOT),
Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
- Virt2SplitKillMap(LiveIndex()), ReMatMap(NULL),
+ Virt2SplitKillMap(SlotIndex()), ReMatMap(NULL),
ReMatId(MAX_STACK_SLOT+1),
LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@@ -266,17 +266,17 @@ namespace llvm {
}
/// @brief record the last use (kill) of a split virtual register.
- void addKillPoint(unsigned virtReg, LiveIndex index) {
+ void addKillPoint(unsigned virtReg, SlotIndex index) {
Virt2SplitKillMap[virtReg] = index;
}
- LiveIndex getKillPoint(unsigned virtReg) const {
+ SlotIndex getKillPoint(unsigned virtReg) const {
return Virt2SplitKillMap[virtReg];
}
/// @brief remove the last use (kill) of a split virtual register.
void removeKillPoint(unsigned virtReg) {
- Virt2SplitKillMap[virtReg] = LiveIndex();
+ Virt2SplitKillMap[virtReg] = SlotIndex();
}
/// @brief returns true if the specified MachineInstr is a spill point.