author    Jim Grosbach <grosbach@apple.com>    2010-09-01 19:16:29 +0000
committer Jim Grosbach <grosbach@apple.com>    2010-09-01 19:16:29 +0000
commit    07cb689d6260b78861d829bb05b188e1558c528e (patch)
tree      bf29d948da800227f062118cad45e4d32871887c /lib/CodeGen/RegAllocFast.cpp
parent    ee70d3b27faf9f57038a5724b3a993cfbf588729 (diff)
tidy up a few 80-column and trailing whitespace bits.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112726 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen/RegAllocFast.cpp')
-rw-r--r--  lib/CodeGen/RegAllocFast.cpp | 35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index 237686859f..3df4ab71e4 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -113,9 +113,9 @@ namespace {
// Allocatable - vector of allocatable physical registers.
BitVector Allocatable;
- // SkippedInstrs - Descriptors of instructions whose clobber list was ignored
- // because all registers were spilled. It is still necessary to mark all the
- // clobbered registers as used by the function.
+ // SkippedInstrs - Descriptors of instructions whose clobber list was
+ // ignored because all registers were spilled. It is still necessary to
+ // mark all the clobbered registers as used by the function.
SmallPtrSet<const TargetInstrDesc*, 4> SkippedInstrs;
// isBulkSpilling - This flag is set when LiveRegMap will be cleared
@@ -267,11 +267,11 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, RC, TRI);
++NumStores; // Update statistics
- // If this register is used by DBG_VALUE then insert new DBG_VALUE to
+ // If this register is used by DBG_VALUE then insert new DBG_VALUE to
// identify spilled location as the place to find corresponding variable's
// value.
if (MachineInstr *DBG = LiveDbgValueMap.lookup(LRI->first)) {
- const MDNode *MDPtr =
+ const MDNode *MDPtr =
DBG->getOperand(DBG->getNumOperands()-1).getMetadata();
int64_t Offset = 0;
if (DBG->getOperand(1).isImm())
@@ -284,7 +284,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI,
}
else
DL = MI->getDebugLoc();
- if (MachineInstr *NewDV =
+ if (MachineInstr *NewDV =
TII->emitFrameIndexDebugValue(*MF, FI, Offset, MDPtr, DL)) {
MachineBasicBlock *MBB = DBG->getParent();
MBB->insert(MI, NewDV);
@@ -796,16 +796,18 @@ void RAFast::AllocateBasicBlock() {
else {
int SS = StackSlotForVirtReg[Reg];
if (SS == -1)
- MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+ // We can't allocate a physreg for a DebugValue, sorry!
+ MO.setReg(0);
else {
// Modify DBG_VALUE now that the value is in a spill slot.
int64_t Offset = MI->getOperand(1).getImm();
- const MDNode *MDPtr =
+ const MDNode *MDPtr =
MI->getOperand(MI->getNumOperands()-1).getMetadata();
DebugLoc DL = MI->getDebugLoc();
- if (MachineInstr *NewDV =
+ if (MachineInstr *NewDV =
TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL)) {
- DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
+ DEBUG(dbgs() << "Modifying debug info due to spill:" <<
+ "\t" << *MI);
MachineBasicBlock *MBB = MI->getParent();
MBB->insert(MBB->erase(MI), NewDV);
// Scan NewDV operands from the beginning.
@@ -813,7 +815,8 @@ void RAFast::AllocateBasicBlock() {
ScanDbgValue = true;
break;
} else
- MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry!
+ // We can't allocate a physreg for a DebugValue; sorry!
+ MO.setReg(0);
}
}
}
@@ -876,8 +879,8 @@ void RAFast::AllocateBasicBlock() {
// operands. If there are also physical defs, these registers must avoid
// both physical defs and uses, making them more constrained than normal
// operands.
- // Similarly, if there are multiple defs and tied operands, we must make sure
- // the same register is allocated to uses and defs.
+ // Similarly, if there are multiple defs and tied operands, we must make
+ // sure the same register is allocated to uses and defs.
// We didn't detect inline asm tied operands above, so just make this extra
// pass for all inline asm.
if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
@@ -928,9 +931,9 @@ void RAFast::AllocateBasicBlock() {
unsigned DefOpEnd = MI->getNumOperands();
if (TID.isCall()) {
// Spill all virtregs before a call. This serves two purposes: 1. If an
- // exception is thrown, the landing pad is going to expect to find registers
- // in their spill slots, and 2. we don't have to wade through all the
- // <imp-def> operands on the call instruction.
+ // exception is thrown, the landing pad is going to expect to find
+ // registers in their spill slots, and 2. we don't have to wade through
+ // all the <imp-def> operands on the call instruction.
DefOpEnd = VirtOpEnd;
DEBUG(dbgs() << " Spilling remaining registers before call.\n");
spillAll(MI);