author     Andrew Trick <atrick@apple.com>  2010-12-24 04:28:06 +0000
committer  Andrew Trick <atrick@apple.com>  2010-12-24 04:28:06 +0000
commit     6e8f4c404825b79f9b9176483653f1aa927dfbde (patch)
tree       4a5837073327bcc312eb18562400aba20481609f /lib
parent     ef485d86585123b5e31a7f88aef22725ebd07e7a (diff)
download   llvm-6e8f4c404825b79f9b9176483653f1aa927dfbde.tar.gz
           llvm-6e8f4c404825b79f9b9176483653f1aa927dfbde.tar.bz2
           llvm-6e8f4c404825b79f9b9176483653f1aa927dfbde.tar.xz
whitespace
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122539 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--  lib/CodeGen/LatencyPriorityQueue.cpp           12
-rw-r--r--  lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp   30
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp  314
-rw-r--r--  lib/Target/ARM/ARMBaseInstrInfo.cpp            2
-rw-r--r--  lib/Target/ARM/ARMISelDAGToDAG.cpp             2
-rw-r--r--  lib/Target/ARM/ARMSubtarget.cpp                4
-rw-r--r--  lib/Target/ARM/ARMSubtarget.h                  2
-rw-r--r--  lib/Target/CellSPU/SPUHazardRecognizers.h      2
-rw-r--r--  lib/Target/PowerPC/PPCHazardRecognizers.cpp    48
-rw-r--r--  lib/Target/PowerPC/PPCHazardRecognizers.h      18
-rw-r--r--  lib/Target/PowerPC/PPCISelDAGToDAG.cpp         172
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.cpp            58
-rw-r--r--  lib/Target/PowerPC/PPCInstrInfo.h              20
-rw-r--r--  lib/Target/TargetInstrInfo.cpp                 10
14 files changed, 347 insertions, 347 deletions
diff --git a/lib/CodeGen/LatencyPriorityQueue.cpp b/lib/CodeGen/LatencyPriorityQueue.cpp
index b9527fafbe..f0d830b11e 100644
--- a/lib/CodeGen/LatencyPriorityQueue.cpp
+++ b/lib/CodeGen/LatencyPriorityQueue.cpp
@@ -35,14 +35,14 @@ bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
unsigned RHSLatency = PQ->getLatency(RHSNum);
if (LHSLatency < RHSLatency) return true;
if (LHSLatency > RHSLatency) return false;
-
+
// After that, if two nodes have identical latencies, look to see if one will
// unblock more other nodes than the other.
unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
if (LHSBlocked < RHSBlocked) return true;
if (LHSBlocked > RHSBlocked) return false;
-
+
// Finally, just to provide a stable ordering, use the node number as a
// deciding factor.
return LHSNum < RHSNum;
@@ -64,7 +64,7 @@ SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
OnlyAvailablePred = &Pred;
}
}
-
+
return OnlyAvailablePred;
}
@@ -78,7 +78,7 @@ void LatencyPriorityQueue::push(SUnit *SU) {
++NumNodesBlocking;
}
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
-
+
Queue.push_back(SU);
}
@@ -102,10 +102,10 @@ void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
if (SU->isAvailable) return; // All preds scheduled.
-
+
SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;
-
+
// Okay, we found a single predecessor that is available, but not scheduled.
// Since it is available, it must be in the priority queue. First remove it.
remove(OnlyAvailablePred);
diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
index 56f5ded500..11df0963c3 100644
--- a/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
+++ b/lib/CodeGen/SelectionDAG/ScheduleDAGList.cpp
@@ -40,7 +40,7 @@ STATISTIC(NumStalls, "Number of pipeline stalls");
static RegisterScheduler
tdListDAGScheduler("list-td", "Top-down list scheduler",
createTDListDAGScheduler);
-
+
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGList - The actual list scheduler implementation. This supports
@@ -51,7 +51,7 @@ private:
/// AvailableQueue - The priority queue to use for the available SUnits.
///
SchedulingPriorityQueue *AvailableQueue;
-
+
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
/// the operation). Once the operands become available, the instruction is
@@ -87,14 +87,14 @@ private:
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGList::Schedule() {
DEBUG(dbgs() << "********** List Scheduling **********\n");
-
+
// Build the scheduling graph.
BuildSchedGraph(NULL);
AvailableQueue->initNodes(SUnits);
-
+
ListScheduleTopDown();
-
+
AvailableQueue->releaseState();
}
@@ -118,7 +118,7 @@ void ScheduleDAGList::ReleaseSucc(SUnit *SU, const SDep &D) {
--SuccSU->NumPredsLeft;
SuccSU->setDepthToAtLeast(SU->getDepth() + D.getLatency());
-
+
// If all the node's predecessors are scheduled, this node is ready
// to be scheduled. Ignore the special ExitSU node.
if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
@@ -142,7 +142,7 @@ void ScheduleDAGList::ReleaseSuccessors(SUnit *SU) {
void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
-
+
Sequence.push_back(SU);
assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
@@ -168,7 +168,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
SUnits[i].isAvailable = true;
}
}
-
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
@@ -187,7 +187,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
assert(PendingQueue[i]->getDepth() > CurCycle && "Negative latency?");
}
}
-
+
// If there are no instructions available, don't try to issue anything, and
// don't advance the hazard recognizer.
if (AvailableQueue->empty()) {
@@ -196,24 +196,24 @@ void ScheduleDAGList::ListScheduleTopDown() {
}
SUnit *FoundSUnit = 0;
-
+
bool HasNoopHazards = false;
while (!AvailableQueue->empty()) {
SUnit *CurSUnit = AvailableQueue->pop();
-
+
ScheduleHazardRecognizer::HazardType HT =
HazardRec->getHazardType(CurSUnit);
if (HT == ScheduleHazardRecognizer::NoHazard) {
FoundSUnit = CurSUnit;
break;
}
-
+
// Remember if this is a noop hazard.
HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
-
+
NotReady.push_back(CurSUnit);
}
-
+
// Add the nodes that aren't ready back onto the available list.
if (!NotReady.empty()) {
AvailableQueue->push_all(NotReady);
@@ -228,7 +228,7 @@ void ScheduleDAGList::ListScheduleTopDown() {
// If this is a pseudo-op node, we don't want to increment the current
// cycle.
if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
+ ++CurCycle;
} else if (!HasNoopHazards) {
// Otherwise, we have a pipeline stall, but no other problem, just advance
// the current cycle and try again.
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index fd1d6c7770..b7e90be8b3 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -59,9 +59,9 @@ STATISTIC(NumDAGBlocks, "Number of blocks selected using DAG");
STATISTIC(NumDAGIselRetries,"Number of times dag isel has to try another path");
#ifndef NDEBUG
-STATISTIC(NumBBWithOutOfOrderLineInfo,
+STATISTIC(NumBBWithOutOfOrderLineInfo,
"Number of blocks with out of order line number info");
-STATISTIC(NumMBBWithOutOfOrderLineInfo,
+STATISTIC(NumMBBWithOutOfOrderLineInfo,
"Number of machine blocks with out of order line number info");
#endif
@@ -252,7 +252,7 @@ static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
PHINode *PN = dyn_cast<PHINode>(BB->begin());
if (PN == 0) continue;
-
+
ReprocessBlock:
// For each block with a PHI node, check to see if any of the input values
// are potentially trapping constant expressions. Constant expressions are
@@ -262,14 +262,14 @@ static void SplitCriticalSideEffectEdges(Function &Fn, Pass *SDISel) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
ConstantExpr *CE = dyn_cast<ConstantExpr>(PN->getIncomingValue(i));
if (CE == 0 || !CE->canTrap()) continue;
-
+
// The only case we have to worry about is when the edge is critical.
// Since this block has a PHI Node, we assume it has multiple input
// edges: check to see if the pred has multiple successors.
BasicBlock *Pred = PN->getIncomingBlock(i);
if (Pred->getTerminator()->getNumSuccessors() == 1)
continue;
-
+
// Okay, we have to split this edge.
SplitCriticalEdge(Pred->getTerminator(),
GetSuccessorNumber(Pred, BB), SDISel, true);
@@ -297,7 +297,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
DEBUG(dbgs() << "\n\n\n=== " << Fn.getName() << "\n");
SplitCriticalSideEffectEdges(const_cast<Function&>(Fn), this);
-
+
CurDAG->init(*MF);
FuncInfo->set(Fn, *MF);
SDB->init(GFI, *AA);
@@ -314,7 +314,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (!FuncInfo->ArgDbgValues.empty())
for (MachineRegisterInfo::livein_iterator LI = RegInfo->livein_begin(),
E = RegInfo->livein_end(); LI != E; ++LI)
- if (LI->second)
+ if (LI->second)
LiveInMap.insert(std::make_pair(LI->first, LI->second));
// Insert DBG_VALUE instructions for function arguments to the entry block.
@@ -335,11 +335,11 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
if (LDI != LiveInMap.end()) {
MachineInstr *Def = RegInfo->getVRegDef(LDI->second);
MachineBasicBlock::iterator InsertPos = Def;
- const MDNode *Variable =
+ const MDNode *Variable =
MI->getOperand(MI->getNumOperands()-1).getMetadata();
unsigned Offset = MI->getOperand(1).getImm();
// Def is never a terminator here, so it is ok to increment InsertPos.
- BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
+ BuildMI(*EntryMBB, ++InsertPos, MI->getDebugLoc(),
TII.get(TargetOpcode::DBG_VALUE))
.addReg(LDI->second, RegState::Debug)
.addImm(Offset).addMetadata(Variable);
@@ -348,8 +348,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// that COPY instructions also need DBG_VALUE, if it is the only
// user of LDI->second.
MachineInstr *CopyUseMI = NULL;
- for (MachineRegisterInfo::use_iterator
- UI = RegInfo->use_begin(LDI->second);
+ for (MachineRegisterInfo::use_iterator
+ UI = RegInfo->use_begin(LDI->second);
MachineInstr *UseMI = UI.skipInstruction();) {
if (UseMI->isDebugValue()) continue;
if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
@@ -360,7 +360,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
}
if (CopyUseMI) {
MachineInstr *NewMI =
- BuildMI(*MF, CopyUseMI->getDebugLoc(),
+ BuildMI(*MF, CopyUseMI->getDebugLoc(),
TII.get(TargetOpcode::DBG_VALUE))
.addReg(CopyUseMI->getOperand(0).getReg(), RegState::Debug)
.addImm(Offset).addMetadata(Variable);
@@ -646,19 +646,19 @@ void SelectionDAGISel::DoInstructionSelection() {
DEBUG(errs() << "===== Instruction selection begins:\n");
PreprocessISelDAG();
-
+
// Select target instructions for the DAG.
{
// Number all nodes with a topological order and set DAGSize.
DAGSize = CurDAG->AssignTopologicalOrder();
-
+
// Create a dummy node (which is not added to allnodes), that adds
// a reference to the root node, preventing it from being deleted,
// and tracking any changes of the root.
HandleSDNode Dummy(CurDAG->getRoot());
ISelPosition = SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode());
++ISelPosition;
-
+
// The AllNodes list is now topological-sorted. Visit the
// nodes by starting at the end of the list (the root of the
// graph) and preceding back toward the beginning (the entry
@@ -670,19 +670,19 @@ void SelectionDAGISel::DoInstructionSelection() {
// makes it theoretically possible to disable the DAGCombiner.
if (Node->use_empty())
continue;
-
+
SDNode *ResNode = Select(Node);
-
+
// FIXME: This is pretty gross. 'Select' should be changed to not return
// anything at all and this code should be nuked with a tactical strike.
-
+
// If node should not be replaced, continue with the next one.
if (ResNode == Node || Node->getOpcode() == ISD::DELETED_NODE)
continue;
// Replace node.
if (ResNode)
ReplaceUses(Node, ResNode);
-
+
// If after the replacement this node is not used any more,
// remove this dead node.
if (Node->use_empty()) { // Don't delete EntryToken, etc.
@@ -690,9 +690,9 @@ void SelectionDAGISel::DoInstructionSelection() {
CurDAG->RemoveDeadNode(Node, &ISU);
}
}
-
+
CurDAG->setRoot(Dummy.getValue());
- }
+ }
DEBUG(errs() << "===== Instruction selection ends:\n");
@@ -746,13 +746,13 @@ void SelectionDAGISel::PrepareEHLandingPad() {
-
+
bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
FastISel *FastIS) {
// Don't try to fold volatile loads. Target has to deal with alignment
// constraints.
if (LI->isVolatile()) return false;
-
+
// Figure out which vreg this is going into.
unsigned LoadReg = FastIS->getRegForValue(LI);
assert(LoadReg && "Load isn't already assigned a vreg? ");
@@ -762,7 +762,7 @@ bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(LoadReg);
if (RI == RegInfo->reg_end())
return false;
-
+
// See if there is exactly one use of the vreg. If there are multiple uses,
// then the instruction got lowered to multiple machine instructions or the
// use of the loaded value ended up being multiple operands of the result, in
@@ -770,7 +770,7 @@ bool SelectionDAGISel::TryToFoldFastISelLoad(const LoadInst *LI,
MachineRegisterInfo::reg_iterator PostRI = RI; ++PostRI;
if (PostRI != RegInfo->reg_end())
return false;
-
+
assert(RI.getOperand().isUse() &&
"The only use of the vreg must be a use, we haven't emitted the def!");
@@ -797,9 +797,9 @@ static void CheckLineNumbers(const BasicBlock *BB) {
Line = L;
Col = C;
}
-}
+}
-/// CheckLineNumbers - Check if machine basic block instructions follow source
+/// CheckLineNumbers - Check if machine basic block instructions follow source
/// order or not.
static void CheckLineNumbers(const MachineBasicBlock *MBB) {
unsigned Line = 0;
@@ -817,7 +817,7 @@ static void CheckLineNumbers(const MachineBasicBlock *MBB) {
Line = L;
Col = C;
}
-}
+}
#endif
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
@@ -844,7 +844,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
// Setup an EH landing-pad block.
if (FuncInfo->MBB->isLandingPad())
PrepareEHLandingPad();
-
+
// Lower any arguments needed in this block if this is the entry block.
if (LLVMBB == &Fn.getEntryBlock())
LowerArguments(LLVMBB);
@@ -897,7 +897,7 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
TryToFoldFastISelLoad(cast<LoadInst>(BeforeInst), FastIS)) {
// If we succeeded, don't re-select the load.
--BI;
- }
+ }
continue;
}
@@ -1349,7 +1349,7 @@ static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
// uses.
if ((Use->getNodeId() < Def->getNodeId() && Use->getNodeId() != -1))
return false;
-
+
// Don't revisit nodes if we already scanned it and didn't fail, we know we
// won't fail if we scan it again.
if (!Visited.insert(Use))
@@ -1359,7 +1359,7 @@ static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
// Ignore chain uses, they are validated by HandleMergeInputChains.
if (Use->getOperand(i).getValueType() == MVT::Other && IgnoreChains)
continue;
-
+
SDNode *N = Use->getOperand(i).getNode();
if (N == Def) {
if (Use == ImmedUse || Use == Root)
@@ -1441,14 +1441,14 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
break;
Root = GU;
VT = Root->getValueType(Root->getNumValues()-1);
-
+
// If our query node has a glue result with a use, we've walked up it. If
// the user (which has already been selected) has a chain or indirectly uses
// the chain, our WalkChainUsers predicate will not consider it. Because of
// this, we cannot ignore chains in this predicate.
IgnoreChains = false;
}
-
+
SmallPtrSet<SDNode*, 16> Visited;
return !findNonImmUse(Root, N.getNode(), U, Root, Visited, IgnoreChains);
@@ -1457,7 +1457,7 @@ bool SelectionDAGISel::IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
std::vector<SDValue> Ops(N->op_begin(), N->op_end());
SelectInlineAsmMemoryOperands(Ops);
-
+
std::vector<EVT> VTs;
VTs.push_back(MVT::Other);
VTs.push_back(MVT::Glue);
@@ -1476,7 +1476,7 @@ LLVM_ATTRIBUTE_ALWAYS_INLINE static uint64_t
GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
assert(Val >= 128 && "Not a VBR");
Val &= 127; // Remove first vbr bit.
-
+
unsigned Shift = 7;
uint64_t NextBits;
do {
@@ -1484,7 +1484,7 @@ GetVBR(uint64_t Val, const unsigned char *MatcherTable, unsigned &Idx) {
Val |= (NextBits&127) << Shift;
Shift += 7;
} while (NextBits & 128);
-
+
return Val;
}
@@ -1498,7 +1498,7 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
const SmallVectorImpl<SDNode*> &GlueResultNodesMatched,
bool isMorphNodeTo) {
SmallVector<SDNode*, 4> NowDeadNodes;
-
+
ISelUpdater ISU(ISelPosition);
// Now that all the normal results are replaced, we replace the chain and
@@ -1510,55 +1510,55 @@ UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
// Replace all the chain results with the final chain we ended up with.
for (unsigned i = 0, e = ChainNodesMatched.size(); i != e; ++i) {
SDNode *ChainNode = ChainNodesMatched[i];
-
+
// If this node was already deleted, don't look at it.
if (ChainNode->getOpcode() == ISD::DELETED_NODE)
continue;
-
+
// Don't replace the results of the root node if we're doing a
// MorphNodeTo.
if (ChainNode == NodeToMatch && isMorphNodeTo)
continue;
-
+
SDValue ChainVal = SDValue(ChainNode, ChainNode->getNumValues()-1);
if (ChainVal.getValueType() == MVT::Glue)
ChainVal = ChainVal.getValue(ChainVal->getNumValues()-2);
assert(ChainVal.getValueType() == MVT::Other && "Not a chain?");
CurDAG->ReplaceAllUsesOfValueWith(ChainVal, InputChain, &ISU);
-
+
// If the node became dead and we haven't already seen it, delete it.
if (ChainNode->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), ChainNode))
NowDeadNodes.push_back(ChainNode);
}
}
-
+
// If the result produces glue, update any glue results in the matched
// pattern with the glue result.
if (InputGlue.getNode() != 0) {
// Handle any interior nodes explicitly marked.
for (unsigned i = 0, e = GlueResultNodesMatched.size(); i != e; ++i) {
SDNode *FRN = GlueResultNodesMatched[i];
-
+
// If this node was already deleted, don't look at it.
if (FRN->getOpcode() == ISD::DELETED_NODE)
continue;
-
+
assert(FRN->getValueType(FRN->getNumValues()-1) == MVT::Glue &&
"Doesn't have a glue result");
CurDAG->ReplaceAllUsesOfValueWith(SDValue(FRN, FRN->getNumValues()-1),
InputGlue, &ISU);
-
+
// If the node became dead and we haven't already seen it, delete it.
if (FRN->use_empty() &&
!std::count(NowDeadNodes.begin(), NowDeadNodes.end(), FRN))
NowDeadNodes.push_back(FRN);
}
}
-
+
if (!NowDeadNodes.empty())
CurDAG->RemoveDeadNodes(NowDeadNodes, &ISU);
-
+
DEBUG(errs() << "ISEL: Match complete!\n");
}
@@ -1577,17 +1577,17 @@ enum ChainResult {
///
/// The walk we do here is guaranteed to be small because we quickly get down to
/// already selected nodes "below" us.
-static ChainResult
+static ChainResult
WalkChainUsers(SDNode *ChainedNode,
SmallVectorImpl<SDNode*> &ChainedNodesInPattern,
SmallVectorImpl<SDNode*> &InteriorChainedNodes) {
ChainResult Result = CR_Simple;
-
+
for (SDNode::use_iterator UI = ChainedNode->use_begin(),
E = ChainedNode->use_end(); UI != E; ++UI) {
// Make sure the use is of the chain, not some other value we produce.
if (UI.getUse().getValueType() != MVT::Other) continue;
-
+
SDNode *User = *UI;
// If we see an already-selected machine node, then we've gone beyond the
@@ -1596,7 +1596,7 @@ WalkChainUsers(SDNode *ChainedNode,
if (User->isMachineOpcode() ||
User->getOpcode() == ISD::HANDLENODE) // Root of the graph.
continue;
-
+
if (User->getOpcode() == ISD::CopyToReg ||
User->getOpcode() == ISD::CopyFromReg ||
User->getOpcode() == ISD::INLINEASM ||
@@ -1622,7 +1622,7 @@ WalkChainUsers(SDNode *ChainedNode,
if (!std::count(ChainedNodesInPattern.begin(),
ChainedNodesInPattern.end(), User))
return CR_InducesCycle;
-
+
// Otherwise we found a node that is part of our pattern. For example in:
// x = load ptr
// y = x+4
@@ -1634,7 +1634,7 @@ WalkChainUsers(SDNode *ChainedNode,
InteriorChainedNodes.push_back(User);
continue;
}
-
+
// If we found a TokenFactor, there are two cases to consider: first if the
// TokenFactor is just hanging "below" the pattern we're matching (i.e. no
// uses of the TF are in our pattern) we just want to ignore it. Second,
@@ -1671,7 +1671,7 @@ WalkChainUsers(SDNode *ChainedNode,
case CR_LeadsToInteriorNode:
break; // Otherwise, keep processing.
}
-
+
// Okay, we know we're in the interesting interior case. The TokenFactor
// is now going to be considered part of the pattern so that we rewrite its
// uses (it may have uses that are not part of the pattern) with the
@@ -1682,7 +1682,7 @@ WalkChainUsers(SDNode *ChainedNode,
InteriorChainedNodes.push_back(User);
continue;
}
-
+
return Result;
}
@@ -1704,7 +1704,7 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
InteriorChainedNodes) == CR_InducesCycle)
return SDValue(); // Would induce a cycle.
}
-
+
// Okay, we have walked all the matched nodes and collected TokenFactor nodes
// that we are interested in. Form our input TokenFactor node.
SmallVector<SDValue, 3> InputChains;
@@ -1715,14 +1715,14 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
if (N->getOpcode() != ISD::TokenFactor) {
if (std::count(InteriorChainedNodes.begin(),InteriorChainedNodes.end(),N))
continue;
-
+
// Otherwise, add the input chain.
SDValue InChain = ChainNodesMatched[i]->getOperand(0);
assert(InChain.getValueType() == MVT::Other && "Not a chain");
InputChains.push_back(InChain);
continue;
}
-
+
// If we have a token factor, we want to add all inputs of the token factor
// that are not part of the pattern we're matching.
for (unsigned op = 0, e = N->getNumOperands(); op != e; ++op) {
@@ -1731,13 +1731,13 @@ HandleMergeInputChains(SmallVectorImpl<SDNode*> &ChainNodesMatched,
InputChains.push_back(N->getOperand(op));
}
}
-
+
SDValue Res;
if (InputChains.size() == 1)
return InputChains[0];
return CurDAG->getNode(ISD::TokenFactor, ChainNodesMatched[0]->getDebugLoc(),
MVT::Other, &InputChains[0], InputChains.size());
-}
+}
/// MorphNode - Handle morphing a node in place for the selector.
SDNode *SelectionDAGISel::
@@ -1777,7 +1777,7 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
// Move the glue if needed.
if ((EmitNodeInfo & OPFL_GlueOutput) && OldGlueResultNo != -1 &&
(unsigned)OldGlueResultNo != ResNumResults-1)
- CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldGlueResultNo),
SDValue(Res, ResNumResults-1));
if ((EmitNodeInfo & OPFL_GlueOutput) != 0)
@@ -1786,14 +1786,14 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList,
// Move the chain reference if needed.
if ((EmitNodeInfo & OPFL_Chain) && OldChainResultNo != -1 &&
(unsigned)OldChainResultNo != ResNumResults-1)
- CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
+ CurDAG->ReplaceAllUsesOfValueWith(SDValue(Node, OldChainResultNo),
SDValue(Res, ResNumResults-1));
// Otherwise, no replacement happened because the node already exists. Replace
// Uses of the old node with the new one.
if (Res != Node)
CurDAG->ReplaceAllUsesWith(Node, Res);
-
+
return Res;
}
@@ -1807,7 +1807,7 @@ CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex,
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
return N == RecordedNodes[RecNo].first;
}
-
+
/// CheckPatternPredicate - Implements OP_CheckPatternPredicate.
LLVM_ATTRIBUTE_ALWAYS_INLINE static bool
CheckPatternPredicate(const unsigned char *MatcherTable, unsigned &MatcherIndex,
@@ -1835,7 +1835,7 @@ CheckType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
SDValue N, const TargetLowering &TLI) {
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (N.getValueType() == VT) return true;
-
+
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && N.getValueType() == TLI.getPointerTy();
}
@@ -1863,7 +1863,7 @@ CheckValueType(const unsigned char *MatcherTable, unsigned &MatcherIndex,
MVT::SimpleValueType VT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (cast<VTSDNode>(N)->getVT() == VT)
return true;
-
+
// Handle the case when VT is iPTR.
return VT == MVT::iPTR && cast<VTSDNode>(N)->getVT() == TLI.getPointerTy();
}
@@ -1874,7 +1874,7 @@ CheckInteger(const unsigned char *MatcherTable, unsigned &MatcherIndex,
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
-
+
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
return C != 0 && C->getSExtValue() == Val;
}
@@ -1885,9 +1885,9 @@ CheckAndImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
-
+
if (N->getOpcode() != ISD::AND) return false;
-
+
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C != 0 && SDISel.CheckAndMask(N.getOperand(0), C, Val);
}
@@ -1898,9 +1898,9 @@ CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
int64_t Val = MatcherTable[MatcherIndex++];
if (Val & 128)
Val = GetVBR(Val, MatcherTable, MatcherIndex);
-
+
if (N->getOpcode() != ISD::OR) return false;
-
+
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
return C != 0 && SDISel.CheckOrMask(N.getOperand(0), C, Val);
}
@@ -1910,7 +1910,7 @@ CheckOrImm(const unsigned char *MatcherTable, unsigned &MatcherIndex,
/// fail, set Result=true and return anything. If the current predicate is
/// known to pass, set Result=false and return the MatcherIndex to continue
/// with. If the current predicate is unknown, set Result=false and return the
-/// MatcherIndex to continue with.
+/// MatcherIndex to continue with.
static unsigned IsPredicateKnownToFail(const unsigned char *Table,
unsigned Index, SDValue N,
bool &Result, SelectionDAGISel &SDISel,
@@ -1968,17 +1968,17 @@ namespace {
struct MatchScope {
/// FailIndex - If this match fails, this is the index to continue with.
unsigned FailIndex;
-
+
/// NodeStack - The node stack when the scope was formed.
SmallVector<SDValue, 4> NodeStack;
-
+
/// NumRecordedNodes - The number of recorded nodes when the scope was formed.
unsigned NumRecordedNodes;
-
+
/// NumMatchedMemRefs - The number of matched memref entries.
unsigned NumMatchedMemRefs;
-
- /// InputChain/InputGlue - The current chain/glue
+
+ /// InputChain/InputGlue - The current chain/glue
SDValue InputChain, InputGlue;
/// HasChainNodesMatched - True if the ChainNodesMatched list is non-empty.
@@ -2024,7 +2024,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case ISD::INLINEASM: return Select_INLINEASM(NodeToMatch);
case ISD::UNDEF: return Select_UNDEF(NodeToMatch);
}
-
+
assert(!NodeToMatch->isMachineOpcode() && "Node already selected!");
// Set up the node stack with NodeToMatch as the only node on the stack.
@@ -2035,38 +2035,38 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// MatchScopes - Scopes used when matching, if a match failure happens, this
// indicates where to continue checking.
SmallVector<MatchScope, 8> MatchScopes;
-
+
// RecordedNodes - This is the set of nodes that have been recorded by the
// state machine. The second value is the parent of the node, or null if the
// root is recorded.
SmallVector<std::pair<SDValue, SDNode*>, 8> RecordedNodes;
-
+
// MatchedMemRefs - This is the set of MemRef's we've seen in the input
// pattern.
SmallVector<MachineMemOperand*, 2> MatchedMemRefs;
-
+
// These are the current input chain and glue for use when generating nodes.
// Various Emit operations change these. For example, emitting a copytoreg
// uses and updates these.
SDValue InputChain, InputGlue;
-
+
// ChainNodesMatched - If a pattern matches nodes that have input/output
// chains, the OPC_EmitMergeInputChains operation is emitted which indicates
// which ones they are. The result is captured into this list so that we can
// update the chain results when the pattern is complete.
SmallVector<SDNode*, 3> ChainNodesMatched;
SmallVector<SDNode*, 3> GlueResultNodesMatched;
-
+
DEBUG(errs() << "ISEL: Starting pattern match on root node: ";
NodeToMatch->dump(CurDAG);
errs() << '\n');
-
+
// Determine where to start the interpreter. Normally we start at opcode #0,
// but if the state machine starts with an OPC_SwitchOpcode, then we
// accelerate the first lookup (which is guaranteed to be hot) with the
// OpcodeOffset table.
unsigned MatcherIndex = 0;
-
+
if (!OpcodeOffset.empty()) {
// Already computed the OpcodeOffset table, just index into it.
if (N.getOpcode() < OpcodeOffset.size())
@@ -2098,7 +2098,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (N.getOpcode() < OpcodeOffset.size())
MatcherIndex = OpcodeOffset[N.getOpcode()];
}
-
+
while (1) {
assert(MatcherIndex < TableSize && "Invalid index");
#ifndef NDEBUG
@@ -2113,7 +2113,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// determine immediately that the first check (or first several) will
// immediately fail, don't even bother pushing a scope for them.
unsigned FailIndex;
-
+
while (1) {
unsigned NumToSkip = MatcherTable[MatcherIndex++];
if (NumToSkip & 128)
@@ -2123,12 +2123,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
FailIndex = 0;
break;
}
-
+
FailIndex = MatcherIndex+NumToSkip;
-
+
unsigned MatcherIndexOfPredicate = MatcherIndex;
(void)MatcherIndexOfPredicate; // silence warning.
-
+
// If we can't evaluate this predicate without pushing a scope (e.g. if
// it is a 'MoveParent') or if the predicate succeeds on this node, we
// push the scope and evaluate the full predicate chain.
@@ -2137,20 +2137,20 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
Result, *this, RecordedNodes);
if (!Result)
break;
-
+
DEBUG(errs() << " Skipped scope entry (due to false predicate) at "
<< "index " << MatcherIndexOfPredicate
<< ", continuing at " << FailIndex << "\n");
++NumDAGIselRetries;
-
+
// Otherwise, we know that this case of the Scope is guaranteed to fail,
// move to the next case.
MatcherIndex = FailIndex;
}
-
+
// If the whole scope failed to match, bail.
if (FailIndex == 0) break;
-
+
// Push a MatchScope which indicates where to go if the first child fails
// to match.
MatchScope NewEntry;
@@ -2173,7 +2173,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
RecordedNodes.push_back(std::make_pair(N, Parent));
continue;
}
-
+
case OPC_RecordChild0: case OPC_RecordChild1:
case OPC_RecordChild2: case OPC_RecordChild3:
case OPC_RecordChild4: case OPC_RecordChild5:
@@ -2189,14 +2189,14 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_RecordMemRef:
MatchedMemRefs.push_back(cast<MemSDNode>(N)->getMemOperand());
continue;
-
+
case OPC_CaptureGlueInput:
// If the current node has an input glue, capture it in InputGlue.
if (N->getNumOperands() != 0 &&
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue)
InputGlue = N->getOperand(N->getNumOperands()-1);
continue;
-
+
case OPC_MoveChild: {
unsigned ChildNo = MatcherTable[MatcherIndex++];
if (ChildNo >= N.getNumOperands())
@@ -2205,14 +2205,14 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NodeStack.push_back(N);
continue;
}
-
+
case OPC_MoveParent:
// Pop the current node off the NodeStack.
NodeStack.pop_back();
assert(!NodeStack.empty() && "Node stack imbalance!");
- N = NodeStack.back();
+ N = NodeStack.back();
continue;
-
+
case OPC_CheckSame:
if (!::CheckSame(MatcherTable, MatcherIndex, N, RecordedNodes)) break;
continue;
@@ -2237,11 +2237,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckOpcode:
if (!::CheckOpcode(MatcherTable, MatcherIndex, N.getNode())) break;
continue;
-
+
case OPC_CheckType:
if (!::CheckType(MatcherTable, MatcherIndex, N, TLI)) break;
continue;
-
+
case OPC_SwitchOpcode: {
unsigned CurNodeOpcode = N.getOpcode();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
@@ -2259,20 +2259,20 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// If the opcode matches, then we will execute this case.
if (CurNodeOpcode == Opc)
break;
-
+
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
-
+
// If no cases matched, bail out.
if (CaseSize == 0) break;
-
+
// Otherwise, execute the case we found.
DEBUG(errs() << " OpcodeSwitch from " << SwitchStart
<< " to " << MatcherIndex << "\n");
continue;
}
-
+
case OPC_SwitchType: {
MVT CurNodeVT = N.getValueType().getSimpleVT();
unsigned SwitchStart = MatcherIndex-1; (void)SwitchStart;
@@ -2283,22 +2283,22 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (CaseSize & 128)
CaseSize = GetVBR(CaseSize, MatcherTable, MatcherIndex);
if (CaseSize == 0) break;
-
+
MVT CaseVT = (MVT::SimpleValueType)MatcherTable[MatcherIndex++];
if (CaseVT == MVT::iPTR)
CaseVT = TLI.getPointerTy();
-
+
// If the VT matches, then we will execute this case.
if (CurNodeVT == CaseVT)
break;
-
+
// Otherwise, skip over this case.
MatcherIndex += CaseSize;
}
-
+
// If no cases matched, bail out.
if (CaseSize == 0) break;
-
+
// Otherwise, execute the case we found.
DEBUG(errs() << " TypeSwitch[" << EVT(CurNodeVT).getEVTString()
<< "] from " << SwitchStart << " to " << MatcherIndex<<'\n');
@@ -2327,7 +2327,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
case OPC_CheckOrImm:
if (!::CheckOrImm(MatcherTable, MatcherIndex, N, *this)) break;
continue;
-
+
case OPC_CheckFoldableChainNode: {
assert(NodeStack.size() != 1 && "No parent node");
// Verify that all intermediate nodes between the root and this one have
@@ -2348,7 +2348,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
NodeToMatch, OptLevel,
true/*We validate our own chains*/))
break;
-
+
continue;
}
case OPC_EmitInteger: {
@@ -2369,7 +2369,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
CurDAG->getRegister(RegNo, VT), (SDNode*)0));
continue;
}
-
+
case OPC_EmitConvertToTarget: {
// Convert from IMM/FPIMM to target version.
unsigned RecNo = MatcherTable[MatcherIndex++];
@@ -2383,11 +2383,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
const ConstantFP *Val=cast<ConstantFPSDNode>(Imm)->getConstantFPValue();
Imm = CurDAG->getTargetConstantFP(*Val, Imm.getValueType());
}
-
+
RecordedNodes.push_back(std::make_pair(Imm, RecordedNodes[RecNo].second));
continue;
}
-
+
case OPC_EmitMergeInputChains1_0: // OPC_EmitMergeInputChains, 1, 0
case OPC_EmitMergeInputChains1_1: { // OPC_EmitMergeInputChains, 1, 1
// These are space-optimized forms of OPC_EmitMergeInputChains.
@@ -2395,12 +2395,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
"EmitMergeInputChains should be the first chain producing node");
assert(ChainNodesMatched.empty() &&
"Should only have one EmitMergeInputChains per match");
-
+
// Read all of the chained nodes.
unsigned RecNo = Opcode == OPC_EmitMergeInputChains1_1;
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
-
+
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
@@ -2408,15 +2408,15 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
ChainNodesMatched.clear();
break;
}
-
+
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
-
+
if (InputChain.getNode() == 0)
break; // Failed to merge.
continue;
}
-
+
case OPC_EmitMergeInputChains: {
assert(InputChain.getNode() == 0 &&
"EmitMergeInputChains should be the first chain producing node");
@@ -2437,7 +2437,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
ChainNodesMatched.push_back(RecordedNodes[RecNo].first.getNode());
-
+
// FIXME: What if other value results of the node have uses not matched
// by this pattern?
if (ChainNodesMatched.back() != NodeToMatch &&
@@ -2446,36 +2446,36 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
break;
}
}
-
+
// If the inner loop broke out, the match fails.
if (ChainNodesMatched.empty())
break;
// Merge the input chains if they are not intra-pattern references.
InputChain = HandleMergeInputChains(ChainNodesMatched, CurDAG);
-
+
if (InputChain.getNode() == 0)
break; // Failed to merge.
continue;
}
-
+
case OPC_EmitCopyToReg: {
unsigned RecNo = MatcherTable[MatcherIndex++];
assert(RecNo < RecordedNodes.size() && "Invalid CheckSame");
unsigned DestPhysReg = MatcherTable[MatcherIndex++];
-
+
if (InputChain.getNode() == 0)
InputChain = CurDAG->getEntryNode();
-
+
InputChain = CurDAG->getCopyToReg(InputChain, NodeToMatch->getDebugLoc(),
DestPhysReg, RecordedNodes[RecNo].first,
InputGlue);
-
+
InputGlue = InputChain.getValue(1);
continue;
}
-
+
case OPC_EmitNodeXForm: {
unsigned XFormNo = MatcherTable[MatcherIndex++];
unsigned RecNo = MatcherTable[MatcherIndex++];
@@ -2484,7 +2484,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(Res, (SDNode*) 0));
continue;
}
-
+
case OPC_EmitNode:
case OPC_MorphNodeTo: {
uint16_t TargetOpc = MatcherTable[MatcherIndex++];
@@ -2499,12 +2499,12 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (VT == MVT::iPTR) VT = TLI.getPointerTy().SimpleTy;
VTs.push_back(VT);
}
-
+
if (EmitNodeInfo & OPFL_Chain)
VTs.push_back(MVT::Other);
if (EmitNodeInfo & OPFL_GlueOutput)
VTs.push_back(MVT::Glue);
-
+
// This is hot code, so optimize the two most common cases of 1 and 2
// results.
SDVTList VTList;
@@ -2522,11 +2522,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned RecNo = MatcherTable[MatcherIndex++];
if (RecNo & 128)
RecNo = GetVBR(RecNo, MatcherTable, MatcherIndex);
-
+
assert(RecNo < RecordedNodes.size() && "Invalid EmitNode");
Ops.push_back(RecordedNodes[RecNo].first);
}
-
+
// If there are variadic operands to add, handle them now.
if (EmitNodeInfo & OPFL_VariadicInfo) {
// Determine the start index to copy from.
@@ -2543,13 +2543,13 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
Ops.push_back(V);
}
}
-
+
// If this has chain/glue inputs, add them.
if (EmitNodeInfo & OPFL_Chain)
Ops.push_back(InputChain);
if ((EmitNodeInfo & OPFL_GlueInput) && InputGlue.getNode() != 0)
Ops.push_back(InputGlue);
-
+
// Create the node.
SDNode *Res = 0;
if (Opcode != OPC_MorphNodeTo) {
@@ -2557,19 +2557,19 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// add the results to the RecordedNodes list.
Res = CurDAG->getMachineNode(TargetOpc, NodeToMatch->getDebugLoc(),
VTList, Ops.data(), Ops.size());
-
+
// Add all the non-glue/non-chain results to the RecordedNodes list.
for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
if (VTs[i] == MVT::Other || VTs[i] == MVT::Glue) break;
RecordedNodes.push_back(std::pair<SDValue,SDNode*>(SDValue(Res, i),
(SDNode*) 0));
}
-
+
} else {
Res = MorphNode(NodeToMatch, TargetOpc, VTList, Ops.data(), Ops.size(),
EmitNodeInfo);
}
-
+
// If the node had chain/glue results, update our notion of the current
// chain and glue.
if (EmitNodeInfo & OPFL_GlueOutput) {
@@ -2592,11 +2592,11 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
cast<MachineSDNode>(Res)
->setMemRefs(MemRefs, MemRefs + MatchedMemRefs.size());
}
-
+
DEBUG(errs() << " "
<< (Opcode == OPC_MorphNodeTo ? "Morphed" : "Created")
<< " node: "; Res->dump(CurDAG); errs() << "\n");
-
+
// If this was a MorphNodeTo then we're completely done!
if (Opcode == OPC_MorphNodeTo) {
// Update chain and glue uses.
@@ -2604,13 +2604,13 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
InputGlue, GlueResultNodesMatched, true);
return Res;
}
-
+
continue;
}
-
+
case OPC_MarkGlueResults: {
unsigned NumNodes = MatcherTable[MatcherIndex++];
-
+
// Read and remember all the glue-result nodes.
for (unsigned i = 0; i != NumNodes; ++i) {
unsigned RecNo = MatcherTable[MatcherIndex++];
@@ -2622,7 +2622,7 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
}
continue;
}
-
+
case OPC_CompleteMatch: {
// The match has been completed, and any new nodes (if any) have been
// created. Patch up references to the matched dag to use the newly
@@ -2633,10 +2633,10 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
unsigned ResSlot = MatcherTable[MatcherIndex++];
if (ResSlot & 128)
ResSlot = GetVBR(ResSlot, MatcherTable, MatcherIndex);
-
+
assert(ResSlot < RecordedNodes.size() && "Invalid CheckSame");
SDValue Res = RecordedNodes[ResSlot].first;
-
+
assert(i < NodeToMatch->getNumValues() &&
NodeToMatch->getValueType(i) != MVT::Other &&
NodeToMatch->getValueType(i) != MVT::Glue &&
@@ -2653,20 +2653,20 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
// If the root node defines glue, add it to the glue nodes to update list.
if (NodeToMatch->getValueType(NodeToMatch->getNumValues()-1) == MVT::Glue)
GlueResultNodesMatched.push_back(NodeToMatch);
-
+
// Update chain and glue uses.
UpdateChainsAndGlue(NodeToMatch, InputChain, ChainNodesMatched,
InputGlue, GlueResultNodesMatched, false);
-
+
assert(NodeToMatch->use_empty() &&
"Didn't replace all uses of the node?");
-
+
// FIXME: We just return here, which interacts correctly with SelectRoot
// above. We should fix this to not return an SDNode* anymore.
return 0;
}
}
-
+
// If the code reached this point, then the match failed. See if there is
// another child to try in the current 'Scope', otherwise pop it until we
// find a case to check.
@@ -2689,9 +2689,9 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
if (LastScope.NumMatchedMemRefs != MatchedMemRefs.size())
MatchedMemRefs.resize(LastScope.NumMatchedMemRefs);
MatcherIndex = LastScope.FailIndex;
-
+
DEBUG(errs() << " Continuing at " << MatcherIndex << "\n");
-
+
InputChain = LastScope.InputChain;
InputGlue = LastScope.InputGlue;
if (!LastScope.HasChainNodesMatched)
@@ -2712,21 +2712,21 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable,
LastScope.FailIndex = MatcherIndex+NumToSkip;
break;
}
-
+
// End of this scope, pop it and try the next child in the containing
// scope.
MatchScopes.pop_back();
}
}
}
-
+
void SelectionDAGISel::CannotYetSelect(SDNode *N) {
std::string msg;
raw_string_ostream Msg(msg);
Msg << "Cannot select: ";
-
+
if (N->getOpcode() != ISD::INTRINSIC_W_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_WO_CHAIN &&
N->getOpcode() != ISD::INTRINSIC_VOID) {
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 35c4b0d3c8..8edc0e7f12 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -1687,7 +1687,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
// The number of uOps for load / store multiple are determined by the number
// registers.
- //
+ //
// On Cortex-A8, each pair of register loads / stores can be scheduled on the
// same cycle. The scheduling for the first load / store must be done
// separately by assuming the address is not 64-bit aligned.
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index de8e41126b..b19cdc1dad 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -2039,7 +2039,7 @@ SelectARMCMOVImmOp(SDNode *N, SDValue FalseVal, SDValue TrueVal,
SDValue Ops[] = { FalseVal, True, CC, CCR, InFlag };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
}
-
+
return 0;
}
diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp
index 5508dc8578..b0057bd209 100644
--- a/lib/Target/ARM/ARMSubtarget.cpp
+++ b/lib/Target/ARM/ARMSubtarget.cpp
@@ -201,7 +201,7 @@ ARMSubtarget::GVIsIndirectSymbol(const GlobalValue *GV,
// through a stub.
if (!isDecl && !GV->isWeakForLinker())
return false;
-
+
// Unless we have a symbol with hidden visibility, we have to go through a
// normal $non_lazy_ptr stub because this symbol might be resolved late.
if (!GV->hasHiddenVisibility()) // Non-hidden $non_lazy_ptr reference.
@@ -219,7 +219,7 @@ unsigned ARMSubtarget::getMispredictionPenalty() const {
return 13;
else if (isCortexA9())
return 8;
-
+
// Otherwise, just return a sensible default.
return 10;
}
diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h
index 240364fa21..fdd394dce3 100644
--- a/lib/Target/ARM/ARMSubtarget.h
+++ b/lib/Target/ARM/ARMSubtarget.h
@@ -205,7 +205,7 @@ protected:
const std::string & getCPUString() const { return CPUString; }
unsigned getMispredictionPenalty() const;
-
+
/// enablePostRAScheduler - True at 'More' optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtarget::AntiDepBreakMode& Mode,
diff --git a/lib/Target/CellSPU/SPUHazardRecognizers.h b/lib/Target/CellSPU/SPUHazardRecognizers.h
index d0ae2d8e71..3469292d57 100644
--- a/lib/Target/CellSPU/SPUHazardRecognizers.h
+++ b/lib/Target/CellSPU/SPUHazardRecognizers.h
@@ -20,7 +20,7 @@
namespace llvm {
class TargetInstrInfo;
-
+
/// SPUHazardRecognizer
class SPUHazardRecognizer : public ScheduleHazardRecognizer
{
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.cpp b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
index 51d5d866fb..301e89cdcb 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.cpp
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
//
// This models the dispatch group formation of the PPC970 processor. Dispatch
// groups are bundles of up to five instructions that can contain various mixes
-// of instructions. The PPC970 can dispatch a peak of 4 non-branch and one
+// of instructions. The PPC970 can dispatch a peak of 4 non-branch and one
// branch instruction per-cycle.
//
// There are a number of restrictions to dispatch group formation: some
@@ -55,14 +55,14 @@ PPCHazardRecognizer970::PPCHazardRecognizer970(const TargetInstrInfo &tii)
void PPCHazardRecognizer970::EndDispatchGroup() {
DEBUG(errs() << "=== Start of dispatch group\n");
NumIssued = 0;
-
+
// Structural hazard info.
HasCTRSet = false;
NumStores = 0;
}
-PPCII::PPC970_Unit
+PPCII::PPC970_Unit
PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,
bool &isCracked,
@@ -72,14 +72,14 @@ PPCHazardRecognizer970::GetInstrType(unsigned Opcode,
return PPCII::PPC970_Pseudo;
}
Opcode = ~Opcode;
-
+
const TargetInstrDesc &TID = TII.get(Opcode);
-
+
isLoad = TID.mayLoad();
isStore = TID.mayStore();
-
+
uint64_t TSFlags = TID.TSFlags;
-
+
isFirst = TSFlags & PPCII::PPC970_First;
isSingle = TSFlags & PPCII::PPC970_Single;
isCracked = TSFlags & PPCII::PPC970_Cracked;
@@ -96,7 +96,7 @@ isLoadOfStoredAddress(unsigned LoadSize, SDValue Ptr1, SDValue Ptr2) const {
return true;
if (Ptr2 == StorePtr1[i] && Ptr1 == StorePtr2[i])
return true;
-
+
// Okay, we don't have an exact match, if this is an indexed offset, see if
// we have overlap (which happens during fp->int conversion for example).
if (StorePtr2[i] == Ptr2) {
@@ -125,23 +125,23 @@ ScheduleHazardRecognizer::HazardType PPCHazardRecognizer970::
getHazardType(SUnit *SU) {
const SDNode *Node = SU->getNode()->getGluedMachineNode();
bool isFirst, isSingle, isCracked, isLoad, isStore;
- PPCII::PPC970_Unit InstrType =
+ PPCII::PPC970_Unit InstrType =
GetInstrType(Node->getOpcode(), isFirst, isSingle, isCracked,
isLoad, isStore);
- if (InstrType == PPCII::PPC970_Pseudo) return NoHazard;
+ if (InstrType == PPCII::PPC970_Pseudo) return NoHazard;
unsigned Opcode = Node->getMachineOpcode();
// We can only issue a PPC970_First/PPC970_Single instruction (such as
// crand/mtspr/etc) if this is the first cycle of the dispatch group.
if (NumIssued != 0 && (isFirst || isSingle))
return Hazard;
-
+
// If this instruction is cracked into two ops by the decoder, we know that
// it is not a branch and that it cannot issue if 3 other instructions are
// already in the dispatch group.
if (isCracked && NumIssued > 2)
return Hazard;
-
+
switch (InstrType) {
default: llvm_unreachable("Unknown instruction type!");
case PPCII::PPC970_FXU:
@@ -159,11 +159,11 @@ getHazardType(SUnit *SU) {
case PPCII::PPC970_BRU:
break;
}
-
+
// Do not allow MTCTR and BCTRL to be in the same dispatch group.
if (HasCTRSet && (Opcode == PPC::BCTRL_Darwin || Opcode == PPC::BCTRL_SVR4))
return NoopHazard;
-
+
// If this is a load following a store, make sure it's not to the same or
// overlapping address.
if (isLoad && NumStores) {
@@ -212,27 +212,27 @@ getHazardType(SUnit *SU) {
LoadSize = 16;
break;
}
-
- if (isLoadOfStoredAddress(LoadSize,
+
+ if (isLoadOfStoredAddress(LoadSize,
Node->getOperand(0), Node->getOperand(1)))
return NoopHazard;
}
-
+
return NoHazard;
}
void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
const SDNode *Node = SU->getNode()->getGluedMachineNode();
bool isFirst, isSingle, isCracked, isLoad, isStore;
- PPCII::PPC970_Unit InstrType =
+ PPCII::PPC970_Unit InstrType =
GetInstrType(Node->getOpcode(), isFirst, isSingle, isCracked,
isLoad, isStore);
- if (InstrType == PPCII::PPC970_Pseudo) return;
+ if (InstrType == PPCII::PPC970_Pseudo) return;
unsigned Opcode = Node->getMachineOpcode();
// Update structural hazard information.
if (Opcode == PPC::MTCTR) HasCTRSet = true;
-
+
// Track the address stored to.
if (isStore) {
unsigned ThisStoreSize;
@@ -278,22 +278,22 @@ void PPCHazardRecognizer970::EmitInstruction(SUnit *SU) {
ThisStoreSize = 16;
break;
}
-
+
StoreSize[NumStores] = ThisStoreSize;
StorePtr1[NumStores] = Node->getOperand(1);
StorePtr2[NumStores] = Node->getOperand(2);
++NumStores;
}
-
+
if (InstrType == PPCII::PPC970_BRU || isSingle)
NumIssued = 4; // Terminate a d-group.
++NumIssued;
-
+
// If this instruction is cracked into two ops by the decoder, remember that
// we issued two pieces.
if (isCracked)
++NumIssued;
-
+
if (NumIssued == 5)
EndDispatchGroup();
}
diff --git a/lib/Target/PowerPC/PPCHazardRecognizers.h b/lib/Target/PowerPC/PPCHazardRecognizers.h
index 74bf8e52d8..ca95f7be9c 100644
--- a/lib/Target/PowerPC/PPCHazardRecognizers.h
+++ b/lib/Target/PowerPC/PPCHazardRecognizers.h
@@ -19,7 +19,7 @@
#include "PPCInstrInfo.h"
namespace llvm {
-
+
/// PPCHazardRecognizer970 - This class defines a finite state automata that
/// models the dispatch logic on the PowerPC 970 (aka G5) processor. This
/// promotes good dispatch group formation and implements noop insertion to
@@ -28,14 +28,14 @@ namespace llvm {
/// or storing then loading from the same address within a dispatch group.
class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
const TargetInstrInfo &TII;
-
+
unsigned NumIssued; // Number of insts issued, including advanced cycles.
-
+
// Various things that can cause a structural hazard.
-
+
// HasCTRSet - If the CTR register is set in this group, disallow BCTRL.
bool HasCTRSet;
-
+
// StoredPtr - Keep track of the address of any store. If we see a load from
// the same address (or one that aliases it), disallow the store. We can have
// up to four stores in one dispatch group, hence we track up to 4.
@@ -45,24 +45,24 @@ class PPCHazardRecognizer970 : public ScheduleHazardRecognizer {
SDValue StorePtr1[4], StorePtr2[4];
unsigned StoreSize[4];
unsigned NumStores;
-
+
public:
PPCHazardRecognizer970(const TargetInstrInfo &TII);
virtual HazardType getHazardType(SUnit *SU);
virtual void EmitInstruction(SUnit *SU);
virtual void AdvanceCycle();
-
+
private:
/// EndDispatchGroup - Called when we are finishing a new dispatch group.
///
void EndDispatchGroup();
-
+
/// GetInstrType - Classify the specified powerpc opcode according to its
/// pipeline.
PPCII::PPC970_Unit GetInstrType(unsigned Opcode,
bool &isFirst, bool &isSingle,bool &isCracked,
bool &isLoad, bool &isStore);
-
+
bool isLoadOfStoredAddress(unsigned LoadSize,
SDValue Ptr1, SDValue Ptr2) const;
};
diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index b7f0ef3292..0d624d08cd 100644
--- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -49,16 +49,16 @@ namespace {
: SelectionDAGISel(tm), TM(tm),
PPCLowering(*TM.getTargetLowering()),
PPCSubTarget(*TM.getSubtargetImpl()) {}
-
+
virtual bool runOnMachineFunction(MachineFunction &MF) {
// Make sure we re-emit a set of the global base reg if necessary
GlobalBaseReg = 0;
SelectionDAGISel::runOnMachineFunction(MF);
-
+
InsertVRSaveCode(MF);
return true;
}
-
+
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
@@ -70,13 +70,13 @@ namespace {
inline SDValue getI64Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i64);
}
-
+
/// getSmallIPtrImm - Return a target constant of pointer type.
inline SDValue getSmallIPtrImm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, PPCLowering.getPointerTy());
}
-
- /// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s
+
+ /// isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s
/// with any number of 0s on either side. The 1s are allowed to wrap from
/// LSB to MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs.
/// 0x0F0F0000 is not, since all 1s are not contiguous.
@@ -87,15 +87,15 @@ namespace {
/// rotate and mask opcode and mask operation.
static bool isRotateAndMask(SDNode *N, unsigned Mask, bool isShiftMask,
unsigned &SH, unsigned &MB, unsigned &ME);
-
+
/// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
/// base register. Return the virtual register that holds this value.
SDNode *getGlobalBaseReg();
-
+
// Select - Convert the specified operand from a target-independent to a
// target-specific node if it hasn't already been changed.
SDNode *Select(SDNode *N);
-
+
SDNode *SelectBitfieldInsert(SDNode *N);
/// SelectCC - Select a comparison of the specified values with the
@@ -108,7 +108,7 @@ namespace {
SDValue &Base) {
return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG);
}
-
+
/// SelectAddrImmOffs - Return true if the operand is valid for a preinc
/// immediate field. Because preinc imms have already been validated, just
/// accept it.
@@ -116,14 +116,14 @@ namespace {
Out = N;
return true;
}
-
+
/// SelectAddrIdx - Given the specified addressed, check to see if it can be
/// represented as an indexed [r+r] operation. Returns false if it can
/// be represented by [r+imm], which are preferred.
bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
return PPCLowering.SelectAddressRegReg(N, Base, Index, *CurDAG);
}
-
+
/// SelectAddrIdxOnly - Given the specified addressed, force it to be
/// represented as an indexed [r+r] operation.
bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
@@ -136,7 +136,7 @@ namespace {
bool SelectAddrImmShift(SDValue N, SDValue &Disp, SDValue &Base) {
return PPCLowering.SelectAddressRegImmShift(N, Disp, Base, *CurDAG);
}
-
+
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions. It is always correct to compute the value into
/// a register. The case of adding a (possibly relocatable) constant to a
@@ -148,13 +148,13 @@ namespace {
OutOps.push_back(Op);
return false;
}
-
+
void InsertVRSaveCode(MachineFunction &MF);
virtual const char *getPassName() const {
return "PowerPC DAG->DAG Pattern Instruction Selection";
- }
-
+ }
+
/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer() {
@@ -162,12 +162,12 @@ namespace {
// now, always return a PPC970 recognizer.
const TargetInstrInfo *II = TM.getInstrInfo();
assert(II && "No InstrInfo?");
- return new PPCHazardRecognizer970(*II);
+ return new PPCHazardRecognizer970(*II);
}
// Include the pieces autogenerated from the target description.
#include "PPCGenDAGISel.inc"
-
+
private:
SDNode *SelectSETCC(SDNode *N);
};
@@ -178,19 +178,19 @@ private:
/// check to see if we need to save/restore VRSAVE. If so, do it.
void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
// Check to see if this function uses vector registers, which means we have to
- // save and restore the VRSAVE register and update it with the regs we use.
+ // save and restore the VRSAVE register and update it with the regs we use.
//
// In this case, there will be virtual registers of vector type created
// by the scheduler. Detect them now.
bool HasVectorVReg = false;
- for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
+ for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
e = RegInfo->getLastVirtReg()+1; i != e; ++i)
if (RegInfo->getRegClass(i) == &PPC::VRRCRegClass) {
HasVectorVReg = true;
break;
}
if (!HasVectorVReg) return; // nothing to do.
-
+
// If we have a vector register, we want to emit code into the entry and exit
// blocks to save and restore the VRSAVE register. We do this here (instead
// of marking all vector instructions as clobbering VRSAVE) for two reasons:
@@ -205,7 +205,7 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
// function and one for the value after having bits or'd into it.
unsigned InVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
unsigned UpdatedVRSAVE = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
-
+
const TargetInstrInfo &TII = *TM.getInstrInfo();
MachineBasicBlock &EntryBB = *Fn.begin();
DebugLoc dl;
@@ -218,21 +218,21 @@ void PPCDAGToDAGISel::InsertVRSaveCode(MachineFunction &Fn) {
BuildMI(EntryBB, IP, dl, TII.get(PPC::UPDATE_VRSAVE),
UpdatedVRSAVE).addReg(InVRSAVE);
BuildMI(EntryBB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(UpdatedVRSAVE);
-
+
// Find all return blocks, outputting a restore in each epilog.
for (MachineFunction::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
if (!BB->empty() && BB->back().getDesc().isReturn()) {
IP = BB->end(); --IP;
-
+
// Skip over all terminator instructions, which are part of the return
// sequence.
MachineBasicBlock::iterator I2 = IP;
while (I2 != BB->begin() && (--I2)->getDesc().isTerminator())
IP = I2;
-
+
// Emit: MTVRSAVE InVRSave
BuildMI(*BB, IP, dl, TII.get(PPC::MTVRSAVE)).addReg(InVRSAVE);
- }
+ }
}
}
@@ -338,8 +338,8 @@ bool PPCDAGToDAGISel::isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME) {
return false;
}
-bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
- bool isShiftMask, unsigned &SH,
+bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
+ bool isShiftMask, unsigned &SH,
unsigned &MB, unsigned &ME) {
// Don't even go down this path for i64, since different logic will be
// necessary for rldicl/rldicr/rldimi.
@@ -352,13 +352,13 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
if (N->getNumOperands() != 2 ||
!isInt32Immediate(N->getOperand(1).getNode(), Shift) || (Shift > 31))
return false;
-
+
if (Opcode == ISD::SHL) {
// apply shift left to mask if it comes first
if (isShiftMask) Mask = Mask << Shift;
// determine which bits are made indeterminant by shift
Indeterminant = ~(0xFFFFFFFFu << Shift);
- } else if (Opcode == ISD::SRL) {
+ } else if (Opcode == ISD::SRL) {
// apply shift right to mask if it comes first
if (isShiftMask) Mask = Mask >> Shift;
// determine which bits are made indeterminant by shift
@@ -370,7 +370,7 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
} else {
return false;
}
-
+
// if the mask doesn't intersect any Indeterminant bits
if (Mask && !(Mask & Indeterminant)) {
SH = Shift & 31;
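[Editorial aside, not part of this patch: the "indeterminant bits" check above encodes the fact that shift-plus-mask can be replaced by rotate-plus-mask as long as the mask has no 1s in the positions the shift zeroed out. A standalone check of that equivalence for SHL by 4, with a made-up mask:]

#include <cassert>
#include <cstdint>

static uint32_t rotl32(uint32_t v, unsigned n) {
  n &= 31;
  return n ? (v << n) | (v >> (32 - n)) : v;
}

void checkShlVsRotate(uint32_t x) {
  const unsigned Shift = 4;
  const uint32_t Mask = 0xFFFFFF00u;                       // no low-4 bits set
  const uint32_t Indeterminant = ~(0xFFFFFFFFu << Shift);  // == 0x0000000F
  // Mask avoids the indeterminant bits, so shift and rotate agree under it.
  assert((Mask & Indeterminant) == 0);
  assert(((x << Shift) & Mask) == (rotl32(x, Shift) & Mask));
}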
@@ -386,14 +386,14 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
DebugLoc dl = N->getDebugLoc();
-
+
APInt LKZ, LKO, RKZ, RKO;
CurDAG->ComputeMaskedBits(Op0, APInt::getAllOnesValue(32), LKZ, LKO);
CurDAG->ComputeMaskedBits(Op1, APInt::getAllOnesValue(32), RKZ, RKO);
-
+
unsigned TargetMask = LKZ.getZExtValue();
unsigned InsertMask = RKZ.getZExtValue();
-
+
if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
unsigned Op0Opc = Op0.getOpcode();
unsigned Op1Opc = Op1.getOpcode();
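[Editorial aside, not part of this patch: the (TargetMask | InsertMask) == 0xFFFFFFFF test above says every bit position is known zero on at least one side, so the OR being matched is really a disjoint field merge, which is what rlwimi performs. A standalone illustration with a made-up 16/16 split:]

#include <cassert>
#include <cstdint>

void checkDisjointOr(uint32_t x, uint32_t y) {
  uint32_t hi = x & 0xFFFF0000u;   // low 16 bits known zero
  uint32_t lo = y & 0x0000FFFFu;   // high 16 bits known zero
  // With no overlapping set bits, OR is the same as ADD or XOR: the
  // operation only drops each value into the other's holes.
  assert((hi | lo) == (hi + lo) && (hi | lo) == (hi ^ lo));
}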
@@ -421,7 +421,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
std::swap(TargetMask, InsertMask);
}
}
-
+
unsigned MB, ME;
if (InsertMask && isRunOfOnes(InsertMask, MB, ME)) {
SDValue Tmp1, Tmp2;
@@ -457,7 +457,7 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
ISD::CondCode CC, DebugLoc dl) {
// Always select the LHS.
unsigned Opc;
-
+
if (LHS.getValueType() == MVT::i32) {
unsigned Imm;
if (CC == ISD::SETEQ || CC == ISD::SETNE) {
@@ -470,11 +470,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
if (isInt<16>((int)Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
-
+
// For non-equality comparisons, the default code would materialize the
// constant, then compare against it, like this:
// lis r2, 4660
- // ori r2, r2, 22136
+ // ori r2, r2, 22136
// cmpw cr0, r3, r2
// Since we are just comparing for equality, we can emit this instead:
// xoris r0,r3,0x1234
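[Editorial aside, not part of this patch: the xoris/cmplwi sequence in the comment rests on a simple algebraic identity. A standalone check using the same constant the comment uses (4660 is 0x1234, 22136 is 0x5678):]

#include <cassert>
#include <cstdint>

// x == 0x12345678  <=>  (x ^ (0x1234u << 16)) == 0x5678, so the high halfword
// can be folded into an XOR and only the low halfword needs a compare-immediate.
void checkXorisTrick(uint32_t x) {
  bool direct = (x == 0x12345678u);
  bool viaXor = ((x ^ (0x1234u << 16)) == 0x5678u);
  assert(direct == viaXor);
}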
@@ -511,11 +511,11 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
if (isInt<16>(Imm))
return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
-
+
// For non-equality comparisons, the default code would materialize the
// constant, then compare against it, like this:
// lis r2, 4660
- // ori r2, r2, 22136
+ // ori r2, r2, 22136
// cmpd cr0, r3, r2
// Since we are just comparing for equality, we can emit this instead:
// xoris r0,r3,0x1234
@@ -604,9 +604,9 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) {
case ISD::SETUNE:
case ISD::SETNE: Invert = true; return 2; // !Bit #2 = SETUNE
case ISD::SETO: Invert = true; return 3; // !Bit #3 = SETO
- case ISD::SETUEQ:
- case ISD::SETOGE:
- case ISD::SETOLE:
+ case ISD::SETUEQ:
+ case ISD::SETOGE:
+ case ISD::SETOLE:
case ISD::SETONE:
llvm_unreachable("Invalid branch code: should be expanded by legalize");
// These are invalid for floating point. Assume integer.
@@ -637,7 +637,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
SDValue AD =
SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(~0U)), 0);
- return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
+ return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
AD.getValue(1));
}
case ISD::SETLT: {
@@ -659,8 +659,8 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
case ISD::SETEQ:
Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue,
Op, getI32Imm(1)), 0);
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
- SDValue(CurDAG->getMachineNode(PPC::LI, dl,
+ return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
+ SDValue(CurDAG->getMachineNode(PPC::LI, dl,
MVT::i32,
getI32Imm(0)), 0),
Op.getValue(1));
@@ -681,35 +681,35 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
}
case ISD::SETGT: {
SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
- Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4),
+ Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4),
0);
- return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
+ return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
getI32Imm(1));
}
}
}
}
-
+
bool Inv;
int OtherCondIdx;
unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx);
SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl);
SDValue IntCR;
-
+
// Force the ccreg into CR7.
SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
-
+
SDValue InFlag(0, 0); // Null incoming flag value.
- CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
+ CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
-
+
if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1)
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
else
IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
CR7Reg, CCReg), 0);
-
+
SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
getI32Imm(31), getI32Imm(31) };
if (OtherCondIdx == -1 && !Inv)
@@ -728,7 +728,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) {
// Get the other bit of the comparison.
Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31);
- SDValue OtherCond =
+ SDValue OtherCond =
SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0);
return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond);
@@ -744,7 +744,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
switch (N->getOpcode()) {
default: break;
-
+
case ISD::Constant: {
if (N->getValueType(0) == MVT::i64) {
// Get 64 bit value.
@@ -753,12 +753,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
unsigned Remainder = 0;
// Assume no shift required.
unsigned Shift = 0;
-
+
// If it can't be represented as a 32 bit value.
if (!isInt<32>(Imm)) {
Shift = CountTrailingZeros_64(Imm);
int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
-
+
// If the shifted value fits 32 bits.
if (isInt<32>(ImmSh)) {
// Go with the shifted value.
@@ -770,14 +770,14 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
Imm >>= 32;
}
}
-
+
// Intermediate operand.
SDNode *Result;
// Handle first 32 bits.
unsigned Lo = Imm & 0xFFFF;
unsigned Hi = (Imm >> 16) & 0xFFFF;
-
+
// Simple value.
if (isInt<16>(Imm)) {
// Just the Lo bits.
@@ -793,7 +793,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// Just the Hi bits.
Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
}
-
+
// If no shift, we're done.
if (!Shift) return Result;
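[Editorial aside, not part of this patch: the Lo/Hi split above mirrors how a 32-bit chunk is rebuilt from two 16-bit immediates, with lis supplying the high halfword and ori filling in the low one. A standalone sanity check of that decomposition, ignoring the sign-extension cases the selection code also handles:]

#include <cassert>
#include <cstdint>

// lis Hi ; ori Lo  rebuilds  (Hi << 16) | Lo, which round-trips the value.
void checkHalfwordSplit(uint32_t imm) {
  uint32_t Lo = imm & 0xFFFFu;
  uint32_t Hi = (imm >> 16) & 0xFFFFu;
  assert(((Hi << 16) | Lo) == imm);
}
// e.g. 0x12345678 -> lis 0x1234 ; ori 0x5678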
@@ -809,22 +809,22 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if ((Hi = (Remainder >> 16) & 0xFFFF)) {
Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Hi));
- }
+ }
if ((Lo = Remainder & 0xFFFF)) {
Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Lo));
}
-
+
return Result;
}
break;
}
-
+
case ISD::SETCC:
return SelectSETCC(N);
case PPCISD::GlobalBaseReg:
return getGlobalBaseReg();
-
+
case ISD::FrameIndex: {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
@@ -846,11 +846,11 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32,
N->getOperand(0), InFlag);
}
-
+
case ISD::SDIV: {
// FIXME: since this depends on the setting of the carry flag from the srawi
// we should really be making notes about that for the scheduler.
- // FIXME: It sure would be nice if we could cheaply recognize the
+ // FIXME: It sure would be nice if we could cheaply recognize the
// srl/add/sra pattern the dag combiner will generate for this as
// sra/addze rather than having to handle sdiv ourselves. oh well.
unsigned Imm;
@@ -860,7 +860,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
SDNode *Op =
CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue,
N0, getI32Imm(Log2_32(Imm)));
- return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
+ return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
SDValue(Op, 0), SDValue(Op, 1));
} else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) {
SDNode *Op =
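[Editorial aside, not part of this patch: the srawi/addze pair selected above computes a truncating signed division by a power of two, assuming the usual PowerPC rule that srawi's carry is set only when the value is negative and a 1 bit is shifted out, and assuming arithmetic right shift of negative values as on common compilers. The helper below is a made-up standalone sketch of that arithmetic:]

#include <cassert>
#include <cstdint>

// Truncating x / 2^k via arithmetic shift plus a carry correction.
int32_t sdivPow2(int32_t x, unsigned k) {
  int32_t shifted = x >> k;                        // srawi (arithmetic shift)
  bool inexact = (x & ((1u << k) - 1)) != 0;       // any 1 bits shifted out?
  int32_t carry = (x < 0 && inexact) ? 1 : 0;      // srawi's CA bit
  return shifted + carry;                          // addze
}
// sdivPow2(-5, 2) == -1 == -5 / 4 ; sdivPow2(7, 2) == 1 == 7 / 4.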
@@ -873,24 +873,24 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT);
}
}
-
+
// Other cases are autogenerated.
break;
}
-
+
case ISD::LOAD: {
// Handle preincrement loads.
LoadSDNode *LD = cast<LoadSDNode>(N);
EVT LoadedVT = LD->getMemoryVT();
-
+
// Normal loads are handled by code generated from the .td file.
if (LD->getAddressingMode() != ISD::PRE_INC)
break;
-
+
SDValue Offset = LD->getOffset();
if (isa<ConstantSDNode>(Offset) ||
Offset.getOpcode() == ISD::TargetGlobalAddress) {
-
+
unsigned Opcode;
bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
if (LD->getValueType(0) != MVT::i64) {
@@ -917,7 +917,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case MVT::i8: Opcode = PPC::LBZU8; break;
}
}
-
+
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[] = { Offset, Base, Chain };
@@ -929,7 +929,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
llvm_unreachable("R+R preindex loads not supported yet!");
}
}
-
+
case ISD::AND: {
unsigned Imm, Imm2, SH, MB, ME;
@@ -944,7 +944,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
// If this is just a masked value where the input is not handled above, and
// is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
if (isInt32Immediate(N->getOperand(1), Imm) &&
- isRunOfOnes(Imm, MB, ME) &&
+ isRunOfOnes(Imm, MB, ME) &&
N->getOperand(0).getOpcode() != ISD::ROTL) {
SDValue Val = N->getOperand(0);
SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
@@ -957,7 +957,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
}
// ISD::OR doesn't get all the bitfield insertion fun.
// (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) is a bitfield insert
- if (isInt32Immediate(N->getOperand(1), Imm) &&
+ if (isInt32Immediate(N->getOperand(1), Imm) &&
N->getOperand(0).getOpcode() == ISD::OR &&
isInt32Immediate(N->getOperand(0).getOperand(1), Imm2)) {
unsigned MB, ME;
@@ -969,7 +969,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5);
}
}
-
+
// Other cases are autogenerated.
break;
}
@@ -977,7 +977,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
if (N->getValueType(0) == MVT::i32)
if (SDNode *I = SelectBitfieldInsert(N))
return I;
-
+
// Other cases are autogenerated.
break;
case ISD::SHL: {
@@ -988,25 +988,25 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
-
+
// Other cases are autogenerated.
break;
}
case ISD::SRL: {
unsigned Imm, SH, MB, ME;
if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) &&
- isRotateAndMask(N, Imm, true, SH, MB, ME)) {
+ isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
-
+
// Other cases are autogenerated.
break;
}
case ISD::SELECT_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get();
-
+
// Handle the setcc cases here. select_cc lhs, 0, 1, 0, cc
if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
@@ -1058,7 +1058,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
case ISD::BR_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
- SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode,
+ SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode,
N->getOperand(4), N->getOperand(0) };
return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 4);
}
@@ -1072,13 +1072,13 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
return CurDAG->SelectNodeTo(N, PPC::BCTR, MVT::Other, Chain);
}
}
-
+
return SelectCode(N);
}
-/// createPPCISelDag - This pass converts a legalized DAG into a
+/// createPPCISelDag - This pass converts a legalized DAG into a
/// PowerPC-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM) {
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index abc6325cc3..8093789127 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -39,7 +39,7 @@ PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
: TargetInstrInfoImpl(PPCInsts, array_lengthof(PPCInsts)), TM(tm),
RI(*TM.getSubtargetImpl(), *this) {}
-unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
+unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
@@ -57,7 +57,7 @@ unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
return 0;
}
-unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
+unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
@@ -84,11 +84,11 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
// Normal instructions can be commuted the obvious way.
if (MI->getOpcode() != PPC::RLWIMI)
return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
-
+
// Cannot commute if it has a non-zero rotate count.
if (MI->getOperand(3).getImm() != 0)
return 0;
-
+
// If we have a zero rotate count, we have:
// M = mask(MB,ME)
// Op0 = (Op1 & ~M) | (Op2 & M)
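[Editorial aside, not part of this patch: the commute performed in the following hunk (the MB/ME adjustment) works because the blend written above is symmetric under complementing the mask. A standalone check of that identity, independent of the rlwimi encoding details:]

#include <cassert>
#include <cstdint>

// (a & ~M) | (b & M)  ==  (b & ~M') | (a & M')  when  M' = ~M,
// so swapping the register operands only requires inverting the mask.
void checkCommuteBlend(uint32_t a, uint32_t b, uint32_t M) {
  uint32_t Mp = ~M;
  assert(((a & ~M) | (b & M)) == ((b & ~Mp) | (a & Mp)));
}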
@@ -135,14 +135,14 @@ PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
MI->getOperand(1).setReg(Reg2);
MI->getOperand(2).setIsKill(Reg1IsKill);
MI->getOperand(1).setIsKill(Reg2IsKill);
-
+
// Swap the mask around.
MI->getOperand(4).setImm((ME+1) & 31);
MI->getOperand(5).setImm((MB-1) & 31);
return MI;
}
-void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
+void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
DebugLoc DL;
BuildMI(MBB, MI, DL, get(PPC::NOP));
@@ -169,7 +169,7 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Get the last instruction in the block.
MachineInstr *LastInst = I;
-
+
// If there is only one terminator instruction, process it.
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
if (LastInst->getOpcode() == PPC::B) {
@@ -189,7 +189,7 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
// Otherwise, don't know what this is.
return true;
}
-
+
// Get the instruction before it if it's a terminator.
MachineInstr *SecondLastInst = I;
@@ -197,9 +197,9 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
if (SecondLastInst && I != MBB.begin() &&
isUnpredicatedTerminator(--I))
return true;
-
+
// If the block ends with PPC::B and PPC:BCC, handle it.
- if (SecondLastInst->getOpcode() == PPC::BCC &&
+ if (SecondLastInst->getOpcode() == PPC::BCC &&
LastInst->getOpcode() == PPC::B) {
if (!SecondLastInst->getOperand(2).isMBB() ||
!LastInst->getOperand(0).isMBB())
@@ -210,10 +210,10 @@ bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
FBB = LastInst->getOperand(0).getMBB();
return false;
}
-
+
// If the block ends with two PPC:Bs, handle it. The second one is not
// executed, so remove it.
- if (SecondLastInst->getOpcode() == PPC::B &&
+ if (SecondLastInst->getOpcode() == PPC::B &&
LastInst->getOpcode() == PPC::B) {
if (!SecondLastInst->getOperand(0).isMBB())
return true;
@@ -239,17 +239,17 @@ unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
}
if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC)
return 0;
-
+
// Remove the branch.
I->eraseFromParent();
-
+
I = MBB.end();
if (I == MBB.begin()) return 1;
--I;
if (I->getOpcode() != PPC::BCC)
return 1;
-
+
// Remove the branch.
I->eraseFromParent();
return 2;
@@ -262,9 +262,9 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
DebugLoc DL) const {
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
- assert((Cond.size() == 2 || Cond.size() == 0) &&
+ assert((Cond.size() == 2 || Cond.size() == 0) &&
"PPC branch conditions have two components!");
-
+
// One-way branch.
if (FBB == 0) {
if (Cond.empty()) // Unconditional branch
@@ -274,7 +274,7 @@ PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
return 1;
}
-
+
// Two-way Conditional Branch.
BuildMI(&MBB, DL, get(PPC::BCC))
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
@@ -377,11 +377,11 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
// We need to store the CR in the low 4-bits of the saved value. First,
// issue a MFCR to save all of the CRBits.
- unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
+ unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
PPC::R2 : PPC::R0;
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MFCRpseud), ScratchReg)
.addReg(SrcReg, getKillRegState(isKill)));
-
+
// If the saved register wasn't CR0, shift the bits left so that they are
// in CR0's slot.
if (SrcReg != PPC::CR0) {
@@ -391,7 +391,7 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
.addReg(ScratchReg).addImm(ShiftBits)
.addImm(0).addImm(31));
}
-
+
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
.addReg(ScratchReg,
getKillRegState(isKill)),
@@ -428,14 +428,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
SrcReg == PPC::CR7EQ || SrcReg == PPC::CR7UN)
Reg = PPC::CR7;
- return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
+ return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
PPC::CRRCRegisterClass, NewMIs);
} else if (RC == PPC::VRRCRegisterClass) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// STVX VAL, 0, R0
- //
+ //
// FIXME: We use R0 here, because it isn't available for RA.
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0),
FrameIdx, 0, 0));
@@ -514,9 +514,9 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
// at the moment.
unsigned ScratchReg = TM.getSubtargetImpl()->isDarwinABI() ?
PPC::R2 : PPC::R0;
- NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
+ NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
ScratchReg), FrameIdx));
-
+
// If the reloaded register isn't CR0, shift the bits right so that they are
// in the right CR's slot.
if (DestReg != PPC::CR0) {
@@ -526,11 +526,11 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
.addReg(ScratchReg).addImm(32-ShiftBits).addImm(0)
.addImm(31));
}
-
+
NewMIs.push_back(BuildMI(MF, DL, get(PPC::MTCRF), DestReg)
.addReg(ScratchReg));
} else if (RC == PPC::CRBITRCRegisterClass) {
-
+
unsigned Reg = 0;
if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
DestReg == PPC::CR0EQ || DestReg == PPC::CR0UN)
@@ -557,14 +557,14 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
DestReg == PPC::CR7EQ || DestReg == PPC::CR7UN)
Reg = PPC::CR7;
- return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
+ return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
PPC::CRRCRegisterClass, NewMIs);
} else if (RC == PPC::VRRCRegisterClass) {
// We don't have indexed addressing for vector loads. Emit:
// R0 = ADDI FI#
// Dest = LVX 0, R0
- //
+ //
// FIXME: We use R0 here, because it isn't available for RA.
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0),
FrameIdx, 0, 0));
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 92369d6f1d..4083577997 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -32,7 +32,7 @@ enum {
/// PPC970_First - This instruction starts a new dispatch group, so it will
/// always be the first one in the group.
PPC970_First = 0x1,
-
+
/// PPC970_Single - This instruction starts a new dispatch group and
/// terminates it, so it will be the sole instruction in the group.
PPC970_Single = 0x2,
@@ -40,7 +40,7 @@ enum {
/// PPC970_Cracked - This instruction is cracked into two pieces, requiring
/// two dispatch pipes to be available to issue.
PPC970_Cracked = 0x4,
-
+
/// PPC970_Mask/Shift - This is a bitmask that selects the pipeline type that
/// an instruction is issued to.
PPC970_Shift = 3,
@@ -59,8 +59,8 @@ enum PPC970_Unit {
PPC970_BRU = 7 << PPC970_Shift // Branch Unit
};
} // end namespace PPCII
-
-
+
+
class PPCInstrInfo : public TargetInstrInfoImpl {
PPCTargetMachine &TM;
const PPCRegisterInfo RI;
@@ -69,7 +69,7 @@ class PPCInstrInfo : public TargetInstrInfoImpl {
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
- void LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
+ void LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
@@ -90,8 +90,8 @@ public:
// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero. We also have to munge the immediates a bit.
virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
-
- virtual void insertNoop(MachineBasicBlock &MBB,
+
+ virtual void insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
@@ -109,7 +109,7 @@ public:
MachineBasicBlock::iterator I, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
-
+
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
unsigned SrcReg, bool isKill, int FrameIndex,
@@ -121,7 +121,7 @@ public:
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
-
+
virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
int FrameIx,
uint64_t Offset,
@@ -130,7 +130,7 @@ public:
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
-
+
/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
diff --git a/lib/Target/TargetInstrInfo.cpp b/lib/Target/TargetInstrInfo.cpp
index 25b407d646..97f3bf6e57 100644
--- a/lib/Target/TargetInstrInfo.cpp
+++ b/lib/Target/TargetInstrInfo.cpp
@@ -128,7 +128,7 @@ bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
-void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
+void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const {
llvm_unreachable("Target didn't implement insertNoop!");
}
@@ -137,7 +137,7 @@ void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
const TargetInstrDesc &TID = MI->getDesc();
if (!TID.isTerminator()) return false;
-
+
// Conditional branch is a special case.
if (TID.isBranch() && !TID.isBarrier())
return true;
@@ -157,8 +157,8 @@ bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
/// may be overloaded in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
const MCAsmInfo &MAI) const {
-
-
+
+
// Count the number of instructions in the asm.
bool atInsnStart = true;
unsigned Length = 0;
@@ -173,6 +173,6 @@ unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
strlen(MAI.getCommentString())) == 0)
atInsnStart = false;
}
-
+
return Length;
}
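[Editorial aside, not part of this patch: the loop above walks the inline-asm string statement by statement. The sketch below is a made-up standalone version of the same counting idea; it returns a statement count rather than a byte length, takes the separator and comment string as plain parameters, and is not the TargetInstrInfo API.]

#include <cctype>
#include <cstring>

// Count non-comment statements in an inline-asm string.  Statements end at
// '\n' or at the separator; a statement whose first token is the comment
// string is not counted.
unsigned countAsmStatements(const char *Str, char Separator,
                            const char *Comment) {
  bool atInsnStart = true;
  unsigned Count = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || *Str == Separator) {
      atInsnStart = true;
      continue;
    }
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      if (std::strncmp(Str, Comment, std::strlen(Comment)) != 0)
        ++Count;                 // first real token of a new statement
      atInsnStart = false;
    }
  }
  return Count;
}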