author    Hal Finkel <hfinkel@anl.gov>    2014-01-24 20:12:02 +0000
committer Hal Finkel <hfinkel@anl.gov>    2014-01-24 20:12:02 +0000
commit    86720f7c65c4481c8d8518d71e2bdce59a85969e (patch)
tree      31053f6a41ee186251578acdba3aadff375ca2db /lib/CodeGen/SelectionDAG
parent    d221469cc6c120e0b6ef28741d01bf99c62b16df (diff)
Fix DAGCombiner::GatherAllAliases to account for non-chain dependencies
DAGCombiner::GatherAllAliases, which is only used when AA is enabled during
DAGCombine, had a fundamentally incorrect assumption for which this change
compensates. GatherAllAliases, which is used to find aliasing predecessor chain
nodes (so that a better chain can be selected for a load or store to enable
subsequent optimizations), assumed that walking up the chain would always catch
all possibly-aliasing loads and stores. This is not true: to really find all
aliases, we also need to search for aliases through the value operand of a
store, etc. Consider the following situation:

  Token1 = ...
  L1 = load Token1, %52
  S1 = store Token1, L1, %51
  L2 = load Token1, %52+8
  S2 = store Token1, L2, %51+8
  Token2 = Token(S1, S2)
  L3 = load Token2, %53
  S3 = store Token2, L3, %52
  L4 = load Token2, %53+8
  S4 = store Token2, L4, %52+8

If we search for aliases of S3 (which loads address %52), and we look only
through the chain, then we'll miss the trivial dependence on L1 (which loads
from %52). We then might change all loads and stores to use Token1 as their
chain operand, which could result in copying %53 into %52 before copying %52
into %51 (which should happen first).

The problem is, however, that searching for such data dependencies can become
expensive, and the cost is not directly related to the chain depth. Instead,
we'll rule out such configurations by insisting that we've visited all chain
users (except for users of the original chain, which is not necessary). When
doing this, we need to look through nodes we don't care about (otherwise,
things like register copies will interfere with trivial use cases).

Unfortunately, I don't have a small test case for this problem. Creating the
underlying situation is not hard (a pair of memcpys will do it), but arranging
for the default instruction schedule to be incorrect is very fragile.

This unbreaks self-hosting on PPC64 when using -mllvm
-combiner-global-alias-analysis -mllvm -combiner-alias-analysis.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200033 91177308-0d34-0410-b5e6-96231b3b80d8
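As the message above notes, a pair of memcpys is enough to create the
overlapping chain configuration; the sketch below is a hypothetical reproducer
written to mirror the %51/%52/%53 offsets in the example. The struct layout,
the function name copy_chain, and the pointer names are assumptions for
illustration, not the fragile test case the commit says could not be reduced.

  /* Hypothetical reproducer sketch: two back-to-back 16-byte memcpys whose
     source and destination overlap in the same pattern as %51, %52, and %53
     above. Names and layout are assumptions, not part of the commit. */
  #include <string.h>

  struct pair { long lo, hi; };   /* two 8-byte fields, matching the +8 offsets */

  void copy_chain(struct pair *p51, struct pair *p52, const struct pair *p53) {
    memcpy(p51, p52, sizeof *p51);   /* reads %52, writes %51 (must happen first) */
    memcpy(p52, p53, sizeof *p52);   /* reads %53, writes %52 */
  }

The flags mentioned above would be passed through the clang driver roughly as
follows (a usage sketch; the exact source file is an assumption):

  clang -O2 -c -mllvm -combiner-alias-analysis -mllvm -combiner-global-alias-analysis test.c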
Diffstat (limited to 'lib/CodeGen/SelectionDAG')
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp  |  59
1 file changed, 58 insertions(+), 1 deletion(-)
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 591b2c7694..69e2ce198e 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11142,7 +11142,7 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
if (Depth > 6 || Aliases.size() == 2) {
Aliases.clear();
Aliases.push_back(OriginalChain);
- break;
+ return;
}
// Don't bother if we've been before.
@@ -11204,6 +11204,63 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
break;
}
}
+
+ // We need to be careful here to also search for aliases through the
+ // value operand of a store, etc. Consider the following situation:
+ // Token1 = ...
+ // L1 = load Token1, %52
+ // S1 = store Token1, L1, %51
+ // L2 = load Token1, %52+8
+ // S2 = store Token1, L2, %51+8
+ // Token2 = Token(S1, S2)
+ // L3 = load Token2, %53
+ // S3 = store Token2, L3, %52
+ // L4 = load Token2, %53+8
+ // S4 = store Token2, L4, %52+8
+ // If we search for aliases of S3 (which loads address %52), and we look
+ // only through the chain, then we'll miss the trivial dependence on L1
+ // (which also loads from %52). We then might change all loads and
+ // stores to use Token1 as their chain operand, which could result in
+ // copying %53 into %52 before copying %52 into %51 (which should
+ // happen first).
+ //
+ // The problem is, however, that searching for such data dependencies
+ // can become expensive, and the cost is not directly related to the
+ // chain depth. Instead, we'll rule out such configurations here by
+ // insisting that we've visited all chain users (except for users
+ // of the original chain, which is not necessary). When doing this,
+ // we need to look through nodes we don't care about (otherwise, things
+ // like register copies will interfere with trivial cases).
+
+ SmallVector<const SDNode *, 16> Worklist;
+ for (SmallPtrSet<SDNode *, 16>::iterator I = Visited.begin(),
+ IE = Visited.end(); I != IE; ++I)
+ if (*I != OriginalChain.getNode())
+ Worklist.push_back(*I);
+
+ while (!Worklist.empty()) {
+ const SDNode *M = Worklist.pop_back_val();
+
+ // We have already visited M, and want to make sure we've visited any uses
+ // of M that we care about. For uses that we've not visited, and don't
+ // care about, queue them to the worklist.
+
+ for (SDNode::use_iterator UI = M->use_begin(),
+ UIE = M->use_end(); UI != UIE; ++UI)
+ if (UI.getUse().getValueType() == MVT::Other && Visited.insert(*UI)) {
+ if (isa<MemIntrinsicSDNode>(*UI) || isa<MemSDNode>(*UI)) {
+ // We've not visited this use, and we care about it (it could have an
+ // ordering dependency with the original node).
+ Aliases.clear();
+ Aliases.push_back(OriginalChain);
+ return;
+ }
+
+ // We've not visited this use, but we don't care about it. Mark it as
+ // visited and enqueue it to the worklist.
+ Worklist.push_back(*UI);
+ }
+ }
}
/// FindBetterChain - Walk up chain skipping non-aliasing memory nodes, looking