-rw-r--r--  include/llvm/Analysis/LazyCallGraph.h   337
-rw-r--r--  lib/Analysis/CMakeLists.txt                1
-rw-r--r--  lib/Analysis/LazyCallGraph.cpp           195
-rw-r--r--  test/Analysis/LazyCallGraph/basic.ll     126
-rw-r--r--  tools/opt/NewPMDriver.cpp                  5
-rw-r--r--  tools/opt/Passes.cpp                       6
6 files changed, 670 insertions, 0 deletions
diff --git a/include/llvm/Analysis/LazyCallGraph.h b/include/llvm/Analysis/LazyCallGraph.h
new file mode 100644
index 0000000000..d355b9cf5a
--- /dev/null
+++ b/include/llvm/Analysis/LazyCallGraph.h
@@ -0,0 +1,337 @@
+//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Implements a lazy call graph analysis and related passes for the new pass
+/// manager.
+///
+/// NB: This is *not* a traditional call graph! It is a graph which models both
+/// the current calls and potential calls. As a consequence there are many
+/// edges in this call graph that do not correspond to a 'call' or 'invoke'
+/// instruction.
+///
+/// The primary use case of this graph analysis is to facilitate iterating
+/// across the functions of a module in ways that ensure all callees are
+/// visited prior to a caller (given any SCC constraints), or vice versa. As
+/// such, it is particularly well suited to organizing CGSCC optimizations
+/// such as inlining, outlining, argument promotion, etc. That use case
+/// motivates the design; the graph may not be appropriate for other purposes.
+/// The use graph of functions, or some other conservative analysis of call
+/// instructions, may be more interesting for optimizations and subsequent
+/// analyses which cannot work in the context of an overly specified
+/// potential-call-edge graph.
+///
+/// To understand the specific rules and nature of this call graph analysis,
+/// see the documentation of the \c LazyCallGraph below.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZY_CALL_GRAPH
+#define LLVM_ANALYSIS_LAZY_CALL_GRAPH
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Support/Allocator.h"
+#include <iterator>
+
+namespace llvm {
+class ModuleAnalysisManager;
+class PreservedAnalyses;
+class raw_ostream;
+
+/// \brief A lazily constructed view of the call graph of a module.
+///
+/// With the edges of this graph, the motivating constraint that we are
+/// attempting to maintain is that function-local optimizations, CGSCC-local
+/// optimizations, and optimizations transforming a pair of functions connected
+/// by an edge in the graph do not invalidate a bottom-up traversal of the SCC
+/// DAG. That is, no optimization will remove or add an edge such
+/// that functions already visited in a bottom-up order of the SCC DAG are no
+/// longer valid to have visited, or such that functions not yet visited in
+/// a bottom-up order of the SCC DAG are not required to have already been
+/// visited.
+///
+/// Within this constraint, the desire is to minimize the merge points of the
+/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
+/// in the SCC DAG, the more independence there is in optimizing within it.
+/// There is a strong desire to enable parallelization of optimizations over
+/// the call graph, and both limited fanout and merge points will (artificially
+/// in some cases) limit the scaling of such an effort.
+///
+/// To this end, the graph represents both direct calls and any potential
+/// resolution of an indirect call edge. Another way to think about it is that
+/// it represents the direct call edges plus any that might later be formed
+/// through static optimizations. Specifically, it considers taking the address
+/// of a function to be an edge in the call graph because this might be
+/// forwarded to become a direct call by some subsequent function-local
+/// optimization. The result is that the graph closely follows the use-def
+/// edges for functions. Walking "up" the graph can be done by looking at all
+/// of the uses of a function.
+///
+/// The roots of the call graph are the external functions and functions
+/// escaped into global variables. Those functions can be called from outside
+/// of the module or via unknowable means in the IR -- we may not be able to
+/// form even a potential call edge from a function body which may dynamically
+/// load the function and call it.
+///
+/// This analysis still requires updates to remain valid after optimizations
+/// which could potentially change the set of potential callees. The
+/// constraints it operates under only make the traversal order remain valid.
+///
+/// The entire analysis must be re-computed if full interprocedural
+/// optimizations run at any point. For example, globalopt completely
+/// invalidates the information in this analysis.
+///
+/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
+/// it from the existing CallGraph. At some point, it is expected that this
+/// will be the only call graph and it will be renamed accordingly.
+class LazyCallGraph {
+public:
+ class Node;
+ typedef SmallVector<PointerUnion<Function *, Node *>, 4> NodeVectorT;
+ typedef SmallVectorImpl<PointerUnion<Function *, Node *> > NodeVectorImplT;
+
+ /// \brief A lazy iterator used for both the entry nodes and child nodes.
+ ///
+ /// When this iterator is dereferenced, if not yet available, a function will
+ /// be scanned for "calls" or uses of functions and its child information
+ /// will be constructed. All of these results are accumulated and cached in
+ /// the graph.
+ class iterator : public std::iterator<std::bidirectional_iterator_tag, Node *,
+ ptrdiff_t, Node *, Node *> {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::Node;
+ typedef std::iterator<std::bidirectional_iterator_tag, Node *, ptrdiff_t,
+ Node *, Node *> BaseT;
+
+ /// \brief Nonce type to select the constructor for the end iterator.
+ struct IsAtEndT {};
+
+ LazyCallGraph &G;
+ NodeVectorImplT::iterator NI;
+
+ // Build the begin iterator for a node.
+ explicit iterator(LazyCallGraph &G, NodeVectorImplT &Nodes)
+ : G(G), NI(Nodes.begin()) {}
+
+ // Build the end iterator for a node. This is selected purely by overload.
+ iterator(LazyCallGraph &G, NodeVectorImplT &Nodes, IsAtEndT /*Nonce*/)
+ : G(G), NI(Nodes.end()) {}
+
+ public:
+ iterator(const iterator &Arg) : G(Arg.G), NI(Arg.NI) {}
+
+ iterator &operator=(iterator Arg) {
+      std::swap(Arg.NI, NI);
+ return *this;
+ }
+
+ bool operator==(const iterator &Arg) { return NI == Arg.NI; }
+ bool operator!=(const iterator &Arg) { return !operator==(Arg); }
+
+ reference operator*() const {
+ if (NI->is<Node *>())
+ return NI->get<Node *>();
+
+ Function *F = NI->get<Function *>();
+ Node *ChildN = G.get(*F);
+ *NI = ChildN;
+ return ChildN;
+ }
+ pointer operator->() const { return operator*(); }
+
+ iterator &operator++() {
+ ++NI;
+ return *this;
+ }
+ iterator operator++(int) {
+ iterator prev = *this;
+ ++*this;
+ return prev;
+ }
+
+ iterator &operator--() {
+ --NI;
+ return *this;
+ }
+ iterator operator--(int) {
+ iterator next = *this;
+ --*this;
+ return next;
+ }
+ };
+
+ /// \brief Construct a graph for the given module.
+ ///
+ /// This sets up the graph and computes all of the entry points of the graph.
+ /// No function definitions are scanned until their nodes in the graph are
+ /// requested during traversal.
+ LazyCallGraph(Module &M);
+
+ /// \brief Copy constructor.
+ ///
+ /// This does a deep copy of the graph. It does no verification that the
+ /// graph remains valid for the module. It is also relatively expensive.
+ LazyCallGraph(const LazyCallGraph &G);
+
+#if LLVM_HAS_RVALUE_REFERENCES
+ /// \brief Move constructor.
+ ///
+ /// This is a deep move. It leaves G in an undefined but destroyable state.
+ /// Any other operation on G is likely to fail.
+ LazyCallGraph(LazyCallGraph &&G);
+#endif
+
+ iterator begin() { return iterator(*this, EntryNodes); }
+ iterator end() { return iterator(*this, EntryNodes, iterator::IsAtEndT()); }
+
+ /// \brief Lookup a function in the graph which has already been scanned and
+ /// added.
+ Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
+
+ /// \brief Get a graph node for a given function, scanning it to populate the
+ /// graph data as necessary.
+ Node *get(Function &F) {
+ Node *&N = NodeMap[&F];
+ if (N)
+ return N;
+
+ return insertInto(F, N);
+ }
+
+private:
+ Module &M;
+
+ /// \brief Allocator that holds all the call graph nodes.
+ SpecificBumpPtrAllocator<Node> BPA;
+
+ /// \brief Maps function->node for fast lookup.
+ DenseMap<const Function *, Node *> NodeMap;
+
+ /// \brief The entry nodes to the graph.
+ ///
+ /// These nodes are reachable through "external" means. Put another way, they
+ /// escape at the module scope.
+ NodeVectorT EntryNodes;
+
+ /// \brief Set of the entry nodes to the graph.
+ SmallPtrSet<Function *, 4> EntryNodeSet;
+
+ /// \brief Helper to insert a new function, with an already looked-up entry in
+ /// the NodeMap.
+ Node *insertInto(Function &F, Node *&MappedN);
+
+ /// \brief Helper to copy a node from another graph into this one.
+ Node *copyInto(const Node &OtherN);
+
+#if LLVM_HAS_RVALUE_REFERENCES
+ /// \brief Helper to move a node from another graph into this one.
+ Node *moveInto(Node &&OtherN);
+#endif
+};
+
+/// \brief A node in the call graph.
+///
+/// This represents a single node. Its primary roles are to cache the list of
+/// callees, de-duplicate and provide fast testing of whether a function is
+/// a callee, and facilitate iteration of child nodes in the graph.
+class LazyCallGraph::Node {
+ friend LazyCallGraph;
+
+ LazyCallGraph &G;
+ Function &F;
+ mutable NodeVectorT Callees;
+ SmallPtrSet<Function *, 4> CalleeSet;
+
+ /// \brief Basic constructor implements the scanning of F into Callees and
+ /// CalleeSet.
+ Node(LazyCallGraph &G, Function &F);
+
+ /// \brief Constructor used when copying a node from one graph to another.
+ Node(LazyCallGraph &G, const Node &OtherN);
+
+#if LLVM_HAS_RVALUE_REFERENCES
+ /// \brief Constructor used when moving a node from one graph to another.
+ Node(LazyCallGraph &G, Node &&OtherN);
+#endif
+
+public:
+ typedef LazyCallGraph::iterator iterator;
+
+ Function &getFunction() const {
+ return F;
+  }
+
+ iterator begin() const { return iterator(G, Callees); }
+ iterator end() const { return iterator(G, Callees, iterator::IsAtEndT()); }
+
+ /// Equality is defined as address equality.
+ bool operator==(const Node &N) const { return this == &N; }
+ bool operator!=(const Node &N) const { return !operator==(N); }
+};
+
+// Provide GraphTraits specializations for call graphs.
+template <> struct GraphTraits<LazyCallGraph::Node *> {
+ typedef LazyCallGraph::Node NodeType;
+ typedef LazyCallGraph::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) { return N; }
+ static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeType *N) { return N->end(); }
+};
+template <> struct GraphTraits<LazyCallGraph *> {
+ typedef LazyCallGraph::Node NodeType;
+ typedef LazyCallGraph::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) { return N; }
+ static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeType *N) { return N->end(); }
+};
+
+/// \brief An analysis pass which computes the call graph for a module.
+class LazyCallGraphAnalysis {
+public:
+ /// \brief Inform generic clients of the result type.
+ typedef LazyCallGraph Result;
+
+ static void *ID() { return (void *)&PassID; }
+
+  /// \brief Compute the \c LazyCallGraph for the module \c M.
+ ///
+ /// This just builds the set of entry points to the call graph. The rest is
+ /// built lazily as it is walked.
+ LazyCallGraph run(Module *M) { return LazyCallGraph(*M); }
+
+private:
+ static char PassID;
+};
+
+/// \brief A pass which prints the call graph to a \c raw_ostream.
+///
+/// This is primarily useful for testing the analysis.
+class LazyCallGraphPrinterPass {
+ raw_ostream &OS;
+
+public:
+ explicit LazyCallGraphPrinterPass(raw_ostream &OS);
+
+ PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM);
+
+ static StringRef name() { return "LazyCallGraphPrinterPass"; }
+};
+
+}
+
+#endif
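(Not part of the patch.) As a rough sketch of how the interface above is meant to be used: dereferencing the lazy iterator is what triggers scanning a function body, so a simple depth-first walk is enough to force construction of the whole graph. This mirrors the printer pass added in lib/Analysis/LazyCallGraph.cpp below; the helper names visitPostOrder and walkGraph are hypothetical, introduced only for this example.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Hypothetical helper: visit all callees before the caller. Dereferencing the
// lazy iterator is what scans a function and populates its callee list.
static void visitPostOrder(LazyCallGraph::Node &N,
                           SmallPtrSetImpl<LazyCallGraph::Node *> &Visited,
                           raw_ostream &OS) {
  for (LazyCallGraph::iterator I = N.begin(), E = N.end(); I != E; ++I)
    if (Visited.insert(*I))
      visitPostOrder(**I, Visited, OS);
  OS << N.getFunction().getName() << "\n";
}

// Hypothetical driver: build the graph for a module and walk it from the
// entry nodes, forcing the lazy scan of every reachable function.
static void walkGraph(Module &M, raw_ostream &OS) {
  LazyCallGraph G(M); // Only the entry points are computed here.
  SmallPtrSet<LazyCallGraph::Node *, 16> Visited;
  for (LazyCallGraph::iterator I = G.begin(), E = G.end(); I != E; ++I)
    if (Visited.insert(*I))
      visitPostOrder(**I, Visited, OS);
}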
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 3624aac450..c6d4573885 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -23,6 +23,7 @@ add_llvm_library(LLVMAnalysis
   InstructionSimplify.cpp
   Interval.cpp
   IntervalPartition.cpp
+  LazyCallGraph.cpp
   LazyValueInfo.cpp
   LibCallAliasAnalysis.cpp
   LibCallSemantics.cpp
diff --git a/lib/Analysis/LazyCallGraph.cpp b/lib/Analysis/LazyCallGraph.cpp
new file mode 100644
index 0000000000..b89bf70b43
--- /dev/null
+++ b/lib/Analysis/LazyCallGraph.cpp
@@ -0,0 +1,195 @@
+//===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/ADT/SCCIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/InstVisitor.h"
+
+using namespace llvm;
+
+static void findCallees(
+ SmallVectorImpl<Constant *> &Worklist, SmallPtrSetImpl<Constant *> &Visited,
+ SmallVectorImpl<PointerUnion<Function *, LazyCallGraph::Node *> > &Callees,
+ SmallPtrSetImpl<Function *> &CalleeSet) {
+ while (!Worklist.empty()) {
+ Constant *C = Worklist.pop_back_val();
+
+ if (Function *F = dyn_cast<Function>(C)) {
+ // Note that we consider *any* function with a definition to be a viable
+ // edge. Even if the function's definition is subject to replacement by
+ // some other module (say, a weak definition) there may still be
+ // optimizations which essentially speculate based on the definition and
+ // a way to check that the specific definition is in fact the one being
+ // used. For example, this could be done by moving the weak definition to
+ // a strong (internal) definition and making the weak definition be an
+ // alias. Then a test of the address of the weak function against the new
+ // strong definition's address would be an effective way to determine the
+ // safety of optimizing a direct call edge.
+ if (!F->isDeclaration() && CalleeSet.insert(F))
+ Callees.push_back(F);
+ continue;
+ }
+
+ for (User::value_op_iterator OI = C->value_op_begin(),
+ OE = C->value_op_end();
+ OI != OE; ++OI)
+ if (Visited.insert(cast<Constant>(*OI)))
+ Worklist.push_back(cast<Constant>(*OI));
+ }
+}
+
+LazyCallGraph::Node::Node(LazyCallGraph &G, Function &F) : G(G), F(F) {
+ SmallVector<Constant *, 16> Worklist;
+ SmallPtrSet<Constant *, 16> Visited;
+ // Find all the potential callees in this function. First walk the
+ // instructions and add every operand which is a constant to the worklist.
+ for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI)
+ for (BasicBlock::iterator II = BBI->begin(), IE = BBI->end(); II != IE;
+ ++II)
+ for (User::value_op_iterator OI = II->value_op_begin(),
+ OE = II->value_op_end();
+ OI != OE; ++OI)
+ if (Constant *C = dyn_cast<Constant>(*OI))
+ if (Visited.insert(C))
+ Worklist.push_back(C);
+
+ // We've collected all the constant (and thus potentially function or
+ // function containing) operands to all of the instructions in the function.
+ // Process them (recursively) collecting every function found.
+ findCallees(Worklist, Visited, Callees, CalleeSet);
+}
+
+LazyCallGraph::Node::Node(LazyCallGraph &G, const Node &OtherN)
+ : G(G), F(OtherN.F), CalleeSet(OtherN.CalleeSet) {
+ // Loop over the other node's callees, adding the Function*s to our list
+ // directly, and recursing to add the Node*s.
+ Callees.reserve(OtherN.Callees.size());
+ for (NodeVectorImplT::iterator OI = OtherN.Callees.begin(),
+ OE = OtherN.Callees.end();
+ OI != OE; ++OI)
+ if (Function *Callee = OI->dyn_cast<Function *>())
+ Callees.push_back(Callee);
+ else
+ Callees.push_back(G.copyInto(*OI->get<Node *>()));
+}
+
+#if LLVM_HAS_RVALUE_REFERENCES
+LazyCallGraph::Node::Node(LazyCallGraph &G, Node &&OtherN)
+ : G(G), F(OtherN.F), Callees(std::move(OtherN.Callees)),
+ CalleeSet(std::move(OtherN.CalleeSet)) {
+ // Loop over our Callees. They've been moved from another node, but we need
+ // to move the Node*s to live under our bump ptr allocator.
+ for (NodeVectorImplT::iterator CI = Callees.begin(), CE = Callees.end();
+ CI != CE; ++CI)
+ if (Node *ChildN = CI->dyn_cast<Node *>())
+ *CI = G.moveInto(std::move(*ChildN));
+}
+#endif
+
+LazyCallGraph::LazyCallGraph(Module &M) : M(M) {
+ for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
+ if (!FI->isDeclaration() && !FI->hasLocalLinkage())
+ if (EntryNodeSet.insert(&*FI))
+ EntryNodes.push_back(&*FI);
+
+ // Now add entry nodes for functions reachable via initializers to globals.
+ SmallVector<Constant *, 16> Worklist;
+ SmallPtrSet<Constant *, 16> Visited;
+ for (Module::global_iterator GI = M.global_begin(), GE = M.global_end(); GI != GE; ++GI)
+ if (GI->hasInitializer())
+ if (Visited.insert(GI->getInitializer()))
+ Worklist.push_back(GI->getInitializer());
+
+ findCallees(Worklist, Visited, EntryNodes, EntryNodeSet);
+}
+
+LazyCallGraph::LazyCallGraph(const LazyCallGraph &G)
+ : M(G.M), EntryNodeSet(G.EntryNodeSet) {
+  EntryNodes.reserve(G.EntryNodes.size());
+  for (NodeVectorImplT::const_iterator EI = G.EntryNodes.begin(),
+                                       EE = G.EntryNodes.end();
+       EI != EE; ++EI)
+ if (Function *Callee = EI->dyn_cast<Function *>())
+ EntryNodes.push_back(Callee);
+ else
+ EntryNodes.push_back(copyInto(*EI->get<Node *>()));
+}
+
+#if LLVM_HAS_RVALUE_REFERENCES
+// FIXME: This would be considerably simpler if BumpPtrAllocator were movable
+// without invalidating any of the allocated memory. We should make that the
+// case at some point and delete this.
+LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
+ : M(G.M), EntryNodes(std::move(G.EntryNodes)),
+ EntryNodeSet(std::move(G.EntryNodeSet)) {
+ // Loop over our EntryNodes. They've been moved from another graph, but we
+ // need to move the Node*s to live under our bump ptr allocator.
+ for (NodeVectorImplT::iterator EI = EntryNodes.begin(), EE = EntryNodes.end();
+ EI != EE; ++EI)
+ if (Node *EntryN = EI->dyn_cast<Node *>())
+      *EI = moveInto(std::move(*EntryN));
+}
+#endif
+
+LazyCallGraph::Node *LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
+ return new (MappedN = BPA.Allocate()) Node(*this, F);
+}
+
+LazyCallGraph::Node *LazyCallGraph::copyInto(const Node &OtherN) {
+ Node *&N = NodeMap[&OtherN.F];
+ if (N)
+ return N;
+
+ return new (N = BPA.Allocate()) Node(*this, OtherN);
+}
+
+#if LLVM_HAS_RVALUE_REFERENCES
+LazyCallGraph::Node *LazyCallGraph::moveInto(Node &&OtherN) {
+ Node *&N = NodeMap[&OtherN.F];
+ if (N)
+ return N;
+
+ return new (N = BPA.Allocate()) Node(*this, std::move(OtherN));
+}
+#endif
+
+char LazyCallGraphAnalysis::PassID;
+
+LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
+
+static void printNodes(raw_ostream &OS, LazyCallGraph::Node &N,
+ SmallPtrSetImpl<LazyCallGraph::Node *> &Printed) {
+ // Recurse depth first through the nodes.
+ for (LazyCallGraph::iterator I = N.begin(), E = N.end(); I != E; ++I)
+ if (Printed.insert(*I))
+ printNodes(OS, **I, Printed);
+
+ OS << " Call edges in function: " << N.getFunction().getName() << "\n";
+ for (LazyCallGraph::iterator I = N.begin(), E = N.end(); I != E; ++I)
+ OS << " -> " << I->getFunction().getName() << "\n";
+
+ OS << "\n";
+}
+
+PreservedAnalyses LazyCallGraphPrinterPass::run(Module *M, ModuleAnalysisManager *AM) {
+ LazyCallGraph &G = AM->getResult<LazyCallGraphAnalysis>(M);
+
+ OS << "Printing the call graph for module: " << M->getModuleIdentifier() << "\n\n";
+
+ SmallPtrSet<LazyCallGraph::Node *, 16> Printed;
+ for (LazyCallGraph::iterator I = G.begin(), E = G.end(); I != E; ++I)
+ if (Printed.insert(*I))
+ printNodes(OS, **I, Printed);
+
+ return PreservedAnalyses::all();
+}
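(Not part of the patch.) The printer pass above also doubles as a template for how any new-pass-manager module pass would consume this analysis: ask the ModuleAnalysisManager for the result, which is computed on first request and cached afterwards. A minimal sketch under the same interim Module* / ModuleAnalysisManager* signatures used in this patch; the pass name CallGraphStatsPass is hypothetical.

#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Hypothetical example pass: report how many entry nodes the module's
// call graph has, without forcing the scan of any function body.
class CallGraphStatsPass {
  raw_ostream &OS;

public:
  explicit CallGraphStatsPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Module *M, ModuleAnalysisManager *AM) {
    // Builds the graph's entry points on first request; cached afterwards.
    LazyCallGraph &G = AM->getResult<LazyCallGraphAnalysis>(M);

    unsigned NumEntryNodes = 0;
    for (LazyCallGraph::iterator I = G.begin(), E = G.end(); I != E; ++I)
      ++NumEntryNodes;
    OS << "Entry nodes: " << NumEntryNodes << "\n";

    // Reading the graph does not mutate the IR or invalidate other analyses.
    return PreservedAnalyses::all();
  }

  static StringRef name() { return "CallGraphStatsPass"; }
};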
diff --git a/test/Analysis/LazyCallGraph/basic.ll b/test/Analysis/LazyCallGraph/basic.ll
new file mode 100644
index 0000000000..ebadb75154
--- /dev/null
+++ b/test/Analysis/LazyCallGraph/basic.ll
@@ -0,0 +1,126 @@
+; RUN: opt -disable-output -passes=print-cg %s 2>&1 | FileCheck %s
+;
+; Basic validation of the call graph analysis used in the new pass manager.
+
+define void @f() {
+; CHECK-LABEL: Call edges in function: f
+; CHECK-NOT: ->
+
+entry:
+ ret void
+}
+
+; A bunch more functions just to make it easier to test several call edges at once.
+define void @f1() {
+ ret void
+}
+define void @f2() {
+ ret void
+}
+define void @f3() {
+ ret void
+}
+define void @f4() {
+ ret void
+}
+define void @f5() {
+ ret void
+}
+define void @f6() {
+ ret void
+}
+define void @f7() {
+ ret void
+}
+define void @f8() {
+ ret void
+}
+define void @f9() {
+ ret void
+}
+define void @f10() {
+ ret void
+}
+define void @f11() {
+ ret void
+}
+define void @f12() {
+ ret void
+}
+
+declare i32 @__gxx_personality_v0(...)
+
+define void @test0() {
+; CHECK-LABEL: Call edges in function: test0
+; CHECK-NEXT: -> f
+; CHECK-NOT: ->
+
+entry:
+ call void @f()
+ call void @f()
+ call void @f()
+ call void @f()
+ ret void
+}
+
+define void ()* @test1(void ()** %x) {
+; CHECK-LABEL: Call edges in function: test1
+; CHECK-NEXT: -> f12
+; CHECK-NEXT: -> f11
+; CHECK-NEXT: -> f10
+; CHECK-NEXT: -> f7
+; CHECK-NEXT: -> f9
+; CHECK-NEXT: -> f8
+; CHECK-NEXT: -> f6
+; CHECK-NEXT: -> f5
+; CHECK-NEXT: -> f4
+; CHECK-NEXT: -> f3
+; CHECK-NEXT: -> f2
+; CHECK-NEXT: -> f1
+; CHECK-NOT: ->
+
+entry:
+ br label %next
+
+dead:
+ br label %next
+
+next:
+ phi void ()* [ @f1, %entry ], [ @f2, %dead ]
+ select i1 true, void ()* @f3, void ()* @f4
+ store void ()* @f5, void ()** %x
+ call void @f6()
+ call void (void ()*, void ()*)* bitcast (void ()* @f7 to void (void ()*, void ()*)*)(void ()* @f8, void ()* @f9)
+ invoke void @f10() to label %exit unwind label %unwind
+
+exit:
+ ret void ()* @f11
+
+unwind:
+ %res = landingpad { i8*, i32 } personality i32 (...)* @__gxx_personality_v0
+ cleanup
+ resume { i8*, i32 } { i8* bitcast (void ()* @f12 to i8*), i32 42 }
+}
+
+@g = global void ()* @f1
+@g1 = global [4 x void ()*] [void ()* @f2, void ()* @f3, void ()* @f4, void ()* @f5]
+@g2 = global {i8, void ()*, i8} {i8 1, void ()* @f6, i8 2}
+@h = constant void ()* @f7
+
+define void @test2() {
+; CHECK-LABEL: Call edges in function: test2
+; CHECK-NEXT: -> f7
+; CHECK-NEXT: -> f6
+; CHECK-NEXT: -> f5
+; CHECK-NEXT: -> f4
+; CHECK-NEXT: -> f3
+; CHECK-NEXT: -> f2
+; CHECK-NEXT: -> f1
+; CHECK-NOT: ->
+
+ load i8** bitcast (void ()** @g to i8**)
+ load i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**)
+ load i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**)
+ load i8** bitcast (void ()** @h to i8**)
+ ret void
+}
diff --git a/tools/opt/NewPMDriver.cpp b/tools/opt/NewPMDriver.cpp
index c25bd73cbc..fc4a1bf76c 100644
--- a/tools/opt/NewPMDriver.cpp
+++ b/tools/opt/NewPMDriver.cpp
@@ -16,6 +16,7 @@
 #include "NewPMDriver.h"
 #include "Passes.h"
 #include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/LazyCallGraph.h"
 #include "llvm/Bitcode/BitcodeWriterPass.h"
 #include "llvm/IR/IRPrintingPasses.h"
 #include "llvm/IR/LLVMContext.h"
@@ -35,6 +36,10 @@ bool llvm::runPassPipeline(StringRef Arg0, LLVMContext &Context, Module &M,
   FunctionAnalysisManager FAM;
   ModuleAnalysisManager MAM;
 
+ // FIXME: Lift this registration of analysis passes into a .def file adjacent
+ // to the one used to associate names with passes.
+ MAM.registerPass(LazyCallGraphAnalysis());
+
   // Cross register the analysis managers through their proxies.
   MAM.registerPass(FunctionAnalysisManagerModuleProxy(FAM));
   FAM.registerPass(ModuleAnalysisManagerFunctionProxy(MAM));
diff --git a/tools/opt/Passes.cpp b/tools/opt/Passes.cpp
index 36fe6ad126..ca143042f6 100644
--- a/tools/opt/Passes.cpp
+++ b/tools/opt/Passes.cpp
@@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "Passes.h"
+#include "llvm/Analysis/LazyCallGraph.h"
 #include "llvm/IR/IRPrintingPasses.h"
 #include "llvm/IR/PassManager.h"
 #include "llvm/IR/Verifier.h"
@@ -43,6 +44,7 @@ struct NoOpFunctionPass {
 static bool isModulePassName(StringRef Name) {
   if (Name == "no-op-module") return true;
   if (Name == "print") return true;
+ if (Name == "print-cg") return true;
   return false;
 }
 
@@ -63,6 +65,10 @@ static bool parseModulePassName(ModulePassManager &MPM, StringRef Name) {
     MPM.addPass(PrintModulePass(dbgs()));
     return true;
   }
+ if (Name == "print-cg") {
+ MPM.addPass(LazyCallGraphPrinterPass(dbgs()));
+ return true;
+ }
   return false;
 }
 
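(Usage note, not part of the patch.) With the analysis registered in NewPMDriver.cpp and the "print-cg" name wired up here in Passes.cpp, the whole change can be exercised end to end exactly as the new test does: running opt -disable-output -passes=print-cg over test/Analysis/LazyCallGraph/basic.ll prints the call edges that FileCheck then verifies.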