-rw-r--r--  include/llvm/Analysis/ScalarEvolutionExpander.h | 4
-rw-r--r--  include/llvm/CodeGen/CommandFlags.h | 228
-rw-r--r--  include/llvm/InitializePasses.h | 1
-rw-r--r--  include/llvm/Target/TargetMachine.h | 6
-rw-r--r--  include/llvm/Target/TargetTransformImpl.h | 54
-rw-r--r--  include/llvm/TargetTransformInfo.h | 126
-rw-r--r--  include/llvm/Transforms/Scalar.h | 7
-rw-r--r--  lib/Analysis/ScalarEvolutionExpander.cpp | 12
-rw-r--r--  lib/CodeGen/Passes.cpp | 4
-rw-r--r--  lib/Target/ARM/ARMTargetMachine.cpp | 6
-rw-r--r--  lib/Target/ARM/ARMTargetMachine.h | 18
-rw-r--r--  lib/Target/CMakeLists.txt | 1
-rw-r--r--  lib/Target/CellSPU/SPUTargetMachine.cpp | 3
-rw-r--r--  lib/Target/CellSPU/SPUTargetMachine.h | 9
-rw-r--r--  lib/Target/Hexagon/HexagonTargetMachine.cpp | 5
-rw-r--r--  lib/Target/Hexagon/HexagonTargetMachine.h | 11
-rw-r--r--  lib/Target/MBlaze/MBlazeTargetMachine.cpp | 2
-rw-r--r--  lib/Target/MBlaze/MBlazeTargetMachine.h | 7
-rw-r--r--  lib/Target/MSP430/MSP430TargetMachine.cpp | 2
-rw-r--r--  lib/Target/MSP430/MSP430TargetMachine.h | 10
-rw-r--r--  lib/Target/Mips/MipsTargetMachine.cpp | 2
-rw-r--r--  lib/Target/Mips/MipsTargetMachine.h | 3
-rw-r--r--  lib/Target/NVPTX/NVPTXTargetMachine.cpp | 3
-rw-r--r--  lib/Target/NVPTX/NVPTXTargetMachine.h | 10
-rw-r--r--  lib/Target/PowerPC/PPCTargetMachine.cpp | 3
-rw-r--r--  lib/Target/PowerPC/PPCTargetMachine.h | 9
-rw-r--r--  lib/Target/Sparc/SparcTargetMachine.cpp | 2
-rw-r--r--  lib/Target/Sparc/SparcTargetMachine.h | 9
-rw-r--r--  lib/Target/Target.cpp | 1
-rw-r--r--  lib/Target/TargetTransformImpl.cpp | 43
-rw-r--r--  lib/Target/X86/X86TargetMachine.cpp | 6
-rw-r--r--  lib/Target/X86/X86TargetMachine.h | 17
-rw-r--r--  lib/Target/XCore/XCoreTargetMachine.cpp | 2
-rw-r--r--  lib/Target/XCore/XCoreTargetMachine.h | 9
-rw-r--r--  lib/Transforms/Scalar/LoopStrengthReduce.cpp | 134
-rw-r--r--  lib/Transforms/Utils/LowerInvoke.cpp | 32
-rw-r--r--  lib/VMCore/CMakeLists.txt | 1
-rw-r--r--  lib/VMCore/TargetTransformInfo.cpp | 27
-rw-r--r--  tools/llc/llc.cpp | 215
-rw-r--r--  tools/opt/CMakeLists.txt | 2
-rw-r--r--  tools/opt/LLVMBuild.txt | 2
-rw-r--r--  tools/opt/Makefile | 2
-rw-r--r--  tools/opt/opt.cpp | 78
43 files changed, 811 insertions, 317 deletions
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 3f8f149cb4..3ab9c8256b 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -22,7 +22,7 @@
#include <set>
namespace llvm {
- class TargetLowering;
+ class ScalarTargetTransformInfo;
/// Return true if the given expression is safe to expand in the sense that
/// all materialized values are safe to speculate.
@@ -129,7 +129,7 @@ namespace llvm {
/// representative. Return the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
SmallVectorImpl<WeakVH> &DeadInsts,
- const TargetLowering *TLI = NULL);
+ const ScalarTargetTransformInfo *STTI = NULL);
/// expandCodeFor - Insert code to directly compute the specified SCEV
/// expression into the program. The inserted code is inserted into the
diff --git a/include/llvm/CodeGen/CommandFlags.h b/include/llvm/CodeGen/CommandFlags.h
new file mode 100644
index 0000000000..90ee234244
--- /dev/null
+++ b/include/llvm/CodeGen/CommandFlags.h
@@ -0,0 +1,228 @@
+//===-- CommandFlags.h - Command Line Flags Interface -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains codegen-specific flags that are shared between different
+// command line tools. The tools "llc" and "opt" both use this file to prevent
+// flag duplication.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_COMMAND_LINE_FLAGS_H
+#define LLVM_CODEGEN_COMMAND_LINE_FLAGS_H
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <string>
+using namespace llvm;
+
+cl::opt<std::string>
+MArch("march", cl::desc("Architecture to generate code for (see --version)"));
+
+cl::opt<std::string>
+MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+cl::opt<Reloc::Model>
+RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+
+cl::opt<llvm::CodeModel::Model>
+CMModel("code-model",
+ cl::desc("Choose code model"),
+ cl::init(CodeModel::Default),
+ cl::values(clEnumValN(CodeModel::Default, "default",
+ "Target default code model"),
+ clEnumValN(CodeModel::Small, "small",
+ "Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ "Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ "Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ "Large code model"),
+ clEnumValEnd));
+
+cl::opt<bool>
+RelaxAll("mc-relax-all",
+ cl::desc("When used with filetype=obj, "
+ "relax all fixups in the emitted object file"));
+
+cl::opt<TargetMachine::CodeGenFileType>
+FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
+ cl::desc("Choose a file type (not all types are supported by all targets):"),
+ cl::values(
+ clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
+ "Emit an assembly ('.s') file"),
+ clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
+ "Emit a native object ('.o') file"),
+ clEnumValN(TargetMachine::CGFT_Null, "null",
+ "Emit nothing, for performance testing"),
+ clEnumValEnd));
+
+cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
+ cl::desc("Do not use .loc entries"));
+
+cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
+ cl::desc("Do not use .cfi_* directives"));
+
+cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
+ cl::desc("Use .file directives with an explicit directory."));
+
+cl::opt<bool>
+DisableRedZone("disable-red-zone",
+ cl::desc("Do not emit code that uses the red zone."),
+ cl::init(false));
+
+cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+ cl::desc("Enable less precise MAD instructions to be generated"),
+ cl::init(false));
+
+cl::opt<bool>
+DisableFPElim("disable-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization"),
+ cl::init(false));
+
+cl::opt<bool>
+DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+ cl::desc("Enable optimizations that may decrease FP precision"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+ cl::desc("Enable FP math optimizations that assume no +-Infs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+ cl::desc("Enable FP math optimizations that assume no NaNs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
+ cl::Hidden,
+ cl::desc("Force codegen to assume rounding mode can change dynamically"),
+ cl::init(false));
+
+cl::opt<bool>
+GenerateSoftFloatCalls("soft-float",
+ cl::desc("Generate software floating point library calls"),
+ cl::init(false));
+
+cl::opt<llvm::FloatABI::ABIType>
+FloatABIForCalls("float-abi",
+ cl::desc("Choose float ABI type"),
+ cl::init(FloatABI::Default),
+ cl::values(
+ clEnumValN(FloatABI::Default, "default",
+ "Target default float ABI type"),
+ clEnumValN(FloatABI::Soft, "soft",
+ "Soft float ABI (implied by -soft-float)"),
+ clEnumValN(FloatABI::Hard, "hard",
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
+
+cl::opt<llvm::FPOpFusion::FPOpFusionMode>
+FuseFPOps("fp-contract",
+          cl::desc("Enable aggressive formation of fused FP ops"),
+          cl::init(FPOpFusion::Standard),
+          cl::values(
+            clEnumValN(FPOpFusion::Fast, "fast",
+                       "Fuse FP ops whenever profitable"),
+            clEnumValN(FPOpFusion::Standard, "on",
+                       "Only fuse 'blessed' FP ops."),
+            clEnumValN(FPOpFusion::Strict, "off",
+                       "Only fuse FP ops when the result won't be affected."),
+            clEnumValEnd));
+
+cl::opt<bool>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+ cl::desc("Don't place zero-initialized symbols into bss section"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableGuaranteedTailCallOpt("tailcallopt",
+ cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
+ cl::init(false));
+
+cl::opt<bool>
+DisableTailCalls("disable-tail-calls",
+ cl::desc("Never emit tail calls"),
+ cl::init(false));
+
+cl::opt<unsigned>
+OverrideStackAlignment("stack-alignment",
+ cl::desc("Override default stack alignment"),
+ cl::init(0));
+
+cl::opt<bool>
+EnableRealignStack("realign-stack",
+ cl::desc("Realign stack if needed"),
+ cl::init(true));
+
+cl::opt<std::string>
+TrapFuncName("trap-func", cl::Hidden,
+ cl::desc("Emit a call to trap function rather than a trap instruction"),
+ cl::init(""));
+
+cl::opt<bool>
+EnablePIE("enable-pie",
+ cl::desc("Assume the creation of a position independent executable."),
+ cl::init(false));
+
+cl::opt<bool>
+SegmentedStacks("segmented-stacks",
+ cl::desc("Use segmented stacks if possible."),
+ cl::init(false));
+
+cl::opt<bool>
+UseInitArray("use-init-array",
+ cl::desc("Use .init_array instead of .ctors."),
+ cl::init(false));
+
+cl::opt<std::string> StopAfter("stop-after",
+ cl::desc("Stop compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+cl::opt<std::string> StartAfter("start-after",
+ cl::desc("Resume compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+
+cl::opt<unsigned>
+SSPBufferSize("stack-protector-buffer-size", cl::init(8),
+ cl::desc("Lower bound for a buffer to be considered for "
+ "stack protection"));
+#endif
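
For context, here is a minimal sketch of how a tool consumes these shared flags once it includes the header. This is hypothetical tool code, not part of this patch; it assumes SubtargetFeatures from llvm/MC/SubtargetFeature.h to fold the -mattr list into a single feature string, the same pattern llc uses before creating a TargetMachine.

#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/MC/SubtargetFeature.h"
#include <string>

// Fold the comma-separated -mattr values into one feature string.
static std::string computeFeatureString() {
  SubtargetFeatures Features;
  for (unsigned i = 0; i != MAttrs.size(); ++i)
    Features.AddFeature(MAttrs[i]);
  return Features.getString();
}

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv, "example tool using shared codegen flags\n");
  // After parsing, MArch, MCPU, RelocModel, CMModel, FileType, etc. are
  // populated and can be handed to TargetRegistry/createTargetMachine.
  std::string FS = computeFeatureString();
  (void)FS;
  return 0;
}

Because both llc and opt include the same header, a flag such as -mcpu or -fp-contract behaves identically in either tool, which is the duplication this file removes.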
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index 409246cf14..067d8dae1d 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -247,6 +247,7 @@ void initializeTailCallElimPass(PassRegistry&);
void initializeTailDuplicatePassPass(PassRegistry&);
void initializeTargetPassConfigPass(PassRegistry&);
void initializeDataLayoutPass(PassRegistry&);
+void initializeTargetTransformInfoPass(PassRegistry&);
void initializeTargetLibraryInfoPass(PassRegistry&);
void initializeTwoAddressInstructionPassPass(PassRegistry&);
void initializeTypeBasedAliasAnalysisPass(PassRegistry&);
diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h
index 988916f9d9..18e589e2bc 100644
--- a/include/llvm/Target/TargetMachine.h
+++ b/include/llvm/Target/TargetMachine.h
@@ -17,6 +17,8 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <string>
@@ -107,6 +109,10 @@ public:
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
virtual const DataLayout *getDataLayout() const { return 0; }
+ virtual const ScalarTargetTransformInfo*
+ getScalarTargetTransformInfo() const { return 0; }
+ virtual const VectorTargetTransformInfo*
+ getVectorTargetTransformInfo() const { return 0; }
/// getMCAsmInfo - Return target specific asm information.
///
diff --git a/include/llvm/Target/TargetTransformImpl.h b/include/llvm/Target/TargetTransformImpl.h
new file mode 100644
index 0000000000..e240cdf64a
--- /dev/null
+++ b/include/llvm/Target/TargetTransformImpl.h
@@ -0,0 +1,54 @@
+//=- llvm/Target/TargetTransformImpl.h - Target Loop Trans Info----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the target-specific implementations of the
+// TargetTransform interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H
+#define LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H
+
+#include "llvm/TargetTransformInfo.h"
+
+namespace llvm {
+
+class TargetLowering;
+
+/// ScalarTargetTransformInfo - This is a default implementation for the
+/// ScalarTargetTransformInfo interface. Different targets can implement
+/// this interface differently.
+class ScalarTargetTransformImpl : public ScalarTargetTransformInfo {
+private:
+ const TargetLowering *TLI;
+
+public:
+ /// Ctor
+ explicit ScalarTargetTransformImpl(const TargetLowering *TL): TLI(TL) {}
+
+ virtual bool isLegalAddImmediate(int64_t imm) const;
+
+ virtual bool isLegalICmpImmediate(int64_t imm) const;
+
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
+
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+
+ virtual bool isTypeLegal(Type *Ty) const;
+
+ virtual unsigned getJumpBufAlignment() const;
+
+ virtual unsigned getJumpBufSize() const;
+};
+
+class VectorTargetTransformImpl : public VectorTargetTransformInfo { };
+
+} // end llvm namespace
+
+#endif
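
As the header comment notes, different targets can implement this interface differently. A hypothetical target could subclass the default, TargetLowering-backed implementation and refine individual hooks; an illustrative sketch follows (the class name and the 16-bit rule are invented for illustration, not taken from any in-tree target):

#include "llvm/Target/TargetTransformImpl.h"

namespace llvm {
// Hypothetical target refinement: keep the TargetLowering-backed defaults
// but override a single query.
class MyTargetScalarTTI : public ScalarTargetTransformImpl {
public:
  explicit MyTargetScalarTTI(const TargetLowering *TL)
      : ScalarTargetTransformImpl(TL) {}

  // Pretend this target folds any add immediate that fits in 16 signed bits.
  virtual bool isLegalAddImmediate(int64_t Imm) const {
    return Imm >= -32768 && Imm <= 32767;
  }
};
} // end namespace llvm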
diff --git a/include/llvm/TargetTransformInfo.h b/include/llvm/TargetTransformInfo.h
new file mode 100644
index 0000000000..e1be3ba5a1
--- /dev/null
+++ b/include/llvm/TargetTransformInfo.h
@@ -0,0 +1,126 @@
+//===- llvm/TargetTransformInfo.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass exposes codegen information to IR-level passes. Every
+// transformation that uses codegen information is broken into three parts:
+// 1. The IR-level analysis pass.
+// 2. The IR-level transformation interface which provides the needed
+// information.
+// 3. Codegen-level implementation which uses target-specific hooks.
+//
+// This file defines #2, which is the interface that IR-level transformations
+// use for querying the codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_TARGET_TRANSFORM_INTERFACE
+#define LLVM_TRANSFORMS_TARGET_TRANSFORM_INTERFACE
+
+#include "llvm/Pass.h"
+#include "llvm/AddressingMode.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Type.h"
+
+namespace llvm {
+
+class ScalarTargetTransformInfo;
+class VectorTargetTransformInfo;
+
+/// TargetTransformInfo - This pass provides access to the codegen
+/// interfaces that are needed for IR-level transformations.
+class TargetTransformInfo : public ImmutablePass {
+private:
+ const ScalarTargetTransformInfo *STTI;
+ const VectorTargetTransformInfo *VTTI;
+public:
+ /// Default ctor.
+ ///
+ /// @note This has to exist, because this is a pass, but it should never be
+ /// used.
+ TargetTransformInfo();
+
+ explicit TargetTransformInfo(const ScalarTargetTransformInfo* S,
+ const VectorTargetTransformInfo *V)
+ : ImmutablePass(ID), STTI(S), VTTI(V) {
+ initializeTargetTransformInfoPass(*PassRegistry::getPassRegistry());
+ }
+
+ TargetTransformInfo(const TargetTransformInfo &T) :
+ ImmutablePass(ID), STTI(T.STTI), VTTI(T.VTTI) { }
+
+ const ScalarTargetTransformInfo* getScalarTargetTransformInfo() {
+ return STTI;
+ }
+ const VectorTargetTransformInfo* getVectorTargetTransformInfo() {
+ return VTTI;
+ }
+
+ /// Pass identification, replacement for typeid.
+ static char ID;
+};
+
+// ---------------------------------------------------------------------------//
+// The classes below are inherited and implemented by target-specific classes
+// in the codegen.
+// ---------------------------------------------------------------------------//
+
+/// ScalarTargetTransformInfo - This interface is used by IR-level passes
+/// that need target-dependent information for generic scalar transformations.
+/// LSR and LowerInvoke use this interface.
+class ScalarTargetTransformInfo {
+public:
+ /// isLegalAddImmediate - Return true if the specified immediate is legal
+ /// add immediate, that is the target has add instructions which can add
+ /// a register with the immediate without having to materialize the
+ /// immediate into a register.
+ virtual bool isLegalAddImmediate(int64_t) const {
+ return false;
+ }
+ /// isLegalICmpImmediate - Return true if the specified immediate is legal
+ /// icmp immediate, that is the target has icmp instructions which can compare
+ /// a register against the immediate without having to materialize the
+ /// immediate into a register.
+ virtual bool isLegalICmpImmediate(int64_t) const {
+ return false;
+ }
+ /// isLegalAddressingMode - Return true if the addressing mode represented by
+ /// AM is legal for this target, for a load/store of the specified type.
+ /// The type may be VoidTy, in which case only return true if the addressing
+ /// mode is legal for a load/store of any legal type.
+ /// TODO: Handle pre/postinc as well.
+ virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const {
+ return false;
+ }
+ /// isTruncateFree - Return true if it's free to truncate a value of
+ /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
+ /// register EAX to i16 by referencing its sub-register AX.
+ virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
+ return false;
+ }
+ /// Is this type legal.
+ virtual bool isTypeLegal(Type *Ty) const {
+ return false;
+ }
+ /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes
+ virtual unsigned getJumpBufAlignment() const {
+ return 0;
+ }
+ /// getJumpBufSize - returns the target's jmp_buf size in bytes.
+ virtual unsigned getJumpBufSize() const {
+ return 0;
+ }
+};
+
+class VectorTargetTransformInfo {
+ // TODO: define an interface for VectorTargetTransformInfo.
+};
+
+} // End llvm namespace
+
+#endif
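
An IR-level pass picks this analysis up from the pass manager and falls back to conservative answers when no target machine registered one. A minimal sketch of a hypothetical FunctionPass doing so (the getAnalysisIfAvailable usage is an assumed pattern, and the pass name and the isLegalAddImmediate(42) query are illustrative only):

#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/TargetTransformInfo.h"
using namespace llvm;

namespace {
struct STTIQueryExample : public FunctionPass {
  static char ID;
  STTIQueryExample() : FunctionPass(ID) {}

  virtual bool runOnFunction(Function &F) {
    const ScalarTargetTransformInfo *STTI = 0;
    if (TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>())
      STTI = TTI->getScalarTargetTransformInfo();
    // With no target info available, conservatively assume the immediate
    // cannot be folded, matching the interface's default answers above.
    bool Foldable = STTI && STTI->isLegalAddImmediate(42);
    (void)Foldable; (void)F;
    return false;
  }
};
}
char STTIQueryExample::ID = 0;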
diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h
index a5d8eed746..3b665bf4b6 100644
--- a/include/llvm/Transforms/Scalar.h
+++ b/include/llvm/Transforms/Scalar.h
@@ -119,7 +119,7 @@ Pass *createLICMPass();
// optional parameter used to consult the target machine whether certain
// transformations are profitable.
//
-Pass *createLoopStrengthReducePass(const TargetLowering *TLI = 0);
+Pass *createLoopStrengthReducePass();
Pass *createGlobalMergePass(const TargetLowering *TLI = 0);
@@ -249,9 +249,8 @@ extern char &LowerSwitchID;
// purpose "my LLVM-to-LLVM pass doesn't support the invoke instruction yet"
// lowering pass.
//
-FunctionPass *createLowerInvokePass(const TargetLowering *TLI = 0);
-FunctionPass *createLowerInvokePass(const TargetLowering *TLI,
- bool useExpensiveEHSupport);
+FunctionPass *createLowerInvokePass();
+FunctionPass *createLowerInvokePass(bool useExpensiveEHSupport);
extern char &LowerInvokePassID;
//===----------------------------------------------------------------------===//
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 5e05f4c8ca..5c2a49e767 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -19,8 +19,8 @@
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/DataLayout.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/TargetTransformInfo.h"
using namespace llvm;
@@ -1599,15 +1599,15 @@ static bool width_descending(Value *lhs, Value *rhs) {
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
- SmallVectorImpl<WeakVH> &DeadInsts,
- const TargetLowering *TLI) {
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ const ScalarTargetTransformInfo *STTI) {
// Find integer phis in order of increasing width.
SmallVector<PHINode*, 8> Phis;
for (BasicBlock::iterator I = L->getHeader()->begin();
PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
Phis.push_back(Phi);
}
- if (TLI)
+ if (STTI)
std::sort(Phis.begin(), Phis.end(), width_descending);
unsigned NumElim = 0;
@@ -1624,8 +1624,8 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
if (!OrigPhiRef) {
OrigPhiRef = Phi;
- if (Phi->getType()->isIntegerTy() && TLI
- && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
+ if (Phi->getType()->isIntegerTy() && STTI &&
+ STTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
// This phi can be freely truncated to the narrowest phi type. Map the
// truncated expression to it so it will be reused for narrow types.
const SCEV *TruncExpr =
diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp
index 4ea21d4ff7..abd62efc02 100644
--- a/lib/CodeGen/Passes.cpp
+++ b/lib/CodeGen/Passes.cpp
@@ -359,7 +359,7 @@ void TargetPassConfig::addIRPasses() {
// Run loop strength reduction before anything else.
if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
- addPass(createLoopStrengthReducePass(getTargetLowering()));
+ addPass(createLoopStrengthReducePass());
if (PrintLSR)
addPass(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
}
@@ -389,7 +389,7 @@ void TargetPassConfig::addPassesToHandleExceptions() {
addPass(createDwarfEHPass(TM));
break;
case ExceptionHandling::None:
- addPass(createLowerInvokePass(TM->getTargetLowering()));
+ addPass(createLowerInvokePass());
// The lower invoke pass may create unreachable code. Remove it.
addPass(createUnreachableBlockEliminationPass());
diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index 6fdf873a8f..c51ae24c50 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -71,7 +71,8 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT,
ELFWriterInfo(*this),
TLInfo(*this),
TSInfo(*this),
- FrameLowering(Subtarget) {
+ FrameLowering(Subtarget),
+ STTI(&TLInfo) {
if (!Subtarget.hasARMOps())
report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not "
"support ARM mode execution!");
@@ -104,7 +105,8 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
TSInfo(*this),
FrameLowering(Subtarget.hasThumb2()
? new ARMFrameLowering(Subtarget)
- : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) {
+ : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)),
+ STTI(&TLInfo){
}
namespace {
diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h
index f91e5bbd47..7a65a7f062 100644
--- a/lib/Target/ARM/ARMTargetMachine.h
+++ b/lib/Target/ARM/ARMTargetMachine.h
@@ -25,6 +25,7 @@
#include "Thumb1FrameLowering.h"
#include "Thumb2InstrInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/DataLayout.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/ADT/OwningPtr.h"
@@ -67,6 +68,8 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
ARMTargetLowering TLInfo;
ARMSelectionDAGInfo TSInfo;
ARMFrameLowering FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
ARMTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
@@ -88,7 +91,12 @@ class ARMTargetMachine : public ARMBaseTargetMachine {
virtual const ARMFrameLowering *getFrameLowering() const {
return &FrameLowering;
}
-
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; }
virtual const DataLayout *getDataLayout() const { return &DL; }
virtual const ARMELFWriterInfo *getELFWriterInfo() const {
@@ -110,6 +118,8 @@ class ThumbTargetMachine : public ARMBaseTargetMachine {
ARMSelectionDAGInfo TSInfo;
// Either Thumb1FrameLowering or ARMFrameLowering.
OwningPtr<ARMFrameLowering> FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
ThumbTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS,
@@ -138,6 +148,12 @@ public:
virtual const ARMFrameLowering *getFrameLowering() const {
return FrameLowering.get();
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
virtual const DataLayout *getDataLayout() const { return &DL; }
virtual const ARMELFWriterInfo *getELFWriterInfo() const {
return Subtarget.isTargetELF() ? &ELFWriterInfo : 0;
diff --git a/lib/Target/CMakeLists.txt b/lib/Target/CMakeLists.txt
index 096ef001ed..48df199437 100644
--- a/lib/Target/CMakeLists.txt
+++ b/lib/Target/CMakeLists.txt
@@ -11,6 +11,7 @@ add_llvm_library(LLVMTarget
TargetMachineC.cpp
TargetRegisterInfo.cpp
TargetSubtargetInfo.cpp
+ TargetTransformImpl.cpp
)
foreach(t ${LLVM_TARGETS_TO_BUILD})
diff --git a/lib/Target/CellSPU/SPUTargetMachine.cpp b/lib/Target/CellSPU/SPUTargetMachine.cpp
index a37ad7f85a..e92ad01e1d 100644
--- a/lib/Target/CellSPU/SPUTargetMachine.cpp
+++ b/lib/Target/CellSPU/SPUTargetMachine.cpp
@@ -43,7 +43,8 @@ SPUTargetMachine::SPUTargetMachine(const Target &T, StringRef TT,
FrameLowering(Subtarget),
TLInfo(*this),
TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ InstrItins(Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo){
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/CellSPU/SPUTargetMachine.h b/lib/Target/CellSPU/SPUTargetMachine.h
index 58699a30d2..7f53ea6fbe 100644
--- a/lib/Target/CellSPU/SPUTargetMachine.h
+++ b/lib/Target/CellSPU/SPUTargetMachine.h
@@ -20,6 +20,7 @@
#include "SPUSelectionDAGInfo.h"
#include "SPUFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/DataLayout.h"
namespace llvm {
@@ -34,6 +35,8 @@ class SPUTargetMachine : public LLVMTargetMachine {
SPUTargetLowering TLInfo;
SPUSelectionDAGInfo TSInfo;
InstrItineraryData InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
SPUTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -77,6 +80,12 @@ public:
virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp
index d198a3f45b..353542a809 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.cpp
+++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp
@@ -74,7 +74,8 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT,
Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this),
TSInfo(*this),
FrameLowering(Subtarget),
- InstrItins(&Subtarget.getInstrItineraryData()) {
+ InstrItins(&Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo) {
setMCUseCFI(false);
}
@@ -87,7 +88,7 @@ bool HexagonTargetMachine::addPassesForOptimizations(PassManagerBase &PM) {
PM.add(createDeadCodeEliminationPass());
PM.add(createConstantPropagationPass());
PM.add(createLoopUnrollPass());
- PM.add(createLoopStrengthReducePass(getTargetLowering()));
+ PM.add(createLoopStrengthReducePass());
return true;
}
diff --git a/lib/Target/Hexagon/HexagonTargetMachine.h b/lib/Target/Hexagon/HexagonTargetMachine.h
index ade5b3e9c1..7a4215c119 100644
--- a/lib/Target/Hexagon/HexagonTargetMachine.h
+++ b/lib/Target/Hexagon/HexagonTargetMachine.h
@@ -21,6 +21,7 @@
#include "HexagonFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/DataLayout.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -34,6 +35,8 @@ class HexagonTargetMachine : public LLVMTargetMachine {
HexagonSelectionDAGInfo TSInfo;
HexagonFrameLowering FrameLowering;
const InstrItineraryData* InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
HexagonTargetMachine(const Target &T, StringRef TT,StringRef CPU,
@@ -68,6 +71,14 @@ public:
return &TSInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
+
virtual const DataLayout *getDataLayout() const { return &DL; }
static unsigned getModuleMatchQuality(const Module &M);
diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/lib/Target/MBlaze/MBlazeTargetMachine.cpp
index 1f2cf6d9d2..cb5f46062d 100644
--- a/lib/Target/MBlaze/MBlazeTargetMachine.cpp
+++ b/lib/Target/MBlaze/MBlazeTargetMachine.cpp
@@ -42,7 +42,7 @@ MBlazeTargetMachine(const Target &T, StringRef TT,
InstrInfo(*this),
FrameLowering(Subtarget),
TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ InstrItins(Subtarget.getInstrItineraryData()), STTI(&TLInfo) {
}
namespace {
diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.h b/lib/Target/MBlaze/MBlazeTargetMachine.h
index d949e54f0d..34648b9b9a 100644
--- a/lib/Target/MBlaze/MBlazeTargetMachine.h
+++ b/lib/Target/MBlaze/MBlazeTargetMachine.h
@@ -25,6 +25,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
class formatted_raw_ostream;
@@ -39,6 +40,8 @@ namespace llvm {
MBlazeIntrinsicInfo IntrinsicInfo;
MBlazeELFWriterInfo ELFWriterInfo;
InstrItineraryData InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
MBlazeTargetMachine(const Target &T, StringRef TT,
@@ -77,6 +80,10 @@ namespace llvm {
virtual const MBlazeELFWriterInfo *getELFWriterInfo() const {
return &ELFWriterInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const
+ { return &STTI; }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const
+ { return &VTTI; }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp
index da5899b86d..29ea681216 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.cpp
+++ b/lib/Target/MSP430/MSP430TargetMachine.cpp
@@ -36,7 +36,7 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T,
// FIXME: Check DataLayout string.
DL("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"),
InstrInfo(*this), TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) { }
+ FrameLowering(Subtarget), STTI(&TLInfo) { }
namespace {
/// MSP430 Code Generator Pass Configuration Options.
diff --git a/lib/Target/MSP430/MSP430TargetMachine.h b/lib/Target/MSP430/MSP430TargetMachine.h
index ba3cef1f2a..186172ede4 100644
--- a/lib/Target/MSP430/MSP430TargetMachine.h
+++ b/lib/Target/MSP430/MSP430TargetMachine.h
@@ -24,6 +24,7 @@
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -36,6 +37,8 @@ class MSP430TargetMachine : public LLVMTargetMachine {
MSP430TargetLowering TLInfo;
MSP430SelectionDAGInfo TSInfo;
MSP430FrameLowering FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
MSP430TargetMachine(const Target &T, StringRef TT,
@@ -61,7 +64,12 @@ public:
virtual const MSP430SelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
-
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
}; // MSP430TargetMachine.
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp
index 9c196dd82f..4c3981d9f6 100644
--- a/lib/Target/Mips/MipsTargetMachine.cpp
+++ b/lib/Target/Mips/MipsTargetMachine.cpp
@@ -53,7 +53,7 @@ MipsTargetMachine(const Target &T, StringRef TT,
InstrInfo(MipsInstrInfo::create(*this)),
FrameLowering(MipsFrameLowering::create(*this, Subtarget)),
TLInfo(*this), TSInfo(*this), JITInfo(),
- ELFWriterInfo(false, isLittle) {
+ ELFWriterInfo(false, isLittle), STTI(&TLInfo) {
}
void MipsebTargetMachine::anchor() { }
diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h
index 3a01828dd1..55524e89ef 100644
--- a/lib/Target/Mips/MipsTargetMachine.h
+++ b/lib/Target/Mips/MipsTargetMachine.h
@@ -24,6 +24,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
class formatted_raw_ostream;
@@ -38,6 +39,8 @@ class MipsTargetMachine : public LLVMTargetMachine {
MipsSelectionDAGInfo TSInfo;
MipsJITInfo JITInfo;
MipsELFWriterInfo ELFWriterInfo;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
MipsTargetMachine(const Target &T, StringRef TT,
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index dbfc660687..7519b4a083 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -72,7 +72,8 @@ NVPTXTargetMachine::NVPTXTargetMachine(const Target &T,
: LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
Subtarget(TT, CPU, FS, is64bit),
DL(Subtarget.getDataLayout()),
- InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit)
+ InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit),
+ STTI(&TLInfo)
/*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ {
}
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.h b/lib/Target/NVPTX/NVPTXTargetMachine.h
index d58a076858..11bc9d4fa6 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.h
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.h
@@ -25,6 +25,7 @@
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -44,6 +45,9 @@ class NVPTXTargetMachine : public LLVMTargetMachine {
// Hold Strings that can be free'd all together with NVPTXTargetMachine
ManagedStringPool ManagedStrPool;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
+
//bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level,
// bool DisableVerify, MCContext *&OutCtx);
@@ -72,6 +76,12 @@ public:
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
return &TSInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
//virtual bool addInstSelector(PassManagerBase &PM,
// CodeGenOpt::Level OptLevel);
diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp
index 5f39b8d2c2..b861383475 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.cpp
+++ b/lib/Target/PowerPC/PPCTargetMachine.cpp
@@ -43,7 +43,8 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT,
DL(Subtarget.getDataLayoutString()), InstrInfo(*this),
FrameLowering(Subtarget), JITInfo(*this, is64Bit),
TLInfo(*this), TSInfo(*this),
- InstrItins(Subtarget.getInstrItineraryData()) {
+ InstrItins(Subtarget.getInstrItineraryData()),
+ STTI(&TLInfo){
// The binutils for the BG/P are too old for CFI.
if (Subtarget.isBGP())
diff --git a/lib/Target/PowerPC/PPCTargetMachine.h b/lib/Target/PowerPC/PPCTargetMachine.h
index 02d69fd15d..c168433a71 100644
--- a/lib/Target/PowerPC/PPCTargetMachine.h
+++ b/lib/Target/PowerPC/PPCTargetMachine.h
@@ -21,6 +21,7 @@
#include "PPCISelLowering.h"
#include "PPCSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/DataLayout.h"
namespace llvm {
@@ -36,6 +37,8 @@ class PPCTargetMachine : public LLVMTargetMachine {
PPCTargetLowering TLInfo;
PPCSelectionDAGInfo TSInfo;
InstrItineraryData InstrItins;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
PPCTargetMachine(const Target &T, StringRef TT,
@@ -63,6 +66,12 @@ public:
virtual const InstrItineraryData *getInstrItineraryData() const {
return &InstrItins;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
// Pass Pipeline Configuration
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp
index 8b7559c2f9..1d8cc771dd 100644
--- a/lib/Target/Sparc/SparcTargetMachine.cpp
+++ b/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -36,7 +36,7 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT,
DL(Subtarget.getDataLayout()),
InstrInfo(Subtarget),
TLInfo(*this), TSInfo(*this),
- FrameLowering(Subtarget) {
+ FrameLowering(Subtarget),STTI(&TLInfo) {
}
namespace {
diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h
index c9f2d68eb1..0fbe2d7cda 100644
--- a/lib/Target/Sparc/SparcTargetMachine.h
+++ b/lib/Target/Sparc/SparcTargetMachine.h
@@ -22,6 +22,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -32,6 +33,8 @@ class SparcTargetMachine : public LLVMTargetMachine {
SparcTargetLowering TLInfo;
SparcSelectionDAGInfo TSInfo;
SparcFrameLowering FrameLowering;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
SparcTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -52,6 +55,12 @@ public:
virtual const SparcSelectionDAGInfo* getSelectionDAGInfo() const {
return &TSInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp
index b0b5c875b8..219cbf1afc 100644
--- a/lib/Target/Target.cpp
+++ b/lib/Target/Target.cpp
@@ -26,6 +26,7 @@ using namespace llvm;
void llvm::initializeTarget(PassRegistry &Registry) {
initializeDataLayoutPass(Registry);
initializeTargetLibraryInfoPass(Registry);
+ initializeTargetTransformInfoPass(Registry);
}
void LLVMInitializeTarget(LLVMPassRegistryRef R) {
diff --git a/lib/Target/TargetTransformImpl.cpp b/lib/Target/TargetTransformImpl.cpp
new file mode 100644
index 0000000000..1cb5edab9d
--- /dev/null
+++ b/lib/Target/TargetTransformImpl.cpp
@@ -0,0 +1,43 @@
+// llvm/Target/TargetTransformImpl.cpp - Target Loop Trans Info ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Target/TargetTransformImpl.h"
+#include "llvm/Target/TargetLowering.h"
+
+using namespace llvm;
+
+bool ScalarTargetTransformImpl::isLegalAddImmediate(int64_t imm) const {
+ return TLI->isLegalAddImmediate(imm);
+}
+
+bool ScalarTargetTransformImpl::isLegalICmpImmediate(int64_t imm) const {
+ return TLI->isLegalICmpImmediate(imm);
+}
+
+bool ScalarTargetTransformImpl::isLegalAddressingMode(const AddrMode &AM,
+ Type *Ty) const {
+ return TLI->isLegalAddressingMode(AM, Ty);
+}
+
+bool ScalarTargetTransformImpl::isTruncateFree(Type *Ty1, Type *Ty2) const {
+ return TLI->isTruncateFree(Ty1, Ty2);
+}
+
+bool ScalarTargetTransformImpl::isTypeLegal(Type *Ty) const {
+ EVT T = TLI->getValueType(Ty);
+ return TLI->isTypeLegal(T);
+}
+
+unsigned ScalarTargetTransformImpl::getJumpBufAlignment() const {
+ return TLI->getJumpBufAlignment();
+}
+
+unsigned ScalarTargetTransformImpl::getJumpBufSize() const {
+ return TLI->getJumpBufSize();
+}
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index f8cced885d..655ede79ba 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -48,7 +48,8 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT,
InstrInfo(*this),
TSInfo(*this),
TLInfo(*this),
- JITInfo(*this) {
+ JITInfo(*this),
+ STTI(&TLInfo) {
}
void X86_64TargetMachine::anchor() { }
@@ -64,7 +65,8 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT,
InstrInfo(*this),
TSInfo(*this),
TLInfo(*this),
- JITInfo(*this) {
+ JITInfo(*this),
+ STTI(&TLInfo) {
}
/// X86TargetMachine ctor - Create an X86 target.
diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h
index 5301299c1f..4bad695b4c 100644
--- a/lib/Target/X86/X86TargetMachine.h
+++ b/lib/Target/X86/X86TargetMachine.h
@@ -25,6 +25,7 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetTransformImpl.h"
namespace llvm {
@@ -85,6 +86,8 @@ class X86_32TargetMachine : public X86TargetMachine {
X86SelectionDAGInfo TSInfo;
X86TargetLowering TLInfo;
X86JITInfo JITInfo;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
X86_32TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -103,6 +106,12 @@ public:
virtual X86JITInfo *getJITInfo() {
return &JITInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
};
/// X86_64TargetMachine - X86 64-bit target machine.
@@ -114,6 +123,8 @@ class X86_64TargetMachine : public X86TargetMachine {
X86SelectionDAGInfo TSInfo;
X86TargetLowering TLInfo;
X86JITInfo JITInfo;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
X86_64TargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -132,6 +143,12 @@ public:
virtual X86JITInfo *getJITInfo() {
return &JITInfo;
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
};
} // End llvm namespace
diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp
index c71d978ad8..0b7e3e10d4 100644
--- a/lib/Target/XCore/XCoreTargetMachine.cpp
+++ b/lib/Target/XCore/XCoreTargetMachine.cpp
@@ -32,7 +32,7 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT,
InstrInfo(),
FrameLowering(Subtarget),
TLInfo(*this),
- TSInfo(*this) {
+ TSInfo(*this), STTI(&TLInfo) {
}
namespace {
diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h
index f7fec29f54..c60c6a37f9 100644
--- a/lib/Target/XCore/XCoreTargetMachine.h
+++ b/lib/Target/XCore/XCoreTargetMachine.h
@@ -20,6 +20,7 @@
#include "XCoreISelLowering.h"
#include "XCoreSelectionDAGInfo.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetTransformImpl.h"
#include "llvm/DataLayout.h"
namespace llvm {
@@ -31,6 +32,8 @@ class XCoreTargetMachine : public LLVMTargetMachine {
XCoreFrameLowering FrameLowering;
XCoreTargetLowering TLInfo;
XCoreSelectionDAGInfo TSInfo;
+ ScalarTargetTransformImpl STTI;
+ VectorTargetTransformImpl VTTI;
public:
XCoreTargetMachine(const Target &T, StringRef TT,
StringRef CPU, StringRef FS, const TargetOptions &Options,
@@ -53,6 +56,12 @@ public:
virtual const TargetRegisterInfo *getRegisterInfo() const {
return &InstrInfo.getRegisterInfo();
}
+ virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const {
+ return &STTI;
+ }
+ virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const {
+ return &VTTI;
+ }
virtual const DataLayout *getDataLayout() const { return &DL; }
// Pass Pipeline Configuration
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 958348d9fa..99a62dbe62 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -37,7 +37,7 @@
//
// TODO: Handle multiple loops at a time.
//
-// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
+// TODO: Should AddrMode::BaseGV be changed to a ConstantExpr
// instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
@@ -67,6 +67,7 @@
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/TargetTransformInfo.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
@@ -74,7 +75,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
@@ -1118,7 +1118,7 @@ public:
enum KindType {
Basic, ///< A normal use, with no folding.
Special, ///< A special case of basic, allowing -1 scales.
- Address, ///< An address use; folding according to TargetLowering
+ Address, ///< An address use; folding according to ScalarTargetTransformInfo.
ICmpZero ///< An equality icmp with both operands folded into one.
// TODO: Add a generic icmp too?
};
@@ -1272,12 +1272,12 @@ void LSRUse::dump() const {
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const AddrMode &AM,
LSRUse::KindType Kind, Type *AccessTy,
- const TargetLowering *TLI) {
+ const ScalarTargetTransformInfo *STTI) {
switch (Kind) {
case LSRUse::Address:
// If we have low-level target information, ask the target if it can
// completely fold this address.
- if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);
+ if (STTI) return STTI->isLegalAddressingMode(AM, AccessTy);
// Otherwise, just guess that reg+reg addressing is legal.
return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;
@@ -1300,7 +1300,7 @@ static bool isLegalUse(const AddrMode &AM,
// If we have low-level target information, ask the target if it can fold an
// integer immediate on an icmp.
if (AM.BaseOffs != 0) {
- if (!TLI)
+ if (!STTI)
return false;
// We have one of:
// ICmpZero BaseReg + Offset => ICmp BaseReg, -Offset
@@ -1309,7 +1309,7 @@ static bool isLegalUse(const AddrMode &AM,
int64_t Offs = AM.BaseOffs;
if (AM.Scale == 0)
Offs = -(uint64_t)Offs; // The cast does the right thing with INT64_MIN.
- return TLI->isLegalICmpImmediate(Offs);
+ return STTI->isLegalICmpImmediate(Offs);
}
// ICmpZero BaseReg + -1*ScaleReg => ICmp BaseReg, ScaleReg
@@ -1330,20 +1330,20 @@ static bool isLegalUse(const AddrMode &AM,
static bool isLegalUse(AddrMode AM,
int64_t MinOffset, int64_t MaxOffset,
LSRUse::KindType Kind, Type *AccessTy,
- const TargetLowering *TLI) {
+ const ScalarTargetTransformInfo *LTTI) {
// Check for overflow.
if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
(MinOffset > 0))
return false;
AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
- if (isLegalUse(AM, Kind, AccessTy, TLI)) {
+ if (isLegalUse(AM, Kind, AccessTy, LTTI)) {
AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
// Check for overflow.
if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
(MaxOffset > 0))
return false;
AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
- return isLegalUse(AM, Kind, AccessTy, TLI);
+ return isLegalUse(AM, Kind, AccessTy, LTTI);
}
return false;
}
@@ -1352,7 +1352,7 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
GlobalValue *BaseGV,
bool HasBaseReg,
LSRUse::KindType Kind, Type *AccessTy,
- const TargetLowering *TLI) {
+ const ScalarTargetTransformInfo *LTTI) {
// Fast-path: zero is always foldable.
if (BaseOffs == 0 && !BaseGV) return true;
@@ -1371,14 +1371,14 @@ static bool isAlwaysFoldable(int64_t BaseOffs,
AM.HasBaseReg = true;
}
- return isLegalUse(AM, Kind, AccessTy, TLI);
+ return isLegalUse(AM, Kind, AccessTy, LTTI);
}
static bool isAlwaysFoldable(const SCEV *S,
int64_t MinOffset, int64_t MaxOffset,
bool HasBaseReg,
LSRUse::KindType Kind, Type *AccessTy,
- const TargetLowering *TLI,
+ const ScalarTargetTransformInfo *LTTI,
ScalarEvolution &SE) {
// Fast-path: zero is always foldable.
if (S->isZero()) return true;
@@ -1402,7 +1402,7 @@ static bool isAlwaysFoldable(const SCEV *S,
AM.HasBaseReg = HasBaseReg;
AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;
- return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
+ return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, LTTI);
}
namespace {
@@ -1502,7 +1502,7 @@ class LSRInstance {
ScalarEvolution &SE;
DominatorTree &DT;
LoopInfo &LI;
- const TargetLowering *const TLI;
+ const ScalarTargetTransformInfo *const STTI;
Loop *const L;
bool Changed;
@@ -1638,7 +1638,7 @@ class LSRInstance {
Pass *P);
public:
- LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);
+ LSRInstance(const ScalarTargetTransformInfo *ltti, Loop *l, Pass *P);
bool getChanged() const { return Changed; }
@@ -1688,11 +1688,10 @@ void LSRInstance::OptimizeShadowIV() {
}
if (!DestTy) continue;
- if (TLI) {
+ if (STTI) {
// If target does not support DestTy natively then do not apply
// this transformation.
- EVT DVT = TLI->getValueType(DestTy);
- if (!TLI->isTypeLegal(DVT)) continue;
+ if (!STTI->isTypeLegal(DestTy)) continue;
}
PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
@@ -2015,18 +2014,18 @@ LSRInstance::OptimizeLoopTermCond() {
if (C->getValue().getMinSignedBits() >= 64 ||
C->getValue().isMinSignedValue())
goto decline_post_inc;
- // Without TLI, assume that any stride might be valid, and so any
+ // Without STTI, assume that any stride might be valid, and so any
// use might be shared.
- if (!TLI)
+ if (!STTI)
goto decline_post_inc;
// Check for possible scaled-address reuse.
Type *AccessTy = getAccessType(UI->getUser());
AddrMode AM;
AM.Scale = C->getSExtValue();
- if (TLI->isLegalAddressingMode(AM, AccessTy))
+ if (STTI->isLegalAddressingMode(AM, AccessTy))
goto decline_post_inc;
AM.Scale = -AM.Scale;
- if (TLI->isLegalAddressingMode(AM, AccessTy))
+ if (STTI->isLegalAddressingMode(AM, AccessTy))
goto decline_post_inc;
}
}
@@ -2097,12 +2096,12 @@ LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
// Conservatively assume HasBaseReg is true for now.
if (NewOffset < LU.MinOffset) {
if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
- Kind, AccessTy, TLI))
+ Kind, AccessTy, STTI))
return false;
NewMinOffset = NewOffset;
} else if (NewOffset > LU.MaxOffset) {
if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
- Kind, AccessTy, TLI))
+ Kind, AccessTy, STTI))
return false;
NewMaxOffset = NewOffset;
}
@@ -2131,7 +2130,7 @@ LSRInstance::getUse(const SCEV *&Expr,
int64_t Offset = ExtractImmediate(Expr, SE);
// Basic uses can't accept any offset, for example.
- if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
+ if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, STTI)) {
Expr = Copy;
Offset = 0;
}
@@ -2396,7 +2395,7 @@ bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
/// TODO: Consider IVInc free if it's already used in another chains.
static bool
isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
- ScalarEvolution &SE, const TargetLowering *TLI) {
+ ScalarEvolution &SE, const ScalarTargetTransformInfo *STTI) {
if (StressIVChain)
return true;
@@ -2654,7 +2653,7 @@ void LSRInstance::CollectChains() {
for (unsigned UsersIdx = 0, NChains = IVChainVec.size();
UsersIdx < NChains; ++UsersIdx) {
if (!isProfitableChain(IVChainVec[UsersIdx],
- ChainUsersVec[UsersIdx].FarUsers, SE, TLI))
+ ChainUsersVec[UsersIdx].FarUsers, SE, STTI))
continue;
// Preserve the chain at UsesIdx.
if (ChainIdx != UsersIdx)
@@ -2681,7 +2680,8 @@ void LSRInstance::FinalizeChain(IVChain &Chain) {
/// Return true if the IVInc can be folded into an addressing mode.
static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
- Value *Operand, const TargetLowering *TLI) {
+ Value *Operand,
+ const ScalarTargetTransformInfo *STTI) {
const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
if (!IncConst || !isAddressUse(UserInst, Operand))
return false;
@@ -2691,7 +2691,7 @@ static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
int64_t IncOffset = IncConst->getValue()->getSExtValue();
if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HaseBaseReg=*/false,
- LSRUse::Address, getAccessType(UserInst), TLI))
+ LSRUse::Address, getAccessType(UserInst), STTI))
return false;
return true;
@@ -2762,7 +2762,7 @@ void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
// If an IV increment can't be folded, use it as the next IV value.
if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand,
- TLI)) {
+ STTI)) {
assert(IVTy == IVOper->getType() && "inconsistent IV increment type");
IVSrc = IVOper;
LeftOverExpr = 0;
@@ -3108,7 +3108,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
// into an immediate field.
if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
Base.getNumRegs() > 1,
- LU.Kind, LU.AccessTy, TLI, SE))
+ LU.Kind, LU.AccessTy, STTI, SE))
continue;
// Collect all operands except *J.
@@ -3122,7 +3122,7 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
if (InnerAddOps.size() == 1 &&
isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
Base.getNumRegs() > 1,
- LU.Kind, LU.AccessTy, TLI, SE))
+ LU.Kind, LU.AccessTy, STTI, SE))
continue;
const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
@@ -3132,9 +3132,9 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
// Add the remaining pieces of the add back into the new formula.
const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
- if (TLI && InnerSumSC &&
+ if (STTI && InnerSumSC &&
SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
- TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+ STTI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
InnerSumSC->getValue()->getZExtValue())) {
F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
InnerSumSC->getValue()->getZExtValue();
@@ -3144,8 +3144,8 @@ void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
// Add J as its own register, or an unfolded immediate.
const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
- if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
- TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
+ if (STTI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
+ STTI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
SC->getValue()->getZExtValue()))
F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
SC->getValue()->getZExtValue();
@@ -3205,7 +3205,7 @@ void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
Formula F = Base;
F.AM.BaseGV = GV;
if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
- LU.Kind, LU.AccessTy, TLI))
+ LU.Kind, LU.AccessTy, STTI))
continue;
F.BaseRegs[i] = G;
(void)InsertFormula(LU, LUIdx, F);
@@ -3230,7 +3230,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
Formula F = Base;
F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
- LU.Kind, LU.AccessTy, TLI)) {
+ LU.Kind, LU.AccessTy, STTI)) {
// Add the offset to the base register.
const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
// If it cancelled out, drop the base register, otherwise update it.
@@ -3250,7 +3250,7 @@ void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
Formula F = Base;
F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
- LU.Kind, LU.AccessTy, TLI))
+ LU.Kind, LU.AccessTy, STTI))
continue;
F.BaseRegs[i] = G;
(void)InsertFormula(LU, LUIdx, F);
@@ -3297,7 +3297,7 @@ void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
F.AM.BaseOffs = NewBaseOffs;
// Check that this scale is legal.
- if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
+ if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, STTI))
continue;
// Compensate for the use having MinOffset built into it.
@@ -3353,12 +3353,12 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
// Check whether this scale is going to be legal.
if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
- LU.Kind, LU.AccessTy, TLI)) {
+ LU.Kind, LU.AccessTy, STTI)) {
// As a special-case, handle special out-of-loop Basic users specially.
// TODO: Reconsider this special case.
if (LU.Kind == LSRUse::Basic &&
isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
- LSRUse::Special, LU.AccessTy, TLI) &&
+ LSRUse::Special, LU.AccessTy, STTI) &&
LU.AllFixupsOutsideLoop)
LU.Kind = LSRUse::Special;
else
@@ -3391,8 +3391,8 @@ void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
- // This requires TargetLowering to tell us which truncates are free.
- if (!TLI) return;
+ // This requires ScalarTargetTransformInfo to tell us which truncates are free.
+ if (!STTI) return;
// Don't bother truncating symbolic values.
if (Base.AM.BaseGV) return;
@@ -3405,7 +3405,7 @@ void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
for (SmallSetVector<Type *, 4>::const_iterator
I = Types.begin(), E = Types.end(); I != E; ++I) {
Type *SrcTy = *I;
- if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
+ if (SrcTy != DstTy && STTI->isTruncateFree(SrcTy, DstTy)) {
Formula F = Base;
if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
@@ -3561,7 +3561,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
Formula NewF = F;
NewF.AM.BaseOffs = Offs;
if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
- LU.Kind, LU.AccessTy, TLI))
+ LU.Kind, LU.AccessTy, STTI))
continue;
NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);
@@ -3586,9 +3586,9 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
Formula NewF = F;
NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
- LU.Kind, LU.AccessTy, TLI)) {
- if (!TLI ||
- !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
+ LU.Kind, LU.AccessTy, STTI)) {
+ if (!STTI ||
+ !STTI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
continue;
NewF = F;
NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
@@ -3900,7 +3900,7 @@ void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
Formula &F = LUThatHas->Formulae[i];
if (!isLegalUse(F.AM,
LUThatHas->MinOffset, LUThatHas->MaxOffset,
- LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
+ LUThatHas->Kind, LUThatHas->AccessTy, STTI)) {
DEBUG(dbgs() << " Deleting "; F.print(dbgs());
dbgs() << '\n');
LUThatHas->DeleteFormula(F);
@@ -4589,12 +4589,12 @@ LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}
-LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
+LSRInstance::LSRInstance(const ScalarTargetTransformInfo *stti, Loop *l, Pass *P)
: IU(P->getAnalysis<IVUsers>()),
SE(P->getAnalysis<ScalarEvolution>()),
DT(P->getAnalysis<DominatorTree>()),
LI(P->getAnalysis<LoopInfo>()),
- TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {
+ STTI(stti), L(l), Changed(false), IVIncInsertPos(0) {
// If LoopSimplify form is not available, stay out of trouble.
if (!L->isLoopSimplifyForm())
@@ -4684,7 +4684,7 @@ LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
JE = LU.Formulae.end(); J != JE; ++J)
assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
- LU.Kind, LU.AccessTy, TLI) &&
+ LU.Kind, LU.AccessTy, STTI) &&
"Illegal formula generated!");
};
#endif
@@ -4757,13 +4757,13 @@ void LSRInstance::dump() const {
namespace {
class LoopStrengthReduce : public LoopPass {
- /// TLI - Keep a pointer of a TargetLowering to consult for determining
- /// transformation profitability.
- const TargetLowering *const TLI;
+ /// ScalarTargetTransformInfo provides target information that is needed
+ /// for strength reducing loops.
+ const ScalarTargetTransformInfo *STTI;
public:
static char ID; // Pass ID, replacement for typeid
- explicit LoopStrengthReduce(const TargetLowering *tli = 0);
+ LoopStrengthReduce();
private:
bool runOnLoop(Loop *L, LPPassManager &LPM);
@@ -4783,13 +4783,12 @@ INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
"Loop Strength Reduction", false, false)
-
-Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
- return new LoopStrengthReduce(TLI);
+Pass *llvm::createLoopStrengthReducePass() {
+ return new LoopStrengthReduce();
}
-LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
- : LoopPass(ID), TLI(tli) {
+LoopStrengthReduce::LoopStrengthReduce()
+ : LoopPass(ID), STTI(0) {
initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}
@@ -4815,8 +4814,13 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
bool Changed = false;
+ TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+
+ if (TTI)
+ STTI = TTI->getScalarTargetTransformInfo();
+
// Run the main LSR transformation.
- Changed |= LSRInstance(TLI, L, this).getChanged();
+ Changed |= LSRInstance(STTI, L, this).getChanged();
// Remove any extra phis created by processing inner loops.
Changed |= DeleteDeadPHIs(L->getHeader());
@@ -4827,7 +4831,7 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
Rewriter.setDebugType(DEBUG_TYPE);
#endif
unsigned numFolded = Rewriter.
- replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, TLI);
+ replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, STTI);
if (numFolded) {
Changed = true;
DeleteTriviallyDeadInstructions(DeadInsts);
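Note on the LoopStrengthReduce hunks above: the pass no longer receives a TargetLowering pointer at construction time; instead it asks the pass manager for an optional TargetTransformInfo analysis inside runOnLoop and keeps STTI null when no tool has registered one. A minimal sketch of that query pattern follows, assuming the TargetTransformInfo interface introduced by this patch; the pass name ExamplePass and the immediate value are illustrative only, not part of the patch.

#include "llvm/Analysis/LoopPass.h"
#include "llvm/TargetTransformInfo.h"

using namespace llvm;

namespace {
// Hypothetical pass, for illustration only.
struct ExamplePass : public LoopPass {
  static char ID;
  const ScalarTargetTransformInfo *STTI;

  ExamplePass() : LoopPass(ID), STTI(0) {}

  virtual bool runOnLoop(Loop *L, LPPassManager &) {
    // Optional analysis: tools that know the target register a
    // TargetTransformInfo pass; everyone else gets a null STTI.
    if (TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>())
      STTI = TTI->getScalarTargetTransformInfo();

    // Every target-dependent query is guarded, exactly as LSR guards them above.
    int64_t Imm = 42; // illustrative immediate
    bool Foldable = STTI && STTI->isLegalAddImmediate(Imm);
    (void)Foldable;
    (void)L;
    return false;
  }
};
char ExamplePass::ID = 0;
}

The same guard appears throughout the LSR hunks: a null STTI simply makes the target-dependent folding opportunities unavailable rather than crashing.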
diff --git a/lib/Transforms/Utils/LowerInvoke.cpp b/lib/Transforms/Utils/LowerInvoke.cpp
index 930555424d..f35cbbdde5 100644
--- a/lib/Transforms/Utils/LowerInvoke.cpp
+++ b/lib/Transforms/Utils/LowerInvoke.cpp
@@ -45,10 +45,10 @@
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/TargetTransformInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Target/TargetLowering.h"
#include <csetjmp>
#include <set>
using namespace llvm;
@@ -70,15 +70,14 @@ namespace {
Constant *SetJmpFn, *LongJmpFn, *StackSaveFn, *StackRestoreFn;
bool useExpensiveEHSupport;
- // We peek in TLI to grab the target's jmp_buf size and alignment
- const TargetLowering *TLI;
+ // We peek in STTI to grab the target's jmp_buf size and alignment
+ const ScalarTargetTransformInfo *STTI;
public:
static char ID; // Pass identification, replacement for typeid
- explicit LowerInvoke(const TargetLowering *tli = NULL,
- bool useExpensiveEHSupport = ExpensiveEHSupport)
+ explicit LowerInvoke(bool useExpensiveEHSupport = ExpensiveEHSupport)
: FunctionPass(ID), useExpensiveEHSupport(useExpensiveEHSupport),
- TLI(tli) {
+ STTI(0) {
initializeLowerInvokePass(*PassRegistry::getPassRegistry());
}
bool doInitialization(Module &M);
@@ -108,21 +107,24 @@ INITIALIZE_PASS(LowerInvoke, "lowerinvoke",
char &llvm::LowerInvokePassID = LowerInvoke::ID;
// Public Interface To the LowerInvoke pass.
-FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI) {
- return new LowerInvoke(TLI, ExpensiveEHSupport);
+FunctionPass *llvm::createLowerInvokePass() {
+ return new LowerInvoke(ExpensiveEHSupport);
}
-FunctionPass *llvm::createLowerInvokePass(const TargetLowering *TLI,
- bool useExpensiveEHSupport) {
- return new LowerInvoke(TLI, useExpensiveEHSupport);
+FunctionPass *llvm::createLowerInvokePass(bool useExpensiveEHSupport) {
+ return new LowerInvoke(useExpensiveEHSupport);
}
// doInitialization - Make sure that there is a prototype for abort in the
// current module.
bool LowerInvoke::doInitialization(Module &M) {
+ TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+ if (TTI)
+ STTI = TTI->getScalarTargetTransformInfo();
+
Type *VoidPtrTy = Type::getInt8PtrTy(M.getContext());
if (useExpensiveEHSupport) {
// Insert a type for the linked list of jump buffers.
- unsigned JBSize = TLI ? TLI->getJumpBufSize() : 0;
+ unsigned JBSize = STTI ? STTI->getJumpBufSize() : 0;
JBSize = JBSize ? JBSize : 200;
Type *JmpBufTy = ArrayType::get(VoidPtrTy, JBSize);
@@ -430,7 +432,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
// Create an alloca for the incoming jump buffer ptr and the new jump buffer
// that needs to be restored on all exits from the function. This is an
// alloca because the value needs to be live across invokes.
- unsigned Align = TLI ? TLI->getJumpBufAlignment() : 0;
+ unsigned Align = STTI ? STTI->getJumpBufAlignment() : 0;
AllocaInst *JmpBuf =
new AllocaInst(JBLinkTy, 0, Align,
"jblink", F.begin()->begin());
@@ -575,6 +577,10 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
}
bool LowerInvoke::runOnFunction(Function &F) {
+ TargetTransformInfo *TTI = getAnalysisIfAvailable<TargetTransformInfo>();
+ if (TTI)
+ STTI = TTI->getScalarTargetTransformInfo();
+
if (useExpensiveEHSupport)
return insertExpensiveEHSupport(F);
else
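The LowerInvoke hunks use the same optional-analysis pattern: the jmp_buf size and alignment now come from ScalarTargetTransformInfo when a TargetTransformInfo analysis is present, and fall back to conservative defaults otherwise. A minimal sketch of that defaulting logic, using only the getJumpBufSize/getJumpBufAlignment hooks shown above; the helper name getJumpBufLayout is illustrative, not part of the patch.

#include "llvm/TargetTransformInfo.h"

using namespace llvm;

// Illustrative helper: mirrors LowerInvoke's defaults above.
static void getJumpBufLayout(const ScalarTargetTransformInfo *STTI,
                             unsigned &Size, unsigned &Align) {
  Size  = STTI ? STTI->getJumpBufSize()      : 0;
  Align = STTI ? STTI->getJumpBufAlignment() : 0;
  // An unknown size falls back to 200 pointer-sized slots, and an unknown
  // alignment (0) lets the AllocaInst pick the natural default.
  if (!Size)
    Size = 200;
}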
diff --git a/lib/VMCore/CMakeLists.txt b/lib/VMCore/CMakeLists.txt
index 6c30967974..ba807fcacc 100644
--- a/lib/VMCore/CMakeLists.txt
+++ b/lib/VMCore/CMakeLists.txt
@@ -33,6 +33,7 @@ add_llvm_library(LLVMCore
PrintModulePass.cpp
Type.cpp
TypeFinder.cpp
+ TargetTransformInfo.cpp
Use.cpp
User.cpp
Value.cpp
diff --git a/lib/VMCore/TargetTransformInfo.cpp b/lib/VMCore/TargetTransformInfo.cpp
new file mode 100644
index 0000000000..3af0222a21
--- /dev/null
+++ b/lib/VMCore/TargetTransformInfo.cpp
@@ -0,0 +1,27 @@
+//===- llvm/VMCore/TargetTransformInfo.cpp ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/TargetTransformInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+/// Default ctor.
+///
+/// @note This has to exist, because this is a pass, but it should never be
+/// used.
+TargetTransformInfo::TargetTransformInfo() : ImmutablePass(ID) {
+ report_fatal_error("Bad TargetTransformInfo ctor used. "
+ "Tool did not specify a TargetTransformInfo to use?");
+}
+
+INITIALIZE_PASS(TargetTransformInfo, "TargetTransformInfo",
+ "Target Transform Info", false, true)
+char TargetTransformInfo::ID = 0;
+
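The new TargetTransformInfo immutable pass is deliberately unusable through its default constructor: transforms only ever see a real instance when the driving tool builds one from a TargetMachine and adds it to the pass manager, as the llc and opt hunks below do. A minimal sketch of that wiring, assuming the two-argument TargetTransformInfo constructor taking the scalar and vector interfaces; the helper name addTargetTransformInfo is illustrative only.

#include "llvm/PassManager.h"
#include "llvm/TargetTransformInfo.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

// Illustrative helper, following the llc.cpp and opt.cpp hunks in this patch.
static void addTargetTransformInfo(PassManagerBase &PM, TargetMachine *TM) {
  if (!TM)
    return; // no target known: passes see no TTI and stay conservative
  PM.add(new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
                                 TM->getVectorTargetTransformInfo()));
}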
diff --git a/tools/llc/llc.cpp b/tools/llc/llc.cpp
index 04e5bca385..4d4a74c009 100644
--- a/tools/llc/llc.cpp
+++ b/tools/llc/llc.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Support/IRReader.h"
+#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
#include "llvm/CodeGen/LinkAllCodegenComponents.h"
#include "llvm/MC/SubtargetFeature.h"
@@ -62,216 +63,13 @@ OptLevel("O",
static cl::opt<std::string>
TargetTriple("mtriple", cl::desc("Override target triple for module"));
-static cl::opt<std::string>
-MArch("march", cl::desc("Architecture to generate code for (see --version)"));
-
-static cl::opt<std::string>
-MCPU("mcpu",
- cl::desc("Target a specific cpu type (-mcpu=help for details)"),
- cl::value_desc("cpu-name"),
- cl::init(""));
-
-static cl::list<std::string>
-MAttrs("mattr",
- cl::CommaSeparated,
- cl::desc("Target specific attributes (-mattr=help for details)"),
- cl::value_desc("a1,+a2,-a3,..."));
-
-static cl::opt<Reloc::Model>
-RelocModel("relocation-model",
- cl::desc("Choose relocation model"),
- cl::init(Reloc::Default),
- cl::values(
- clEnumValN(Reloc::Default, "default",
- "Target default relocation model"),
- clEnumValN(Reloc::Static, "static",
- "Non-relocatable code"),
- clEnumValN(Reloc::PIC_, "pic",
- "Fully relocatable, position independent code"),
- clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
- "Relocatable external references, non-relocatable code"),
- clEnumValEnd));
-
-static cl::opt<llvm::CodeModel::Model>
-CMModel("code-model",
- cl::desc("Choose code model"),
- cl::init(CodeModel::Default),
- cl::values(clEnumValN(CodeModel::Default, "default",
- "Target default code model"),
- clEnumValN(CodeModel::Small, "small",
- "Small code model"),
- clEnumValN(CodeModel::Kernel, "kernel",
- "Kernel code model"),
- clEnumValN(CodeModel::Medium, "medium",
- "Medium code model"),
- clEnumValN(CodeModel::Large, "large",
- "Large code model"),
- clEnumValEnd));
-
-static cl::opt<bool>
-RelaxAll("mc-relax-all",
- cl::desc("When used with filetype=obj, "
- "relax all fixups in the emitted object file"));
-
-cl::opt<TargetMachine::CodeGenFileType>
-FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
- cl::desc("Choose a file type (not all types are supported by all targets):"),
- cl::values(
- clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
- "Emit an assembly ('.s') file"),
- clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
- "Emit a native object ('.o') file"),
- clEnumValN(TargetMachine::CGFT_Null, "null",
- "Emit nothing, for performance testing"),
- clEnumValEnd));
-
cl::opt<bool> NoVerify("disable-verify", cl::Hidden,
cl::desc("Do not verify input module"));
-cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
- cl::desc("Do not use .loc entries"));
-
-cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
- cl::desc("Do not use .cfi_* directives"));
-
-cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
- cl::desc("Use .file directives with an explicit directory."));
-
-static cl::opt<bool>
-DisableRedZone("disable-red-zone",
- cl::desc("Do not emit code that uses the red zone."),
- cl::init(false));
-
-static cl::opt<bool>
-EnableFPMAD("enable-fp-mad",
- cl::desc("Enable less precise MAD instructions to be generated"),
- cl::init(false));
-
-static cl::opt<bool>
-DisableFPElim("disable-fp-elim",
- cl::desc("Disable frame pointer elimination optimization"),
- cl::init(false));
-
-static cl::opt<bool>
-DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
- cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableUnsafeFPMath("enable-unsafe-fp-math",
- cl::desc("Enable optimizations that may decrease FP precision"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableNoInfsFPMath("enable-no-infs-fp-math",
- cl::desc("Enable FP math optimizations that assume no +-Infs"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableNoNaNsFPMath("enable-no-nans-fp-math",
- cl::desc("Enable FP math optimizations that assume no NaNs"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
- cl::Hidden,
- cl::desc("Force codegen to assume rounding mode can change dynamically"),
- cl::init(false));
-
-static cl::opt<bool>
-GenerateSoftFloatCalls("soft-float",
- cl::desc("Generate software floating point library calls"),
- cl::init(false));
-
-static cl::opt<llvm::FloatABI::ABIType>
-FloatABIForCalls("float-abi",
- cl::desc("Choose float ABI type"),
- cl::init(FloatABI::Default),
- cl::values(
- clEnumValN(FloatABI::Default, "default",
- "Target default float ABI type"),
- clEnumValN(FloatABI::Soft, "soft",
- "Soft float ABI (implied by -soft-float)"),
- clEnumValN(FloatABI::Hard, "hard",
- "Hard float ABI (uses FP registers)"),
- clEnumValEnd));
-
-static cl::opt<llvm::FPOpFusion::FPOpFusionMode>
-FuseFPOps("fp-contract",
- cl::desc("Enable aggresive formation of fused FP ops"),
- cl::init(FPOpFusion::Standard),
- cl::values(
- clEnumValN(FPOpFusion::Fast, "fast",
- "Fuse FP ops whenever profitable"),
- clEnumValN(FPOpFusion::Standard, "on",
- "Only fuse 'blessed' FP ops."),
- clEnumValN(FPOpFusion::Strict, "off",
- "Only fuse FP ops when the result won't be effected."),
- clEnumValEnd));
-
-static cl::opt<bool>
-DontPlaceZerosInBSS("nozero-initialized-in-bss",
- cl::desc("Don't place zero-initialized symbols into bss section"),
- cl::init(false));
-
-static cl::opt<bool>
+cl::opt<bool>
DisableSimplifyLibCalls("disable-simplify-libcalls",
- cl::desc("Disable simplify-libcalls"),
- cl::init(false));
-
-static cl::opt<bool>
-EnableGuaranteedTailCallOpt("tailcallopt",
- cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
- cl::init(false));
-
-static cl::opt<bool>
-DisableTailCalls("disable-tail-calls",
- cl::desc("Never emit tail calls"),
- cl::init(false));
-
-static cl::opt<unsigned>
-OverrideStackAlignment("stack-alignment",
- cl::desc("Override default stack alignment"),
- cl::init(0));
-
-static cl::opt<bool>
-EnableRealignStack("realign-stack",
- cl::desc("Realign stack if needed"),
- cl::init(true));
-
-static cl::opt<std::string>
-TrapFuncName("trap-func", cl::Hidden,
- cl::desc("Emit a call to trap function rather than a trap instruction"),
- cl::init(""));
-
-static cl::opt<bool>
-EnablePIE("enable-pie",
- cl::desc("Assume the creation of a position independent executable."),
- cl::init(false));
-
-static cl::opt<bool>
-SegmentedStacks("segmented-stacks",
- cl::desc("Use segmented stacks if possible."),
- cl::init(false));
-
-static cl::opt<bool>
-UseInitArray("use-init-array",
- cl::desc("Use .init_array instead of .ctors."),
- cl::init(false));
-
-static cl::opt<std::string> StopAfter("stop-after",
- cl::desc("Stop compilation after a specific pass"),
- cl::value_desc("pass-name"),
- cl::init(""));
-static cl::opt<std::string> StartAfter("start-after",
- cl::desc("Resume compilation after a specific pass"),
- cl::value_desc("pass-name"),
- cl::init(""));
-
-static cl::opt<unsigned>
-SSPBufferSize("stack-protector-buffer-size", cl::init(8),
- cl::desc("Lower bound for a buffer to be considered for "
- "stack protection"));
+ cl::desc("Disable simplify-libcalls"),
+ cl::init(false));
// GetFileNameRoot - Helper function to get the basename of a filename.
static inline std::string
@@ -505,6 +303,11 @@ int main(int argc, char **argv) {
TLI->disableAllFunctions();
PM.add(TLI);
+ if (target.get()) {
+ PM.add(new TargetTransformInfo(target->getScalarTargetTransformInfo(),
+ target->getVectorTargetTransformInfo()));
+ }
+
// Add the target data from the target machine, if it exists, or the module.
if (const DataLayout *TD = Target.getDataLayout())
PM.add(new DataLayout(*TD));
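The large block of cl::opt declarations deleted from llc.cpp above is not lost: it moves into the new include/llvm/CodeGen/CommandFlags.h added by this patch, so llc and opt share one set of codegen flags (-march, -mcpu, -mattr, and so on). A minimal sketch of how a tool picks them up, assuming the header defines the flag variables at namespace scope as the llc and opt hunks imply; the helper printTargetConfig is illustrative, not part of the patch.

#include "llvm/CodeGen/CommandFlags.h"   // declares MArch, MCPU, MAttrs, ...
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Illustrative helper: dump the shared codegen flags after parsing.
static void printTargetConfig() {
  outs() << "arch: " << MArch << "  cpu: " << MCPU << "\n";
  for (unsigned i = 0, e = MAttrs.size(); i != e; ++i)
    outs() << "attr: " << MAttrs[i] << "\n";
}

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv, "shared codegen flags demo\n");
  printTargetConfig();
  return 0;
}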
diff --git a/tools/opt/CMakeLists.txt b/tools/opt/CMakeLists.txt
index 7daf22aa9e..32de6d4060 100644
--- a/tools/opt/CMakeLists.txt
+++ b/tools/opt/CMakeLists.txt
@@ -1,4 +1,4 @@
-set(LLVM_LINK_COMPONENTS bitreader asmparser bitwriter instrumentation scalaropts ipo vectorize)
+set(LLVM_LINK_COMPONENTS ${LLVM_TARGETS_TO_BUILD} bitreader asmparser bitwriter instrumentation scalaropts ipo vectorize)
add_llvm_tool(opt
AnalysisWrappers.cpp
diff --git a/tools/opt/LLVMBuild.txt b/tools/opt/LLVMBuild.txt
index 4de99f51c8..b174431e04 100644
--- a/tools/opt/LLVMBuild.txt
+++ b/tools/opt/LLVMBuild.txt
@@ -19,4 +19,4 @@
type = Tool
name = opt
parent = Tools
-required_libraries = AsmParser BitReader BitWriter IPO Instrumentation Scalar
+required_libraries = AsmParser BitReader BitWriter IPO Instrumentation Scalar all-targets
diff --git a/tools/opt/Makefile b/tools/opt/Makefile
index 16d116da5d..ee7e1cf796 100644
--- a/tools/opt/Makefile
+++ b/tools/opt/Makefile
@@ -9,6 +9,6 @@
LEVEL := ../..
TOOLNAME := opt
-LINK_COMPONENTS := bitreader bitwriter asmparser instrumentation scalaropts ipo vectorize
+LINK_COMPONENTS := bitreader bitwriter asmparser instrumentation scalaropts ipo vectorize all-targets
include $(LEVEL)/Makefile.common
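Linking opt against all configured backends (the CMake, LLVMBuild, and Makefile changes above) only pays off if the tool also registers those targets before TargetRegistry::lookupTarget is called, which GetTargetMachine in the opt.cpp hunk below relies on. A minimal sketch of that initialization, assuming the standard entry points from llvm/Support/TargetSelect.h; whether opt calls exactly these is not shown in this excerpt, and the helper name initAllTargets is illustrative.

#include "llvm/Support/TargetSelect.h"

using namespace llvm;

// Register every backend this build was configured with, so that
// TargetRegistry::lookupTarget can resolve an -mtriple/-march request.
static void initAllTargets() {
  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();  // only needed if the tool emits machine code
  InitializeAllAsmParsers();
}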
diff --git a/tools/opt/opt.cpp b/tools/opt/opt.cpp
index 706a7d5138..8d8d73179e 100644
--- a/tools/opt/opt.cpp
+++ b/tools/opt/opt.cpp
@@ -18,6 +18,7 @@
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/CallGraphSCCPass.h"
+#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/Assembly/PrintModulePass.h"
#include "llvm/Analysis/Verifier.h"
@@ -36,7 +37,9 @@
#include "llvm/Support/PluginLoader.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/MC/SubtargetFeature.h"
#include "llvm/LinkAllPasses.h"
#include "llvm/LinkAllVMCore.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
@@ -478,6 +481,75 @@ static void AddStandardLinkPasses(PassManagerBase &PM) {
/*RunInliner=*/ !DisableInline);
}
+//===----------------------------------------------------------------------===//
+// CodeGen-related helper functions.
+//
+static TargetOptions GetTargetOptions() {
+ TargetOptions Options;
+ Options.LessPreciseFPMADOption = EnableFPMAD;
+ Options.NoFramePointerElim = DisableFPElim;
+ Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
+ Options.AllowFPOpFusion = FuseFPOps;
+ Options.UnsafeFPMath = EnableUnsafeFPMath;
+ Options.NoInfsFPMath = EnableNoInfsFPMath;
+ Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+ Options.HonorSignDependentRoundingFPMathOption =
+ EnableHonorSignDependentRoundingFPMath;
+ Options.UseSoftFloat = GenerateSoftFloatCalls;
+ if (FloatABIForCalls != FloatABI::Default)
+ Options.FloatABIType = FloatABIForCalls;
+ Options.NoZerosInBSS = DontPlaceZerosInBSS;
+ Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+ Options.DisableTailCalls = DisableTailCalls;
+ Options.StackAlignmentOverride = OverrideStackAlignment;
+ Options.RealignStack = EnableRealignStack;
+ Options.TrapFuncName = TrapFuncName;
+ Options.PositionIndependentExecutable = EnablePIE;
+ Options.EnableSegmentedStacks = SegmentedStacks;
+ Options.UseInitArray = UseInitArray;
+ Options.SSPBufferSize = SSPBufferSize;
+ return Options;
+}
+
+CodeGenOpt::Level GetCodeGenOptLevel() {
+ if (OptLevelO1)
+ return CodeGenOpt::Less;
+ if (OptLevelO2)
+ return CodeGenOpt::Default;
+ if (OptLevelO3)
+ return CodeGenOpt::Aggressive;
+ return CodeGenOpt::None;
+}
+
+// Returns the TargetMachine instance or zero if no triple is provided.
+static TargetMachine* GetTargetMachine(std::string TripleStr) {
+ if (TripleStr.empty())
+ return 0;
+
+ // Get the target specific parser.
+ std::string Error;
+ Triple TheTriple(Triple::normalize(TargetTriple));
+
+ const Target *TheTarget = TargetRegistry::lookupTarget(MArch, TheTriple,
+ Error);
+ if (!TheTarget) {
+ return 0;
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (MAttrs.size()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ return TheTarget->createTargetMachine(TheTriple.getTriple(),
+ MCPU, FeaturesStr, GetTargetOptions(),
+ RelocModel, CMModel,
+ GetCodeGenOptLevel());
+}
//===----------------------------------------------------------------------===//
// main for opt
@@ -579,6 +651,12 @@ int main(int argc, char **argv) {
if (TD)
Passes.add(TD);
+ std::auto_ptr<TargetMachine> TM(GetTargetMachine(TargetTriple));
+ if (TM.get()) {
+ Passes.add(new TargetTransformInfo(TM->getScalarTargetTransformInfo(),
+ TM->getVectorTargetTransformInfo()));
+ }
+
OwningPtr<FunctionPassManager> FPasses;
if (OptLevelO1 || OptLevelO2 || OptLevelOs || OptLevelOz || OptLevelO3) {
FPasses.reset(new FunctionPassManager(M.get()));