summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--include/llvm/CodeGen/FastISel.h5
-rw-r--r--lib/CodeGen/SelectionDAG/FastISel.cpp13
-rw-r--r--lib/Target/X86/X86FastISel.cpp71
-rw-r--r--test/CodeGen/X86/fast-isel-mem.ll17
4 files changed, 75 insertions, 31 deletions
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 6ac401d240..7b5fbf97d3 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -80,6 +80,11 @@ public:
/// be assigned the value for the given LLVM value.
unsigned getRegForValue(Value *V);
+ /// lookUpRegForValue - Look up the value to see if its value is already
+ /// cached in a register. It may be defined by instructions across blocks or
+ /// defined locally.
+ unsigned lookUpRegForValue(Value *V);
+
virtual ~FastISel();
protected:
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 58467b8e0e..33f4591b66 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -40,6 +40,9 @@ unsigned FastISel::getRegForValue(Value *V) {
// Don't cache constant materializations. To do so would require
// tracking what uses they dominate.
Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
+ } else if (isa<GlobalValue>(V)) {
+ return TargetMaterializeConstant(dyn_cast<Constant>(V),
+ MBB->getParent()->getConstantPool());
} else if (isa<ConstantPointerNull>(V)) {
Reg = FastEmit_i(VT, VT, ISD::Constant, 0);
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
@@ -85,6 +88,16 @@ unsigned FastISel::getRegForValue(Value *V) {
return Reg;
}
+unsigned FastISel::lookUpRegForValue(Value *V) {
+ // Look up the value to see if we already have a register for it. We
+ // cache values defined by Instructions across blocks, and other values
+ // only locally. This is because Instructions already have the SSA
+ // def-dominates-use requirement enforced.
+ if (ValueMap.count(V))
+ return ValueMap[V];
+ return LocalValueMap[V];
+}
+
/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previous
/// determined register.
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 4584bde7ad..3f5d6f99f6 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -74,7 +74,8 @@ private:
bool X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT, unsigned Src, MVT SrcVT,
unsigned &ResultReg);
- bool X86SelectConstAddr(Value *V, unsigned &Op0, bool isCall = false);
+ bool X86SelectConstAddr(Value *V, unsigned &Op0,
+ bool isCall = false, bool inReg = false);
bool X86SelectLoad(Instruction *I);
@@ -285,7 +286,8 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, MVT DstVT,
/// X86SelectConstAddr - Select and emit code to materialize constant address.
///
-bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0, bool isCall) {
+bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0,
+ bool isCall, bool inReg) {
// FIXME: Only GlobalAddress for now.
GlobalValue *GV = dyn_cast<GlobalValue>(V);
if (!GV)
@@ -308,7 +310,24 @@ bool X86FastISel::X86SelectConstAddr(Value *V, unsigned &Op0, bool isCall) {
addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
// Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = Op0;
+ } else if (inReg) {
+ unsigned Opc = 0;
+ const TargetRegisterClass *RC = NULL;
+ if (TLI.getPointerTy() == MVT::i32) {
+ Opc = X86::LEA32r;
+ RC = X86::GR32RegisterClass;
+ } else {
+ Opc = X86::LEA64r;
+ RC = X86::GR64RegisterClass;
+ }
+ Op0 = createResultReg(RC);
+ X86AddressMode AM;
+ AM.GV = GV;
+ addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
+ // Prevent materializing GV address multiple times in same MBB.
+ LocalValueMap[V] = Op0;
}
+
return true;
}
@@ -323,12 +342,17 @@ bool X86FastISel::X86SelectStore(Instruction* I) {
return false;
Value *V = I->getOperand(1);
- unsigned Ptr = getRegForValue(V);
- if (Ptr == 0) {
- // Handle constant store address.
- if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr))
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
+ unsigned Ptr = lookUpRegForValue(V);
+ if (!Ptr) {
+ // Handle constant store address.
+ // FIXME: If store value type is something we can't handle, this can result
+ // in a dead stub load instruction.
+ if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
+ Ptr = getRegForValue(V);
+ if (Ptr == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ }
}
return X86FastEmitStore(VT, Val, Ptr, 0, V);
@@ -342,14 +366,17 @@ bool X86FastISel::X86SelectLoad(Instruction *I) {
return false;
Value *V = I->getOperand(0);
- unsigned Ptr = getRegForValue(V);
- if (Ptr == 0) {
+ unsigned Ptr = lookUpRegForValue(V);
+ if (!Ptr) {
// Handle constant load address.
// FIXME: If load type is something we can't handle, this can result in
// a dead stub load instruction.
- if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr))
- // Unhandled operand. Halt "fast" selection and bail.
- return false;
+ if (!isa<Constant>(V) || !X86SelectConstAddr(V, Ptr)) {
+ Ptr = getRegForValue(V);
+ if (Ptr == 0)
+ // Unhandled operand. Halt "fast" selection and bail.
+ return false;
+ }
}
unsigned ResultReg = 0;
@@ -917,18 +944,8 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
if (TM.getRelocationModel() == Reloc::PIC_)
return 0;
- MVT VT = MVT::getMVT(C->getType(), /*HandleUnknown=*/true);
- if (VT == MVT::Other || !VT.isSimple())
- // Unhandled type. Halt "fast" selection and bail.
- return false;
- if (VT == MVT::iPTR)
- // Use pointer type.
- VT = TLI.getPointerTy();
- // We only handle legal types. For example, on x86-32 the instruction
- // selector contains all of the 64-bit instructions from x86-64,
- // under the assumption that i64 won't be used if the target doesn't
- // support it.
- if (!TLI.isTypeLegal(VT))
+ MVT VT;
+ if (!isTypeLegal(C->getType(), TLI, VT))
return false;
// Get opcode and regclass of the output for the given load instruction.
@@ -979,9 +996,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C,
unsigned ResultReg = createResultReg(RC);
if (isa<GlobalValue>(C)) {
- // FIXME: If store value type is something we can't handle, this can result
- // in a dead stub load instruction.
- if (X86SelectConstAddr(C, ResultReg))
+ if (X86SelectConstAddr(C, ResultReg, false, true))
return ResultReg;
return 0;
}
diff --git a/test/CodeGen/X86/fast-isel-mem.ll b/test/CodeGen/X86/fast-isel-mem.ll
index ca175c48bf..5ee5676157 100644
--- a/test/CodeGen/X86/fast-isel-mem.ll
+++ b/test/CodeGen/X86/fast-isel-mem.ll
@@ -1,5 +1,7 @@
-; RUN: llvm-as < %s | llc -fast-isel -mtriple=i386-apple-darwin -mattr=sse2 | \
-; RUN: grep mov | grep lazy_ptr | count 1
+; RUN: llvm-as < %s | llc -fast-isel -mtriple=i386-apple-darwin | \
+; RUN: grep mov | grep lazy_ptr | count 2
+; RUN: llvm-as < %s | llc -fast-isel -march=x86 -relocation-model=static | \
+; RUN: grep lea
@src = external global i32
@@ -8,6 +10,15 @@ entry:
%0 = load i32* @src, align 4
%1 = load i32* @src, align 4
%2 = add i32 %0, %1
- store i32 %2, i32* @src
+ store i32 %2, i32* @src
ret i32 %2
}
+
+%stuff = type { i32 (...)** }
+@LotsStuff = external constant [4 x i32 (...)*]
+
+define void @t(%stuff* %this) nounwind {
+entry:
+ store i32 (...)** getelementptr ([4 x i32 (...)*]* @LotsStuff, i32 0, i32 2), i32 (...)*** null, align 4
+ ret void
+}