author     Chris Lattner <sabre@nondot.org>  2009-02-12 06:56:08 +0000
committer  Chris Lattner <sabre@nondot.org>  2009-02-12 06:56:08 +0000
commit     65c02fbf9bda089398a67c887e32a99799afa522 (patch)
tree       fdb91e305ba60400d584c73d4a1513bc5a88a902
parent     afa77434bb412c358f9d120fe9b2c3bc39da9365 (diff)
fix PR3537: if resetting bbi back to the start of a block, we need to
forget about already inserted expressions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64362 91177308-0d34-0410-b5e6-96231b3b80d8
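In a minimal sketch (illustrative names, not the actual CodeGenPrepare types or the LLVM API), the hazard the message describes looks like this: a scan cursor walks a block while a memo table records helper definitions inserted along the way; rewinding the cursor without clearing the table lets a later lookup reuse a definition that now lies ahead of the cursor, i.e. a use before its def.

    // Minimal sketch of the rescan hazard; hypothetical names, not LLVM code.
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct BlockScanner {
      std::vector<std::string> Block;           // instructions, in block order
      std::size_t Cursor = 0;                   // current scan position
      std::map<std::string, std::size_t> Memo;  // addr expr -> index of sunk def

      // Memoize a helper definition inserted at the current position.
      void Sink(const std::string &Expr) { Memo[Expr] = Cursor; }

      // Restart the scan from the top of the block. Any memoized definition
      // from the previous pass may sit at an index *after* the new cursor;
      // reusing it there would be a use before def, so the memo must go too.
      void RewindToStart() {
        Cursor = 0;
        Memo.clear();  // the fix: forget already-inserted expressions
      }

      // True if reusing Expr here would reference a not-yet-defined value.
      bool WouldUseBeforeDef(const std::string &Expr) const {
        auto It = Memo.find(Expr);
        return It != Memo.end() && It->second > Cursor;
      }
    };

    int main() {
      BlockScanner S;
      S.Cursor = 5;
      S.Sink("gep %tmp2, 4");  // helper definition sunk at index 5
      S.RewindToStart();       // rescan; without Memo.clear() the cached entry
                               // at index 5 would be reused from index 0
      std::cout << S.WouldUseBeforeDef("gep %tmp2, 4") << "\n";  // prints 0
    }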
-rw-r--r--  lib/Transforms/Scalar/CodeGenPrepare.cpp             | 13
-rw-r--r--  test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll  | 35
2 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 0a1c641e2b..12c76e8525 100644
--- a/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -1241,11 +1241,13 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
     // computation.
     Value *&SunkAddr = SunkAddrs[Addr];
     if (SunkAddr) {
-      DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
+      DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
+            << *MemoryInst);
       if (SunkAddr->getType() != Addr->getType())
         SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
     } else {
-      DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
+      DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
+            << *MemoryInst);
       const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();
 
       Value *Result = 0;
@@ -1505,9 +1507,12 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
         if (TLI && isa<InlineAsm>(CI->getCalledValue()))
           if (const TargetAsmInfo *TAI =
                 TLI->getTargetMachine().getTargetAsmInfo()) {
-            if (TAI->ExpandInlineAsm(CI))
+            if (TAI->ExpandInlineAsm(CI)) {
               BBI = BB.begin();
-            else
+              // Avoid processing instructions out of order, which could cause
+              // reuse before a value is defined.
+              SunkAddrs.clear();
+            } else
               // Sink address computing for memory operands into the block.
               MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
           }
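Of the two hunks above, only the second changes behavior: when ExpandInlineAsm rewrites the call and BBI is rewound to BB.begin(), the SunkAddrs map can still refer to address computations inserted further down the block, so it is cleared along with the rewind. The first hunk only extends the DEBUG output to name the memory instruction, which makes a bad reuse visible under -debug.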
diff --git a/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll b/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
new file mode 100644
index 0000000000..7b73a86a72
--- /dev/null
+++ b/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
@@ -0,0 +1,35 @@
+; RUN: llvm-as < %s | llc
+; PR3537
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+	%struct.GetBitContext = type <{ i8*, i8*, i32, i32 }>
+
+define i32 @alac_decode_frame() nounwind {
+entry:
+ %tmp2 = load i8** null ; <i8*> [#uses=2]
+ %tmp34 = getelementptr i8* %tmp2, i32 4 ; <i8*> [#uses=2]
+ %tmp5.i424 = bitcast i8* %tmp34 to i8** ; <i8**> [#uses=2]
+ %tmp15.i = getelementptr i8* %tmp2, i32 12 ; <i8*> [#uses=1]
+ %0 = bitcast i8* %tmp15.i to i32* ; <i32*> [#uses=1]
+ br i1 false, label %if.then43, label %if.end47
+
+if.then43: ; preds = %entry
+ ret i32 0
+
+if.end47: ; preds = %entry
+ %tmp5.i590 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ store i32 19, i32* %0
+ %tmp6.i569 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ %1 = call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind ; <i32> [#uses=0]
+ br i1 false, label %bb.nph, label %if.then63
+
+if.then63: ; preds = %if.end47
+ unreachable
+
+bb.nph: ; preds = %if.end47
+ %2 = bitcast i8* %tmp34 to %struct.GetBitContext* ; <%struct.GetBitContext*> [#uses=1]
+ %call9.i = call fastcc i32 @decode_scalar(%struct.GetBitContext* %2, i32 0, i32 0, i32 0) nounwind ; <i32> [#uses=0]
+ unreachable
+}
+
+declare fastcc i32 @decode_scalar(%struct.GetBitContext* nocapture, i32, i32, i32) nounwind
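A hedged reading of how this test reproduces PR3537 (assuming the X86 TargetAsmInfo expands the "bswap $0" inline asm into a bswap intrinsic): the loads through %tmp5.i424 in if.end47 get their address computation sunk and recorded in SunkAddrs; expanding the inline asm then resets BBI to the start of the block, and without SunkAddrs.clear() the rescan would reuse the recorded address before the instruction that defines it.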