-rw-r--r--  lib/Transforms/Scalar/GVN.cpp                        170
-rw-r--r--  test/Transforms/GVN/2007-07-26-InterlockingLoops.ll    8
-rw-r--r--  test/Transforms/GVN/2008-07-02-Unreachable.ll          8
-rw-r--r--  test/Transforms/GVN/cond_br.ll                        55
-rw-r--r--  test/Transforms/GVN/cond_br2.ll                      139
-rw-r--r--  test/Transforms/GVN/local-pre.ll                       6
-rw-r--r--  test/Transforms/GVN/rle-nonlocal.ll                    5
-rw-r--r--  test/Transforms/GVN/rle-semidominated.ll               5
-rw-r--r--  test/Transforms/GVN/rle.ll                            16
9 files changed, 384 insertions, 28 deletions
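With this change, GVN no longer skips conditional branches whose condition is a constant: processFoldableCondBr() records the blocks reachable only through the dead edge in DeadBlocks, and values flowing out of those blocks are treated as undef when analyzing load availability. The fragment below is an illustrative sketch only, not part of this patch; @g, @clobber, %v0 and %v1 are made-up names. Without the dead-block handling, the call in %dead keeps the second load of @g only partially redundant; once %dead is marked dead, the load becomes fully redundant with %v0.

@g = external global i32

declare void @clobber()

define i32 @sketch() {
entry:
  %v0 = load i32* @g, align 4
  br i1 false, label %dead, label %live   ; constant condition: the edge to %dead is never taken

dead:                                     ; becomes the dead root found by processFoldableCondBr()
  call void @clobber()                    ; would otherwise clobber the cached value of @g
  br label %merge

live:
  br label %merge

merge:
  %v1 = load i32* @g, align 4             ; fully redundant once the dependency from %dead is undef
  ret i32 %v1
}
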
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index bc418af142..2e4d428c8e 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
@@ -507,7 +508,9 @@ namespace {
enum ValType {
SimpleVal, // A simple offsetted value that is accessed.
LoadVal, // A value produced by a load.
- MemIntrin // A memory intrinsic which is loaded from.
+ MemIntrin, // A memory intrinsic which is loaded from.
+ UndefVal // An UndefValue representing a value from a dead block (which
+ // is not yet physically removed from the CFG).
};
/// V - The value that is live out of the block.
@@ -545,10 +548,20 @@ namespace {
Res.Offset = Offset;
return Res;
}
-
+
+ static AvailableValueInBlock getUndef(BasicBlock *BB) {
+ AvailableValueInBlock Res;
+ Res.BB = BB;
+ Res.Val.setPointer(0);
+ Res.Val.setInt(UndefVal);
+ Res.Offset = 0;
+ return Res;
+ }
+
bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
+ bool isUndefValue() const { return Val.getInt() == UndefVal; }
Value *getSimpleValue() const {
assert(isSimpleValue() && "Wrong accessor");
@@ -576,6 +589,7 @@ namespace {
DominatorTree *DT;
const DataLayout *TD;
const TargetLibraryInfo *TLI;
+ SetVector<BasicBlock *> DeadBlocks;
ValueTable VN;
@@ -698,6 +712,9 @@ namespace {
unsigned replaceAllDominatedUsesWith(Value *From, Value *To,
const BasicBlockEdge &Root);
bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root);
+ bool processFoldableCondBr(BranchInst *BI);
+ void addDeadBlock(BasicBlock *BB);
+ void assignValNumForDeadCode();
};
char GVN::ID = 0;
@@ -1253,8 +1270,10 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI,
// just use the dominating value directly.
if (ValuesPerBlock.size() == 1 &&
gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
- LI->getParent()))
+ LI->getParent())) {
+ assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominates this block");
return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), gvn);
+ }
// Otherwise, we have to construct SSA form.
SmallVector<PHINode*, 8> NewPHIs;
@@ -1324,7 +1343,7 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
<< *getCoercedLoadValue() << '\n'
<< *Res << '\n' << "\n\n\n");
}
- } else {
+ } else if (isMemIntrinValue()) {
const DataLayout *TD = gvn.getDataLayout();
assert(TD && "Need target data to handle type mismatch case");
Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
@@ -1332,6 +1351,10 @@ Value *AvailableValueInBlock::MaterializeAdjustedValue(Type *LoadTy, GVN &gvn) c
DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
<< " " << *getMemIntrinValue() << '\n'
<< *Res << '\n' << "\n\n\n");
+ } else {
+ assert(isUndefValue() && "Should be UndefVal");
+ DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
+ return UndefValue::get(LoadTy);
}
return Res;
}
@@ -1355,6 +1378,13 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
BasicBlock *DepBB = Deps[i].getBB();
MemDepResult DepInfo = Deps[i].getResult();
+ if (DeadBlocks.count(DepBB)) {
+ // A dead dependent mem-op is disguised as a load evaluating the same
+ // value as the load in question.
+ ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
+ continue;
+ }
+
if (!DepInfo.isDef() && !DepInfo.isClobber()) {
UnavailableBlocks.push_back(DepBB);
continue;
@@ -2191,11 +2221,13 @@ bool GVN::processInstruction(Instruction *I) {
// For conditional branches, we can perform simple conditional propagation on
// the condition value itself.
if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
- if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
+ if (!BI->isConditional())
return false;
- Value *BranchCond = BI->getCondition();
+ if (isa<Constant>(BI->getCondition()))
+ return processFoldableCondBr(BI);
+ Value *BranchCond = BI->getCondition();
BasicBlock *TrueSucc = BI->getSuccessor(0);
BasicBlock *FalseSucc = BI->getSuccessor(1);
// Avoid multiple edges early.
@@ -2312,6 +2344,9 @@ bool GVN::runOnFunction(Function& F) {
}
if (EnablePRE) {
+ // Fabricate val-num for dead-code in order to suppress assertion in
+ // performPRE().
+ assignValNumForDeadCode();
bool PREChanged = true;
while (PREChanged) {
PREChanged = performPRE(F);
@@ -2325,6 +2360,9 @@ bool GVN::runOnFunction(Function& F) {
// Actually, when this happens, we should just fully integrate PRE into GVN.
cleanupGlobalSets();
+ // Do not clean up DeadBlocks in cleanupGlobalSets() as it's called for each
+ // iteration.
+ DeadBlocks.clear();
return Changed;
}
@@ -2335,6 +2373,9 @@ bool GVN::processBlock(BasicBlock *BB) {
// (and incrementing BI before processing an instruction).
assert(InstrsToErase.empty() &&
"We expect InstrsToErase to be empty across iterations");
+ if (DeadBlocks.count(BB))
+ return false;
+
bool ChangedFunction = false;
for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
@@ -2628,3 +2669,120 @@ void GVN::verifyRemoved(const Instruction *Inst) const {
}
}
}
+
+// BB is declared dead, which implies other blocks become dead as well. This
+// function adds all these blocks to "DeadBlocks". For the dead blocks'
+// live successors, it updates their phi nodes by replacing the operands
+// corresponding to dead blocks with UndefVal.
+//
+void GVN::addDeadBlock(BasicBlock *BB) {
+ SmallVector<BasicBlock *, 4> NewDead;
+ SmallSetVector<BasicBlock *, 4> DF;
+
+ NewDead.push_back(BB);
+ while (!NewDead.empty()) {
+ BasicBlock *D = NewDead.pop_back_val();
+ if (DeadBlocks.count(D))
+ continue;
+
+ // All blocks dominated by D are dead.
+ SmallVector<BasicBlock *, 8> Dom;
+ DT->getDescendants(D, Dom);
+ DeadBlocks.insert(Dom.begin(), Dom.end());
+
+ // Figure out the dominance-frontier(D).
+ for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(),
+ E = Dom.end(); I != E; I++) {
+ BasicBlock *B = *I;
+ for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) {
+ BasicBlock *S = *SI;
+ if (DeadBlocks.count(S))
+ continue;
+
+ bool AllPredDead = true;
+ for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++)
+ if (!DeadBlocks.count(*PI)) {
+ AllPredDead = false;
+ break;
+ }
+
+ if (!AllPredDead) {
+ // S could be proved dead later on. That is why we don't update phi
+ // operands at this moment.
+ DF.insert(S);
+ } else {
+ // Although S is not dominated by D, it is dead by now. This could
+ // happen if S already had a dead predecessor before D was declared
+ // dead.
+ NewDead.push_back(S);
+ }
+ }
+ }
+ }
+
+ // For the dead blocks' live successors, update their phi nodes by replacing
+ // the operands corresponding to dead blocks with UndefVal.
+ for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
+ I != E; I++) {
+ BasicBlock *B = *I;
+ if (DeadBlocks.count(B))
+ continue;
+
+ for (pred_iterator PI = pred_begin(B), PE = pred_end(B); PI != PE; PI++) {
+ BasicBlock *P = *PI;
+ if (!DeadBlocks.count(P))
+ continue;
+ for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
+ PHINode &Phi = cast<PHINode>(*II);
+ Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
+ UndefValue::get(Phi.getType()));
+ }
+ }
+ }
+}
+
+// If the given branch is recognized as a foldable branch (i.e. a conditional
+// branch with a constant condition), it will perform the following analyses
+// and transformations.
+// 1) If the dead outgoing edge is a critical edge, split it. Let
+// R be the target of the dead outgoing edge.
+// 2) Identify the set of dead blocks implied by the branch's dead outgoing
+// edge. The result of this step will be {X | X is dominated by R}.
+// 3) Identify those blocks which have at least one dead predecessor. The
+// result of this step will be dominance-frontier(R).
+// 4) Update the PHIs in DF(R) by replacing the operands corresponding to
+// dead blocks with "UndefVal", in the hope that these PHIs will be
+// optimized away.
+//
+// Return true iff *NEW* dead code is found.
+bool GVN::processFoldableCondBr(BranchInst *BI) {
+ if (!BI || BI->isUnconditional())
+ return false;
+
+ ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
+ if (!Cond)
+ return false;
+
+ BasicBlock *DeadRoot = Cond->getZExtValue() ?
+ BI->getSuccessor(1) : BI->getSuccessor(0);
+ if (DeadBlocks.count(DeadRoot))
+ return false;
+
+ if (!DeadRoot->getSinglePredecessor())
+ DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
+
+ addDeadBlock(DeadRoot);
+ return true;
+}
+
+// performPRE() will trigger an assert if it comes across an instruction
+// without an associated val-num. As a function normally has far more live
+// instructions than dead ones, it makes more sense just to "fabricate" a
+// val-num for the dead code than to check whether each instruction is dead.
+void GVN::assignValNumForDeadCode() {
+ for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
+ E = DeadBlocks.end(); I != E; I++) {
+ for (BasicBlock::iterator II = (*I)->begin(), EE = (*I)->end();
+ II != EE; II++)
+ VN.lookup_or_add(&*II);
+ }
+}
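
The PHI rewriting at the end of addDeadBlock() only touches operands whose incoming block is dead; live blocks on the dominance frontier otherwise keep their PHIs intact. A minimal sketch of that step (made-up function, not one of the tests below):

define i32 @df_sketch(i32 %a) {
entry:
  br i1 true, label %live, label %dead   ; constant condition: %dead is the dead root

dead:                                    ; added to DeadBlocks together with everything it dominates
  %x = add i32 %a, 1
  br label %merge

live:
  br label %merge

merge:                                   ; live block with a dead predecessor, i.e. in DF(%dead)
  ; addDeadBlock() rewrites only the incoming value from %dead, turning this into
  ;   %p = phi i32 [ undef, %dead ], [ %a, %live ]
  %p = phi i32 [ %x, %dead ], [ %a, %live ]
  ret i32 %p
}

This is also why the pre-existing tests below trade their "br i1 true" / "br i1 false" branches for branches on fresh function arguments: with this patch GVN folds constant conditional branches, and the code paths those tests exercise would otherwise be marked dead.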
diff --git a/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll b/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
index a1cc0083f1..5a15f0e43a 100644
--- a/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
+++ b/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
@@ -2,7 +2,7 @@
@last = external global [65 x i32*]
-define i32 @NextRootMove(i32 %wtm) {
+define i32 @NextRootMove(i32 %wtm, i32 %x, i32 %y, i32 %z) {
entry:
%A = alloca i32*
%tmp17618 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
@@ -15,12 +15,14 @@ entry:
br label %cond_true116
cond_true116:
- br i1 false, label %cond_true128, label %cond_true145
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %cond_true128, label %cond_true145
cond_true128:
%tmp17625 = load i32** getelementptr ([65 x i32*]* @last, i32 0, i32 1), align 4
store i32* %tmp17625, i32** %A
- br i1 false, label %bb98.backedge, label %return.loopexit
+ %cmp1 = icmp eq i32 %x, %z
+ br i1 %cmp1, label %bb98.backedge, label %return.loopexit
bb98.backedge:
br label %cond_true116
diff --git a/test/Transforms/GVN/2008-07-02-Unreachable.ll b/test/Transforms/GVN/2008-07-02-Unreachable.ll
index 4f07868a1c..ce83fa4e4b 100644
--- a/test/Transforms/GVN/2008-07-02-Unreachable.ll
+++ b/test/Transforms/GVN/2008-07-02-Unreachable.ll
@@ -3,10 +3,11 @@
@g_3 = external global i8 ; <i8*> [#uses=2]
-define i8 @func_1() nounwind {
+define i8 @func_1(i32 %x, i32 %y) nounwind {
entry:
%A = alloca i8
- br i1 false, label %ifelse, label %ifthen
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %ifelse, label %ifthen
ifthen: ; preds = %entry
br label %ifend
@@ -14,9 +15,6 @@ ifthen: ; preds = %entry
ifelse: ; preds = %entry
%tmp3 = load i8* @g_3 ; <i8> [#uses=0]
store i8 %tmp3, i8* %A
- br label %forcond.thread
-
-forcond.thread: ; preds = %ifelse
br label %afterfor
forcond: ; preds = %forinc
diff --git a/test/Transforms/GVN/cond_br.ll b/test/Transforms/GVN/cond_br.ll
new file mode 100644
index 0000000000..918e7d41f1
--- /dev/null
+++ b/test/Transforms/GVN/cond_br.ll
@@ -0,0 +1,55 @@
+; RUN: opt -basicaa -gvn -S < %s | FileCheck %s
+@y = external global i32
+@z = external global i32
+
+; Function Attrs: nounwind ssp uwtable
+define void @foo(i32 %x) {
+; CHECK: @foo(i32 %x)
+; CHECK: %.pre = load i32* @y
+; CHECK: call void @bar(i32 %.pre)
+
+ %t = sub i32 %x, %x
+ %.pre = load i32* @y, align 4
+ %cmp = icmp sgt i32 %t, 2
+ br i1 %cmp, label %if.then, label %entry.if.end_crit_edge
+
+entry.if.end_crit_edge: ; preds = %entry
+ br label %if.end
+
+if.then: ; preds = %entry
+ %add = add nsw i32 %x, 3
+ store i32 %add, i32* @y, align 4
+ br label %if.end
+
+if.end: ; preds = %entry.if.end_crit_edge, %if.then
+ %1 = phi i32 [ %.pre, %entry.if.end_crit_edge ], [ %add, %if.then ]
+ tail call void @bar(i32 %1)
+ ret void
+}
+
+define void @foo2(i32 %x) {
+; CHECK: @foo2(i32 %x)
+; CHECK: %.pre = load i32* @y
+; CHECK: tail call void @bar(i32 %.pre)
+entry:
+ %t = sub i32 %x, %x
+ %.pre = load i32* @y, align 4
+ %cmp = icmp sgt i32 %t, 2
+ br i1 %cmp, label %if.then, label %if.else
+
+if.then: ; preds = %entry
+ %add = add nsw i32 %x, 3
+ store i32 %add, i32* @y, align 4
+ br label %if.end
+
+if.else: ; preds = %entry
+ store i32 1, i32* @z, align 4
+ br label %if.end
+
+if.end: ; preds = %if.else, %if.then
+ %0 = phi i32 [ %.pre, %if.else ], [ %add, %if.then ]
+ tail call void @bar(i32 %0)
+ ret void
+}
+
+declare void @bar(i32)
diff --git a/test/Transforms/GVN/cond_br2.ll b/test/Transforms/GVN/cond_br2.ll
new file mode 100644
index 0000000000..e0ef39dbf8
--- /dev/null
+++ b/test/Transforms/GVN/cond_br2.ll
@@ -0,0 +1,139 @@
+; RUN: opt -basicaa -gvn -S < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+
+%"class.llvm::SmallVector" = type { %"class.llvm::SmallVectorImpl", [1 x %"union.llvm::SmallVectorBase::U"] }
+%"class.llvm::SmallVectorImpl" = type { %"class.llvm::SmallVectorTemplateBase" }
+%"class.llvm::SmallVectorTemplateBase" = type { %"class.llvm::SmallVectorTemplateCommon" }
+%"class.llvm::SmallVectorTemplateCommon" = type { %"class.llvm::SmallVectorBase" }
+%"class.llvm::SmallVectorBase" = type { i8*, i8*, i8*, %"union.llvm::SmallVectorBase::U" }
+%"union.llvm::SmallVectorBase::U" = type { x86_fp80 }
+
+; Function Attrs: ssp uwtable
+define void @_Z4testv() #0 {
+; CHECK: @_Z4testv()
+; CHECK: invoke.cont:
+; CHECK: br i1 true, label %new.notnull.i11, label %if.end.i14
+; CHECK: Retry.i10:
+
+entry:
+ %sv = alloca %"class.llvm::SmallVector", align 16
+ %0 = bitcast %"class.llvm::SmallVector"* %sv to i8*
+ call void @llvm.lifetime.start(i64 64, i8* %0) #1
+ %BeginX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 0
+ %FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
+ %1 = bitcast %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i to i8*
+ store i8* %1, i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !0
+ %EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
+ store i8* %1, i8** %EndX.i.i.i.i.i.i, align 8, !tbaa !0
+ %CapacityX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
+ %add.ptr.i.i.i.i2.i.i = getelementptr inbounds %"union.llvm::SmallVectorBase::U"* %FirstEl.i.i.i.i.i.i, i64 2
+ %add.ptr.i.i.i.i.i.i = bitcast %"union.llvm::SmallVectorBase::U"* %add.ptr.i.i.i.i2.i.i to i8*
+ store i8* %add.ptr.i.i.i.i.i.i, i8** %CapacityX.i.i.i.i.i.i, align 16, !tbaa !0
+ %EndX.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
+ %2 = load i8** %EndX.i, align 8, !tbaa !0
+ %CapacityX.i = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 2
+ %cmp.i = icmp ult i8* %2, %add.ptr.i.i.i.i.i.i
+ br i1 %cmp.i, label %Retry.i, label %if.end.i
+
+Retry.i: ; preds = %.noexc, %entry
+ %3 = phi i8* [ %2, %entry ], [ %.pre.i, %.noexc ]
+ %new.isnull.i = icmp eq i8* %3, null
+ br i1 %new.isnull.i, label %invoke.cont, label %new.notnull.i
+
+new.notnull.i: ; preds = %Retry.i
+ %4 = bitcast i8* %3 to i32*
+ store i32 1, i32* %4, align 4, !tbaa !3
+ br label %invoke.cont
+
+if.end.i: ; preds = %entry
+ %5 = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0
+ invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"* %5, i64 0, i64 4)
+ to label %.noexc unwind label %lpad
+
+.noexc: ; preds = %if.end.i
+ %.pre.i = load i8** %EndX.i, align 8, !tbaa !0
+ br label %Retry.i
+
+invoke.cont: ; preds = %new.notnull.i, %Retry.i
+ %add.ptr.i = getelementptr inbounds i8* %3, i64 4
+ store i8* %add.ptr.i, i8** %EndX.i, align 8, !tbaa !0
+ %6 = load i8** %CapacityX.i, align 16, !tbaa !0
+ %cmp.i8 = icmp ult i8* %add.ptr.i, %6
+ br i1 %cmp.i8, label %new.notnull.i11, label %if.end.i14
+
+Retry.i10: ; preds = %if.end.i14
+ %.pre.i13 = load i8** %EndX.i, align 8, !tbaa !0
+ %new.isnull.i9 = icmp eq i8* %.pre.i13, null
+ br i1 %new.isnull.i9, label %invoke.cont2, label %new.notnull.i11
+
+new.notnull.i11: ; preds = %invoke.cont, %Retry.i10
+ %7 = phi i8* [ %.pre.i13, %Retry.i10 ], [ %add.ptr.i, %invoke.cont ]
+ %8 = bitcast i8* %7 to i32*
+ store i32 2, i32* %8, align 4, !tbaa !3
+ br label %invoke.cont2
+
+if.end.i14: ; preds = %invoke.cont
+ %9 = getelementptr inbounds %"class.llvm::SmallVector"* %sv, i64 0, i32 0, i32 0, i32 0, i32 0
+ invoke void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"* %9, i64 0, i64 4)
+ to label %Retry.i10 unwind label %lpad
+
+invoke.cont2: ; preds = %new.notnull.i11, %Retry.i10
+ %10 = phi i8* [ null, %Retry.i10 ], [ %7, %new.notnull.i11 ]
+ %add.ptr.i12 = getelementptr inbounds i8* %10, i64 4
+ store i8* %add.ptr.i12, i8** %EndX.i, align 8, !tbaa !0
+ invoke void @_Z1gRN4llvm11SmallVectorIiLj8EEE(%"class.llvm::SmallVector"* %sv)
+ to label %invoke.cont3 unwind label %lpad
+
+invoke.cont3: ; preds = %invoke.cont2
+ %11 = load i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !0
+ %cmp.i.i.i.i19 = icmp eq i8* %11, %1
+ br i1 %cmp.i.i.i.i19, label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21, label %if.then.i.i.i20
+
+if.then.i.i.i20: ; preds = %invoke.cont3
+ call void @free(i8* %11) #1
+ br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
+
+_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
+ call void @llvm.lifetime.end(i64 64, i8* %0) #1
+ ret void
+
+lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
+ %12 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+ cleanup
+ %13 = load i8** %BeginX.i.i.i.i.i.i, align 16, !tbaa !0
+ %cmp.i.i.i.i = icmp eq i8* %13, %1
+ br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i
+
+if.then.i.i.i: ; preds = %lpad
+ call void @free(i8* %13) #1
+ br label %eh.resume
+
+eh.resume: ; preds = %if.then.i.i.i, %lpad
+ resume { i8*, i32 } %12
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture) #1
+
+declare i32 @__gxx_personality_v0(...)
+
+declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(%"class.llvm::SmallVector"*) #2
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture) #1
+
+declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(%"class.llvm::SmallVectorBase"*, i64, i64) #2
+
+; Function Attrs: nounwind
+declare void @free(i8* nocapture) #3
+
+attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind }
+attributes #2 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = metadata !{metadata !"any pointer", metadata !1}
+!1 = metadata !{metadata !"omnipotent char", metadata !2}
+!2 = metadata !{metadata !"Simple C/C++ TBAA"}
+!3 = metadata !{metadata !"int", metadata !1}
+
diff --git a/test/Transforms/GVN/local-pre.ll b/test/Transforms/GVN/local-pre.ll
index 1d0dadfbe0..2c92699dca 100644
--- a/test/Transforms/GVN/local-pre.ll
+++ b/test/Transforms/GVN/local-pre.ll
@@ -1,9 +1,9 @@
; RUN: opt < %s -gvn -enable-pre -S | grep "b.pre"
-define i32 @main(i32 %p) {
+define i32 @main(i32 %p, i32 %q) {
block1:
-
- br i1 true, label %block2, label %block3
+ %cmp = icmp eq i32 %p, %q
+ br i1 %cmp, label %block2, label %block3
block2:
%a = add i32 %p, 1
diff --git a/test/Transforms/GVN/rle-nonlocal.ll b/test/Transforms/GVN/rle-nonlocal.ll
index 6b74e9a946..8229aaa142 100644
--- a/test/Transforms/GVN/rle-nonlocal.ll
+++ b/test/Transforms/GVN/rle-nonlocal.ll
@@ -1,8 +1,9 @@
; RUN: opt < %s -basicaa -gvn -S | FileCheck %s
-define i32 @main(i32** %p) {
+define i32 @main(i32** %p, i32 %x, i32 %y) {
block1:
- br i1 true, label %block2, label %block3
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %block2, label %block3
block2:
%a = load i32** %p
diff --git a/test/Transforms/GVN/rle-semidominated.ll b/test/Transforms/GVN/rle-semidominated.ll
index 71aa548ab1..923cd03ecd 100644
--- a/test/Transforms/GVN/rle-semidominated.ll
+++ b/test/Transforms/GVN/rle-semidominated.ll
@@ -1,9 +1,10 @@
; RUN: opt < %s -basicaa -gvn -S | grep "DEAD = phi i32 "
-define i32 @main(i32* %p) {
+define i32 @main(i32* %p, i32 %x, i32 %y) {
block1:
%z = load i32* %p
- br i1 true, label %block2, label %block3
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %block2, label %block3
block2:
br label %block4
diff --git a/test/Transforms/GVN/rle.ll b/test/Transforms/GVN/rle.ll
index 8787dd5333..a928a16556 100644
--- a/test/Transforms/GVN/rle.ll
+++ b/test/Transforms/GVN/rle.ll
@@ -357,13 +357,14 @@ Cont:
; CHECK: ret i8 %A
}
-define i32 @chained_load(i32** %p) {
+define i32 @chained_load(i32** %p, i32 %x, i32 %y) {
block1:
%A = alloca i32*
%z = load i32** %p
store i32* %z, i32** %A
- br i1 true, label %block2, label %block3
+ %cmp = icmp eq i32 %x, %y
+ br i1 %cmp, label %block2, label %block3
block2:
%a = load i32** %p
@@ -427,10 +428,11 @@ TY:
ret i32 0
}
-define i32 @phi_trans3(i32* %p) {
+define i32 @phi_trans3(i32* %p, i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: @phi_trans3(
block1:
- br i1 true, label %block2, label %block3
+ %cmpxy = icmp eq i32 %x, %y
+ br i1 %cmpxy, label %block2, label %block3
block2:
store i32 87, i32* %p
@@ -443,7 +445,7 @@ block3:
block4:
%A = phi i32 [-1, %block2], [42, %block3]
- br i1 true, label %block5, label %exit
+ br i1 %cmpxy, label %block5, label %exit
; CHECK: block4:
; CHECK-NEXT: %D = phi i32 [ 87, %block2 ], [ 97, %block3 ]
@@ -451,11 +453,11 @@ block4:
block5:
%B = add i32 %A, 1
- br i1 true, label %block6, label %exit
+ br i1 %cmpxy, label %block6, label %exit
block6:
%C = getelementptr i32* %p, i32 %B
- br i1 true, label %block7, label %exit
+ br i1 %cmpxy, label %block7, label %exit
block7:
%D = load i32* %C