-rw-r--r--  lib/Transforms/Vectorize/SLPVectorizer.cpp     11
-rw-r--r--  test/Transforms/SLPVectorizer/X86/simplebb.ll   43
2 files changed, 3 insertions, 51 deletions
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 4d82bc428d..af1c0e7423 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -786,14 +786,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
     }
     case Instruction::Load: {
       // Check if the loads are consecutive or if we need to swizzle them.
-      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
-        LoadInst *L = cast<LoadInst>(VL[i]);
-        if (!L->isSimple() || !isConsecutiveAccess(VL[i], VL[i + 1])) {
+      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
+        if (!isConsecutiveAccess(VL[i], VL[i + 1])) {
           newTreeEntry(VL, false);
           DEBUG(dbgs() << "SLP: Need to swizzle loads.\n");
           return;
         }
-      }
+
       newTreeEntry(VL, true);
       DEBUG(dbgs() << "SLP: added a vector of loads.\n");
       return;
@@ -1912,10 +1911,6 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
     if (!SI)
       continue;
 
-    // Don't touch volatile stores.
-    if (!SI->isSimple())
-      continue;
-
     // Check that the pointer points to scalars.
     Type *Ty = SI->getValueOperand()->getType();
     if (Ty->isAggregateType() || Ty->isVectorTy())
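
For context (not part of this commit): isSimple() on LLVM's LoadInst and StoreInst returns true only when the access is neither volatile nor atomic, so the guards removed above were what kept volatile accesses out of the vectorizable trees. A minimal C++ sketch of that pattern follows; the helper name allSimpleLoads is hypothetical, not code from this change.

// Sketch of the removed guard's logic, not the actual SLPVectorizer code.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: true only if every value in VL is a load that is
// neither volatile nor atomic (LoadInst::isSimple() checks exactly that).
static bool allSimpleLoads(ArrayRef<Value *> VL) {
  for (Value *V : VL) {
    auto *L = dyn_cast<LoadInst>(V);
    if (!L || !L->isSimple())
      return false;
  }
  return true;
}
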
diff --git a/test/Transforms/SLPVectorizer/X86/simplebb.ll b/test/Transforms/SLPVectorizer/X86/simplebb.ll
index 7d682e5e46..0b76bec07b 100644
--- a/test/Transforms/SLPVectorizer/X86/simplebb.ll
+++ b/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -44,46 +44,3 @@ entry:
   store double %mul5, double* %arrayidx5, align 8
   ret void
 }
-
-; Don't vectorize volatile loads.
-; CHECK: test_volatile_load
-; CHECK-NOT: load <2 x double>
-; CHECK: store <2 x double>
-; CHECK: ret
-define void @test_volatile_load(double* %a, double* %b, double* %c) {
-entry:
-  %i0 = load volatile double* %a, align 8
-  %i1 = load volatile double* %b, align 8
-  %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
-  %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
-  %i4 = load double* %arrayidx4, align 8
-  %mul5 = fmul double %i3, %i4
-  store double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
-  store double %mul5, double* %arrayidx5, align 8
-  ret void
-}
-
-; Don't vectorize volatile stores.
-; CHECK: test_volatile_store
-; CHECK-NOT: store <2 x double>
-; CHECK: ret
-define void @test_volatile_store(double* %a, double* %b, double* %c) {
-entry:
-  %i0 = load double* %a, align 8
-  %i1 = load double* %b, align 8
-  %mul = fmul double %i0, %i1
-  %arrayidx3 = getelementptr inbounds double* %a, i64 1
-  %i3 = load double* %arrayidx3, align 8
-  %arrayidx4 = getelementptr inbounds double* %b, i64 1
-  %i4 = load double* %arrayidx4, align 8
-  %mul5 = fmul double %i3, %i4
-  store volatile double %mul, double* %c, align 8
-  %arrayidx5 = getelementptr inbounds double* %c, i64 1
-  store volatile double %mul5, double* %arrayidx5, align 8
-  ret void
-}
-
-