diff options
author | Arnold Schwaighofer <aschwaighofer@apple.com> | 2013-11-01 03:05:07 +0000 |
---|---|---|
committer | Arnold Schwaighofer <aschwaighofer@apple.com> | 2013-11-01 03:05:07 +0000 |
commit | 0097e155025767c11790912dcf780f82dffaffb1 (patch) | |
tree | bd5e660c6fe3dcd9d182c532fe9ff3e506d1851d /test | |
parent | d272a1223314a69e4678816feeff2cfb3e740f8f (diff) | |
download | llvm-0097e155025767c11790912dcf780f82dffaffb1.tar.gz llvm-0097e155025767c11790912dcf780f82dffaffb1.tar.bz2 llvm-0097e155025767c11790912dcf780f82dffaffb1.tar.xz |
LoopVectorizer: If dependency checks fail try runtime checks
When a dependence check fails, we can still try to vectorize loops with runtime
array bounds checks.
This helps linpack to vectorize a loop in dgefa. And we are back to 2x of the
scalar performance on a corei7-avx.
radar://15339680
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193853 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- | test/Transforms/LoopVectorize/runtime-check.ll | 28 |
1 files changed, 28 insertions, 0 deletions
diff --git a/test/Transforms/LoopVectorize/runtime-check.ll b/test/Transforms/LoopVectorize/runtime-check.ll index 47722566e1..d15479d202 100644 --- a/test/Transforms/LoopVectorize/runtime-check.ll +++ b/test/Transforms/LoopVectorize/runtime-check.ll @@ -34,3 +34,31 @@ for.body: ; preds = %entry, %for.body for.end: ; preds = %for.body, %entry ret i32 undef } + +; Make sure that we try to vectorize loops with a runtime check if the +; dependency check fails. + +; CHECK-LABEL: test_runtime_check +; CHECK: <4 x float> +define void @test_runtime_check(float* %a, float %b, i64 %offset, i64 %offset2, i64 %n) { +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %ind.sum = add i64 %iv, %offset + %arr.idx = getelementptr inbounds float* %a, i64 %ind.sum + %l1 = load float* %arr.idx, align 4 + %ind.sum2 = add i64 %iv, %offset2 + %arr.idx2 = getelementptr inbounds float* %a, i64 %ind.sum2 + %l2 = load float* %arr.idx2, align 4 + %m = fmul fast float %b, %l2 + %ad = fadd fast float %l1, %m + store float %ad, float* %arr.idx, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond = icmp eq i64 %iv.next, %n + br i1 %exitcond, label %loopexit, label %for.body + +loopexit: + ret void +} |