diff options
author | Arnold Schwaighofer <aschwaighofer@apple.com> | 2014-01-10 18:20:32 +0000 |
---|---|---|
committer | Arnold Schwaighofer <aschwaighofer@apple.com> | 2014-01-10 18:20:32 +0000 |
commit | ee3f7de62e5616242441a76a8e92260d7b0f10e5 (patch) | |
tree | a747ef919609000d0dbfca52d43e1bdcbbc341b3 /test/Transforms/LoopVectorize/version-mem-access.ll | |
parent | db81071b34eb4f5a9a27b4b5f8d32cc9f989db96 (diff) | |
download | llvm-ee3f7de62e5616242441a76a8e92260d7b0f10e5.tar.gz llvm-ee3f7de62e5616242441a76a8e92260d7b0f10e5.tar.bz2 llvm-ee3f7de62e5616242441a76a8e92260d7b0f10e5.tar.xz |
LoopVectorizer: Handle strided memory accesses by versioning
for (i = 0; i < N; ++i)
A[i * Stride1] += B[i * Stride2];
We take loops like this and check that the symbolic strides 'Stride1/2' are one
and drop to the scalar loop if they are not.
This is currently disabled by default and hidden behind the flag
'enable-mem-access-versioning'.
radar://13075509
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@198950 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/Transforms/LoopVectorize/version-mem-access.ll')
-rw-r--r-- | test/Transforms/LoopVectorize/version-mem-access.ll | 50 |
1 files changed, 50 insertions, 0 deletions
diff --git a/test/Transforms/LoopVectorize/version-mem-access.ll b/test/Transforms/LoopVectorize/version-mem-access.ll new file mode 100644 index 0000000000..e712728111 --- /dev/null +++ b/test/Transforms/LoopVectorize/version-mem-access.ll @@ -0,0 +1,50 @@ +; RUN: opt -basicaa -loop-vectorize -enable-mem-access-versioning -force-vector-width=2 -force-vector-unroll=1 < %s -S | FileCheck %s + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +; CHECK-LABEL: test +define void @test(i32* noalias %A, i64 %AStride, + i32* noalias %B, i32 %BStride, + i32* noalias %C, i64 %CStride, i32 %N) { +entry: + %cmp13 = icmp eq i32 %N, 0 + br i1 %cmp13, label %for.end, label %for.body.preheader + +; CHECK-DAG: icmp ne i64 %AStride, 1 +; CHECK-DAG: icmp ne i32 %BStride, 1 +; CHECK-DAG: icmp ne i64 %CStride, 1 +; CHECK: or +; CHECK: or +; CHECK: br + +; CHECK: vector.body +; CHECK: load <2 x i32> + +for.body.preheader: + br label %for.body + +for.body: + %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ] + %iv.trunc = trunc i64 %indvars.iv to i32 + %mul = mul i32 %iv.trunc, %BStride + %mul64 = zext i32 %mul to i64 + %arrayidx = getelementptr inbounds i32* %B, i64 %mul64 + %0 = load i32* %arrayidx, align 4 + %mul2 = mul nsw i64 %indvars.iv, %CStride + %arrayidx3 = getelementptr inbounds i32* %C, i64 %mul2 + %1 = load i32* %arrayidx3, align 4 + %mul4 = mul nsw i32 %1, %0 + %mul3 = mul nsw i64 %indvars.iv, %AStride + %arrayidx7 = getelementptr inbounds i32* %A, i64 %mul3 + store i32 %mul4, i32* %arrayidx7, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 + %exitcond = icmp eq i32 %lftr.wideiv, %N + br i1 %exitcond, label %for.end.loopexit, label %for.body + +for.end.loopexit: + br label %for.end + +for.end: + ret void +} |