path: root/test/Transforms/ScalarRepl
author    Bob Wilson <bob.wilson@apple.com>  2013-06-25 19:09:50 +0000
committer Bob Wilson <bob.wilson@apple.com>  2013-06-25 19:09:50 +0000
commit a1fe2948ed4039e68d1784494c3b23a4ce4126b4 (patch)
tree   4a63fabad494aa789ebf7c51c28166308920a07a /test/Transforms/ScalarRepl
parent 2560e242c87b205143ca5e0e87a216e06f51a434 (diff)
Fix SROA to avoid unnecessary scalar conversions for 1-element vectors.
When a 1-element vector alloca is promoted, a store instruction can often be
rewritten without converting the value to a scalar and then using an
insertelement instruction to stuff it into the new alloca. This patch adds a
check to skip that conversion when it is unnecessary. This turns out to be
important for some ARM Neon operations, where <1 x i64> is used to work
around the fact that i64 is not a legal type.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@184870 91177308-0d34-0410-b5e6-96231b3b80d8
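For illustration only (this snippet is not part of the patch, and the value
names are hypothetical): without the new check, promoting a <1 x i64> alloca
could round-trip a stored value through a scalar, roughly like

  ; hypothetical rewrite without the check: vector -> scalar -> vector
  %scalar   = bitcast <1 x i64> %val to i64
  %promoted = insertelement <1 x i64> undef, i64 %scalar, i32 0

With the check in place, a stored value that already has the alloca's
1-element vector type is used directly, so neither instruction is emitted.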
Diffstat (limited to 'test/Transforms/ScalarRepl')
 -rw-r--r--  test/Transforms/ScalarRepl/vector_promote.ll | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/test/Transforms/ScalarRepl/vector_promote.ll b/test/Transforms/ScalarRepl/vector_promote.ll
index 5c82ae4d19..03ef475c3e 100644
--- a/test/Transforms/ScalarRepl/vector_promote.ll
+++ b/test/Transforms/ScalarRepl/vector_promote.ll
@@ -111,3 +111,27 @@ entry:
 ; CHECK-NOT: alloca
 ; CHECK: and i192
 }
+
+; When promoting an alloca to a 1-element vector type, instructions that
+; produce that same vector type should not be changed to insert one element
+; into a new vector. <rdar://problem/14249078>
+define <1 x i64> @test8(<1 x i64> %a) {
+entry:
+  %a.addr = alloca <1 x i64>, align 8
+  %__a = alloca <1 x i64>, align 8
+  %tmp = alloca <1 x i64>, align 8
+  store <1 x i64> %a, <1 x i64>* %a.addr, align 8
+  %0 = load <1 x i64>* %a.addr, align 8
+  store <1 x i64> %0, <1 x i64>* %__a, align 8
+  %1 = load <1 x i64>* %__a, align 8
+  %2 = bitcast <1 x i64> %1 to <8 x i8>
+  %3 = bitcast <8 x i8> %2 to <1 x i64>
+  %vshl_n = shl <1 x i64> %3, <i64 4>
+  store <1 x i64> %vshl_n, <1 x i64>* %tmp
+  %4 = load <1 x i64>* %tmp
+; CHECK: @test8
+; CHECK-NOT: alloca
+; CHECK-NOT: insertelement
+; CHECK: ret <1 x i64>
+}
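For reference, a sketch of what running scalarrepl over @test8 should now
produce (not output taken from the patch: exact value names depend on the
pass, and the paired bitcasts may survive until instcombine cleans them up):

  define <1 x i64> @test8(<1 x i64> %a) {
  entry:
    %0 = bitcast <1 x i64> %a to <8 x i8>
    %1 = bitcast <8 x i8> %0 to <1 x i64>
    %vshl_n = shl <1 x i64> %1, <i64 4>
    ret <1 x i64> %vshl_n
  }

This satisfies the CHECK lines: no alloca remains, no insertelement is
introduced, and the function still returns <1 x i64> directly.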