author     Bob Wilson <bob.wilson@apple.com>   2010-10-27 20:38:28 +0000
committer  Bob Wilson <bob.wilson@apple.com>   2010-10-27 20:38:28 +0000
commit     f20700ca773acb0d79ce69ad5834e00884ad31f0 (patch)
tree       41267bb74ec168bb6e867eb1503b074bb0668d72 /test/CodeGen/ARM/vrev.ll
parent     bc82d8b84f6ae15985d1b01e720ed5c37d714012 (diff)
SelectionDAG shuffle nodes do not allow operands with different numbers of
elements than the result vector type.  So, when an instruction like:

  %8 = shufflevector <2 x float> %4, <2 x float> %7, <4 x i32> <i32 1, i32 0, i32 3, i32 2>

is translated to a DAG, each operand is changed to a concat_vectors node that
appends 2 undef elements.  That is:

  shuffle [a,b], [c,d]

is changed to:

  shuffle [a,b,u,u], [c,d,u,u]

That's probably the right thing for x86 but for NEON, we'd much rather have:

  shuffle [a,b,c,d], undef

Teach the DAG combiner how to do that transformation for ARM.  Radar 8597007.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117482 91177308-0d34-0410-b5e6-96231b3b80d8
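For concreteness, here is roughly how the mask arithmetic plays out for the
shufflevector above, written in the same informal notation as the message (the
exact index remapping after legalization is an assumption, not quoted from the
patch):

  ; IR: the 4-element mask indexes the two <2 x float> operands as [a,b] (0-1) and [c,d] (2-3)
  shuffle [a,b], [c,d], <1,0,3,2>            --> [b,a,d,c]

  ; after legalization each operand is widened with 2 undef elements, so the mask
  ; is remapped into the 8-element index space of the two <4 x float> operands
  shuffle [a,b,u,u], [c,d,u,u], <1,0,5,4>    --> [b,a,d,c]

  ; the ARM combine folds both concat_vectors into one wide operand, leaving a
  ; mask that a single VREV64.32 can implement
  shuffle [a,b,c,d], undef, <1,0,3,2>        --> [b,a,d,c]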
Diffstat (limited to 'test/CodeGen/ARM/vrev.ll')
-rw-r--r--  test/CodeGen/ARM/vrev.ll  |  18
1 files changed, 18 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/ARM/vrev.ll b/test/CodeGen/ARM/vrev.ll
index e1fe64b02d..f0f9e4e339 100644
--- a/test/CodeGen/ARM/vrev.ll
+++ b/test/CodeGen/ARM/vrev.ll
@@ -129,3 +129,21 @@ define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
ret <8 x i16> %tmp2
}
+
+; A vcombine feeding a VREV should not obscure things. Radar 8597007.
+
+define void @test_with_vcombine(<4 x float>* %v) nounwind {
+;CHECK: test_with_vcombine:
+;CHECK-NOT: vext
+;CHECK: vrev64.32
+ %tmp1 = load <4 x float>* %v, align 16
+ %tmp2 = bitcast <4 x float> %tmp1 to <2 x double>
+ %tmp3 = extractelement <2 x double> %tmp2, i32 0
+ %tmp4 = bitcast double %tmp3 to <2 x float>
+ %tmp5 = extractelement <2 x double> %tmp2, i32 1
+ %tmp6 = bitcast double %tmp5 to <2 x float>
+ %tmp7 = fadd <2 x float> %tmp6, %tmp6
+ %tmp8 = shufflevector <2 x float> %tmp4, <2 x float> %tmp7, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+ store <4 x float> %tmp8, <4 x float>* %v, align 16
+ ret void
+}
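
For reference only (not part of the patch): the effect of the combine on this
test can be sketched at the IR level, assuming the DAG-level concat_vectors of
%tmp4 and %tmp7 is written as an explicit shufflevector. The names %cat and
%rev are hypothetical.

  ; illustrative sketch of what the combine conceptually forms for %tmp8 above
  %cat = shufflevector <2 x float> %tmp4, <2 x float> %tmp7, <4 x i32> <i32 0, i32 1, i32 2, i32 3>  ; concatenate the two halves
  %rev = shufflevector <4 x float> %cat, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>   ; single-operand shuffle, matches vrev64.32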