summaryrefslogtreecommitdiff
path: root/test/CodeGen/PowerPC/vec_shuffle.ll
diff options
context:
space:
mode:
authorChris Lattner <sabre@nondot.org>2006-04-06 22:02:11 +0000
committerChris Lattner <sabre@nondot.org>2006-04-06 22:02:11 +0000
commitcc8b1ac5cb9d61fade8e07b269054e83ea8ecfdb (patch)
treed781e4db25829d9a9ac7d1b1162cffc679972644 /test/CodeGen/PowerPC/vec_shuffle.ll
parent7917d3af3f51a3584a87a44548d584a5ac253cfc (diff)
downloadllvm-cc8b1ac5cb9d61fade8e07b269054e83ea8ecfdb.tar.gz
llvm-cc8b1ac5cb9d61fade8e07b269054e83ea8ecfdb.tar.bz2
llvm-cc8b1ac5cb9d61fade8e07b269054e83ea8ecfdb.tar.xz
Add vmrg(x,x) tests
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27462 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/PowerPC/vec_shuffle.ll')
-rw-r--r--  test/CodeGen/PowerPC/vec_shuffle.ll  |  158
1 files changed, 156 insertions, 2 deletions
diff --git a/test/CodeGen/PowerPC/vec_shuffle.ll b/test/CodeGen/PowerPC/vec_shuffle.ll
index fce646def5..01d2a26347 100644
--- a/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -1,7 +1,7 @@
; RUN: llvm-as < %s | opt -instcombine | llc -march=ppc32 -mcpu=g5 | not grep vperm &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsldoi | wc -l | grep 2 &&
-; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vmrgh | wc -l | grep 3 &&
-; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vmrgl | wc -l | grep 3
+; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vmrgh | wc -l | grep 6 &&
+; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vmrgl | wc -l | grep 6
void %VSLDOI_xy(<8 x short>* %A, <8 x short>* %B) {
entry:
@@ -262,3 +262,157 @@ entry:
store <4 x int> %tmp9, <4 x int>* %A
ret void
}
+
+; VMRG_UNARY_tb_l: unary byte merge-low test.  The result duplicates each of
+; lanes 8-15 of the single vector loaded from %A (each element extracted
+; twice), i.e. a vmrg(x,x) shuffle expected to select vmrglb on big-endian
+; PPC; this backs the RUN-line count of 6 vmrgl matches.  %B is unused.
+; NOTE(review): "%tmp" looks redefined (load, then the first extractelement)
+; — presumably accepted by llvm-as of this vintage or a capture artifact;
+; verify against the original test file.
+void %VMRG_UNARY_tb_l(<16 x sbyte>* %A, <16 x sbyte>* %B) {
+entry:
+ %tmp = load <16 x sbyte>* %A ; <<16 x sbyte>> [#uses=16]
+ %tmp = extractelement <16 x sbyte> %tmp, uint 8 ; <sbyte> [#uses=1]
+ %tmp3 = extractelement <16 x sbyte> %tmp, uint 8 ; <sbyte> [#uses=1]
+ %tmp4 = extractelement <16 x sbyte> %tmp, uint 9 ; <sbyte> [#uses=1]
+ %tmp5 = extractelement <16 x sbyte> %tmp, uint 9 ; <sbyte> [#uses=1]
+ %tmp6 = extractelement <16 x sbyte> %tmp, uint 10 ; <sbyte> [#uses=1]
+ %tmp7 = extractelement <16 x sbyte> %tmp, uint 10 ; <sbyte> [#uses=1]
+ %tmp8 = extractelement <16 x sbyte> %tmp, uint 11 ; <sbyte> [#uses=1]
+ %tmp9 = extractelement <16 x sbyte> %tmp, uint 11 ; <sbyte> [#uses=1]
+ %tmp10 = extractelement <16 x sbyte> %tmp, uint 12 ; <sbyte> [#uses=1]
+ %tmp11 = extractelement <16 x sbyte> %tmp, uint 12 ; <sbyte> [#uses=1]
+ %tmp12 = extractelement <16 x sbyte> %tmp, uint 13 ; <sbyte> [#uses=1]
+ %tmp13 = extractelement <16 x sbyte> %tmp, uint 13 ; <sbyte> [#uses=1]
+ %tmp14 = extractelement <16 x sbyte> %tmp, uint 14 ; <sbyte> [#uses=1]
+ %tmp15 = extractelement <16 x sbyte> %tmp, uint 14 ; <sbyte> [#uses=1]
+ %tmp16 = extractelement <16 x sbyte> %tmp, uint 15 ; <sbyte> [#uses=1]
+ %tmp17 = extractelement <16 x sbyte> %tmp, uint 15 ; <sbyte> [#uses=1]
+ %tmp18 = insertelement <16 x sbyte> undef, sbyte %tmp, uint 0 ; <<16 x sbyte>> [#uses=1]
+ %tmp19 = insertelement <16 x sbyte> %tmp18, sbyte %tmp3, uint 1 ; <<16 x sbyte>> [#uses=1]
+ %tmp20 = insertelement <16 x sbyte> %tmp19, sbyte %tmp4, uint 2 ; <<16 x sbyte>> [#uses=1]
+ %tmp21 = insertelement <16 x sbyte> %tmp20, sbyte %tmp5, uint 3 ; <<16 x sbyte>> [#uses=1]
+ %tmp22 = insertelement <16 x sbyte> %tmp21, sbyte %tmp6, uint 4 ; <<16 x sbyte>> [#uses=1]
+ %tmp23 = insertelement <16 x sbyte> %tmp22, sbyte %tmp7, uint 5 ; <<16 x sbyte>> [#uses=1]
+ %tmp24 = insertelement <16 x sbyte> %tmp23, sbyte %tmp8, uint 6 ; <<16 x sbyte>> [#uses=1]
+ %tmp25 = insertelement <16 x sbyte> %tmp24, sbyte %tmp9, uint 7 ; <<16 x sbyte>> [#uses=1]
+ %tmp26 = insertelement <16 x sbyte> %tmp25, sbyte %tmp10, uint 8 ; <<16 x sbyte>> [#uses=1]
+ %tmp27 = insertelement <16 x sbyte> %tmp26, sbyte %tmp11, uint 9 ; <<16 x sbyte>> [#uses=1]
+ %tmp28 = insertelement <16 x sbyte> %tmp27, sbyte %tmp12, uint 10 ; <<16 x sbyte>> [#uses=1]
+ %tmp29 = insertelement <16 x sbyte> %tmp28, sbyte %tmp13, uint 11 ; <<16 x sbyte>> [#uses=1]
+ %tmp30 = insertelement <16 x sbyte> %tmp29, sbyte %tmp14, uint 12 ; <<16 x sbyte>> [#uses=1]
+ %tmp31 = insertelement <16 x sbyte> %tmp30, sbyte %tmp15, uint 13 ; <<16 x sbyte>> [#uses=1]
+ %tmp32 = insertelement <16 x sbyte> %tmp31, sbyte %tmp16, uint 14 ; <<16 x sbyte>> [#uses=1]
+ %tmp33 = insertelement <16 x sbyte> %tmp32, sbyte %tmp17, uint 15 ; <<16 x sbyte>> [#uses=1]
+ store <16 x sbyte> %tmp33, <16 x sbyte>* %A
+ ret void
+}
+
+; VMRG_UNARY_th_l: unary halfword merge-low test.  Duplicates lanes 4-7 of
+; the single vector loaded from %A — the vmrg(x,x) pattern expected to select
+; vmrglh (counted by the RUN-line vmrgl grep).  %B is unused.
+; NOTE(review): "%tmp" appears assigned twice (load, then first
+; extractelement) — presumably era/capture artifact; verify.
+void %VMRG_UNARY_th_l(<8 x short>* %A, <8 x short>* %B) {
+entry:
+ %tmp = load <8 x short>* %A ; <<8 x short>> [#uses=8]
+ %tmp = extractelement <8 x short> %tmp, uint 4 ; <short> [#uses=1]
+ %tmp3 = extractelement <8 x short> %tmp, uint 4 ; <short> [#uses=1]
+ %tmp4 = extractelement <8 x short> %tmp, uint 5 ; <short> [#uses=1]
+ %tmp5 = extractelement <8 x short> %tmp, uint 5 ; <short> [#uses=1]
+ %tmp6 = extractelement <8 x short> %tmp, uint 6 ; <short> [#uses=1]
+ %tmp7 = extractelement <8 x short> %tmp, uint 6 ; <short> [#uses=1]
+ %tmp8 = extractelement <8 x short> %tmp, uint 7 ; <short> [#uses=1]
+ %tmp9 = extractelement <8 x short> %tmp, uint 7 ; <short> [#uses=1]
+ %tmp10 = insertelement <8 x short> undef, short %tmp, uint 0 ; <<8 x short>> [#uses=1]
+ %tmp11 = insertelement <8 x short> %tmp10, short %tmp3, uint 1 ; <<8 x short>> [#uses=1]
+ %tmp12 = insertelement <8 x short> %tmp11, short %tmp4, uint 2 ; <<8 x short>> [#uses=1]
+ %tmp13 = insertelement <8 x short> %tmp12, short %tmp5, uint 3 ; <<8 x short>> [#uses=1]
+ %tmp14 = insertelement <8 x short> %tmp13, short %tmp6, uint 4 ; <<8 x short>> [#uses=1]
+ %tmp15 = insertelement <8 x short> %tmp14, short %tmp7, uint 5 ; <<8 x short>> [#uses=1]
+ %tmp16 = insertelement <8 x short> %tmp15, short %tmp8, uint 6 ; <<8 x short>> [#uses=1]
+ %tmp17 = insertelement <8 x short> %tmp16, short %tmp9, uint 7 ; <<8 x short>> [#uses=1]
+ store <8 x short> %tmp17, <8 x short>* %A
+ ret void
+}
+
+; VMRG_UNARY_tw_l: unary word merge-low test.  Duplicates lanes 2-3 of the
+; single vector loaded from %A — the vmrg(x,x) pattern expected to select
+; vmrglw (counted by the RUN-line vmrgl grep).  %B is unused.
+; NOTE(review): "%tmp" appears assigned twice (load, then first
+; extractelement) — presumably era/capture artifact; verify.
+void %VMRG_UNARY_tw_l(<4 x int>* %A, <4 x int>* %B) {
+entry:
+ %tmp = load <4 x int>* %A ; <<4 x int>> [#uses=4]
+ %tmp = extractelement <4 x int> %tmp, uint 2 ; <int> [#uses=1]
+ %tmp3 = extractelement <4 x int> %tmp, uint 2 ; <int> [#uses=1]
+ %tmp4 = extractelement <4 x int> %tmp, uint 3 ; <int> [#uses=1]
+ %tmp5 = extractelement <4 x int> %tmp, uint 3 ; <int> [#uses=1]
+ %tmp6 = insertelement <4 x int> undef, int %tmp, uint 0 ; <<4 x int>> [#uses=1]
+ %tmp7 = insertelement <4 x int> %tmp6, int %tmp3, uint 1 ; <<4 x int>> [#uses=1]
+ %tmp8 = insertelement <4 x int> %tmp7, int %tmp4, uint 2 ; <<4 x int>> [#uses=1]
+ %tmp9 = insertelement <4 x int> %tmp8, int %tmp5, uint 3 ; <<4 x int>> [#uses=1]
+ store <4 x int> %tmp9, <4 x int>* %A
+ ret void
+}
+
+; VMRG_UNARY_tb_h: unary byte merge-high test.  Duplicates lanes 0-7 of the
+; single vector loaded from %A — the vmrg(x,x) pattern expected to select
+; vmrghb (counted by the RUN-line vmrgh grep).  %B is unused.
+; NOTE(review): "%tmp" appears assigned twice (load, then first
+; extractelement) — presumably era/capture artifact; verify.
+void %VMRG_UNARY_tb_h(<16 x sbyte>* %A, <16 x sbyte>* %B) {
+entry:
+ %tmp = load <16 x sbyte>* %A ; <<16 x sbyte>> [#uses=16]
+ %tmp = extractelement <16 x sbyte> %tmp, uint 0 ; <sbyte> [#uses=1]
+ %tmp3 = extractelement <16 x sbyte> %tmp, uint 0 ; <sbyte> [#uses=1]
+ %tmp4 = extractelement <16 x sbyte> %tmp, uint 1 ; <sbyte> [#uses=1]
+ %tmp5 = extractelement <16 x sbyte> %tmp, uint 1 ; <sbyte> [#uses=1]
+ %tmp6 = extractelement <16 x sbyte> %tmp, uint 2 ; <sbyte> [#uses=1]
+ %tmp7 = extractelement <16 x sbyte> %tmp, uint 2 ; <sbyte> [#uses=1]
+ %tmp8 = extractelement <16 x sbyte> %tmp, uint 3 ; <sbyte> [#uses=1]
+ %tmp9 = extractelement <16 x sbyte> %tmp, uint 3 ; <sbyte> [#uses=1]
+ %tmp10 = extractelement <16 x sbyte> %tmp, uint 4 ; <sbyte> [#uses=1]
+ %tmp11 = extractelement <16 x sbyte> %tmp, uint 4 ; <sbyte> [#uses=1]
+ %tmp12 = extractelement <16 x sbyte> %tmp, uint 5 ; <sbyte> [#uses=1]
+ %tmp13 = extractelement <16 x sbyte> %tmp, uint 5 ; <sbyte> [#uses=1]
+ %tmp14 = extractelement <16 x sbyte> %tmp, uint 6 ; <sbyte> [#uses=1]
+ %tmp15 = extractelement <16 x sbyte> %tmp, uint 6 ; <sbyte> [#uses=1]
+ %tmp16 = extractelement <16 x sbyte> %tmp, uint 7 ; <sbyte> [#uses=1]
+ %tmp17 = extractelement <16 x sbyte> %tmp, uint 7 ; <sbyte> [#uses=1]
+ %tmp18 = insertelement <16 x sbyte> undef, sbyte %tmp, uint 0 ; <<16 x sbyte>> [#uses=1]
+ %tmp19 = insertelement <16 x sbyte> %tmp18, sbyte %tmp3, uint 1 ; <<16 x sbyte>> [#uses=1]
+ %tmp20 = insertelement <16 x sbyte> %tmp19, sbyte %tmp4, uint 2 ; <<16 x sbyte>> [#uses=1]
+ %tmp21 = insertelement <16 x sbyte> %tmp20, sbyte %tmp5, uint 3 ; <<16 x sbyte>> [#uses=1]
+ %tmp22 = insertelement <16 x sbyte> %tmp21, sbyte %tmp6, uint 4 ; <<16 x sbyte>> [#uses=1]
+ %tmp23 = insertelement <16 x sbyte> %tmp22, sbyte %tmp7, uint 5 ; <<16 x sbyte>> [#uses=1]
+ %tmp24 = insertelement <16 x sbyte> %tmp23, sbyte %tmp8, uint 6 ; <<16 x sbyte>> [#uses=1]
+ %tmp25 = insertelement <16 x sbyte> %tmp24, sbyte %tmp9, uint 7 ; <<16 x sbyte>> [#uses=1]
+ %tmp26 = insertelement <16 x sbyte> %tmp25, sbyte %tmp10, uint 8 ; <<16 x sbyte>> [#uses=1]
+ %tmp27 = insertelement <16 x sbyte> %tmp26, sbyte %tmp11, uint 9 ; <<16 x sbyte>> [#uses=1]
+ %tmp28 = insertelement <16 x sbyte> %tmp27, sbyte %tmp12, uint 10 ; <<16 x sbyte>> [#uses=1]
+ %tmp29 = insertelement <16 x sbyte> %tmp28, sbyte %tmp13, uint 11 ; <<16 x sbyte>> [#uses=1]
+ %tmp30 = insertelement <16 x sbyte> %tmp29, sbyte %tmp14, uint 12 ; <<16 x sbyte>> [#uses=1]
+ %tmp31 = insertelement <16 x sbyte> %tmp30, sbyte %tmp15, uint 13 ; <<16 x sbyte>> [#uses=1]
+ %tmp32 = insertelement <16 x sbyte> %tmp31, sbyte %tmp16, uint 14 ; <<16 x sbyte>> [#uses=1]
+ %tmp33 = insertelement <16 x sbyte> %tmp32, sbyte %tmp17, uint 15 ; <<16 x sbyte>> [#uses=1]
+ store <16 x sbyte> %tmp33, <16 x sbyte>* %A
+ ret void
+}
+
+; VMRG_UNARY_th_h: unary halfword merge-high test.  Duplicates lanes 0-3 of
+; the single vector loaded from %A — the vmrg(x,x) pattern expected to select
+; vmrghh (counted by the RUN-line vmrgh grep).  %B is unused.
+; NOTE(review): "%tmp" appears assigned twice (load, then first
+; extractelement) — presumably era/capture artifact; verify.
+void %VMRG_UNARY_th_h(<8 x short>* %A, <8 x short>* %B) {
+entry:
+ %tmp = load <8 x short>* %A ; <<8 x short>> [#uses=8]
+ %tmp = extractelement <8 x short> %tmp, uint 0 ; <short> [#uses=1]
+ %tmp3 = extractelement <8 x short> %tmp, uint 0 ; <short> [#uses=1]
+ %tmp4 = extractelement <8 x short> %tmp, uint 1 ; <short> [#uses=1]
+ %tmp5 = extractelement <8 x short> %tmp, uint 1 ; <short> [#uses=1]
+ %tmp6 = extractelement <8 x short> %tmp, uint 2 ; <short> [#uses=1]
+ %tmp7 = extractelement <8 x short> %tmp, uint 2 ; <short> [#uses=1]
+ %tmp8 = extractelement <8 x short> %tmp, uint 3 ; <short> [#uses=1]
+ %tmp9 = extractelement <8 x short> %tmp, uint 3 ; <short> [#uses=1]
+ %tmp10 = insertelement <8 x short> undef, short %tmp, uint 0 ; <<8 x short>> [#uses=1]
+ %tmp11 = insertelement <8 x short> %tmp10, short %tmp3, uint 1 ; <<8 x short>> [#uses=1]
+ %tmp12 = insertelement <8 x short> %tmp11, short %tmp4, uint 2 ; <<8 x short>> [#uses=1]
+ %tmp13 = insertelement <8 x short> %tmp12, short %tmp5, uint 3 ; <<8 x short>> [#uses=1]
+ %tmp14 = insertelement <8 x short> %tmp13, short %tmp6, uint 4 ; <<8 x short>> [#uses=1]
+ %tmp15 = insertelement <8 x short> %tmp14, short %tmp7, uint 5 ; <<8 x short>> [#uses=1]
+ %tmp16 = insertelement <8 x short> %tmp15, short %tmp8, uint 6 ; <<8 x short>> [#uses=1]
+ %tmp17 = insertelement <8 x short> %tmp16, short %tmp9, uint 7 ; <<8 x short>> [#uses=1]
+ store <8 x short> %tmp17, <8 x short>* %A
+ ret void
+}
+
+; VMRG_UNARY_tw_h: unary word merge-high test.  Duplicates lanes 0-1 of the
+; single vector loaded from %A — the vmrg(x,x) pattern expected to select
+; vmrghw (counted by the RUN-line vmrgh grep).  %B is unused.
+; NOTE(review): "%tmp" appears assigned twice (load, then first
+; extractelement) — presumably era/capture artifact; verify.
+void %VMRG_UNARY_tw_h(<4 x int>* %A, <4 x int>* %B) {
+entry:
+ %tmp = load <4 x int>* %A ; <<4 x int>> [#uses=4]
+ %tmp = extractelement <4 x int> %tmp, uint 0 ; <int> [#uses=1]
+ %tmp3 = extractelement <4 x int> %tmp, uint 0 ; <int> [#uses=1]
+ %tmp4 = extractelement <4 x int> %tmp, uint 1 ; <int> [#uses=1]
+ %tmp5 = extractelement <4 x int> %tmp, uint 1 ; <int> [#uses=1]
+ %tmp6 = insertelement <4 x int> undef, int %tmp, uint 0 ; <<4 x int>> [#uses=1]
+ %tmp7 = insertelement <4 x int> %tmp6, int %tmp3, uint 1 ; <<4 x int>> [#uses=1]
+ %tmp8 = insertelement <4 x int> %tmp7, int %tmp4, uint 2 ; <<4 x int>> [#uses=1]
+ %tmp9 = insertelement <4 x int> %tmp8, int %tmp5, uint 3 ; <<4 x int>> [#uses=1]
+ store <4 x int> %tmp9, <4 x int>* %A
+ ret void
+}