author     Kalle Raiskila <kalle.raiskila@nokia.com>  2011-03-04 13:19:18 +0000
committer  Kalle Raiskila <kalle.raiskila@nokia.com>  2011-03-04 13:19:18 +0000
commit     31cbac1cfea8703098e09e7ff5fa8a626eebc920 (patch)
tree       0b2d9b3125b06540b0a247a67758989e91e03380 /test/CodeGen
parent     7f5de8b4c64280587c2c9a9a0ba4e1ada7e050e5 (diff)
Allow vector shifts (shl,lshr,ashr) on SPU.
There was a previous implementation with patterns that would have matched e.g. shl <v4i32> <i32>, but this is not valid LLVM IR, so they were never selected.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@126998 91177308-0d34-0410-b5e6-96231b3b80d8
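To make that concrete: LLVM IR requires both operands of a shift to have the same type, so a vector shl must take a vector shift amount. A minimal sketch of the two forms (the invalid line illustrates the pattern the old implementation targeted; %val, %sh, and %amt are placeholder names, not code from this tree):

  ; not valid LLVM IR: vector value with a scalar shift amount -- this form
  ; never reaches instruction selection
  %bad = shl <4 x i32> %val, i32 %amt

  ; valid LLVM IR, as exercised by the tests added below: the shift amount
  ; is a vector of the same type as the value being shifted
  %rv = shl <4 x i32> %val, <4 x i32> %sh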
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/CellSPU/shift_ops.ll  61
1 file changed, 55 insertions(+), 6 deletions(-)
diff --git a/test/CodeGen/CellSPU/shift_ops.ll b/test/CodeGen/CellSPU/shift_ops.ll
index 92390abf94..c4a5abd290 100644
--- a/test/CodeGen/CellSPU/shift_ops.ll
+++ b/test/CodeGen/CellSPU/shift_ops.ll
@@ -1,7 +1,7 @@
; RUN: llc < %s -march=cellspu > %t1.s
-; RUN: grep {shlh } %t1.s | count 9
+; RUN: grep {shlh } %t1.s | count 10
; RUN: grep {shlhi } %t1.s | count 3
-; RUN: grep {shl } %t1.s | count 9
+; RUN: grep {shl } %t1.s | count 11
; RUN: grep {shli } %t1.s | count 3
; RUN: grep {xshw } %t1.s | count 5
; RUN: grep {and } %t1.s | count 14
@@ -14,15 +14,12 @@
; RUN: grep {rotqbyi } %t1.s | count 1
; RUN: grep {rotqbii } %t1.s | count 2
; RUN: grep {rotqbybi } %t1.s | count 1
-; RUN: grep {sfi } %t1.s | count 4
+; RUN: grep {sfi } %t1.s | count 6
; RUN: cat %t1.s | FileCheck %s
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"
-; Vector shifts are not currently supported in gcc or llvm assembly. These are
-; not tested.
-
; Shift left i16 via register, note that the second operand to shl is promoted
; to a 32-bit type:
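; A minimal sketch of the kind of function that comment introduces (the
; function itself is elided from this hunk; the names here are illustrative,
; not the file's actual symbols):
;
;   define i16 @shl_i16_reg(i16 %arg1, i16 %arg2) {
;     ; the i16 shift amount %arg2 is promoted to i32 during legalization
;     %A = shl i16 %arg1, %arg2
;     ret i16 %A
;   }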
@@ -293,3 +290,55 @@ define i128 @test_lshr_i128( i128 %val ) {
%rv = lshr i128 %val, 64
ret i128 %rv
}
+
+; Vector shifts
+define <2 x i32> @shl_v2i32(<2 x i32> %val, <2 x i32> %sh) {
+;CHECK: shl
+;CHECK: bi $lr
+ %rv = shl <2 x i32> %val, %sh
+ ret <2 x i32> %rv
+}
+
+define <4 x i32> @shl_v4i32(<4 x i32> %val, <4 x i32> %sh) {
+;CHECK: shl
+;CHECK: bi $lr
+ %rv = shl <4 x i32> %val, %sh
+ ret <4 x i32> %rv
+}
+
+define <8 x i16> @shl_v8i16(<8 x i16> %val, <8 x i16> %sh) {
+;CHECK: shlh
+;CHECK: bi $lr
+ %rv = shl <8 x i16> %val, %sh
+ ret <8 x i16> %rv
+}
+
+define <4 x i32> @lshr_v4i32(<4 x i32> %val, <4 x i32> %sh) {
+;CHECK: rotm
+;CHECK: bi $lr
+ %rv = lshr <4 x i32> %val, %sh
+ ret <4 x i32> %rv
+}
+
+define <8 x i16> @lshr_v8i16(<8 x i16> %val, <8 x i16> %sh) {
+;CHECK: sfhi
+;CHECK: rothm
+;CHECK: bi $lr
+ %rv = lshr <8 x i16> %val, %sh
+ ret <8 x i16> %rv
+}
+
+define <4 x i32> @ashr_v4i32(<4 x i32> %val, <4 x i32> %sh) {
+;CHECK: rotma
+;CHECK: bi $lr
+ %rv = ashr <4 x i32> %val, %sh
+ ret <4 x i32> %rv
+}
+
+define <8 x i16> @ashr_v8i16(<8 x i16> %val, <8 x i16> %sh) {
+;CHECK: sfhi
+;CHECK: rotmah
+;CHECK: bi $lr
+ %rv = ashr <8 x i16> %val, %sh
+ ret <8 x i16> %rv
+}