diff options
author | Justin Holewinski <jholewinski@nvidia.com> | 2014-06-27 18:35:33 +0000 |
---|---|---|
committer | Justin Holewinski <jholewinski@nvidia.com> | 2014-06-27 18:35:33 +0000 |
commit | 508c80f11f2f52f549caf86f9f6e07d07cea6006 (patch) | |
tree | 3998153eadbb353cf664b0807dae5ef8c12fbe93 /test | |
parent | 1f75f4a0ee049f65eb833109a0fe4f7466ca4f9a (diff) | |
download | llvm-508c80f11f2f52f549caf86f9f6e07d07cea6006.tar.gz llvm-508c80f11f2f52f549caf86f9f6e07d07cea6006.tar.bz2 llvm-508c80f11f2f52f549caf86f9f6e07d07cea6006.tar.xz |
[NVPTX] Add support for efficient rotate instructions on SM 3.2+
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@211934 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/NVPTX/rotate.ll | 58 |
1 file changed, 58 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/NVPTX/rotate.ll b/test/CodeGen/NVPTX/rotate.ll
new file mode 100644
index 0000000000..dfc8b4fd5f
--- /dev/null
+++ b/test/CodeGen/NVPTX/rotate.ll
@@ -0,0 +1,58 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck --check-prefix=SM20 %s
+; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck --check-prefix=SM35 %s
+
+
+declare i32 @llvm.nvvm.rotate.b32(i32, i32)
+declare i64 @llvm.nvvm.rotate.b64(i64, i32)
+declare i64 @llvm.nvvm.rotate.right.b64(i64, i32)
+
+; SM20: rotate32
+; SM35: rotate32
+define i32 @rotate32(i32 %a, i32 %b) {
+; SM20: shl.b32
+; SM20: sub.s32
+; SM20: shr.b32
+; SM20: add.u32
+; SM35: shf.l.wrap.b32
+  %val = tail call i32 @llvm.nvvm.rotate.b32(i32 %a, i32 %b)
+  ret i32 %val
+}
+
+; SM20: rotate64
+; SM35: rotate64
+define i64 @rotate64(i64 %a, i32 %b) {
+; SM20: shl.b64
+; SM20: sub.u32
+; SM20: shr.b64
+; SM20: add.u64
+; SM35: shf.l.wrap.b32
+; SM35: shf.l.wrap.b32
+  %val = tail call i64 @llvm.nvvm.rotate.b64(i64 %a, i32 %b)
+  ret i64 %val
+}
+
+; SM20: rotateright64
+; SM35: rotateright64
+define i64 @rotateright64(i64 %a, i32 %b) {
+; SM20: shr.b64
+; SM20: sub.u32
+; SM20: shl.b64
+; SM20: add.u64
+; SM35: shf.r.wrap.b32
+; SM35: shf.r.wrap.b32
+  %val = tail call i64 @llvm.nvvm.rotate.right.b64(i64 %a, i32 %b)
+  ret i64 %val
+}
+
+; SM20: rotl0
+; SM35: rotl0
+define i32 @rotl0(i32 %x) {
+; SM20: shl.b32
+; SM20: shr.b32
+; SM20: add.u32
+; SM35: shf.l.wrap.b32
+  %t0 = shl i32 %x, 8
+  %t1 = lshr i32 %x, 24
+  %t2 = or i32 %t0, %t1
+  ret i32 %t2
+}