author    Justin Holewinski <jholewinski@nvidia.com>  2014-06-27 18:35:30 +0000
committer Justin Holewinski <jholewinski@nvidia.com>  2014-06-27 18:35:30 +0000
commit    1f75f4a0ee049f65eb833109a0fe4f7466ca4f9a (patch)
tree      2d43fda71a02134810f2854064be8fb274108e28 /test
parent    ef92cf50d6101ee56445c9056d4f476047b8d180 (diff)
[NVPTX] Add missing isel patterns for 64-bit atomics
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@211933 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- test/CodeGen/NVPTX/atomics.ll | 141
1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/NVPTX/atomics.ll b/test/CodeGen/NVPTX/atomics.ll
new file mode 100644
index 0000000000..10ab73d541
--- /dev/null
+++ b/test/CodeGen/NVPTX/atomics.ll
@@ -0,0 +1,141 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+
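+; Each function below performs a single atomicrmw operation and checks
+; that it selects to the expected PTX atom.* instruction; the i64
+; variants exercise the 64-bit patterns added by this commit.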
+; CHECK-LABEL: atom0(
+define i32 @atom0(i32* %addr, i32 %val) {
+; CHECK: atom.add.u32
+ %ret = atomicrmw add i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom1(
+define i64 @atom1(i64* %addr, i64 %val) {
+; CHECK: atom.add.u64
+ %ret = atomicrmw add i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
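+; PTX has no atom.sub; atomic subtract is selected as atom.add of the
+; negated operand, hence the neg.s32/neg.s64 checks below.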
+; CHECK-LABEL: atom2(
+define i32 @atom2(i32* %addr, i32 %val) {
+; CHECK: neg.s32
+; CHECK: atom.add.u32
+ %ret = atomicrmw sub i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom3(
+define i64 @atom3(i64* %addr, i64 %val) {
+; CHECK: neg.s64
+; CHECK: atom.add.u64
+ %ret = atomicrmw sub i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
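+; The bitwise operations map to the untyped .b32/.b64 forms.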
+; CHECK-LABEL: atom4(
+define i32 @atom4(i32* %addr, i32 %val) {
+; CHECK: atom.and.b32
+ %ret = atomicrmw and i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom5(
+define i64 @atom5(i64* %addr, i64 %val) {
+; CHECK: atom.and.b64
+ %ret = atomicrmw and i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+;; atomicrmw nand is not yet supported: PTX has no native nand atomic,
+;; so it would have to be expanded first (e.g. into a compare-and-swap
+;; loop).
+;define i32 @atom6(i32* %addr, i32 %val) {
+; %ret = atomicrmw nand i32* %addr, i32 %val seq_cst
+; ret i32 %ret
+;}
+
+;define i64 @atom7(i64* %addr, i64 %val) {
+; %ret = atomicrmw nand i64* %addr, i64 %val seq_cst
+; ret i64 %ret
+;}
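+
+; A hypothetical expansion for nand (illustrative sketch only, not part
+; of this test; names are made up) would be a compare-and-swap loop,
+; roughly:
+;
+; define i32 @nand_cas_loop(i32* %addr, i32 %val) {
+; entry:
+;   %init = load i32* %addr
+;   br label %loop
+; loop:
+;   %old = phi i32 [ %init, %entry ], [ %prev, %loop ]
+;   %and = and i32 %old, %val
+;   %new = xor i32 %and, -1                     ; nand = not (old & val)
+;   %pair = cmpxchg i32* %addr, i32 %old, i32 %new seq_cst seq_cst
+;   %prev = extractvalue { i32, i1 } %pair, 0
+;   %ok = extractvalue { i32, i1 } %pair, 1
+;   br i1 %ok, label %done, label %loop
+; done:
+;   ret i32 %prev                               ; return the pre-op value
+; }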
+
+; CHECK-LABEL: atom8(
+define i32 @atom8(i32* %addr, i32 %val) {
+; CHECK: atom.or.b32
+ %ret = atomicrmw or i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom9(
+define i64 @atom9(i64* %addr, i64 %val) {
+; CHECK: atom.or.b64
+ %ret = atomicrmw or i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom10(
+define i32 @atom10(i32* %addr, i32 %val) {
+; CHECK: atom.xor.b32
+ %ret = atomicrmw xor i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom11(
+define i64 @atom11(i64* %addr, i64 %val) {
+; CHECK: atom.xor.b64
+ %ret = atomicrmw xor i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
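+; Signed max/min select the .s32/.s64 forms.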
+; CHECK-LABEL: atom12(
+define i32 @atom12(i32* %addr, i32 %val) {
+; CHECK: atom.max.s32
+ %ret = atomicrmw max i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom13(
+define i64 @atom13(i64* %addr, i64 %val) {
+; CHECK: atom.max.s64
+ %ret = atomicrmw max i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom14(
+define i32 @atom14(i32* %addr, i32 %val) {
+; CHECK: atom.min.s32
+ %ret = atomicrmw min i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom15(
+define i64 @atom15(i64* %addr, i64 %val) {
+; CHECK: atom.min.s64
+ %ret = atomicrmw min i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
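+; umax/umin select the unsigned .u32/.u64 forms.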
+; CHECK-LABEL: atom16(
+define i32 @atom16(i32* %addr, i32 %val) {
+; CHECK: atom.max.u32
+ %ret = atomicrmw umax i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom17(
+define i64 @atom17(i64* %addr, i64 %val) {
+; CHECK: atom.max.u64
+ %ret = atomicrmw umax i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+; CHECK-LABEL: atom18(
+define i32 @atom18(i32* %addr, i32 %val) {
+; CHECK: atom.min.u32
+ %ret = atomicrmw umin i32* %addr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+; CHECK-LABEL: atom19(
+define i64 @atom19(i64* %addr, i64 %val) {
+; CHECK: atom.min.u64
+ %ret = atomicrmw umin i64* %addr, i64 %val seq_cst
+ ret i64 %ret
+}
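
To run just this test from a build tree, something like bin/llvm-lit test/CodeGen/NVPTX/atomics.ll should work (assuming a build configured with the NVPTX target enabled); alternatively, the RUN line at the top of the file can be invoked by hand with llc and FileCheck.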