diff options
author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2014-06-11 18:08:54 +0000 |
---|---|---|
committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2014-06-11 18:08:54 +0000 |
commit | c9dbd0da7a127ace39c064f79fe0cbe19d4f4d77 (patch) | |
tree | 5bbad7ccdd0976dace0c23f48d8055ac66c1c245 /test | |
parent | 481a071a8b79010c4ce40bea2f3589f514ca6e39 (diff) | |
download | llvm-c9dbd0da7a127ace39c064f79fe0cbe19d4f4d77.tar.gz llvm-c9dbd0da7a127ace39c064f79fe0cbe19d4f4d77.tar.bz2 llvm-c9dbd0da7a127ace39c064f79fe0cbe19d4f4d77.tar.xz |
R600/SI: Add common 64-bit LDS atomics
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210680 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/R600/atomic_cmp_swap_local.ll | 19 | ||||
-rw-r--r-- | test/CodeGen/R600/local-atomics64.ll | 243 |
2 files changed, 262 insertions, 0 deletions
diff --git a/test/CodeGen/R600/atomic_cmp_swap_local.ll b/test/CodeGen/R600/atomic_cmp_swap_local.ll index 7ab651f5a8..fd5ca64ac9 100644 --- a/test/CodeGen/R600/atomic_cmp_swap_local.ll +++ b/test/CodeGen/R600/atomic_cmp_swap_local.ll @@ -14,3 +14,22 @@ define void @lds_atomic_cmpxchg_ret_i32_offset(i32 addrspace(1)* %out, i32 addrs store i32 %result, i32 addrspace(1)* %out, align 4 ret void } + +; FUNC-LABEL: @lds_atomic_cmpxchg_ret_i64_offset: +; SI: S_LOAD_DWORDX2 s{{\[}}[[LOSWAP:[0-9]+]]:[[HISWAP:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0xd +; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb +; SI: S_MOV_B64 s{{\[}}[[LOSCMP:[0-9]+]]:[[HISCMP:[0-9]+]]{{\]}}, 7 +; SI-DAG: V_MOV_B32_e32 v[[LOVCMP:[0-9]+]], s[[LOSCMP]] +; SI-DAG: V_MOV_B32_e32 v[[HIVCMP:[0-9]+]], s[[HISCMP]] +; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]] +; SI-DAG: V_MOV_B32_e32 v[[LOSWAPV:[0-9]+]], s[[LOSWAP]] +; SI-DAG: V_MOV_B32_e32 v[[HISWAPV:[0-9]+]], s[[HISWAP]] +; SI: DS_CMPST_RTN_B64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVCMP]]:[[HIVCMP]]{{\]}}, v{{\[}}[[LOSWAPV]]:[[HISWAPV]]{{\]}}, 0x20, [M0] +; SI: BUFFER_STORE_DWORDX2 [[RESULT]], +; SI: S_ENDPGM +define void @lds_atomic_cmpxchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr, i64 %swap) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = cmpxchg i64 addrspace(3)* %gep, i64 7, i64 %swap seq_cst monotonic + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} diff --git a/test/CodeGen/R600/local-atomics64.ll b/test/CodeGen/R600/local-atomics64.ll new file mode 100644 index 0000000000..399cfe73c6 --- /dev/null +++ b/test/CodeGen/R600/local-atomics64.ll @@ -0,0 +1,243 @@ +; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s + +; FUNC-LABEL: @lds_atomic_xchg_ret_i64: +; SI: DS_WRXCHG_RTN_B64 +; SI: S_ENDPGM +define void @lds_atomic_xchg_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw 
xchg i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_xchg_ret_i64_offset: +; SI: DS_WRXCHG_RTN_B64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_xchg_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw xchg i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_add_ret_i64: +; SI: DS_ADD_RTN_U64 +; SI: S_ENDPGM +define void @lds_atomic_add_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw add i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_add_ret_i64_offset: +; SI: S_LOAD_DWORD [[PTR:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb +; SI: S_MOV_B64 s{{\[}}[[LOSDATA:[0-9]+]]:[[HISDATA:[0-9]+]]{{\]}}, 9 +; SI-DAG: V_MOV_B32_e32 v[[LOVDATA:[0-9]+]], s[[LOSDATA]] +; SI-DAG: V_MOV_B32_e32 v[[HIVDATA:[0-9]+]], s[[HISDATA]] +; SI-DAG: V_MOV_B32_e32 [[VPTR:v[0-9]+]], [[PTR]] +; SI: DS_ADD_RTN_U64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[VPTR]], v{{\[}}[[LOVDATA]]:[[HIVDATA]]{{\]}}, 0x20, [M0] +; SI: BUFFER_STORE_DWORDX2 [[RESULT]], +; SI: S_ENDPGM +define void @lds_atomic_add_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i64 4 + %result = atomicrmw add i64 addrspace(3)* %gep, i64 9 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_inc_ret_i64: +; SI: DS_INC_RTN_U64 +; SI: S_ENDPGM +define void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw add i64 addrspace(3)* %ptr, i64 1 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_inc_ret_i64_offset: +; SI: DS_INC_RTN_U64 
{{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw add i64 addrspace(3)* %gep, i64 1 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_sub_ret_i64: +; SI: DS_SUB_RTN_U64 +; SI: S_ENDPGM +define void @lds_atomic_sub_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_sub_ret_i64_offset: +; SI: DS_SUB_RTN_U64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_sub_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw sub i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_dec_ret_i64: +; SI: DS_DEC_RTN_U64 +; SI: S_ENDPGM +define void @lds_atomic_dec_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw sub i64 addrspace(3)* %ptr, i64 1 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_dec_ret_i64_offset: +; SI: DS_DEC_RTN_U64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_dec_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw sub i64 addrspace(3)* %gep, i64 1 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_and_ret_i64: +; SI: DS_AND_RTN_B64 +; SI: S_ENDPGM +define void @lds_atomic_and_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw and i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret 
void +} + +; FUNC-LABEL: @lds_atomic_and_ret_i64_offset: +; SI: DS_AND_RTN_B64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_and_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw and i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_or_ret_i64: +; SI: DS_OR_RTN_B64 +; SI: S_ENDPGM +define void @lds_atomic_or_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw or i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_or_ret_i64_offset: +; SI: DS_OR_RTN_B64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_or_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw or i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_xor_ret_i64: +; SI: DS_XOR_RTN_B64 +; SI: S_ENDPGM +define void @lds_atomic_xor_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw xor i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_xor_ret_i64_offset: +; SI: DS_XOR_RTN_B64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_xor_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw xor i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FIXME: There is no atomic nand instruction, so we somehow need to expand this. +; XFUNC-LABEL: @lds_atomic_nand_ret_i64:
+; define void @lds_atomic_nand_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { +; %result = atomicrmw nand i64 addrspace(3)* %ptr, i64 4 seq_cst +; store i64 %result, i64 addrspace(1)* %out, align 8 +; ret void +; } + +; FUNC-LABEL: @lds_atomic_min_ret_i64: +; SI: DS_MIN_RTN_I64 +; SI: S_ENDPGM +define void @lds_atomic_min_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw min i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_min_ret_i64_offset: +; SI: DS_MIN_RTN_I64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_min_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw min i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_max_ret_i64: +; SI: DS_MAX_RTN_I64 +; SI: S_ENDPGM +define void @lds_atomic_max_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw max i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_max_ret_i64_offset: +; SI: DS_MAX_RTN_I64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_max_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw max i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_umin_ret_i64: +; SI: DS_MIN_RTN_U64 +; SI: S_ENDPGM +define void @lds_atomic_umin_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw umin i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_umin_ret_i64_offset: +; SI: 
DS_MIN_RTN_U64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_umin_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw umin i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_umax_ret_i64: +; SI: DS_MAX_RTN_U64 +; SI: S_ENDPGM +define void @lds_atomic_umax_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %result = atomicrmw umax i64 addrspace(3)* %ptr, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: @lds_atomic_umax_ret_i64_offset: +; SI: DS_MAX_RTN_U64 {{.*}} 0x20 +; SI: S_ENDPGM +define void @lds_atomic_umax_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) nounwind { + %gep = getelementptr i64 addrspace(3)* %ptr, i32 4 + %result = atomicrmw umax i64 addrspace(3)* %gep, i64 4 seq_cst + store i64 %result, i64 addrspace(1)* %out, align 8 + ret void +} |