author     Tom Stellard <thomas.stellard@amd.com>  2013-07-23 23:54:56 +0000
committer  Tom Stellard <thomas.stellard@amd.com>  2013-07-23 23:54:56 +0000
commit     8ea83d499935f406f9fc190de783710c2293ab63 (patch)
tree       b95a57b1b8f241a424a9930e64322ebda0c957aa /test
parent     7894ffd006bfabd45c75696c9b7cfdda7c7bad9d (diff)
R600: Treat CONSTANT_ADDRESS loads like GLOBAL_ADDRESS loads when necessary

These are really the same address space in hardware. The only difference is that CONSTANT_ADDRESS uses a special cache for faster access. When we are unable to use the constant kcache for some reason (e.g. smaller types or lack of indirect addressing), the instruction selector must use GLOBAL_ADDRESS loads instead.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@187006 91177308-0d34-0410-b5e6-96231b3b80d8
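For illustration, the behavior this change covers can be reduced to one standalone test. This is a minimal sketch, not part of the patch: the RUN line reuses the cayman configuration already present in load.ll, the function name @const_i8_as_global is hypothetical, and the CHECK line mirrors the VTX_READ_8 pattern the patch adds for sub-dword constant loads. Because an i8 is too small for the constant kcache, the load from addrspace(2) must be selected the same way as a global load:

; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s
; An i8 load from the constant address space cannot use the kcache,
; so it is lowered like a global (VTX_READ) load.
; CHECK: @const_i8_as_global
; CHECK: VTX_READ_8
define void @const_i8_as_global(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
entry:
  %val = load i8 addrspace(2)* %in
  %ext = zext i8 %val to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}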
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/R600/load.ll | 147
1 file changed, 122 insertions(+), 25 deletions(-)
diff --git a/test/CodeGen/R600/load.ll b/test/CodeGen/R600/load.ll
index f36f20c63d..6eef7c7966 100644
--- a/test/CodeGen/R600/load.ll
+++ b/test/CodeGen/R600/load.ll
@@ -2,6 +2,10 @@
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck --check-prefix=R600-CHECK %s
; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck --check-prefix=SI-CHECK %s
+;===------------------------------------------------------------------------===;
+; GLOBAL ADDRESS SPACE
+;===------------------------------------------------------------------------===;
+
; Load an i8 value from the global address space.
; R600-CHECK: @load_i8
; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
@@ -100,31 +104,6 @@ entry:
ret void
}
-; Load an i32 value from the constant address space.
-; R600-CHECK: @load_const_addrspace_i32
-; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-
-; SI-CHECK: @load_const_addrspace_i32
-; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
-define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
-entry:
- %0 = load i32 addrspace(2)* %in
- store i32 %0, i32 addrspace(1)* %out
- ret void
-}
-
-; Load a f32 value from the constant address space.
-; R600-CHECK: @load_const_addrspace_f32
-; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
-
-; SI-CHECK: @load_const_addrspace_f32
-; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
-define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
- %1 = load float addrspace(2)* %in
- store float %1, float addrspace(1)* %out
- ret void
-}
-
; R600-CHECK: @load_i64
; R600-CHECK: RAT
; R600-CHECK: RAT
@@ -166,3 +145,121 @@ entry:
store i64 %1, i64 addrspace(1)* %out
ret void
}
+
+;===------------------------------------------------------------------------===;
+; CONSTANT ADDRESS SPACE
+;===------------------------------------------------------------------------===;
+
+; Load a sign-extended i8 value
+; R600-CHECK: @load_const_i8_sext
+; R600-CHECK: VTX_READ_8 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600-CHECK: 24
+; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600-CHECK: 24
+; SI-CHECK: @load_const_i8_sext
+; SI-CHECK: BUFFER_LOAD_SBYTE VGPR{{[0-9]+}},
+define void @load_const_i8_sext(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
+entry:
+ %0 = load i8 addrspace(2)* %in
+ %1 = sext i8 %0 to i32
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load an aligned i8 value
+; R600-CHECK: @load_const_i8_aligned
+; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI-CHECK: @load_const_i8_aligned
+; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
+define void @load_const_i8_aligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
+entry:
+ %0 = load i8 addrspace(2)* %in
+ %1 = zext i8 %0 to i32
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load an un-aligned i8 value
+; R600-CHECK: @load_const_i8_unaligned
+; R600-CHECK: VTX_READ_8 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI-CHECK: @load_const_i8_unaligned
+; SI-CHECK: BUFFER_LOAD_UBYTE VGPR{{[0-9]+}},
+define void @load_const_i8_unaligned(i32 addrspace(1)* %out, i8 addrspace(2)* %in) {
+entry:
+ %0 = getelementptr i8 addrspace(2)* %in, i32 1
+ %1 = load i8 addrspace(2)* %0
+ %2 = zext i8 %1 to i32
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load a sign-extended i16 value
+; R600-CHECK: @load_const_i16_sext
+; R600-CHECK: VTX_READ_16 [[DST:T[0-9]\.[XYZW]]], [[DST]]
+; R600-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], [[DST]]
+; R600-CHECK: 16
+; R600-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]]
+; R600-CHECK: 16
+; SI-CHECK: @load_const_i16_sext
+; SI-CHECK: BUFFER_LOAD_SSHORT
+define void @load_const_i16_sext(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
+entry:
+ %0 = load i16 addrspace(2)* %in
+ %1 = sext i16 %0 to i32
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load an aligned i16 value
+; R600-CHECK: @load_const_i16_aligned
+; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI-CHECK: @load_const_i16_aligned
+; SI-CHECK: BUFFER_LOAD_USHORT
+define void @load_const_i16_aligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
+entry:
+ %0 = load i16 addrspace(2)* %in
+ %1 = zext i16 %0 to i32
+ store i32 %1, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load an un-aligned i16 value
+; R600-CHECK: @load_const_i16_unaligned
+; R600-CHECK: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
+; SI-CHECK: @load_const_i16_unaligned
+; SI-CHECK: BUFFER_LOAD_USHORT
+define void @load_const_i16_unaligned(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
+entry:
+ %0 = getelementptr i16 addrspace(2)* %in, i32 1
+ %1 = load i16 addrspace(2)* %0
+ %2 = zext i16 %1 to i32
+ store i32 %2, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load an i32 value from the constant address space.
+; R600-CHECK: @load_const_addrspace_i32
+; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+
+; SI-CHECK: @load_const_addrspace_i32
+; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
+define void @load_const_addrspace_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
+entry:
+ %0 = load i32 addrspace(2)* %in
+ store i32 %0, i32 addrspace(1)* %out
+ ret void
+}
+
+; Load a f32 value from the constant address space.
+; R600-CHECK: @load_const_addrspace_f32
+; R600-CHECK: VTX_READ_32 T{{[0-9]+}}.X, T{{[0-9]+}}.X, 0
+
+; SI-CHECK: @load_const_addrspace_f32
+; SI-CHECK: S_LOAD_DWORD SGPR{{[0-9]+}}
+define void @load_const_addrspace_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
+ %1 = load float addrspace(2)* %in
+ store float %1, float addrspace(1)* %out
+ ret void
+}
+