author     Justin Holewinski <jholewinski@nvidia.com>    2013-02-12 14:18:49 +0000
committer  Justin Holewinski <jholewinski@nvidia.com>    2013-02-12 14:18:49 +0000
commit     7eacad03efda36e09ebd96e95d7891cadaaa9087 (patch)
tree       c66658286eca956701f8334550a8edefe236b468 /test/CodeGen/NVPTX
parent     c8a196ae8fad3cba7a777e2e7916fd36ebf70fe6 (diff)
[NVPTX] Disable vector registers
Vectors were being manually scalarized by the backend. Instead, let the
target-independent code do all of the work. The manual scalarization was
from a time before good target-independent support for scalarization in
LLVM.

However, this forces us to specially handle vector loads and stores, which
we can turn into PTX instructions that produce/consume multiple operands.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@174968 91177308-0d34-0410-b5e6-96231b3b80d8
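As a rough illustration, below is a minimal sketch of the PTX this lowering is expected to produce for the <2 x float> case exercised by @foo in the new test. Only the ld.v2.f32 form is actually checked by the test; the parameter load, register numbering, and the mul/st.v2 instructions shown are illustrative assumptions.

    // Illustrative sketch only; register numbers and parameter handling are assumptions.
    ld.param.u32  %r1, [foo_param_0];   // load the pointer argument
    ld.v2.f32     {%f1, %f2}, [%r1];    // one vector load feeds two scalar registers
    mul.f32       %f3, %f1, %f1;        // the fmul itself is still scalarized
    mul.f32       %f4, %f2, %f2;
    st.v2.f32     [%r1], {%f3, %f4};    // one vector store writes both results back
    ret;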
Diffstat (limited to 'test/CodeGen/NVPTX')
-rw-r--r--  test/CodeGen/NVPTX/vector-loads.ll  66
1 file changed, 66 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/NVPTX/vector-loads.ll b/test/CodeGen/NVPTX/vector-loads.ll
new file mode 100644
index 0000000000..f5a1795e3c
--- /dev/null
+++ b/test/CodeGen/NVPTX/vector-loads.ll
@@ -0,0 +1,66 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s
+
+; Even though general vector types are not supported in PTX, we can still
+; optimize loads/stores with pseudo-vector instructions of the form:
+;
+; ld.v2.f32 {%f0, %f1}, [%r0]
+;
+; which will load two floats at once into scalar registers.
+
+define void @foo(<2 x float>* %a) {
+; CHECK: .func foo
+; CHECK: ld.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+ %t1 = load <2 x float>* %a
+ %t2 = fmul <2 x float> %t1, %t1
+ store <2 x float> %t2, <2 x float>* %a
+ ret void
+}
+
+define void @foo2(<4 x float>* %a) {
+; CHECK: .func foo2
+; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+ %t1 = load <4 x float>* %a
+ %t2 = fmul <4 x float> %t1, %t1
+ store <4 x float> %t2, <4 x float>* %a
+ ret void
+}
+
+define void @foo3(<8 x float>* %a) {
+; CHECK: .func foo3
+; CHECK: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}];
+; CHECK-NEXT: ld.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%r{{[0-9]+}}+16];
+ %t1 = load <8 x float>* %a
+ %t2 = fmul <8 x float> %t1, %t1
+ store <8 x float> %t2, <8 x float>* %a
+ ret void
+}
+
+
+
+define void @foo4(<2 x i32>* %a) {
+; CHECK: .func foo4
+; CHECK: ld.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+ %t1 = load <2 x i32>* %a
+ %t2 = mul <2 x i32> %t1, %t1
+ store <2 x i32> %t2, <2 x i32>* %a
+ ret void
+}
+
+define void @foo5(<4 x i32>* %a) {
+; CHECK: .func foo5
+; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+ %t1 = load <4 x i32>* %a
+ %t2 = mul <4 x i32> %t1, %t1
+ store <4 x i32> %t2, <4 x i32>* %a
+ ret void
+}
+
+define void @foo6(<8 x i32>* %a) {
+; CHECK: .func foo6
+; CHECK: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}];
+; CHECK-NEXT: ld.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%r{{[0-9]+}}+16];
+ %t1 = load <8 x i32>* %a
+ %t2 = mul <8 x i32> %t1, %t1
+ store <8 x i32> %t2, <8 x i32>* %a
+ ret void
+}
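
For vectors wider than the largest PTX vector operand, the access is split into multiple v4 operations at consecutive 16-byte offsets, as the @foo3 and @foo6 checks require for the load side. A hedged sketch of the assumed full pattern for @foo3 follows; the store side and register numbering are not checked by the test and are assumptions.

    ld.v4.f32  {%f1, %f2, %f3, %f4}, [%r1];      // elements 0-3 of the <8 x float>
    ld.v4.f32  {%f5, %f6, %f7, %f8}, [%r1+16];   // elements 4-7, 16 bytes further on
    // ... eight scalar mul.f32 instructions square the elements ...
    st.v4.f32  [%r1],    {%f9,  %f10, %f11, %f12};
    st.v4.f32  [%r1+16], {%f13, %f14, %f15, %f16};

To reproduce the checks by hand, the RUN line expands to: llc < test/CodeGen/NVPTX/vector-loads.ll -march=nvptx -mcpu=sm_20 | FileCheck test/CodeGen/NVPTX/vector-loads.ll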