author     Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-08-16 10:55:47 +0000
committer  Richard Sandiford <rsandifo@linux.vnet.ibm.com>  2013-08-16 10:55:47 +0000
commit     6a079fef4fad3e6c2e07c9e1d0776e20a0b05b1e (patch)
tree       a9a17235ca1c2d7da4a78b31edb3aca2965731b5 /test/CodeGen/SystemZ
parent     6c51f89498dd813c8dd16e46069decf2897b31b2 (diff)
[SystemZ] Fix handling of 64-bit memcmp results
Generalize r188163 to cope with return types other than MVT::i32, just as the
existing visitMemCmpCall code did.  I've split this out into a subroutine so
that it can be used for other upcoming patches.

I also noticed that I'd used the wrong API to record the out chain.  It's a
load that uses DAG.getRoot() rather than getRoot(), so the out chain should
go on PendingLoads.  I don't have a testcase for that because we don't do any
interesting scheduling on z yet.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188540 91177308-0d34-0410-b5e6-96231b3b80d8
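As a rough illustration of the kind of subroutine the message describes (not
the code committed in this revision), the sketch below shows how a
SelectionDAGBuilder helper might convert the i32 comparison value produced by
the CLC/IPM lowering to the call's declared integer return type, and record
the out chain on PendingLoads.  The helper name processCallResult, its
signature, and the Chain parameter are assumptions made for illustration.

// Hypothetical sketch only: the helper name, signature, and Chain parameter
// are assumptions, not the code from this revision.  Intended as a fragment
// inside SelectionDAGBuilder.cpp.
void SelectionDAGBuilder::processCallResult(const CallInst &I, SDValue Value,
                                            SDValue Chain, bool IsSigned) {
  // The CLC/IPM lowering yields an i32 result; extend or truncate it to the
  // integer type the IR call actually returns (i64 in memcmp-02.ll below).
  EVT VT = DAG.getTargetLoweringInfo().getValueType(I.getType(), true);
  Value = IsSigned ? DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT)
                   : DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
  setValue(&I, Value);

  // The comparison behaves like a load chained to DAG.getRoot(), so its out
  // chain belongs on PendingLoads rather than being installed as the new
  // root directly.
  PendingLoads.push_back(Chain);
}

For memcmp the result is signed, so the sign-extending path would be taken,
which matches the lgfr sign extension that the f2 and f7 checks below expect.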
Diffstat (limited to 'test/CodeGen/SystemZ')
-rw-r--r--  test/CodeGen/SystemZ/memcmp-01.ll    2
-rw-r--r--  test/CodeGen/SystemZ/memcmp-02.ll  135
2 files changed, 136 insertions(+), 1 deletion(-)
diff --git a/test/CodeGen/SystemZ/memcmp-01.ll b/test/CodeGen/SystemZ/memcmp-01.ll
index 4d8cd14880..5f5752b336 100644
--- a/test/CodeGen/SystemZ/memcmp-01.ll
+++ b/test/CodeGen/SystemZ/memcmp-01.ll
@@ -1,4 +1,4 @@
-; Test memcmp using CLC.
+; Test memcmp using CLC, with i32 results.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
diff --git a/test/CodeGen/SystemZ/memcmp-02.ll b/test/CodeGen/SystemZ/memcmp-02.ll
new file mode 100644
index 0000000000..cae3d3d494
--- /dev/null
+++ b/test/CodeGen/SystemZ/memcmp-02.ll
@@ -0,0 +1,135 @@
+; Test memcmp using CLC, with i64 results.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+declare i64 @memcmp(i8 *%src1, i8 *%src2, i64 %size)
+
+; Zero-length comparisons should be optimized away.
+define i64 @f1(i8 *%src1, i8 *%src2) {
+; CHECK-LABEL: f1:
+; CHECK: lghi %r2, 0
+; CHECK: br %r14
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 0)
+ ret i64 %res
+}
+
+; Check a case where the result is used as an integer.
+define i64 @f2(i8 *%src1, i8 *%src2) {
+; CHECK-LABEL: f2:
+; CHECK: clc 0(2,%r2), 0(%r3)
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: srl [[REG]], 28
+; CHECK: rll [[REG]], [[REG]], 31
+; CHECK: lgfr %r2, [[REG]]
+; CHECK: br %r14
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 2)
+ ret i64 %res
+}
+
+; Check a case where the result is tested for equality.
+define void @f3(i8 *%src1, i8 *%src2, i64 *%dest) {
+; CHECK-LABEL: f3:
+; CHECK: clc 0(3,%r2), 0(%r3)
+; CHECK-NEXT: je {{\..*}}
+; CHECK: br %r14
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 3)
+ %cmp = icmp eq i64 %res, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 0, i64 *%dest
+ br label %exit
+
+exit:
+ ret void
+}
+
+; Check a case where the result is tested for inequality.
+define void @f4(i8 *%src1, i8 *%src2, i64 *%dest) {
+; CHECK-LABEL: f4:
+; CHECK: clc 0(4,%r2), 0(%r3)
+; CHECK-NEXT: jlh {{\..*}}
+; CHECK: br %r14
+entry:
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 4)
+ %cmp = icmp ne i64 %res, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 0, i64 *%dest
+ br label %exit
+
+exit:
+ ret void
+}
+
+; Check a case where the result is tested via slt.
+define void @f5(i8 *%src1, i8 *%src2, i64 *%dest) {
+; CHECK-LABEL: f5:
+; CHECK: clc 0(5,%r2), 0(%r3)
+; CHECK-NEXT: jl {{\..*}}
+; CHECK: br %r14
+entry:
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 5)
+ %cmp = icmp slt i64 %res, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 0, i64 *%dest
+ br label %exit
+
+exit:
+ ret void
+}
+
+; Check a case where the result is tested for sgt.
+define void @f6(i8 *%src1, i8 *%src2, i64 *%dest) {
+; CHECK-LABEL: f6:
+; CHECK: clc 0(6,%r2), 0(%r3)
+; CHECK-NEXT: jh {{\..*}}
+; CHECK: br %r14
+entry:
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 6)
+ %cmp = icmp sgt i64 %res, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 0, i64 *%dest
+ br label %exit
+
+exit:
+ ret void
+}
+
+; Check the upper end of the CLC range. Here the result is used both as
+; an integer and for branching.
+define i64 @f7(i8 *%src1, i8 *%src2, i64 *%dest) {
+; CHECK-LABEL: f7:
+; CHECK: clc 0(256,%r2), 0(%r3)
+; CHECK: ipm [[REG:%r[0-5]]]
+; CHECK: srl [[REG]], 28
+; CHECK: rll [[REG]], [[REG]], 31
+; CHECK: lgfr %r2, [[REG]]
+; CHECK: jl {{\..*}}
+; CHECK: br %r14
+entry:
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 256)
+ %cmp = icmp slt i64 %res, 0
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i64 0, i64 *%dest
+ br label %exit
+
+exit:
+ ret i64 %res
+}
+
+; 257 bytes is too big for a single CLC. For now expect a call instead.
+define i64 @f8(i8 *%src1, i8 *%src2) {
+; CHECK-LABEL: f8:
+; CHECK: brasl %r14, memcmp@PLT
+; CHECK: br %r14
+ %res = call i64 @memcmp(i8 *%src1, i8 *%src2, i64 257)
+ ret i64 %res
+}