diff options
author | Ulrich Weigand <ulrich.weigand@de.ibm.com> | 2013-05-06 16:17:29 +0000 |
---|---|---|
committer | Ulrich Weigand <ulrich.weigand@de.ibm.com> | 2013-05-06 16:17:29 +0000 |
commit | b503b49b5105b6aad7d2a015468b84b0f64dfe8e (patch) | |
tree | a60966043fae51838cb2faa08531a7ed078e4fb6 /test/CodeGen/SystemZ/int-add-10.ll | |
parent | 1d09d56fe1e3f3faadd4bf4ccf3e585ddb3c3b07 (diff) | |
download | llvm-b503b49b5105b6aad7d2a015468b84b0f64dfe8e.tar.gz llvm-b503b49b5105b6aad7d2a015468b84b0f64dfe8e.tar.bz2 llvm-b503b49b5105b6aad7d2a015468b84b0f64dfe8e.tar.xz |
[SystemZ] Add CodeGen test cases
This adds all CodeGen tests for the SystemZ target.
This version of the patch incorporates feedback from a review by
Sean Silva. Thanks to all reviewers!
Patch by Richard Sandiford.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@181204 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/SystemZ/int-add-10.ll')
-rw-r--r-- | test/CodeGen/SystemZ/int-add-10.ll | 165 |
1 files changed, 165 insertions, 0 deletions
diff --git a/test/CodeGen/SystemZ/int-add-10.ll b/test/CodeGen/SystemZ/int-add-10.ll
new file mode 100644
index 0000000000..17cfdbe337
--- /dev/null
+++ b/test/CodeGen/SystemZ/int-add-10.ll
@@ -0,0 +1,165 @@
+; Test 128-bit addition in which the second operand is a zero-extended i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
+
+; Check register additions. The XOR ensures that we don't instead zero-extend
+; %b into a register and use memory addition.
+define void @f1(i128 *%aptr, i32 %b) {
+; CHECK: f1:
+; CHECK: algfr {{%r[0-5]}}, %r3
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Like f1, but using an "in-register" extension.
+define void @f2(i128 *%aptr, i64 %b) {
+; CHECK: f2:
+; CHECK: algfr {{%r[0-5]}}, %r3
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %trunc = trunc i64 %b to i32
+  %bext = zext i32 %trunc to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Test register addition in cases where the second operand is zero extended
+; from i64 rather than i32, but is later masked to i32 range.
+define void @f3(i128 *%aptr, i64 %b) {
+; CHECK: f3:
+; CHECK: algfr {{%r[0-5]}}, %r3
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %bext = zext i64 %b to i128
+  %and = and i128 %bext, 4294967295
+  %add = add i128 %xor, %and
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Test ALGF with no offset.
+define void @f4(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f4:
+; CHECK: algf {{%r[0-5]}}, 0(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %b = load i32 *%bsrc
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Check the high end of the ALGF range.
+define void @f5(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f5:
+; CHECK: algf {{%r[0-5]}}, 524284(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %ptr = getelementptr i32 *%bsrc, i64 131071
+  %b = load i32 *%ptr
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Check the next word up, which must use separate address logic.
+; Other sequences besides this one would be OK.
+define void @f6(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f6:
+; CHECK: agfi %r3, 524288
+; CHECK: algf {{%r[0-5]}}, 0(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %ptr = getelementptr i32 *%bsrc, i64 131072
+  %b = load i32 *%ptr
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Check the high end of the negative aligned ALGF range.
+define void @f7(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f7:
+; CHECK: algf {{%r[0-5]}}, -4(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %ptr = getelementptr i32 *%bsrc, i128 -1
+  %b = load i32 *%ptr
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Check the low end of the ALGF range.
+define void @f8(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f8:
+; CHECK: algf {{%r[0-5]}}, -524288(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %ptr = getelementptr i32 *%bsrc, i128 -131072
+  %b = load i32 *%ptr
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Check the next word down, which needs separate address logic.
+; Other sequences besides this one would be OK.
+define void @f9(i128 *%aptr, i32 *%bsrc) {
+; CHECK: f9:
+; CHECK: agfi %r3, -524292
+; CHECK: algf {{%r[0-5]}}, 0(%r3)
+; CHECK: alcgr
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %ptr = getelementptr i32 *%bsrc, i128 -131073
+  %b = load i32 *%ptr
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}
+
+; Check that ALGF allows an index.
+define void @f10(i128 *%aptr, i64 %src, i64 %index) {
+; CHECK: f10:
+; CHECK: algf {{%r[0-5]}}, 524284({{%r4,%r3|%r3,%r4}})
+; CHECK: br %r14
+  %a = load i128 *%aptr
+  %xor = xor i128 %a, 127
+  %add1 = add i64 %src, %index
+  %add2 = add i64 %add1, 524284
+  %ptr = inttoptr i64 %add2 to i32 *
+  %b = load i32 *%ptr
+  %bext = zext i32 %b to i128
+  %add = add i128 %xor, %bext
+  store i128 %add, i128 *%aptr
+  ret void
+}