author     Richard Sandiford <rsandifo@linux.vnet.ibm.com>    2013-07-03 10:10:02 +0000
committer  Richard Sandiford <rsandifo@linux.vnet.ibm.com>    2013-07-03 10:10:02 +0000
commit     fa487e83a83c260d6a50f3df00a0eb012553a912 (patch)
tree       f6ddd72df044eaa9cabbce37fd4b04f64b978139 /test/CodeGen/SystemZ/int-conv-08.ll
parent     b81b477cd4392a51112c3af0659ea9fc176e74f1 (diff)
[SystemZ] Fold more spills
Add a mapping from register-based <INSN>R instructions to the corresponding
memory-based <INSN>. Use it to cut down on the number of spill loads.
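
As a rough illustration only (not the patch's actual code), the mapping can be thought of as a table keyed on the register-register opcode that returns the register-memory form able to read one operand straight from the spill slot. The enumerators and the getMemOpcode helper below are invented for this sketch.

// Minimal sketch of a register-to-memory opcode mapping; all names are
// hypothetical and do not reflect LLVM's real SystemZ backend API.
#include <cstdio>

// Hypothetical opcode enumerators, for illustration only.
enum Opcode { LLGHR, LLGCR, AGR, MSGFR, DSGFR,
              LLGH, LLGC, AG, MSGF, DSGF, NoOpc };

// Map a register-register instruction to its register-memory form,
// or NoOpc if no memory form exists.
Opcode getMemOpcode(Opcode RegOpc) {
  switch (RegOpc) {
  case LLGHR: return LLGH;  // zero-extend halfword
  case LLGCR: return LLGC;  // zero-extend byte
  case AGR:   return AG;    // 64-bit add
  case MSGFR: return MSGF;  // 64-bit * 32-bit multiply
  case DSGFR: return DSGF;  // the case the new tests caught
  default:    return NoOpc;
  }
}

int main() {
  // When the allocator would reload a spilled value just to feed LLGHR,
  // the fold replaces the reload-plus-extend pair with a single LLGH.
  std::printf("LLGHR folds to %s\n",
              getMemOpcode(LLGHR) == LLGH ? "LLGH" : "nothing");
}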
Some instructions extend their operands from smaller fields, so this
required a new TSFlags field to say how big the unextended operand is.
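
A minimal sketch of what such a flags field might look like, assuming a hypothetical TSFlags layout (the field name, width, and bit position below are invented, not LLVM's actual encoding): recording the width in bytes of the unextended operand lets the folder know that, say, LLGHR's memory form should load 2 bytes rather than a full 8-byte slot.

// Hypothetical: an access-size field packed into per-instruction TSFlags.
#include <cassert>
#include <cstdint>

constexpr unsigned AccessSizeShift = 8;   // invented bit position
constexpr uint64_t AccessSizeMask = 0xf;  // invented 4-bit width

constexpr uint64_t encodeAccessSize(uint64_t Flags, unsigned Bytes) {
  return Flags | (uint64_t(Bytes) << AccessSizeShift);
}

constexpr unsigned getAccessSize(uint64_t Flags) {
  return (Flags >> AccessSizeShift) & AccessSizeMask;
}

int main() {
  // LLGHR extends from a 2-byte field, so its memory form loads 2 bytes.
  uint64_t LLGHRFlags = encodeAccessSize(0, 2);
  assert(getAccessSize(LLGHRFlags) == 2);
}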
This optimisation doesn't trigger for C(G)R and CL(G)R because in practice
we always combine those instructions with a branch. Adding a test for every
other case might seem excessive, but it did catch a missed optimisation
for DSGF (fixed in r185435).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185529 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/SystemZ/int-conv-08.ll')
-rw-r--r--  test/CodeGen/SystemZ/int-conv-08.ll  94
1 file changed, 94 insertions, 0 deletions
diff --git a/test/CodeGen/SystemZ/int-conv-08.ll b/test/CodeGen/SystemZ/int-conv-08.ll
index 090cc75402..0616f1e456 100644
--- a/test/CodeGen/SystemZ/int-conv-08.ll
+++ b/test/CodeGen/SystemZ/int-conv-08.ll
@@ -112,3 +112,97 @@ define i64 @f10(i64 %src, i64 %index) {
   %ext = zext i16 %half to i64
   ret i64 %ext
 }
+
+; Test a case where we spill the source of at least one LLGHR. We want
+; to use LLGH if possible.
+define void @f11(i64 *%ptr) {
+; CHECK: f11:
+; CHECK: llgh {{%r[0-9]+}}, 166(%r15)
+; CHECK: br %r14
+  %val0 = load volatile i64 *%ptr
+  %val1 = load volatile i64 *%ptr
+  %val2 = load volatile i64 *%ptr
+  %val3 = load volatile i64 *%ptr
+  %val4 = load volatile i64 *%ptr
+  %val5 = load volatile i64 *%ptr
+  %val6 = load volatile i64 *%ptr
+  %val7 = load volatile i64 *%ptr
+  %val8 = load volatile i64 *%ptr
+  %val9 = load volatile i64 *%ptr
+  %val10 = load volatile i64 *%ptr
+  %val11 = load volatile i64 *%ptr
+  %val12 = load volatile i64 *%ptr
+  %val13 = load volatile i64 *%ptr
+  %val14 = load volatile i64 *%ptr
+  %val15 = load volatile i64 *%ptr
+
+  %trunc0 = trunc i64 %val0 to i16
+  %trunc1 = trunc i64 %val1 to i16
+  %trunc2 = trunc i64 %val2 to i16
+  %trunc3 = trunc i64 %val3 to i16
+  %trunc4 = trunc i64 %val4 to i16
+  %trunc5 = trunc i64 %val5 to i16
+  %trunc6 = trunc i64 %val6 to i16
+  %trunc7 = trunc i64 %val7 to i16
+  %trunc8 = trunc i64 %val8 to i16
+  %trunc9 = trunc i64 %val9 to i16
+  %trunc10 = trunc i64 %val10 to i16
+  %trunc11 = trunc i64 %val11 to i16
+  %trunc12 = trunc i64 %val12 to i16
+  %trunc13 = trunc i64 %val13 to i16
+  %trunc14 = trunc i64 %val14 to i16
+  %trunc15 = trunc i64 %val15 to i16
+
+  %ext0 = zext i16 %trunc0 to i64
+  %ext1 = zext i16 %trunc1 to i64
+  %ext2 = zext i16 %trunc2 to i64
+  %ext3 = zext i16 %trunc3 to i64
+  %ext4 = zext i16 %trunc4 to i64
+  %ext5 = zext i16 %trunc5 to i64
+  %ext6 = zext i16 %trunc6 to i64
+  %ext7 = zext i16 %trunc7 to i64
+  %ext8 = zext i16 %trunc8 to i64
+  %ext9 = zext i16 %trunc9 to i64
+  %ext10 = zext i16 %trunc10 to i64
+  %ext11 = zext i16 %trunc11 to i64
+  %ext12 = zext i16 %trunc12 to i64
+  %ext13 = zext i16 %trunc13 to i64
+  %ext14 = zext i16 %trunc14 to i64
+  %ext15 = zext i16 %trunc15 to i64
+
+  store volatile i64 %val0, i64 *%ptr
+  store volatile i64 %val1, i64 *%ptr
+  store volatile i64 %val2, i64 *%ptr
+  store volatile i64 %val3, i64 *%ptr
+  store volatile i64 %val4, i64 *%ptr
+  store volatile i64 %val5, i64 *%ptr
+  store volatile i64 %val6, i64 *%ptr
+  store volatile i64 %val7, i64 *%ptr
+  store volatile i64 %val8, i64 *%ptr
+  store volatile i64 %val9, i64 *%ptr
+  store volatile i64 %val10, i64 *%ptr
+  store volatile i64 %val11, i64 *%ptr
+  store volatile i64 %val12, i64 *%ptr
+  store volatile i64 %val13, i64 *%ptr
+  store volatile i64 %val14, i64 *%ptr
+  store volatile i64 %val15, i64 *%ptr
+
+  store volatile i64 %ext0, i64 *%ptr
+  store volatile i64 %ext1, i64 *%ptr
+  store volatile i64 %ext2, i64 *%ptr
+  store volatile i64 %ext3, i64 *%ptr
+  store volatile i64 %ext4, i64 *%ptr
+  store volatile i64 %ext5, i64 *%ptr
+  store volatile i64 %ext6, i64 *%ptr
+  store volatile i64 %ext7, i64 *%ptr
+  store volatile i64 %ext8, i64 *%ptr
+  store volatile i64 %ext9, i64 *%ptr
+  store volatile i64 %ext10, i64 *%ptr
+  store volatile i64 %ext11, i64 *%ptr
+  store volatile i64 %ext12, i64 *%ptr
+  store volatile i64 %ext13, i64 *%ptr
+  store volatile i64 %ext14, i64 *%ptr
+  store volatile i64 %ext15, i64 *%ptr
+
+  ret void
+}
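
A note on the checked offset, reading it off the test itself: the sixteen live i64 values exceed the available GPRs, so some are spilled to 8-byte slots starting just above the 160-byte register save area on the SystemZ stack. Because SystemZ is big-endian, the low halfword of the i64 spilled at 160(%r15) occupies bytes 6-7 of the slot, so the folded zero-extending load reads from 166(%r15), which is exactly what the CHECK line matches.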