author     Benjamin Kramer <benny.kra@googlemail.com>  2011-02-26 22:48:07 +0000
committer  Benjamin Kramer <benny.kra@googlemail.com>  2011-02-26 22:48:07 +0000
commit     7466678003f38f985d5b2dffd0917643137b11cf (patch)
tree       8a0315a7b150080615e132dfa9cfc54c82fa42b1 /lib/Target/README.txt
parent     981b1c4c62054e4e39570a9230c0318f13f9a0a9 (diff)
Add some DAGCombines for (adde 0, 0, glue), which are useful for optimizing legalized code for large integer arithmetic.

1. Inform users of an ADDE with two zero operands that it never sets carry.
2. Fold other ADDs or ADDCs into the ADDE where possible.

It would be neat if we could do the same thing for SETCC+ADD eventually, but we can't do that in target-independent code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@126557 91177308-0d34-0410-b5e6-96231b3b80d8
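For reference, a minimal C sketch of the testcase removed below, assuming a compiler that provides unsigned __int128 on x86-64 (e.g. clang or GCC); the function and parameter names simply mirror the IR and are illustrative. With the new combines the high word should lower to the expected addq + adcq $0 sequence instead of the sbbq/subq sequence quoted in the removed entry.

    #include <stdint.h>

    /* C equivalent of the removed IR testcase: *t receives the low word of
       a + b, and *s receives c plus the carry out of a + b, which is where
       addc/adde appear once the i128 arithmetic is legalized. */
    void a(uint64_t *s, uint64_t *t, uint64_t a, uint64_t b, uint64_t c) {
      unsigned __int128 sum  = (unsigned __int128)a + b;            /* %2 */
      unsigned __int128 full = ((unsigned __int128)c << 64) + sum;  /* %5 */
      *s = (uint64_t)(full >> 64);  /* high word: c + carry(a + b) */
      *t = (uint64_t)sum;           /* low word of a + b */
    }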
Diffstat (limited to 'lib/Target/README.txt')
-rw-r--r--  lib/Target/README.txt | 37
1 file changed, 0 insertions, 37 deletions
diff --git a/lib/Target/README.txt b/lib/Target/README.txt
index f85914b61d..e01df01043 100644
--- a/lib/Target/README.txt
+++ b/lib/Target/README.txt
@@ -1780,43 +1780,6 @@ case it chooses instead to keep the max operation obvious.
//===---------------------------------------------------------------------===//
-Take the following testcase on x86-64 (similar testcases exist for all targets
-with addc/adde):
-
-define void @a(i64* nocapture %s, i64* nocapture %t, i64 %a, i64 %b,
-i64 %c) nounwind {
-entry:
- %0 = zext i64 %a to i128 ; <i128> [#uses=1]
- %1 = zext i64 %b to i128 ; <i128> [#uses=1]
- %2 = add i128 %1, %0 ; <i128> [#uses=2]
- %3 = zext i64 %c to i128 ; <i128> [#uses=1]
- %4 = shl i128 %3, 64 ; <i128> [#uses=1]
- %5 = add i128 %4, %2 ; <i128> [#uses=1]
- %6 = lshr i128 %5, 64 ; <i128> [#uses=1]
- %7 = trunc i128 %6 to i64 ; <i64> [#uses=1]
- store i64 %7, i64* %s, align 8
- %8 = trunc i128 %2 to i64 ; <i64> [#uses=1]
- store i64 %8, i64* %t, align 8
- ret void
-}
-
-Generated code:
- addq %rcx, %rdx
- sbbq %rax, %rax
- subq %rax, %r8
- movq %r8, (%rdi)
- movq %rdx, (%rsi)
- ret
-
-Expected code:
- addq %rcx, %rdx
- adcq $0, %r8
- movq %r8, (%rdi)
- movq %rdx, (%rsi)
- ret
-
-//===---------------------------------------------------------------------===//
-
Switch lowering generates less than ideal code for the following switch:
define void @a(i32 %x) nounwind {
entry: