path: root/test/CodeGen/X86/rot64.ll
author    Dan Gohman <gohman@apple.com>  2008-10-17 01:23:35 +0000
committer Dan Gohman <gohman@apple.com>  2008-10-17 01:23:35 +0000
commit    74feef261a43392bc85280f66c75fbd4e2ccf73d (patch)
tree      c3632ba600f38a66d23a0d10aadb7f53ca87f058 /test/CodeGen/X86/rot64.ll
parent    a1fcd77ccfc61087dfad4fad69752a414179836c (diff)
Define patterns for shld and shrd that match immediate shift counts, and patterns that match dynamic shift counts when the subtract is obscured by a truncate node.

Add DAGCombiner support for recognizing rotate patterns when the shift counts are defined by truncate nodes.

Fix and simplify the code for commuting shld and shrd instructions to work even when the given instruction doesn't have a parent, and when the caller needs a new instruction.

These changes allow LLVM to use the shld, shrd, rol, and ror instructions on x86 to replace equivalent code using two shifts and an or in many more cases.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@57662 91177308-0d34-0410-b5e6-96231b3b80d8
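As an illustration of the truncate case, consider a hypothetical i32 rotate (the function and value names below are invented for this sketch and are not part of the commit or its test) in which the subtract feeding the second shift is computed at i64 and only reaches the shift through a truncate; the new DAGCombiner logic is intended to recognize such a pair of shifts as a rotate:

    define i32 @rot32(i32 %x, i64 %z) nounwind readnone {
    entry:
      %amt = trunc i64 %z to i32      ; dynamic rotate amount
      %sub = sub i64 32, %z           ; complement computed at the wider type...
      %amt2 = trunc i64 %sub to i32   ; ...so a truncate node obscures the sub
      %lo = shl i32 %x, %amt
      %hi = lshr i32 %x, %amt2
      %r = or i32 %hi, %lo            ; recognizable as a rotate left (rol)
      ret i32 %r
    }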
Diffstat (limited to 'test/CodeGen/X86/rot64.ll')
-rw-r--r--  test/CodeGen/X86/rot64.ll  73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/X86/rot64.ll b/test/CodeGen/X86/rot64.ll
new file mode 100644
index 0000000000..2408359a14
--- /dev/null
+++ b/test/CodeGen/X86/rot64.ll
@@ -0,0 +1,73 @@
+; RUN: llvm-as < %s | llc -march=x86-64 > %t
+; RUN: grep rol %t | count 3
+; RUN: grep ror %t | count 1
+; RUN: grep shld %t | count 2
+; RUN: grep shrd %t | count 2
+
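+; rotate %x left by a dynamic count %z (rol)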
+define i64 @foo(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = shl i64 %x, %z
+ %1 = sub i64 64, %z
+ %2 = lshr i64 %x, %1
+ %3 = or i64 %2, %0
+ ret i64 %3
+}
+
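+; (%y << %z) | (%x >> (64 - %z)): the dynamic shld pattern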
+define i64 @bar(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = shl i64 %y, %z
+ %1 = sub i64 64, %z
+ %2 = lshr i64 %x, %1
+ %3 = or i64 %2, %0
+ ret i64 %3
+}
+
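+; rotate %x right by a dynamic count %z (ror)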
+define i64 @un(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = lshr i64 %x, %z
+ %1 = sub i64 64, %z
+ %2 = shl i64 %x, %1
+ %3 = or i64 %2, %0
+ ret i64 %3
+}
+
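+; (%y >> %z) | (%x << (64 - %z)): the dynamic shrd pattern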
+define i64 @bu(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = lshr i64 %y, %z
+ %1 = sub i64 64, %z
+ %2 = shl i64 %x, %1
+ %3 = or i64 %2, %0
+ ret i64 %3
+}
+
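+; rotate %x left by the immediate 7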
+define i64 @xfoo(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = lshr i64 %x, 57
+ %1 = shl i64 %x, 7
+ %2 = or i64 %0, %1
+ ret i64 %2
+}
+
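+; shld pattern with an immediate count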
+define i64 @xbar(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = shl i64 %y, 7
+ %1 = lshr i64 %x, 57
+ %2 = or i64 %0, %1
+ ret i64 %2
+}
+
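+; rotate %x right by the immediate 7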
+define i64 @xun(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = lshr i64 %x, 7
+ %1 = shl i64 %x, 57
+ %2 = or i64 %0, %1
+ ret i64 %2
+}
+
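+; shrd pattern with an immediate count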
+define i64 @xbu(i64 %x, i64 %y, i64 %z) nounwind readnone {
+entry:
+ %0 = lshr i64 %y, 7
+ %1 = shl i64 %x, 57
+ %2 = or i64 %0, %1
+ ret i64 %2
+}