path: root/test/CodeGen/X86/narrow_op-1.ll
author    Evan Cheng <evan.cheng@apple.com>  2009-05-28 00:35:15 +0000
committer Evan Cheng <evan.cheng@apple.com>  2009-05-28 00:35:15 +0000
commit    8b944d39b356135676459152385f05c496951f6c (patch)
tree      6a1c962c410e68929b12e2753749314181570188  /test/CodeGen/X86/narrow_op-1.ll
parent    c2695eba5700c785c2ae144eede7e1a932e3f5f3 (diff)
Added an optimization that narrows a load / op / store sequence when 'op' is a bit-twiddling instruction whose second operand is an immediate. If the bits touched by 'op' can be manipulated with a narrower instruction, the load and store are narrowed as well. This comes up a lot in bitfield manipulation code.

e.g. orl $65536, 8(%rax) => orb $1, 10(%rax)

Since narrowing is not always a win, e.g. i32 -> i16 is a loss on x86, the dag combiner consults the target before performing the optimization.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72507 91177308-0d34-0410-b5e6-96231b3b80d8
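
For context, here is a minimal C sketch (not part of the commit; function and field names are hypothetical) of the bitfield-manipulation pattern the message describes. C frontends read and write the whole 32-bit storage unit that holds a bitfield, so setting a single flag becomes exactly the load / or-immediate / store sequence this optimization can shrink to a byte access:

    /* Layout mirrors the %struct.bf type in the test below:
       { i64, i16, i16, i32 }; b and c share one 32-bit storage unit
       starting at offset 8. */
    struct bf {
        long long a;
        unsigned  b : 16;
        unsigned  c : 16;
        unsigned  d;
    };

    struct bf bfi;

    void set_flag(void) {
        /* Setting bit 0 of c is an 'or' with 65536 (1 << 16) on the
           32-bit unit at offset 8; the optimization narrows the orl
           to orb $1 on the single byte at offset 10. */
        bfi.c |= 1;
    }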
Diffstat (limited to 'test/CodeGen/X86/narrow_op-1.ll')
-rw-r--r--  test/CodeGen/X86/narrow_op-1.ll  23
1 file changed, 23 insertions, 0 deletions
diff --git a/test/CodeGen/X86/narrow_op-1.ll b/test/CodeGen/X86/narrow_op-1.ll
new file mode 100644
index 0000000000..0ee11b4955
--- /dev/null
+++ b/test/CodeGen/X86/narrow_op-1.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep orb | count 1
+; RUN: llvm-as < %s | llc -march=x86-64 | grep orb | grep 1
+; RUN: llvm-as < %s | llc -march=x86-64 | grep orl | count 1
+; RUN: llvm-as < %s | llc -march=x86-64 | grep orl | grep 16842752
+
+ %struct.bf = type { i64, i16, i16, i32 }
+@bfi = common global %struct.bf zeroinitializer, align 16
+
+define void @t1() nounwind optsize ssp {
+entry:
+ %0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
+ %1 = or i32 %0, 65536
+ store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
+ ret void
+}
+
+define void @t2() nounwind optsize ssp {
+entry:
+ %0 = load i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
+ %1 = or i32 %0, 16842752
+ store i32 %1, i32* bitcast (i16* getelementptr (%struct.bf* @bfi, i32 0, i32 1) to i32*), align 8
+ ret void
+}
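
A note on the two immediates, as I read the RUN lines above (this commentary is not part of the commit). The tiny C program below just prints their byte layout; on little-endian x86, 65536 touches only the byte at offset +2, while 16842752 touches the two bytes at offsets +2 and +3:

    #include <stdio.h>

    int main(void) {
        /* t1: 65536 = 0x00010000 -> one byte set, so the combiner can
           emit a single orb (the RUN lines expect orb $1, count 1). */
        /* t2: 16842752 = 0x01010000 -> two adjacent bytes set; an i16
           access would suffice, but per the commit message i32 -> i16
           is a loss on x86, so the 32-bit orl is kept (the RUN lines
           expect orl with 16842752). */
        printf("%#010x %#010x\n", 65536u, 16842752u);
        return 0;
    }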