author    Chris Lattner <sabre@nondot.org>    2010-08-28 01:20:38 +0000
committer Chris Lattner <sabre@nondot.org>    2010-08-28 01:20:38 +0000
commit    3dd08734c1812e47ae5f6aceba15f28865f75943 (patch)
tree      47f3b17dc73ef69603199c12f8a90e78b0704f70 /test
parent    badcda4afa35bf86ce2e6e77ca20f1ce2365b549 (diff)
optimize bitcasts from large integers to vector into vector
element insertion from the pieces that feed into the vector. This
handles a pattern that occurs frequently due to code generated for the
x86-64 abi. We now compile something like this:

struct S { float A, B, C, D; };
struct S g;
struct S bar() {
  struct S A = g;
  ++A.A;
  ++A.C;
  return A;
}

into all nice vector operations:

_bar:                                   ## @bar
## BB#0:                                ## %entry
        movq    _g@GOTPCREL(%rip), %rax
        movss   LCPI1_0(%rip), %xmm1
        movss   (%rax), %xmm0
        addss   %xmm1, %xmm0
        pshufd  $16, %xmm0, %xmm0
        movss   4(%rax), %xmm2
        movss   12(%rax), %xmm3
        pshufd  $16, %xmm2, %xmm2
        unpcklps        %xmm2, %xmm0
        addss   8(%rax), %xmm1
        pshufd  $16, %xmm1, %xmm1
        pshufd  $16, %xmm3, %xmm2
        unpcklps        %xmm2, %xmm1
        ret

instead of icky integer operations:

_bar:                                   ## @bar
        movq    _g@GOTPCREL(%rip), %rax
        movss   LCPI1_0(%rip), %xmm1
        movss   (%rax), %xmm0
        addss   %xmm1, %xmm0
        movd    %xmm0, %ecx
        movl    4(%rax), %edx
        movl    12(%rax), %esi
        shlq    $32, %rdx
        addq    %rcx, %rdx
        movd    %rdx, %xmm0
        addss   8(%rax), %xmm1
        movd    %xmm1, %eax
        shlq    $32, %rsi
        addq    %rax, %rsi
        movd    %rsi, %xmm1
        ret

This resolves rdar://8360454

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112343 91177308-0d34-0410-b5e6-96231b3b80d8
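At the IR level, the transform described above takes a value that was assembled
with zext/shl/or and then bitcast to a vector, and rewrites it as element
insertions directly from the scalar pieces. A minimal before/after sketch is
shown here (illustrative only: the function name @pack_floats and the value
names are invented, and the "after" form mirrors what the CHECK lines in the
new test expect):

; before instcombine: pieces are widened, packed into an i64, then bitcast
define <2 x float> @pack_floats(float %A, float %B) {
  %a32 = bitcast float %A to i32
  %a64 = zext i32 %a32 to i64
  %b32 = bitcast float %B to i32
  %b64 = zext i32 %b32 to i64
  %hi  = shl i64 %b64, 32
  %pk  = or i64 %hi, %a64
  %vec = bitcast i64 %pk to <2 x float>
  ret <2 x float> %vec
}

; after instcombine: element insertion straight from the scalar pieces
define <2 x float> @pack_floats(float %A, float %B) {
  %v0 = insertelement <2 x float> undef, float %A, i32 0
  %v1 = insertelement <2 x float> %v0, float %B, i32 1
  ret <2 x float> %v1
}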
Diffstat (limited to 'test')
-rw-r--r--  test/Transforms/InstCombine/bitcast.ll | 31
1 file changed, 31 insertions, 0 deletions
diff --git a/test/Transforms/InstCombine/bitcast.ll b/test/Transforms/InstCombine/bitcast.ll
index 10898397b9..87e413ea27 100644
--- a/test/Transforms/InstCombine/bitcast.ll
+++ b/test/Transforms/InstCombine/bitcast.ll
@@ -60,3 +60,34 @@ define float @test3(<2 x float> %A, <2 x i64> %B) {
; CHECK-NEXT: %add = fadd float %tmp24, %tmp4
; CHECK-NEXT: ret float %add
}
+
+
+define <2 x i32> @test4(i32 %A, i32 %B){
+ %tmp38 = zext i32 %A to i64
+ %tmp32 = zext i32 %B to i64
+ %tmp33 = shl i64 %tmp32, 32
+ %ins35 = or i64 %tmp33, %tmp38
+ %tmp43 = bitcast i64 %ins35 to <2 x i32>
+ ret <2 x i32> %tmp43
+ ; CHECK: @test4
+ ; CHECK-NEXT: insertelement <2 x i32> undef, i32 %A, i32 0
+ ; CHECK-NEXT: insertelement <2 x i32> {{.*}}, i32 %B, i32 1
+ ; CHECK-NEXT: ret <2 x i32>
+
+}
+
+; rdar://8360454
+define <2 x float> @test5(float %A, float %B) {
+ %tmp37 = bitcast float %A to i32
+ %tmp38 = zext i32 %tmp37 to i64
+ %tmp31 = bitcast float %B to i32
+ %tmp32 = zext i32 %tmp31 to i64
+ %tmp33 = shl i64 %tmp32, 32
+ %ins35 = or i64 %tmp33, %tmp38
+ %tmp43 = bitcast i64 %ins35 to <2 x float>
+ ret <2 x float> %tmp43
+ ; CHECK: @test5
+ ; CHECK-NEXT: insertelement <2 x float> undef, float %A, i32 0
+ ; CHECK-NEXT: insertelement <2 x float> {{.*}}, float %B, i32 1
+ ; CHECK-NEXT: ret <2 x float>
+}
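
For reference, InstCombine tests like this one are driven by a RUN line at the
top of the file; that line is outside this hunk, but it presumably follows the
usual pattern for these tests, something along the lines of:

; RUN: opt < %s -instcombine -S | FileCheck %s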