author    David Goodwin <david_goodwin@apple.com>  2009-10-20 19:54:44 +0000
committer David Goodwin <david_goodwin@apple.com>  2009-10-20 19:54:44 +0000
commit    480c529e026942f28e1a792d2cec6d6b5bc0edba (patch)
tree      b5b710094f99f251014b29ca9104eb1da4597f7e /test/CodeGen/X86/sse3.ll
parent    03d02d4faa6cf8ae1337f64bc83dcbd9de570372 (diff)
Checkpoint more aggressive anti-dependency breaking for post-ra scheduler.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@84658 91177308-0d34-0410-b5e6-96231b3b80d8
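The test updates below reflect anti-dependency (write-after-read) breaking in the post-RA scheduler: when a later instruction writes a register that an earlier instruction still reads, renaming the later write to a free register removes the ordering constraint and lets the scheduler reorder the two. A minimal sketch in AT&T syntax, modeled on the t3 hunk below and shown only as an illustration of the idea, not as the exact scheduler output:

    # Before: pshuflw overwrites %xmm0, which pextrw still reads, so the
    # shuffle cannot be scheduled above the extract (WAR anti-dependency).
    pextrw  $5,  %xmm0, %eax
    pshuflw $44, %xmm0, %xmm0

    # After: renaming the shuffle's destination to %xmm1 breaks the
    # anti-dependency, letting the scheduler hoist it past the extract.
    pshuflw $44, %xmm0, %xmm1
    pextrw  $5,  %xmm0, %eax

The same renaming shows up in the other hunks, where scratch registers such as %edx and %eax are replaced by %ecx.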
Diffstat (limited to 'test/CodeGen/X86/sse3.ll')
-rw-r--r--  test/CodeGen/X86/sse3.ll | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/test/CodeGen/X86/sse3.ll b/test/CodeGen/X86/sse3.ll
index 6319cb887a..30b4c2f0f7 100644
--- a/test/CodeGen/X86/sse3.ll
+++ b/test/CodeGen/X86/sse3.ll
@@ -51,9 +51,9 @@ define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
%tmp = shufflevector <8 x i16> %A, <8 x i16> %A, <8 x i32> < i32 8, i32 3, i32 2, i32 13, i32 7, i32 6, i32 5, i32 4 >
ret <8 x i16> %tmp
; X64: t3:
+; X64: pshuflw $44, %xmm0, %xmm1
; X64: pextrw $5, %xmm0, %eax
-; X64: pshuflw $44, %xmm0, %xmm0
-; X64: pshufhw $27, %xmm0, %xmm0
+; X64: pshufhw $27, %xmm1, %xmm0
; X64: pinsrw $3, %eax, %xmm0
; X64: ret
}
@@ -168,11 +168,11 @@ define internal void @t10() nounwind {
ret void
; X64: t10:
; X64: pextrw $4, %xmm0, %eax
-; X64: pextrw $6, %xmm0, %edx
+; X64: pextrw $6, %xmm0, %ecx
; X64: movlhps %xmm1, %xmm1
; X64: pshuflw $8, %xmm1, %xmm1
; X64: pinsrw $2, %eax, %xmm1
-; X64: pinsrw $3, %edx, %xmm1
+; X64: pinsrw $3, %ecx, %xmm1
}
@@ -250,9 +250,9 @@ entry:
%tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
ret <16 x i8> %tmp9
; X64: t16:
+; X64: pextrw $8, %xmm0, %ecx
; X64: pinsrw $0, %eax, %xmm1
-; X64: pextrw $8, %xmm0, %eax
-; X64: pinsrw $1, %eax, %xmm1
+; X64: pinsrw $1, %ecx, %xmm1
; X64: pextrw $1, %xmm1, %ecx
; X64: movd %xmm1, %edx
; X64: pinsrw $0, %edx, %xmm1