author    Evan Cheng <evan.cheng@apple.com>  2011-01-07 19:35:30 +0000
committer Evan Cheng <evan.cheng@apple.com>  2011-01-07 19:35:30 +0000
commit    a5e1362f968568d66d76ddcdcff4ab98e203a48c (patch)
tree      53e266c315432b49be8ad6f3a2d2a5873265ab53 /test/CodeGen/X86/memset-2.ll
parent    1434f66b2e132a707e2c8ccb3350ea13fb5aa051 (diff)
Revert r122955. It seems using movups to lower memcpy can cause massive regressions (even on Nehalem) in edge cases. I also didn't see any real performance benefit.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@123015 91177308-0d34-0410-b5e6-96231b3b80d8
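For context, the lowering being reverted: with r122955 applied, an align-1 memset of 188 bytes was expanded inline as a pxor to zero %xmm0, eleven 16-byte movups stores at offsets 0 through 160 (176 bytes), and three 4-byte movl stores at offsets 176, 180, and 184 covering the remaining 12 bytes (176 + 12 = 188). This revert restores the libcall lowering, so the test below now only checks for calll _memset. A minimal sketch of the IR pattern being exercised (the triple is an assumption inferred from the _memset underscore prefix; the test's actual RUN line sits above this hunk):

    ; Sketch only, assuming an i386 Darwin target; not the literal test file.
    target triple = "i386-apple-darwin"

    declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind

    define fastcc void @t1() nounwind {
    entry:
      ; Pre-revert:  pxor %xmm0, %xmm0 followed by inline movups/movl stores.
      ; Post-revert: calll _memset (libcall).
      call void @llvm.memset.i32(i8* null, i8 0, i32 188, i32 1) nounwind
      unreachable
    }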
Diffstat (limited to 'test/CodeGen/X86/memset-2.ll')
-rw-r--r--  test/CodeGen/X86/memset-2.ll | 16
1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/test/CodeGen/X86/memset-2.ll b/test/CodeGen/X86/memset-2.ll
index 128799d0f5..993583b4a4 100644
--- a/test/CodeGen/X86/memset-2.ll
+++ b/test/CodeGen/X86/memset-2.ll
@@ -5,21 +5,7 @@ declare void @llvm.memset.i32(i8*, i8, i32, i32) nounwind
define fastcc void @t1() nounwind {
entry:
; CHECK: t1:
-; CHECK: pxor %xmm0, %xmm0
-; CHECK: movups %xmm0, 160
-; CHECK: movups %xmm0, 144
-; CHECK: movups %xmm0, 128
-; CHECK: movups %xmm0, 112
-; CHECK: movups %xmm0, 96
-; CHECK: movups %xmm0, 80
-; CHECK: movups %xmm0, 64
-; CHECK: movups %xmm0, 48
-; CHECK: movups %xmm0, 32
-; CHECK: movups %xmm0, 16
-; CHECK: movups %xmm0, 0
-; CHECK: movl $0, 184
-; CHECK: movl $0, 180
-; CHECK: movl $0, 176
+; CHECK: calll _memset
call void @llvm.memset.i32( i8* null, i8 0, i32 188, i32 1 ) nounwind
unreachable
}
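To reproduce the post-revert output locally, an invocation along these lines should work (the exact RUN line lives at the top of memset-2.ll, outside this hunk; the i386 Darwin triple is again an assumption):

    llc < test/CodeGen/X86/memset-2.ll -mtriple=i386-apple-darwin | FileCheck test/CodeGen/X86/memset-2.ll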