author    Evan Cheng <evan.cheng@apple.com>  2011-01-06 07:58:36 +0000
committer Evan Cheng <evan.cheng@apple.com>  2011-01-06 07:58:36 +0000
commit    461f1fc359dff438dad25e809499845b10a3d032 (patch)
tree      143a2a682ffdd84409d6bd1673e22630d42d565e /test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
parent    cce240d26bbf1c2bec9cfff4838d8d807b215586 (diff)
Use movups to lower memcpy and memset even if it's not fast (like corei7).
The theory is it's still faster than a pair of movq / a quad of movl. This will probably hurt older chips like P4 but should run faster on current and future Intel processors.

rdar://8817010

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122955 91177308-0d34-0410-b5e6-96231b3b80d8
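As an illustration (not part of this commit), the kind of fixed-size memcpy the message is talking about looks like the LLVM IR sketch below; on x86-64, each 16-byte chunk of such a copy can now presumably be lowered to a movups load/store pair instead of a pair of movq or a quad of movl moves. The function name, copy size, and alignment here are made up for the sketch.

; Illustrative only: a 32-byte, byte-aligned fixed-size memcpy. After this
; change the x86 backend may expand it with movups even on subtargets that
; do not report fast unaligned 16-byte memory accesses.
declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1) nounwind

define void @copy32(i8* %dst, i8* %src) nounwind {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 32, i32 1, i1 false)
  ret void
}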
Diffstat (limited to 'test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll')
-rw-r--r--  test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll b/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
index 6db3ce1f42..f31cdad861 100644
--- a/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
+++ b/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
@@ -26,7 +26,7 @@ bb:
 ; CHECK: rep;stosl
 %tmp5 = bitcast i32* %tmp4 to i8*
- call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 84, i32 4, i1 false)
+ call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 124, i32 4, i1 false)
 %tmp6 = getelementptr inbounds %struct.type* %s, i32 0, i32 62
 store i32* null, i32** %tmp6, align 8
 br label %bb1
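A hedged aside, not part of the commit: this test checks for rep;stosl, and the memset length is bumped from 84 to 124 bytes, presumably so the call stays above the size at which the backend still prefers rep;stosl over the new inline movups expansion; the exact threshold is an assumption here. A memset closer to the old size, as in the hypothetical sketch below, would presumably now be expanded inline with unaligned vector stores.

; Hypothetical example, not taken from the LLVM test suite: a modest
; fixed-size, 4-byte-aligned zero memset that the commit message suggests
; can now be expanded with movups stores rather than rep;stosl or movl.
declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i32, i1) nounwind

define void @clear84(i8* %p) nounwind {
entry:
  call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 84, i32 4, i1 false)
  ret void
}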