From 461f1fc359dff438dad25e809499845b10a3d032 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Thu, 6 Jan 2011 07:58:36 +0000
Subject: Use movups to lower memcpy and memset even when unaligned 16-byte
 moves are not fast (as they are on Core i7).

The theory is that this is still faster than a pair of movq or a quad of
movl stores. It will probably hurt older chips like the P4, but should run
faster on current and future Intel processors.

rdar://8817010

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@122955 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll')

diff --git a/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll b/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
index 6db3ce1f42..f31cdad861 100644
--- a/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
+++ b/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
@@ -26,7 +26,7 @@ bb:
 ; CHECK: rep;stosl
 
   %tmp5 = bitcast i32* %tmp4 to i8*
-  call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 84, i32 4, i1 false)
+  call void @llvm.memset.p0i8.i64(i8* %tmp5, i8 0, i64 124, i32 4, i1 false)
   %tmp6 = getelementptr inbounds %struct.type* %s, i32 0, i32 62
   store i32* null, i32** %tmp6, align 8
   br label %bb1
-- 
cgit v1.2.3
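
For context on the test change: with movups available, small fixed-size memsets
can be expanded inline as unaligned 16-byte SSE stores, so the test grows its
memset from 84 to 124 bytes, presumably to stay past the inline-expansion
threshold and keep exercising the rep;stosl path its CHECK line expects. Below
is a minimal LLVM IR sketch of the kind of call this lowering affects; the
function and names are hypothetical, not part of the commit, and the exact
threshold depends on the subtarget:

; Hypothetical illustration: a 32-byte memset that an SSE-capable x86
; target may now lower to two unaligned movups stores rather than four
; movq (or eight movl) stores.
declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i32, i1)

define void @zero32(i8* %p) nounwind {
entry:
  ; align 1 forces the unaligned case the commit message discusses
  call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 32, i32 1, i1 false)
  ret void
}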