From d5f323d70bd2d9bc8a63a68bfe439a69e0104bbf Mon Sep 17 00:00:00 2001
From: Bill Wendling
Date: Tue, 12 Apr 2011 22:46:31 +0000
Subject: Remove the unaligned load intrinsics in favor of using native
 unaligned loads.

Now that we have a first-class way to represent unaligned loads, the
unaligned load intrinsics are superfluous.

First part of .

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129401 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/avx-intrinsics-x86.ll | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 6c32396a41..5201688686 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -247,7 +247,7 @@ declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind
 
 define <16 x i8> @test_x86_sse2_loadu_dq(i8* %a0) {
   ; CHECK: movl
-  ; CHECK: vmovdqu
+  ; CHECK: vmovups
   %res = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a0) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -256,7 +256,7 @@ declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readonly
 
 define <2 x double> @test_x86_sse2_loadu_pd(i8* %a0) {
   ; CHECK: movl
-  ; CHECK: vmovupd
+  ; CHECK: vmovups
   %res = call <2 x double> @llvm.x86.sse2.loadu.pd(i8* %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
-- 
cgit v1.2.3
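
For context, a minimal sketch of the native form this commit favors: instead
of calling @llvm.x86.sse2.loadu.dq, IR can express the same operation as an
ordinary load with "align 1", which the X86 backend lowers to an unaligned
vector move such as vmovups. The function and value names below are
illustrative, not taken from the patch; the syntax follows the 2011-era IR
used in the tests above.

; Sketch only: a native unaligned load replacing the loadu intrinsic.
define <16 x i8> @loadu_dq_native(i8* %a0) {
  %ptr = bitcast i8* %a0 to <16 x i8>*
  ; "align 1" marks the load as unaligned; the backend is free to select
  ; an unaligned move instruction rather than requiring an intrinsic.
  %res = load <16 x i8>* %ptr, align 1
  ret <16 x i8> %res
}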