author     Bill Wendling <isanbard@gmail.com>    2011-04-12 22:46:31 +0000
committer  Bill Wendling <isanbard@gmail.com>    2011-04-12 22:46:31 +0000
commit     d5f323d70bd2d9bc8a63a68bfe439a69e0104bbf (patch)
tree       51593a18718d656918be4f28c4a5f5706e0a0366    /test/CodeGen/X86/avx-intrinsics-x86.ll
parent     0b756349a718e046abba84c316877a682eb0ff2f (diff)
Remove the unaligned load intrinsics in favor of using native unaligned loads.
Now that we have a first-class way to represent unaligned loads, the unaligned load intrinsics are superfluous.

First part of <rdar://problem/8460511>.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129401 91177308-0d34-0410-b5e6-96231b3b80d8
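For context, the replacement pattern looks roughly like the following, a minimal sketch in the pointer-typed IR syntax of this era (the %cast temporary is an illustrative name, not taken from the patch). A call to the load intrinsic becomes a bitcast of the i8* argument to a vector pointer followed by an ordinary load marked align 1, which is how an unaligned load is expressed natively in the IR:

    ; before: unaligned load through a target-specific intrinsic
    %res = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a0)

    ; after: native unaligned load, carrying the alignment on the load itself
    %cast = bitcast i8* %a0 to <16 x i8>*
    %res = load <16 x i8>* %cast, align 1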
Diffstat (limited to 'test/CodeGen/X86/avx-intrinsics-x86.ll')
-rw-r--r--  test/CodeGen/X86/avx-intrinsics-x86.ll | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/test/CodeGen/X86/avx-intrinsics-x86.ll b/test/CodeGen/X86/avx-intrinsics-x86.ll
index 6c32396a41..5201688686 100644
--- a/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -247,7 +247,7 @@ declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind
 
 define <16 x i8> @test_x86_sse2_loadu_dq(i8* %a0) {
   ; CHECK: movl
-  ; CHECK: vmovdqu
+  ; CHECK: vmovups
   %res = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a0) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -256,7 +256,7 @@ declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readonly
 
 define <2 x double> @test_x86_sse2_loadu_pd(i8* %a0) {
   ; CHECK: movl
-  ; CHECK: vmovupd
+  ; CHECK: vmovups
   %res = call <2 x double> @llvm.x86.sse2.loadu.pd(i8* %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }