author     Bill Wendling <isanbard@gmail.com>   2011-04-12 22:46:31 +0000
committer  Bill Wendling <isanbard@gmail.com>   2011-04-12 22:46:31 +0000
commit     d5f323d70bd2d9bc8a63a68bfe439a69e0104bbf (patch)
tree       51593a18718d656918be4f28c4a5f5706e0a0366 /test/Assembler/AutoUpgradeIntrinsics.ll
parent     0b756349a718e046abba84c316877a682eb0ff2f (diff)
Remove the unaligned load intrinsics in favor of using native unaligned loads.

Now that we have a first-class way to represent unaligned loads, the unaligned
load intrinsics are superfluous. First part of <rdar://problem/8460511>.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129401 91177308-0d34-0410-b5e6-96231b3b80d8
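To illustrate the change in representation (a rough sketch, not taken from the patch; the pointer name %p is a placeholder), a load that previously went through an intrinsic such as

    %v = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)

can now be expressed directly in IR by bitcasting the pointer and loading with an explicit alignment of 1:

    %cast = bitcast i8* %p to <4 x float>*
    %v = load <4 x float>* %cast, align 1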
Diffstat (limited to 'test/Assembler/AutoUpgradeIntrinsics.ll')
-rw-r--r--  test/Assembler/AutoUpgradeIntrinsics.ll | 12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/test/Assembler/AutoUpgradeIntrinsics.ll b/test/Assembler/AutoUpgradeIntrinsics.ll
index 6752bd8281..e4e2d3a56e 100644
--- a/test/Assembler/AutoUpgradeIntrinsics.ll
+++ b/test/Assembler/AutoUpgradeIntrinsics.ll
@@ -7,6 +7,8 @@
; RUN: llvm-as < %s | llvm-dis | \
; RUN: not grep {llvm\\.bswap\\.i\[0-9\]*\\.i\[0-9\]*}
; RUN: llvm-as < %s | llvm-dis | \
+; RUN: not grep {llvm\\.x86\\.sse2\\.loadu}
+; RUN: llvm-as < %s | llvm-dis | \
; RUN: grep {llvm\\.x86\\.mmx\\.ps} | grep {x86_mmx} | count 16
declare i32 @llvm.ctpop.i28(i28 %val)
@@ -79,3 +81,13 @@ define void @sh64(<1 x i64> %A, <2 x i32> %B) {
%r2 = call <1 x i64> @llvm.x86.mmx.psrl.q( <1 x i64> %A, <2 x i32> %B ) ; <<1 x i64>> [#uses=0]
ret void
}
+
+declare <4 x float> @llvm.x86.sse.loadu.ps(i8*) nounwind readnone
+declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readnone
+declare <2 x double> @llvm.x86.sse2.loadu.pd(double*) nounwind readnone
+define void @test_loadu(i8* %a, double* %b) {
+ %v0 = call <4 x float> @llvm.x86.sse.loadu.ps(i8* %a)
+ %v1 = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a)
+ %v2 = call <2 x double> @llvm.x86.sse2.loadu.pd(double* %b)
+ ret void
+}
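
For reference, a sketch of the output the auto-upgrader is expected to produce for these calls when the file is round-tripped through llvm-as | llvm-dis (assuming each call is rewritten as a bitcast plus an align-1 native load; the value names are illustrative):

    %cast = bitcast i8* %a to <16 x i8>*
    %v1 = load <16 x i8>* %cast, align 1

This is why the new RUN line checks that no occurrence of llvm.x86.sse2.loadu survives in the disassembled output.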