path: root/test/CodeGen/X86/mmx-shift.ll
author    Dale Johannesen <dalej@apple.com>  2010-09-30 23:57:10 +0000
committer Dale Johannesen <dalej@apple.com>  2010-09-30 23:57:10 +0000
commit    0488fb649a56b7fc89a5814df5308813f9e5a85d (patch)
tree      21913bb81960866b73c6b49f29782c142563304c /test/CodeGen/X86/mmx-shift.ll
parent    a7e3b564773cc45a1b08383c09fe86b17b3ffe92 (diff)
Massive rewrite of MMX:
The x86_mmx type is used for MMX intrinsics, parameters, and return values where these use MMX registers, and is also supported in load, store, and bitcast. Only the above operations generate MMX instructions, and optimizations do not operate on or produce MMX intrinsics. MMX-sized vectors (<2 x i32> etc.) are lowered to XMM registers or split into smaller pieces. Optimizations may occur on these forms, and the result is cast back to x86_mmx, provided the result feeds into a pre-existing x86_mmx operation. The point of all this is to prevent optimizations from introducing MMX operations, which is unsafe due to the EMMS problem.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@115243 91177308-0d34-0410-b5e6-96231b3b80d8
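As a minimal sketch of the IR shape this change aims for (illustrative only, not taken from this commit; the function name @mmx_boundary is made up, though @llvm.x86.mmx.padd.b is a real MMX intrinsic): generic vector arithmetic stays in ordinary vector types, which optimizations and the XMM lowering may touch freely, and x86_mmx appears only at the bitcast boundary directly in front of an MMX intrinsic that already requires it.

; Illustrative sketch only (not part of this commit): the add is done on a
; plain <8 x i8> vector; x86_mmx is entered only via bitcast immediately
; before the MMX intrinsic call, and left again via bitcast to i64.
define i64 @mmx_boundary(<8 x i8> %a, <8 x i8> %b, x86_mmx %c) nounwind {
entry:
  %sum = add <8 x i8> %a, %b
  %mmx = bitcast <8 x i8> %sum to x86_mmx
  %res = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %mmx, x86_mmx %c )
  %ret = bitcast x86_mmx %res to i64
  ret i64 %ret
}

declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx) nounwind readnone

The test file below is rewritten in exactly this spirit: the intrinsic declarations and calls move to x86_mmx, and explicit bitcasts mark the boundary between ordinary IR values and MMX operands.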
Diffstat (limited to 'test/CodeGen/X86/mmx-shift.ll')
-rw-r--r--  test/CodeGen/X86/mmx-shift.ll  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/test/CodeGen/X86/mmx-shift.ll b/test/CodeGen/X86/mmx-shift.ll
index dd0aa2ca31..bafc75444d 100644
--- a/test/CodeGen/X86/mmx-shift.ll
+++ b/test/CodeGen/X86/mmx-shift.ll
@@ -5,28 +5,28 @@
 define i64 @t1(<1 x i64> %mm1) nounwind {
 entry:
- %tmp6 = tail call <1 x i64> @llvm.x86.mmx.pslli.q( <1 x i64> %mm1, i32 32 ) ; <<1 x i64>> [#uses=1]
- %retval1112 = bitcast <1 x i64> %tmp6 to i64 ; <i64> [#uses=1]
+ %tmp = bitcast <1 x i64> %mm1 to x86_mmx
+ %tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 ) ; <x86_mmx> [#uses=1]
+ %retval1112 = bitcast x86_mmx %tmp6 to i64
  ret i64 %retval1112
 }

-declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) nounwind readnone
+declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone

-define i64 @t2(<2 x i32> %mm1, <2 x i32> %mm2) nounwind {
+define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind {
 entry:
- %tmp7 = tail call <2 x i32> @llvm.x86.mmx.psra.d( <2 x i32> %mm1, <2 x i32> %mm2 ) nounwind readnone ; <<2 x i32>> [#uses=1]
- %retval1112 = bitcast <2 x i32> %tmp7 to i64 ; <i64> [#uses=1]
+ %tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone ; <x86_mmx> [#uses=1]
+ %retval1112 = bitcast x86_mmx %tmp7 to i64
  ret i64 %retval1112
 }

-declare <2 x i32> @llvm.x86.mmx.psra.d(<2 x i32>, <2 x i32>) nounwind readnone
+declare x86_mmx @llvm.x86.mmx.psra.d(x86_mmx, x86_mmx) nounwind readnone

-define i64 @t3(<1 x i64> %mm1, i32 %bits) nounwind {
+define i64 @t3(x86_mmx %mm1, i32 %bits) nounwind {
 entry:
- %tmp6 = bitcast <1 x i64> %mm1 to <4 x i16> ; <<4 x i16>> [#uses=1]
- %tmp8 = tail call <4 x i16> @llvm.x86.mmx.psrli.w( <4 x i16> %tmp6, i32 %bits ) nounwind readnone ; <<4 x i16>> [#uses=1]
- %retval1314 = bitcast <4 x i16> %tmp8 to i64 ; <i64> [#uses=1]
+ %tmp8 = tail call x86_mmx @llvm.x86.mmx.psrli.w( x86_mmx %mm1, i32 %bits ) nounwind readnone ; <x86_mmx> [#uses=1]
+ %retval1314 = bitcast x86_mmx %tmp8 to i64
  ret i64 %retval1314
 }

-declare <4 x i16> @llvm.x86.mmx.psrli.w(<4 x i16>, i32) nounwind readnone
+declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32) nounwind readnone