From 22b942aa4df824adbd3f6eaede53abe451f616e9 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Sat, 3 May 2008 00:52:09 +0000
Subject: Add separate intrinsics for MMX / SSE shifts with i32 integer operands.

This allows us to simplify the horribly complicated matching code.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@50601 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/mmx-shift.ll | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'test/CodeGen/X86/mmx-shift.ll')

diff --git a/test/CodeGen/X86/mmx-shift.ll b/test/CodeGen/X86/mmx-shift.ll
index d68af2d1d3..82eeafd075 100644
--- a/test/CodeGen/X86/mmx-shift.ll
+++ b/test/CodeGen/X86/mmx-shift.ll
@@ -1,14 +1,15 @@
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=+mmx | grep psllq | grep 32
+; RUN: llvm-as < %s | llc -march=x86-64 -mattr=+mmx | grep psllq | grep 32
 ; RUN: llvm-as < %s | llc -march=x86 -mattr=+mmx | grep psrad
 
 define i64 @t1(<1 x i64> %mm1) nounwind {
 entry:
-	%tmp6 = tail call <1 x i64> @llvm.x86.mmx.psll.q( <1 x i64> %mm1, <1 x i64> )		; <<1 x i64>> [#uses=1]
+	%tmp6 = tail call <1 x i64> @llvm.x86.mmx.pslli.q( <1 x i64> %mm1, i32 32 )		; <<1 x i64>> [#uses=1]
 	%retval1112 = bitcast <1 x i64> %tmp6 to i64		; [#uses=1]
 	ret i64 %retval1112
 }
 
-declare <1 x i64> @llvm.x86.mmx.psll.q(<1 x i64>, <1 x i64>) nounwind readnone
+declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) nounwind readnone
 
 define i64 @t2(<2 x i32> %mm1, <2 x i32> %mm2) nounwind {
 entry:
-- 
cgit v1.2.3
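
A minimal sketch (not part of the patch) showing how a caller uses the new i32-operand intrinsic exercised by the test above; the function name @shl_by_imm is hypothetical, and the declaration mirrors the one added in the diff:

	declare <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64>, i32) nounwind readnone

	define i64 @shl_by_imm(<1 x i64> %v) nounwind {
	entry:
	  ; The shift count is a plain i32 immediate, so instruction selection can
	  ; match it directly instead of digging a constant out of a vector operand.
	  %s = tail call <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %v, i32 32)
	  %r = bitcast <1 x i64> %s to i64
	  ret i64 %r
	}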