Diffstat (limited to 'test/CodeGen/X86/mmx-arith.ll')
-rw-r--r--  test/CodeGen/X86/mmx-arith.ll  380
1 file changed, 279 insertions(+), 101 deletions(-)
diff --git a/test/CodeGen/X86/mmx-arith.ll b/test/CodeGen/X86/mmx-arith.ll
index e4dfdbfe1b..6817487324 100644
--- a/test/CodeGen/X86/mmx-arith.ll
+++ b/test/CodeGen/X86/mmx-arith.ll
@@ -1,131 +1,309 @@
; RUN: llc < %s -march=x86 -mattr=+mmx
;; A basic sanity check to make sure that MMX arithmetic actually compiles.
+;; First is a straight translation of the original with bitcasts as needed.
-define void @foo(<8 x i8>* %A, <8 x i8>* %B) {
+define void @foo(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load <8 x i8>* %A ; <<8 x i8>> [#uses=1]
- %tmp3 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp4 = add <8 x i8> %tmp1, %tmp3 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp4, <8 x i8>* %A
- %tmp7 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp12 = tail call <8 x i8> @llvm.x86.mmx.padds.b( <8 x i8> %tmp4, <8 x i8> %tmp7 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp12, <8 x i8>* %A
- %tmp16 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp21 = tail call <8 x i8> @llvm.x86.mmx.paddus.b( <8 x i8> %tmp12, <8 x i8> %tmp16 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp21, <8 x i8>* %A
- %tmp27 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp28 = sub <8 x i8> %tmp21, %tmp27 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp28, <8 x i8>* %A
- %tmp31 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp36 = tail call <8 x i8> @llvm.x86.mmx.psubs.b( <8 x i8> %tmp28, <8 x i8> %tmp31 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp36, <8 x i8>* %A
- %tmp40 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp45 = tail call <8 x i8> @llvm.x86.mmx.psubus.b( <8 x i8> %tmp36, <8 x i8> %tmp40 ) ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp45, <8 x i8>* %A
- %tmp51 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp52 = mul <8 x i8> %tmp45, %tmp51 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp52, <8 x i8>* %A
- %tmp57 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp58 = and <8 x i8> %tmp52, %tmp57 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp58, <8 x i8>* %A
- %tmp63 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp64 = or <8 x i8> %tmp58, %tmp63 ; <<8 x i8>> [#uses=2]
- store <8 x i8> %tmp64, <8 x i8>* %A
- %tmp69 = load <8 x i8>* %B ; <<8 x i8>> [#uses=1]
- %tmp70 = xor <8 x i8> %tmp64, %tmp69 ; <<8 x i8>> [#uses=1]
- store <8 x i8> %tmp70, <8 x i8>* %A
+ %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
+ %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
+ %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
+ %tmp4 = add <8 x i8> %tmp1a, %tmp3a ; <<8 x i8>> [#uses=2]
+ %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
+ store x86_mmx %tmp4a, x86_mmx* %A
+ %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp12, x86_mmx* %A
+ %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp21, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp21a = bitcast x86_mmx %tmp21 to <8 x i8>
+ %tmp27a = bitcast x86_mmx %tmp27 to <8 x i8>
+ %tmp28 = sub <8 x i8> %tmp21a, %tmp27a ; <<8 x i8>> [#uses=2]
+ %tmp28a = bitcast <8 x i8> %tmp28 to x86_mmx
+ store x86_mmx %tmp28a, x86_mmx* %A
+ %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp36, x86_mmx* %A
+ %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp45, x86_mmx* %A
+ %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp45a = bitcast x86_mmx %tmp45 to <8 x i8>
+ %tmp51a = bitcast x86_mmx %tmp51 to <8 x i8>
+ %tmp52 = mul <8 x i8> %tmp45a, %tmp51a ; <<8 x i8>> [#uses=2]
+ %tmp52a = bitcast <8 x i8> %tmp52 to x86_mmx
+ store x86_mmx %tmp52a, x86_mmx* %A
+ %tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp57a = bitcast x86_mmx %tmp57 to <8 x i8>
+ %tmp58 = and <8 x i8> %tmp52, %tmp57a ; <<8 x i8>> [#uses=2]
+ %tmp58a = bitcast <8 x i8> %tmp58 to x86_mmx
+ store x86_mmx %tmp58a, x86_mmx* %A
+ %tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp63a = bitcast x86_mmx %tmp63 to <8 x i8>
+ %tmp64 = or <8 x i8> %tmp58, %tmp63a ; <<8 x i8>> [#uses=2]
+ %tmp64a = bitcast <8 x i8> %tmp64 to x86_mmx
+ store x86_mmx %tmp64a, x86_mmx* %A
+ %tmp69 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp69a = bitcast x86_mmx %tmp69 to <8 x i8>
+ %tmp64b = bitcast x86_mmx %tmp64a to <8 x i8>
+ %tmp70 = xor <8 x i8> %tmp64b, %tmp69a ; <<8 x i8>> [#uses=1]
+ %tmp70a = bitcast <8 x i8> %tmp70 to x86_mmx
+ store x86_mmx %tmp70a, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
-define void @baz(<2 x i32>* %A, <2 x i32>* %B) {
+define void @baz(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load <2 x i32>* %A ; <<2 x i32>> [#uses=1]
- %tmp3 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp4 = add <2 x i32> %tmp1, %tmp3 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp4, <2 x i32>* %A
- %tmp9 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp10 = sub <2 x i32> %tmp4, %tmp9 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp10, <2 x i32>* %A
- %tmp15 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp16 = mul <2 x i32> %tmp10, %tmp15 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp16, <2 x i32>* %A
- %tmp21 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp22 = and <2 x i32> %tmp16, %tmp21 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp22, <2 x i32>* %A
- %tmp27 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp28 = or <2 x i32> %tmp22, %tmp27 ; <<2 x i32>> [#uses=2]
- store <2 x i32> %tmp28, <2 x i32>* %A
- %tmp33 = load <2 x i32>* %B ; <<2 x i32>> [#uses=1]
- %tmp34 = xor <2 x i32> %tmp28, %tmp33 ; <<2 x i32>> [#uses=1]
- store <2 x i32> %tmp34, <2 x i32>* %A
+ %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
+ %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp1a = bitcast x86_mmx %tmp1 to <2 x i32>
+ %tmp3a = bitcast x86_mmx %tmp3 to <2 x i32>
+ %tmp4 = add <2 x i32> %tmp1a, %tmp3a ; <<2 x i32>> [#uses=2]
+ %tmp4a = bitcast <2 x i32> %tmp4 to x86_mmx
+ store x86_mmx %tmp4a, x86_mmx* %A
+ %tmp9 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp9a = bitcast x86_mmx %tmp9 to <2 x i32>
+ %tmp10 = sub <2 x i32> %tmp4, %tmp9a ; <<2 x i32>> [#uses=2]
+ %tmp10a = bitcast <2 x i32> %tmp10 to x86_mmx
+ store x86_mmx %tmp10a, x86_mmx* %A
+ %tmp15 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp10b = bitcast x86_mmx %tmp10a to <2 x i32>
+ %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
+ %tmp16 = mul <2 x i32> %tmp10b, %tmp15a ; <<2 x i32>> [#uses=2]
+ %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
+ store x86_mmx %tmp16a, x86_mmx* %A
+ %tmp21 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp16b = bitcast x86_mmx %tmp16a to <2 x i32>
+ %tmp21a = bitcast x86_mmx %tmp21 to <2 x i32>
+ %tmp22 = and <2 x i32> %tmp16b, %tmp21a ; <<2 x i32>> [#uses=2]
+ %tmp22a = bitcast <2 x i32> %tmp22 to x86_mmx
+ store x86_mmx %tmp22a, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp22b = bitcast x86_mmx %tmp22a to <2 x i32>
+ %tmp27a = bitcast x86_mmx %tmp27 to <2 x i32>
+ %tmp28 = or <2 x i32> %tmp22b, %tmp27a ; <<2 x i32>> [#uses=2]
+ %tmp28a = bitcast <2 x i32> %tmp28 to x86_mmx
+ store x86_mmx %tmp28a, x86_mmx* %A
+ %tmp33 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp28b = bitcast x86_mmx %tmp28a to <2 x i32>
+ %tmp33a = bitcast x86_mmx %tmp33 to <2 x i32>
+ %tmp34 = xor <2 x i32> %tmp28b, %tmp33a ; <<2 x i32>> [#uses=1]
+ %tmp34a = bitcast <2 x i32> %tmp34 to x86_mmx
+ store x86_mmx %tmp34a, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
-define void @bar(<4 x i16>* %A, <4 x i16>* %B) {
+define void @bar(x86_mmx* %A, x86_mmx* %B) {
entry:
- %tmp1 = load <4 x i16>* %A ; <<4 x i16>> [#uses=1]
- %tmp3 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp4 = add <4 x i16> %tmp1, %tmp3 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp4, <4 x i16>* %A
- %tmp7 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp12 = tail call <4 x i16> @llvm.x86.mmx.padds.w( <4 x i16> %tmp4, <4 x i16> %tmp7 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp12, <4 x i16>* %A
- %tmp16 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp21 = tail call <4 x i16> @llvm.x86.mmx.paddus.w( <4 x i16> %tmp12, <4 x i16> %tmp16 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp21, <4 x i16>* %A
- %tmp27 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp28 = sub <4 x i16> %tmp21, %tmp27 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp28, <4 x i16>* %A
- %tmp31 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp36 = tail call <4 x i16> @llvm.x86.mmx.psubs.w( <4 x i16> %tmp28, <4 x i16> %tmp31 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp36, <4 x i16>* %A
- %tmp40 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp45 = tail call <4 x i16> @llvm.x86.mmx.psubus.w( <4 x i16> %tmp36, <4 x i16> %tmp40 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp45, <4 x i16>* %A
- %tmp51 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp52 = mul <4 x i16> %tmp45, %tmp51 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp52, <4 x i16>* %A
- %tmp55 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp60 = tail call <4 x i16> @llvm.x86.mmx.pmulh.w( <4 x i16> %tmp52, <4 x i16> %tmp55 ) ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp60, <4 x i16>* %A
- %tmp64 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp69 = tail call <2 x i32> @llvm.x86.mmx.pmadd.wd( <4 x i16> %tmp60, <4 x i16> %tmp64 ) ; <<2 x i32>> [#uses=1]
- %tmp70 = bitcast <2 x i32> %tmp69 to <4 x i16> ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp70, <4 x i16>* %A
- %tmp75 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp76 = and <4 x i16> %tmp70, %tmp75 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp76, <4 x i16>* %A
- %tmp81 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp82 = or <4 x i16> %tmp76, %tmp81 ; <<4 x i16>> [#uses=2]
- store <4 x i16> %tmp82, <4 x i16>* %A
- %tmp87 = load <4 x i16>* %B ; <<4 x i16>> [#uses=1]
- %tmp88 = xor <4 x i16> %tmp82, %tmp87 ; <<4 x i16>> [#uses=1]
- store <4 x i16> %tmp88, <4 x i16>* %A
+ %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
+ %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp1a = bitcast x86_mmx %tmp1 to <4 x i16>
+ %tmp3a = bitcast x86_mmx %tmp3 to <4 x i16>
+ %tmp4 = add <4 x i16> %tmp1a, %tmp3a ; <<4 x i16>> [#uses=2]
+ %tmp4a = bitcast <4 x i16> %tmp4 to x86_mmx
+ store x86_mmx %tmp4a, x86_mmx* %A
+ %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4a, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp12, x86_mmx* %A
+ %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp21, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp21a = bitcast x86_mmx %tmp21 to <4 x i16>
+ %tmp27a = bitcast x86_mmx %tmp27 to <4 x i16>
+ %tmp28 = sub <4 x i16> %tmp21a, %tmp27a ; <<4 x i16>> [#uses=2]
+ %tmp28a = bitcast <4 x i16> %tmp28 to x86_mmx
+ store x86_mmx %tmp28a, x86_mmx* %A
+ %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28a, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp36, x86_mmx* %A
+ %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp45, x86_mmx* %A
+ %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp45a = bitcast x86_mmx %tmp45 to <4 x i16>
+ %tmp51a = bitcast x86_mmx %tmp51 to <4 x i16>
+ %tmp52 = mul <4 x i16> %tmp45a, %tmp51a ; <<4 x i16>> [#uses=2]
+ %tmp52a = bitcast <4 x i16> %tmp52 to x86_mmx
+ store x86_mmx %tmp52a, x86_mmx* %A
+ %tmp55 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52a, x86_mmx %tmp55 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp60, x86_mmx* %A
+ %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
+ %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp70, x86_mmx* %A
+ %tmp75 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp70a = bitcast x86_mmx %tmp70 to <4 x i16>
+ %tmp75a = bitcast x86_mmx %tmp75 to <4 x i16>
+ %tmp76 = and <4 x i16> %tmp70a, %tmp75a ; <<4 x i16>> [#uses=2]
+ %tmp76a = bitcast <4 x i16> %tmp76 to x86_mmx
+ store x86_mmx %tmp76a, x86_mmx* %A
+ %tmp81 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp76b = bitcast x86_mmx %tmp76a to <4 x i16>
+ %tmp81a = bitcast x86_mmx %tmp81 to <4 x i16>
+ %tmp82 = or <4 x i16> %tmp76b, %tmp81a ; <<4 x i16>> [#uses=2]
+ %tmp82a = bitcast <4 x i16> %tmp82 to x86_mmx
+ store x86_mmx %tmp82a, x86_mmx* %A
+ %tmp87 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp82b = bitcast x86_mmx %tmp82a to <4 x i16>
+ %tmp87a = bitcast x86_mmx %tmp87 to <4 x i16>
+ %tmp88 = xor <4 x i16> %tmp82b, %tmp87a ; <<4 x i16>> [#uses=1]
+ %tmp88a = bitcast <4 x i16> %tmp88 to x86_mmx
+ store x86_mmx %tmp88a, x86_mmx* %A
tail call void @llvm.x86.mmx.emms( )
ret void
}
-declare <8 x i8> @llvm.x86.mmx.padds.b(<8 x i8>, <8 x i8>)
+;; The following is modified to use MMX intrinsics everywhere they work.
-declare <8 x i8> @llvm.x86.mmx.paddus.b(<8 x i8>, <8 x i8>)
+define void @fooa(x86_mmx* %A, x86_mmx* %B) {
+entry:
+ %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
+ %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp4, x86_mmx* %A
+ %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.b( x86_mmx %tmp4, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp12, x86_mmx* %A
+ %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.b( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp21, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.b( x86_mmx %tmp21, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp28, x86_mmx* %A
+ %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.b( x86_mmx %tmp28, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp36, x86_mmx* %A
+ %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.b( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp45, x86_mmx* %A
+ %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp51a = bitcast x86_mmx %tmp51 to i64
+ %tmp51aa = bitcast i64 %tmp51a to <8 x i8>
+ %tmp51b = bitcast x86_mmx %tmp45 to <8 x i8>
+ %tmp52 = mul <8 x i8> %tmp51b, %tmp51aa ; <<8 x i8>> [#uses=2]
+ %tmp52a = bitcast <8 x i8> %tmp52 to i64
+ %tmp52aa = bitcast i64 %tmp52a to x86_mmx
+ store x86_mmx %tmp52aa, x86_mmx* %A
+ %tmp57 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp58 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp51, x86_mmx %tmp57 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp58, x86_mmx* %A
+ %tmp63 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp64 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp58, x86_mmx %tmp63 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp64, x86_mmx* %A
+ %tmp69 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp70 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp64, x86_mmx %tmp69 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp70, x86_mmx* %A
+ tail call void @llvm.x86.mmx.emms( )
+ ret void
+}
-declare <8 x i8> @llvm.x86.mmx.psubs.b(<8 x i8>, <8 x i8>)
+define void @baza(x86_mmx* %A, x86_mmx* %B) {
+entry:
+ %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
+ %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.d( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp4, x86_mmx* %A
+ %tmp9 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp10 = tail call x86_mmx @llvm.x86.mmx.psub.d( x86_mmx %tmp4, x86_mmx %tmp9 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp10, x86_mmx* %A
+ %tmp15 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp10a = bitcast x86_mmx %tmp10 to <2 x i32>
+ %tmp15a = bitcast x86_mmx %tmp15 to <2 x i32>
+ %tmp16 = mul <2 x i32> %tmp10a, %tmp15a ; <<2 x i32>> [#uses=2]
+ %tmp16a = bitcast <2 x i32> %tmp16 to x86_mmx
+ store x86_mmx %tmp16a, x86_mmx* %A
+ %tmp21 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp22 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp16a, x86_mmx %tmp21 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp22, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp28 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp22, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp28, x86_mmx* %A
+ %tmp33 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp34 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp28, x86_mmx %tmp33 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp34, x86_mmx* %A
+ tail call void @llvm.x86.mmx.emms( )
+ ret void
+}
-declare <8 x i8> @llvm.x86.mmx.psubus.b(<8 x i8>, <8 x i8>)
+define void @bara(x86_mmx* %A, x86_mmx* %B) {
+entry:
+ %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
+ %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp4 = tail call x86_mmx @llvm.x86.mmx.padd.w( x86_mmx %tmp1, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp4, x86_mmx* %A
+ %tmp7 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp12 = tail call x86_mmx @llvm.x86.mmx.padds.w( x86_mmx %tmp4, x86_mmx %tmp7 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp12, x86_mmx* %A
+ %tmp16 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp21 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp12, x86_mmx %tmp16 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp21, x86_mmx* %A
+ %tmp27 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp28 = tail call x86_mmx @llvm.x86.mmx.psub.w( x86_mmx %tmp21, x86_mmx %tmp27 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp28, x86_mmx* %A
+ %tmp31 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp36 = tail call x86_mmx @llvm.x86.mmx.psubs.w( x86_mmx %tmp28, x86_mmx %tmp31 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp36, x86_mmx* %A
+ %tmp40 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp45 = tail call x86_mmx @llvm.x86.mmx.psubus.w( x86_mmx %tmp36, x86_mmx %tmp40 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp45, x86_mmx* %A
+ %tmp51 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp52 = tail call x86_mmx @llvm.x86.mmx.pmull.w( x86_mmx %tmp45, x86_mmx %tmp51 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp52, x86_mmx* %A
+ %tmp55 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp60 = tail call x86_mmx @llvm.x86.mmx.pmulh.w( x86_mmx %tmp52, x86_mmx %tmp55 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp60, x86_mmx* %A
+ %tmp64 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp69 = tail call x86_mmx @llvm.x86.mmx.pmadd.wd( x86_mmx %tmp60, x86_mmx %tmp64 ) ; <x86_mmx> [#uses=1]
+ %tmp70 = bitcast x86_mmx %tmp69 to x86_mmx ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp70, x86_mmx* %A
+ %tmp75 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp76 = tail call x86_mmx @llvm.x86.mmx.pand( x86_mmx %tmp70, x86_mmx %tmp75 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp76, x86_mmx* %A
+ %tmp81 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp82 = tail call x86_mmx @llvm.x86.mmx.por( x86_mmx %tmp76, x86_mmx %tmp81 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp82, x86_mmx* %A
+ %tmp87 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
+ %tmp88 = tail call x86_mmx @llvm.x86.mmx.pxor( x86_mmx %tmp82, x86_mmx %tmp87 ) ; <x86_mmx> [#uses=2]
+ store x86_mmx %tmp88, x86_mmx* %A
+ tail call void @llvm.x86.mmx.emms( )
+ ret void
+}
-declare <4 x i16> @llvm.x86.mmx.padds.w(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)
-declare <4 x i16> @llvm.x86.mmx.paddus.w(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.psubus.b(x86_mmx, x86_mmx)
-declare <4 x i16> @llvm.x86.mmx.psubs.w(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)
-declare <4 x i16> @llvm.x86.mmx.psubus.w(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.psubus.w(x86_mmx, x86_mmx)
-declare <4 x i16> @llvm.x86.mmx.pmulh.w(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.pmulh.w(x86_mmx, x86_mmx)
-declare <2 x i32> @llvm.x86.mmx.pmadd.wd(<4 x i16>, <4 x i16>)
+declare x86_mmx @llvm.x86.mmx.pmadd.wd(x86_mmx, x86_mmx)
declare void @llvm.x86.mmx.emms()
+
+declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padds.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padds.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.padds.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubs.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubs.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psubs.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psub.b(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psub.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.psub.d(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pmull.w(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pand(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.por(x86_mmx, x86_mmx)
+declare x86_mmx @llvm.x86.mmx.pxor(x86_mmx, x86_mmx)
+
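For reference, a minimal sketch (illustrative only, not part of this diff) of the two styles the updated test exercises: the bitcast-based translation used in @foo/@baz/@bar, and the intrinsic-only form used in @fooa/@baza/@bara. The function names @add_v8i8 and @add_mmx below are hypothetical.

;; Style 1: operate on a native vector type, bitcasting to and from
;; x86_mmx at the load/store boundary.
define void @add_v8i8(x86_mmx* %A, x86_mmx* %B) {
entry:
  %a = load x86_mmx* %A
  %b = load x86_mmx* %B
  %av = bitcast x86_mmx %a to <8 x i8>
  %bv = bitcast x86_mmx %b to <8 x i8>
  %sum = add <8 x i8> %av, %bv
  %res = bitcast <8 x i8> %sum to x86_mmx
  store x86_mmx %res, x86_mmx* %A
  tail call void @llvm.x86.mmx.emms( )
  ret void
}

;; Style 2: stay in x86_mmx throughout and call the MMX intrinsic directly.
define void @add_mmx(x86_mmx* %A, x86_mmx* %B) {
entry:
  %a = load x86_mmx* %A
  %b = load x86_mmx* %B
  %sum = tail call x86_mmx @llvm.x86.mmx.padd.b( x86_mmx %a, x86_mmx %b )
  store x86_mmx %sum, x86_mmx* %A
  tail call void @llvm.x86.mmx.emms( )
  ret void
}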