author     Dan Gohman <gohman@apple.com>    2009-02-16 00:44:23 +0000
committer  Dan Gohman <gohman@apple.com>    2009-02-16 00:44:23 +0000
commit     926b0a27b9adf79b89042785b8496cced5a49883 (patch)
tree       20ce4c7266df5215aac60fa4fcca9082eac4397e /test/CodeGen/X86/2006-05-11-InstrSched.ll
parent     97d116346cdf47ce0c8536a69ff6c606309cb4c0 (diff)
Change these tests to use regular loads instead of llvm.x86.sse2.loadu.dq.
Enhance instcombine to use the preferred field of GetOrEnforceKnownAlignment in more cases, so that regular IR operations are optimized in the same way that the intrinsics currently are.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@64623 91177308-0d34-0410-b5e6-96231b3b80d8
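The change replaces calls to the @llvm.x86.sse2.loadu.dq intrinsic with ordinary unaligned vector loads. Below is a minimal sketch of the pattern; the function name @copy16 and the argument %src are illustrative only and not part of the test, and the syntax follows the typed-pointer IR of this era, as in the diff that follows:

    define <16 x i8> @copy16(i8* %src) {
    entry:
      ; cast the raw pointer to a vector pointer, then load with align 1;
      ; x86 codegen typically lowers such a load to an unaligned movdqu
      %ptr = bitcast i8* %src to <16 x i8>*
      %v = load <16 x i8>* %ptr, align 1
      ret <16 x i8> %v
    }

Written as a regular load, the operation can have its alignment raised by instcombine via GetOrEnforceKnownAlignment when the pointer's alignment is provable, matching the treatment the intrinsic form already received.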
Diffstat (limited to 'test/CodeGen/X86/2006-05-11-InstrSched.ll')
-rw-r--r--  test/CodeGen/X86/2006-05-11-InstrSched.ll | 10
1 file changed, 4 insertions, 6 deletions
diff --git a/test/CodeGen/X86/2006-05-11-InstrSched.ll b/test/CodeGen/X86/2006-05-11-InstrSched.ll
index 774e7243fd..6c0e76b34a 100644
--- a/test/CodeGen/X86/2006-05-11-InstrSched.ll
+++ b/test/CodeGen/X86/2006-05-11-InstrSched.ll
@@ -14,8 +14,8 @@ cond_true: ; preds = %cond_true, %entry
%k.0.0 = bitcast i32 %tmp.10 to i32 ; <i32> [#uses=2]
%tmp31 = add i32 %k.0.0, -1 ; <i32> [#uses=4]
%tmp32 = getelementptr i32* %mpp, i32 %tmp31 ; <i32*> [#uses=1]
- %tmp34 = bitcast i32* %tmp32 to i8* ; <i8*> [#uses=1]
- %tmp = tail call <16 x i8> @llvm.x86.sse2.loadu.dq( i8* %tmp34 ) ; <<16 x i8>> [#uses=1]
+ %tmp34 = bitcast i32* %tmp32 to <16 x i8>* ; <<16 x i8>*> [#uses=1]
+ %tmp = load <16 x i8>* %tmp34, align 1
%tmp42 = getelementptr i32* %tpmm, i32 %tmp31 ; <i32*> [#uses=1]
%tmp42.upgrd.1 = bitcast i32* %tmp42 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
%tmp46 = load <4 x i32>* %tmp42.upgrd.1 ; <<4 x i32>> [#uses=1]
@@ -23,8 +23,8 @@ cond_true: ; preds = %cond_true, %entry
%tmp55 = add <4 x i32> %tmp54, %tmp46 ; <<4 x i32>> [#uses=2]
%tmp55.upgrd.2 = bitcast <4 x i32> %tmp55 to <2 x i64> ; <<2 x i64>> [#uses=1]
%tmp62 = getelementptr i32* %ip, i32 %tmp31 ; <i32*> [#uses=1]
- %tmp65 = bitcast i32* %tmp62 to i8* ; <i8*> [#uses=1]
- %tmp66 = tail call <16 x i8> @llvm.x86.sse2.loadu.dq( i8* %tmp65 ) ; <<16 x i8>> [#uses=1]
+ %tmp65 = bitcast i32* %tmp62 to <16 x i8>* ; <<16 x i8>*> [#uses=1]
+ %tmp66 = load <16 x i8>* %tmp65, align 1
%tmp73 = getelementptr i32* %tpim, i32 %tmp31 ; <i32*> [#uses=1]
%tmp73.upgrd.3 = bitcast i32* %tmp73 to <4 x i32>* ; <<4 x i32>*> [#uses=1]
%tmp77 = load <4 x i32>* %tmp73.upgrd.3 ; <<4 x i32>> [#uses=1]
@@ -50,6 +50,4 @@ return: ; preds = %cond_true, %entry
ret void
}
-declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*)
-
declare <4 x i32> @llvm.x86.sse2.pcmpgt.d(<4 x i32>, <4 x i32>)