author    Nadav Rotem <nadav.rotem@intel.com>    2012-07-10 13:25:08 +0000
committer Nadav Rotem <nadav.rotem@intel.com>    2012-07-10 13:25:08 +0000
commit    2dd83eb1ab3b7d7cdef2e244317caefd78be8a45 (patch)
tree      4abb6576dcba0e4b8311743bed378be4ac8e9659 /test
parent    fae96f17b4b022fccd94a143698112a17d8ddf05 (diff)
Improve the loading of load-anyext vectors by allowing the codegen to load
multiple scalars and insert them into a vector. Next, we shuffle the elements
into the correct places, as before.

Also fix a small DAGCombine bug in SimplifyBinOpWithSameOpcodeHands, where the
migration of bitcasts happened too late in the SelectionDAG process.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@159991 91177308-0d34-0410-b5e6-96231b3b80d8
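For context, here is a minimal sketch of the kind of IR this change targets (the
function name and types are illustrative, not taken from the commit): a narrow
vector load whose elements are extended before use. With this change the codegen
may lower such a load as several scalar loads plus vector inserts, and then
shuffle the lanes into their final positions, rather than widening the memory
access itself.

; Illustrative sketch only (not part of this commit): a <4 x i16> load whose
; elements are zero-extended to i32 before use. The new lowering can emit
; scalar loads and inserts for the narrow load, then shuffle the elements
; into place.
define <4 x i32> @load_anyext_sketch(<4 x i16>* %p) nounwind {
entry:
  %v = load <4 x i16>* %p, align 2
  %w = zext <4 x i16> %v to <4 x i32>
  ret <4 x i32> %w
}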
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/X86/2012-07-10-extload64.ll  13
-rw-r--r--  test/CodeGen/X86/vec_compare-2.ll           3
-rw-r--r--  test/CodeGen/X86/widen_load-0.ll           14
3 files changed, 18 insertions, 12 deletions
diff --git a/test/CodeGen/X86/2012-07-10-extload64.ll b/test/CodeGen/X86/2012-07-10-extload64.ll
new file mode 100644
index 0000000000..3284f5e7e3
--- /dev/null
+++ b/test/CodeGen/X86/2012-07-10-extload64.ll
@@ -0,0 +1,13 @@
+; RUN: llc < %s -march=x86 -mcpu=corei7 -mtriple=i686-pc-win32 | FileCheck %s
+
+; CHECK: load_store
+define void @load_store(<4 x i16>* %in) {
+entry:
+ %A27 = load <4 x i16>* %in, align 4
+ %A28 = add <4 x i16> %A27, %A27
+ store <4 x i16> %A28, <4 x i16>* %in, align 4
+ ret void
+; CHECK: movd
+; CHECK: pinsrd
+; CHECK: ret
+}
diff --git a/test/CodeGen/X86/vec_compare-2.ll b/test/CodeGen/X86/vec_compare-2.ll
index 91777f7aa6..46d6a23554 100644
--- a/test/CodeGen/X86/vec_compare-2.ll
+++ b/test/CodeGen/X86/vec_compare-2.ll
@@ -10,8 +10,7 @@ define void @blackDespeckle_wrapper(i8** %args_list, i64* %gtid, i64 %xend) {
entry:
; CHECK: cfi_def_cfa_offset
; CHECK-NOT: set
-; CHECK: movzwl
-; CHECK: movzwl
+; CHECK: punpcklwd
; CHECK: pshufd
; CHECK: pshufb
%shr.i = ashr <4 x i32> zeroinitializer, <i32 3, i32 3, i32 3, i32 3> ; <<4 x i32>> [#uses=1]
diff --git a/test/CodeGen/X86/widen_load-0.ll b/test/CodeGen/X86/widen_load-0.ll
index 4aeec9136d..d5437281b2 100644
--- a/test/CodeGen/X86/widen_load-0.ll
+++ b/test/CodeGen/X86/widen_load-0.ll
@@ -1,18 +1,12 @@
; RUN: llc < %s -o - -mtriple=x86_64-linux -mcpu=corei7 | FileCheck %s
-; RUN: llc < %s -o - -mtriple=x86_64-win32 -mcpu=corei7 | FileCheck %s -check-prefix=WIN64
; PR4891
; Both loads should happen before either store.
-; CHECK: movd ({{.*}}), {{.*}}
-; CHECK: movd ({{.*}}), {{.*}}
-; CHECK: movd {{.*}}, ({{.*}})
-; CHECK: movd {{.*}}, ({{.*}})
-
-; WIN64: movd ({{.*}}), {{.*}}
-; WIN64: movd ({{.*}}), {{.*}}
-; WIN64: movd {{.*}}, ({{.*}})
-; WIN64: movd {{.*}}, ({{.*}})
+; CHECK: movl ({{.*}}), {{.*}}
+; CHECK: movl ({{.*}}), {{.*}}
+; CHECK: movl {{.*}}, ({{.*}})
+; CHECK: movl {{.*}}, ({{.*}})
define void @short2_int_swap(<2 x i16>* nocapture %b, i32* nocapture %c) nounwind {
entry: