author    Nadav Rotem <nrotem@apple.com>  2013-01-19 08:38:41 +0000
committer Nadav Rotem <nrotem@apple.com>  2013-01-19 08:38:41 +0000
commit    ba9586544164e69754039a25cb0ef7907d27382d (patch)
tree      98ebe27964ae162fa9c5dd28f27e7bfbdb4182ad /test/CodeGen/X86
parent    cfcab21e4d0e4d7444b147898d6aed1348df3043 (diff)
On Sandy Bridge, split unaligned 256-bit stores into two xmm-sized stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@172894 91177308-0d34-0410-b5e6-96231b3b80d8
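
In IR terms, the change targets 256-bit stores whose alignment is below 32 bytes. A minimal sketch of the before/after codegen (illustrative only, not taken from this commit; assumes llc with -mcpu=corei7-avx, i.e. Sandy Bridge):

; Sketch: a 16-byte-aligned <8 x float> store is "unaligned" for a 32-byte ymm access.
define void @store256(<8 x float> %v, <8 x float>* %p) nounwind {
  store <8 x float> %v, <8 x float>* %p, align 16
  ret void
}
; Before this patch, a single unaligned 256-bit move, e.g.:
;   vmovups %ymm0, (%rdi)
; After this patch, on Sandy Bridge the value is split and stored in two halves
; (registers are illustrative; with 16-byte alignment each half can use vmovaps):
;   vextractf128 $1, %ymm0, %xmm1
;   vmovaps %xmm0, (%rdi)
;   vmovaps %xmm1, 16(%rdi)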
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--  test/CodeGen/X86/2012-01-11-split-cv.ll    |  2
-rw-r--r--  test/CodeGen/X86/MergeConsecutiveStores.ll |  2
-rw-r--r--  test/CodeGen/X86/avx-load-store.ll         | 11
-rwxr-xr-x  test/CodeGen/X86/avx-sext.ll               | 12
-rw-r--r--  test/CodeGen/X86/fp-load-trunc.ll          |  4
-rw-r--r--  test/CodeGen/X86/sandybridge-loads.ll      | 24
-rw-r--r--  test/CodeGen/X86/v8i1-masks.ll             |  8
-rw-r--r--  test/CodeGen/X86/vec_fpext.ll              |  2
8 files changed, 38 insertions(+), 27 deletions(-)
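
For reference, these CodeGen tests are driven by llc and verified with FileCheck; a typical RUN line has roughly this shape (illustrative only; the exact triple and flags vary per file):

; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7-avx | FileCheck %s
; -mcpu=corei7-avx selects the Sandy Bridge model whose store splitting is exercised here.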
diff --git a/test/CodeGen/X86/2012-01-11-split-cv.ll b/test/CodeGen/X86/2012-01-11-split-cv.ll
index 6b90072919..7e914984fe 100644
--- a/test/CodeGen/X86/2012-01-11-split-cv.ll
+++ b/test/CodeGen/X86/2012-01-11-split-cv.ll
@@ -2,7 +2,7 @@
;CHECK: add18i16
define void @add18i16(<18 x i16>* nocapture sret %ret, <18 x i16>* %bp) nounwind {
-;CHECK: vmovups
+;CHECK: vmovaps
%b = load <18 x i16>* %bp, align 16
%x = add <18 x i16> zeroinitializer, %b
store <18 x i16> %x, <18 x i16>* %ret, align 16
diff --git a/test/CodeGen/X86/MergeConsecutiveStores.ll b/test/CodeGen/X86/MergeConsecutiveStores.ll
index 64825bac97..52deadc792 100644
--- a/test/CodeGen/X86/MergeConsecutiveStores.ll
+++ b/test/CodeGen/X86/MergeConsecutiveStores.ll
@@ -42,7 +42,7 @@ define void @merge_const_store(i32 %count, %struct.A* nocapture %p) nounwind uwt
; Move the constants using a single vector store.
; CHECK: merge_const_store_vec
-; CHECK: vmovups %ymm0, (%rsi)
+; CHECK: vmovups
; CHECK: ret
define void @merge_const_store_vec(i32 %count, %struct.B* nocapture %p) nounwind uwtable noinline ssp {
%1 = icmp sgt i32 %count, 0
diff --git a/test/CodeGen/X86/avx-load-store.ll b/test/CodeGen/X86/avx-load-store.ll
index c9fc66a8a7..77a7c4f945 100644
--- a/test/CodeGen/X86/avx-load-store.ll
+++ b/test/CodeGen/X86/avx-load-store.ll
@@ -53,19 +53,24 @@ define void @storev16i16(<16 x i16> %a) nounwind {
unreachable
}
-; CHECK: vmovups %ymm
+; CHECK: storev16i16_01
+; CHECK: vextractf128
+; CHECK: vmovaps %xmm
define void @storev16i16_01(<16 x i16> %a) nounwind {
store <16 x i16> %a, <16 x i16>* undef, align 4
unreachable
}
+; CHECK: storev32i8
; CHECK: vmovaps %ymm
define void @storev32i8(<32 x i8> %a) nounwind {
store <32 x i8> %a, <32 x i8>* undef, align 32
unreachable
}
-; CHECK: vmovups %ymm
+; CHECK: storev32i8_01
+; CHECK: vextractf128
+; CHECK: vmovups %xmm
define void @storev32i8_01(<32 x i8> %a) nounwind {
store <32 x i8> %a, <32 x i8>* undef, align 4
unreachable
@@ -76,7 +81,7 @@ define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK: _double_save
; CHECK-NOT: vinsertf128 $1
; CHECK-NOT: vinsertf128 $0
-; CHECK: vmovaps %xmm
+; CHECK: vmovups %xmm
; CHECK: vmovaps %xmm
define void @double_save(<4 x i32> %A, <4 x i32> %B, <8 x i32>* %P) nounwind ssp {
entry:
diff --git a/test/CodeGen/X86/avx-sext.ll b/test/CodeGen/X86/avx-sext.ll
index 5201575f12..adee9bbe24 100755
--- a/test/CodeGen/X86/avx-sext.ll
+++ b/test/CodeGen/X86/avx-sext.ll
@@ -186,18 +186,6 @@ define void @sext_4(<4 x i16>* %inbuf, <4 x i64>* %outbuf) {
ret void
}
-; AVX: sext_5
-; AVX: vpmovsxbw
-; AVX: vpmovsxwd
-; AVX: vpmovsxwd
-; AVX: vpmovsxdq
-; AVX: ret
-define void @sext_5(<8 x i8>* %inbuf, <8 x i64>* %outbuf) {
- %v0 = load <8 x i8>* %inbuf
- %r = sext <8 x i8> %v0 to <8 x i64>
- store <8 x i64> %r, <8 x i64>* %outbuf
- ret void
-}
; AVX: sext_6
; AVX: vpmovsxbw
; AVX: vpmovsxwd
diff --git a/test/CodeGen/X86/fp-load-trunc.ll b/test/CodeGen/X86/fp-load-trunc.ll
index 2ae65c97d9..a973befdaf 100644
--- a/test/CodeGen/X86/fp-load-trunc.ll
+++ b/test/CodeGen/X86/fp-load-trunc.ll
@@ -49,8 +49,8 @@ define <8 x float> @test4(<8 x double>* %p) nounwind {
; CHECK: movlhps
; CHECK: ret
; AVX: test4
-; AVX: vcvtpd2psy {{[0-9]*}}(%{{.*}})
-; AVX: vcvtpd2psy {{[0-9]*}}(%{{.*}})
+; AVX: vcvtpd2psy
+; AVX: vcvtpd2psy
; AVX: vinsertf128
; AVX: ret
%x = load <8 x double>* %p
diff --git a/test/CodeGen/X86/sandybridge-loads.ll b/test/CodeGen/X86/sandybridge-loads.ll
index d85c32eaa7..5a23cf136d 100644
--- a/test/CodeGen/X86/sandybridge-loads.ll
+++ b/test/CodeGen/X86/sandybridge-loads.ll
@@ -3,7 +3,7 @@
;CHECK: wideloads
;CHECK: vmovaps
;CHECK: vinsertf128
-;CHECK: vmovups
+;CHECK: vmovaps
;CHECK-NOT: vinsertf128
;CHECK: ret
@@ -11,11 +11,29 @@ define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
%v0 = load <8 x float>* %a, align 16 ; <---- unaligned!
%v1 = load <8 x float>* %b, align 32 ; <---- aligned!
%m0 = fcmp olt <8 x float> %v1, %v0
- %v2 = load <8 x float>* %c, align 16
+ %v2 = load <8 x float>* %c, align 32 ; <---- aligned!
%m1 = fcmp olt <8 x float> %v2, %v0
%mand = and <8 x i1> %m1, %m0
%r = zext <8 x i1> %mand to <8 x i32>
- store <8 x i32> %r, <8 x i32>* undef, align 16
+ store <8 x i32> %r, <8 x i32>* undef, align 32
+ ret void
+}
+
+; CHECK: widestores
+; loads:
+; CHECK: vmovaps
+; CHECK: vmovaps
+; stores:
+; CHECK: vmovaps
+; CHECK: vextractf128
+; CHECK: vmovaps
+;CHECK: ret
+
+define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
+ %v0 = load <8 x float>* %a, align 32
+ %v1 = load <8 x float>* %b, align 32
+ store <8 x float> %v0, <8 x float>* %b, align 32 ; <--- aligned
+ store <8 x float> %v1, <8 x float>* %a, align 16 ; <--- unaligned
ret void
}
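
The new widestores test above pairs an aligned and an unaligned 256-bit store in one function. The same contrast can be isolated in a minimal standalone test; a sketch assuming a Sandy Bridge target (the function names, RUN line, and CHECK patterns are illustrative, not part of this commit):

; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s

; A 32-byte-aligned 256-bit store stays a single ymm move:
; CHECK: whole_store
; CHECK: vmovaps %ymm
define void @whole_store(<8 x float> %v, <8 x float>* %p) nounwind {
  store <8 x float> %v, <8 x float>* %p, align 32
  ret void
}

; An unaligned 256-bit store is split into two xmm-sized halves:
; CHECK: split_store
; CHECK: vextractf128
; CHECK-NOT: vmovups %ymm
define void @split_store(<8 x float> %v, <8 x float>* %p) nounwind {
  store <8 x float> %v, <8 x float>* %p, align 16
  ret void
}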
diff --git a/test/CodeGen/X86/v8i1-masks.ll b/test/CodeGen/X86/v8i1-masks.ll
index ea231aff5b..8cbfb5d724 100644
--- a/test/CodeGen/X86/v8i1-masks.ll
+++ b/test/CodeGen/X86/v8i1-masks.ll
@@ -6,7 +6,7 @@
;CHECK: vcmpltp
;CHECK: vandps
;CHECK: vandps
-;CHECK: vmovups
+;CHECK: vmovaps
;CHECK: ret
define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
@@ -17,7 +17,7 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
%m1 = fcmp olt <8 x float> %v2, %v0
%mand = and <8 x i1> %m1, %m0
%r = zext <8 x i1> %mand to <8 x i32>
- store <8 x i32> %r, <8 x i32>* undef, align 16
+ store <8 x i32> %r, <8 x i32>* undef, align 32
ret void
}
@@ -25,7 +25,7 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
;CHECK: vcmpltps
;CHECK: vxorps
;CHECK: vandps
-;CHECK: vmovups
+;CHECK: vmovaps
;CHECK: ret
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
%v0 = load <8 x float>* %a, align 16
@@ -33,7 +33,7 @@ define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
%m0 = fcmp olt <8 x float> %v1, %v0
%mand = xor <8 x i1> %m0, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
%r = zext <8 x i1> %mand to <8 x i32>
- store <8 x i32> %r, <8 x i32>* undef, align 16
+ store <8 x i32> %r, <8 x i32>* undef, align 32
ret void
}
diff --git a/test/CodeGen/X86/vec_fpext.ll b/test/CodeGen/X86/vec_fpext.ll
index dc0464ff9e..e4a8f46cbc 100644
--- a/test/CodeGen/X86/vec_fpext.ll
+++ b/test/CodeGen/X86/vec_fpext.ll
@@ -29,8 +29,8 @@ entry:
; CHECK: cvtps2pd 8(%{{.+}}), %xmm{{[0-9]+}}
; CHECK: cvtps2pd 16(%{{.+}}), %xmm{{[0-9]+}}
; CHECK: cvtps2pd 24(%{{.+}}), %xmm{{[0-9]+}}
-; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}}
; AVX: vcvtps2pd 16(%{{.+}}), %ymm{{[0-9]+}}
+; AVX: vcvtps2pd (%{{.+}}), %ymm{{[0-9]+}}
%0 = load <8 x float>* %in
%1 = fpext <8 x float> %0 to <8 x double>
store <8 x double> %1, <8 x double>* %out, align 1