author      Filipe Cabecinhas <me@filcab.net>    2014-05-31 00:52:23 +0000
committer   Filipe Cabecinhas <me@filcab.net>    2014-05-31 00:52:23 +0000
commit      c3648ce2dca6d9a0a86ce4d736c24b896a9ad7a6 (patch)
tree        e84bedbdd16ac6bfe7580b1533a336adb98b4e47 /test/CodeGen/X86
parent      41dac4aa87a7eba67c2722f1abf26bb97b3a5f6f (diff)
Make blend tests more specific
Following the lead set by r209324, I'm making these tests match the whole instruction, so we can be sure we're lowering them correctly.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209947 91177308-0d34-0410-b5e6-96231b3b80d8
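The point of matching the whole instruction is that the blend immediate encodes exactly which elements come from each source. As a rough cross-check (not part of the commit, and assuming the usual x86 blend encoding where bit i of the immediate selects element i from the second source), a minimal Python sketch reproduces the expected immediates from the shufflevector masks below:

def blend_imm(shuffle_mask, num_elts):
    # Bit i of the blend immediate is set when element i of the result
    # is taken from the second source, i.e. the shuffle index is >= num_elts.
    imm = 0
    for i, idx in enumerate(shuffle_mask):
        if idx >= num_elts:
            imm |= 1 << i
    return imm

# Masks taken from the tests in this commit:
assert blend_imm([0, 5, 2, 7], 4) == 10                      # vblendps/vpblendd $10
assert blend_imm([0, 9, 10, 11, 12, 13, 6, 15], 8) == 190    # vblendps $190
assert blend_imm([0, 5, 2, 3], 4) == 2                       # vblendpd $2
assert blend_imm([4, 1, 6, 7], 4) == 13                      # vblendpd $13
assert blend_imm([0, 5, 6, 3], 4) == 6                       # blendps $6
assert blend_imm([0, 9, 10, 3, 4, 5, 6, 15], 8) == 134       # pblendw $134

For the 256-bit vpblendw test, the 8-bit immediate is applied per 128-bit lane, so the visible low-lane pattern already determines the expected $134.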
Diffstat (limited to 'test/CodeGen/X86')
-rw-r--r--   test/CodeGen/X86/avx-blend.ll      17
-rw-r--r--   test/CodeGen/X86/avx2-shuffle.ll    4
-rw-r--r--   test/CodeGen/X86/sse41-blend.ll     4
3 files changed, 17 insertions, 8 deletions
diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll
index a66dc549b4..43cdf7edf7 100644
--- a/test/CodeGen/X86/avx-blend.ll
+++ b/test/CodeGen/X86/avx-blend.ll
@@ -162,7 +162,16 @@ declare <4 x double> @llvm.x86.avx.blendv.pd.256(<4 x double>, <4 x double>, <4
;; 4 tests for shufflevectors that optimize to blend + immediate
; CHECK-LABEL: @blend_shufflevector_4xfloat
define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
-; CHECK: vblendps
+; Equivalent select mask is <i1 true, i1 false, i1 true, i1 false>.
+; Big endian representation is 0101 = 5.
+; '1' means takes the first argument, '0' means takes the second argument.
+; This is the opposite of the intel syntax, thus we expect
+; Inverted mask: 1010 = 10.
+; According to the ABI:
+; a is in xmm0 => first argument is xmm0.
+; b is in xmm1 => second argument is xmm1.
+; Result is in xmm0 => destination argument.
+; CHECK: vblendps $10, %xmm1, %xmm0, %xmm0
; CHECK: ret
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
ret <4 x float> %1
@@ -170,7 +179,7 @@ define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b)
; CHECK-LABEL: @blend_shufflevector_8xfloat
define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b) {
-; CHECK: vblendps
+; CHECK: vblendps $190, %ymm1, %ymm0, %ymm0
; CHECK: ret
%1 = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 6, i32 15>
ret <8 x float> %1
@@ -178,7 +187,7 @@ define <8 x float> @blend_shufflevector_8xfloat(<8 x float> %a, <8 x float> %b)
; CHECK-LABEL: @blend_shufflevector_4xdouble
define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double> %b) {
-; CHECK: vblendpd
+; CHECK: vblendpd $2, %ymm1, %ymm0, %ymm0
; CHECK: ret
%1 = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
ret <4 x double> %1
@@ -186,7 +195,7 @@ define <4 x double> @blend_shufflevector_4xdouble(<4 x double> %a, <4 x double>
; CHECK-LABEL: @blend_shufflevector_4xi64
define <4 x i64> @blend_shufflevector_4xi64(<4 x i64> %a, <4 x i64> %b) {
-; CHECK: vblendpd
+; CHECK: vblendpd $13, %ymm1, %ymm0, %ymm0
; CHECK: ret
%1 = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 4, i32 1, i32 6, i32 7>
ret <4 x i64> %1
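In the terms of the comment above, for the 4xfloat case: the select mask <true, false, true, false> written with element 3 first is 0101 = 5, and flipping each bit (the Intel immediate marks the elements taken from the second operand) gives 1010 = 10, which is the $10 being checked. The same inversion yields $190, $2, and $13 for the other three functions in this file.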
diff --git a/test/CodeGen/X86/avx2-shuffle.ll b/test/CodeGen/X86/avx2-shuffle.ll
index 5b2e7a40bd..185b989458 100644
--- a/test/CodeGen/X86/avx2-shuffle.ll
+++ b/test/CodeGen/X86/avx2-shuffle.ll
@@ -62,7 +62,7 @@ define <4 x i64> @blend_test4(<4 x i64> %a, <4 x i64> %b) nounwind alwaysinline
;; 2 tests for shufflevectors that optimize to blend + immediate
; CHECK-LABEL: @blend_test5
-; CHECK: vpblendd
+; CHECK: vpblendd $10, %xmm1, %xmm0, %xmm0
; CHECK: ret
define <4 x i32> @blend_test5(<4 x i32> %a, <4 x i32> %b) {
%1 = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
@@ -70,7 +70,7 @@ define <4 x i32> @blend_test5(<4 x i32> %a, <4 x i32> %b) {
}
; CHECK-LABEL: @blend_test6
-; CHECK: vpblendw
+; CHECK: vpblendw $134, %ymm1, %ymm0, %ymm0
; CHECK: ret
define <16 x i16> @blend_test6(<16 x i16> %a, <16 x i16> %b) {
%1 = shufflevector <16 x i16> %a, <16 x i16> %b, <16 x i32> <i32 0, i32 17, i32 18, i32 3, i32 4, i32 5, i32 6, i32 23,
diff --git a/test/CodeGen/X86/sse41-blend.ll b/test/CodeGen/X86/sse41-blend.ll
index ca106e158e..3a4812119f 100644
--- a/test/CodeGen/X86/sse41-blend.ll
+++ b/test/CodeGen/X86/sse41-blend.ll
@@ -124,7 +124,7 @@ declare <2 x double> @llvm.x86.sse41.blendvpd(<2 x double>, <2 x double>, <2 x d
;; 2 tests for shufflevectors that optimize to blend + immediate
; CHECK-LABEL: @blend_shufflevector_4xfloat
-; CHECK: blendps
+; CHECK: blendps $6, %xmm1, %xmm0
; CHECK: ret
define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b) {
%1 = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -132,7 +132,7 @@ define <4 x float> @blend_shufflevector_4xfloat(<4 x float> %a, <4 x float> %b)
}
; CHECK-LABEL: @blend_shufflevector_8xi16
-; CHECK: pblendw
+; CHECK: pblendw $134, %xmm1, %xmm0
; CHECK: ret
define <8 x i16> @blend_shufflevector_8xi16(<8 x i16> %a, <8 x i16> %b) {
%1 = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 9, i32 10, i32 3, i32 4, i32 5, i32 6, i32 15>