summaryrefslogtreecommitdiff
path: root/test/CodeGen/X86/select-with-and-or.ll
diff options
context:
space:
mode:
authorMichael Liao <michael.liao@intel.com>2013-04-11 05:15:54 +0000
committerMichael Liao <michael.liao@intel.com>2013-04-11 05:15:54 +0000
commitbf53841cfe3c341ebc0fca102d641c2018855254 (patch)
tree0288e1038ed3afd3eadc5ecf44b98d8dec9e63a3 /test/CodeGen/X86/select-with-and-or.ll
parent02d2e612521954b5ff7c1ba6fd53e36bc51e1c48 (diff)
downloadllvm-bf53841cfe3c341ebc0fca102d641c2018855254.tar.gz
llvm-bf53841cfe3c341ebc0fca102d641c2018855254.tar.bz2
llvm-bf53841cfe3c341ebc0fca102d641c2018855254.tar.xz
Optimize vector select from all 0s or all 1s
As packed comparisons in AVX/SSE produce all 0s or all 1s in each SIMD lane, a vector select could be simplified to AND/OR or removed if one or both of the values being selected are all 0s or all 1s. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@179267 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86/select-with-and-or.ll')
-rw-r--r--test/CodeGen/X86/select-with-and-or.ll72
1 files changed, 72 insertions, 0 deletions
diff --git a/test/CodeGen/X86/select-with-and-or.ll b/test/CodeGen/X86/select-with-and-or.ll
new file mode 100644
index 0000000000..1ccf30bf20
--- /dev/null
+++ b/test/CodeGen/X86/select-with-and-or.ll
@@ -0,0 +1,72 @@
+; RUN: opt < %s -O3 | \
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+define <4 x i32> @test1(<4 x float> %a, <4 x float> %b, <4 x i32> %c) { ; select(mask, c, 0) should lower to a single AND
+  %f = fcmp ult <4 x float> %a, %b ; per-lane all-0s/all-1s mask: a < b (unordered-or-less)
+  %r = select <4 x i1> %f, <4 x i32> %c, <4 x i32> zeroinitializer ; false arm is all zeros -> foldable to mask & c
+  ret <4 x i32> %r
+; CHECK: test1
+; CHECK: cmpnle
+; CHECK-NEXT: andps
+; CHECK: ret
+}
+
+define <4 x i32> @test2(<4 x float> %a, <4 x float> %b, <4 x i32> %c) { ; select(mask, -1, c) should lower to a single OR
+  %f = fcmp ult <4 x float> %a, %b ; per-lane all-0s/all-1s mask: a < b (unordered-or-less)
+  %r = select <4 x i1> %f, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> %c ; true arm is all ones -> foldable to mask | c
+  ret <4 x i32> %r
+; CHECK: test2
+; CHECK: cmpnle
+; CHECK-NEXT: orps
+; CHECK: ret
+}
+
+define <4 x i32> @test3(<4 x float> %a, <4 x float> %b, <4 x i32> %c) { ; select(mask, 0, c): AND with the inverted compare (cmple instead of cmpnle)
+  %f = fcmp ult <4 x float> %a, %b ; per-lane all-0s/all-1s mask: a < b (unordered-or-less)
+  %r = select <4 x i1> %f, <4 x i32> zeroinitializer, <4 x i32> %c ; true arm is all zeros -> ~mask & c
+  ret <4 x i32> %r
+; CHECK: test3
+; CHECK: cmple
+; CHECK-NEXT: andps
+; CHECK: ret
+}
+
+define <4 x i32> @test4(<4 x float> %a, <4 x float> %b, <4 x i32> %c) { ; select(mask, c, -1): OR with the inverted compare (cmple instead of cmpnle)
+  %f = fcmp ult <4 x float> %a, %b ; per-lane all-0s/all-1s mask: a < b (unordered-or-less)
+  %r = select <4 x i1> %f, <4 x i32> %c, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> ; false arm is all ones -> ~mask | c
+  ret <4 x i32> %r
+; CHECK: test4
+; CHECK: cmple
+; CHECK-NEXT: orps
+; CHECK: ret
+}
+
+define <4 x i32> @test5(<4 x float> %a, <4 x float> %b, <4 x i32> %c) { ; select(mask, -1, 0): the select disappears entirely, the mask IS the result
+  %f = fcmp ult <4 x float> %a, %b ; per-lane all-0s/all-1s mask: a < b (unordered-or-less)
+  %r = select <4 x i1> %f, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> zeroinitializer ; selecting between all-1s/all-0s -> just the mask
+  ret <4 x i32> %r
+; CHECK: test5
+; CHECK: cmpnle
+; CHECK-NEXT: ret
+}
+
+define <4 x i32> @test6(<4 x float> %a, <4 x float> %b, <4 x i32> %c) { ; select(mask, 0, -1): result is the inverted mask, folded into the compare predicate
+  %f = fcmp ult <4 x float> %a, %b ; per-lane all-0s/all-1s mask: a < b (unordered-or-less)
+  %r = select <4 x i1> %f, <4 x i32> zeroinitializer, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1> ; arms swapped vs test5 -> invert: cmple, no extra op
+  ret <4 x i32> %r
+; CHECK: test6
+; CHECK: cmple
+; CHECK-NEXT: ret
+}
+
+define <4 x i32> @test7(<4 x float> %a, <4 x float> %b, <4 x i32>* %p) { ; hand-written sext+and form of test1: must still become cmp+andps
+  %f = fcmp ult <4 x float> %a, %b ; per-lane i1 compare result: a < b (unordered-or-less)
+  %s = sext <4 x i1> %f to <4 x i32> ; widen i1 lanes to all-0s/all-1s i32 mask
+  %l = load <4 x i32>* %p ; pre-LLVM-3.7 typed-pointer load syntax (file dates from 2013)
+  %r = and <4 x i32> %l, %s ; explicit mask & value, equivalent to select(mask, *p, 0)
+  ret <4 x i32> %r
+; CHECK: test7
+; CHECK: cmpnle
+; CHECK-NEXT: andps
+; CHECK: ret
+}