summaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorAndrea Di Biagio <Andrea_DiBiagio@sn.scee.net>2014-01-27 18:45:30 +0000
committerAndrea Di Biagio <Andrea_DiBiagio@sn.scee.net>2014-01-27 18:45:30 +0000
commite9c0b5aba6eefdac099dfba0a43bd4cac89fb6ff (patch)
treee24706674bfac37b502028df83680a667d69b6c3 /test
parent0583b2c08ef5ddfa8b38370f09116295d4c45505 (diff)
downloadllvm-e9c0b5aba6eefdac099dfba0a43bd4cac89fb6ff.tar.gz
llvm-e9c0b5aba6eefdac099dfba0a43bd4cac89fb6ff.tar.bz2
llvm-e9c0b5aba6eefdac099dfba0a43bd4cac89fb6ff.tar.xz
[DAGCombiner] Teach how to fold sext/aext/zext of constant build vectors.
This patch teaches the DAGCombiner how to fold a sext/aext/zext dag node when the operand in input is a build vector of constants (or UNDEFs). The inability to fold a sext/zext of a constant build_vector was the root cause of some codegen bugs affecting vselect expansion on x86-64 with AVX support. Before this change, the DAGCombiner only knew how to fold a sext/zext/aext of a ConstantSDNode. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200234 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r--test/CodeGen/Mips/msa/compare_float.ll16
-rw-r--r--test/CodeGen/X86/avx-blend.ll13
-rw-r--r--test/CodeGen/X86/fold-vector-sext-zext.ll291
3 files changed, 308 insertions, 12 deletions
diff --git a/test/CodeGen/Mips/msa/compare_float.ll b/test/CodeGen/Mips/msa/compare_float.ll
index 2fc61f89c7..f5e8d9d9d6 100644
--- a/test/CodeGen/Mips/msa/compare_float.ll
+++ b/test/CodeGen/Mips/msa/compare_float.ll
@@ -32,12 +32,9 @@ define void @false_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) noun
store <2 x i64> %4, <2 x i64>* %c
ret void
- ; FIXME: This code is correct, but poor. Ideally it would be similar to
- ; the code in @false_v4f32
+ ; (setcc $a, $b, SETFALSE) is always folded
; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], 0
- ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
- ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
- ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; CHECK-DAG: st.w [[R1]], 0($4)
; CHECK: .size false_v2f64
}
@@ -509,12 +506,9 @@ define void @true_v2f64(<2 x i64>* %c, <2 x double>* %a, <2 x double>* %b) nounw
store <2 x i64> %4, <2 x i64>* %c
ret void
- ; FIXME: This code is correct, but poor. Ideally it would be similar to
- ; the code in @true_v4f32
- ; CHECK-DAG: ldi.d [[R1:\$w[0-9]+]], 1
- ; CHECK-DAG: slli.d [[R3:\$w[0-9]+]], [[R1]], 63
- ; CHECK-DAG: srai.d [[R4:\$w[0-9]+]], [[R3]], 63
- ; CHECK-DAG: st.d [[R4]], 0($4)
+ ; (setcc $a, $b, SETTRUE) is always folded.
+ ; CHECK-DAG: ldi.b [[R1:\$w[0-9]+]], -1
+ ; CHECK-DAG: st.w [[R1]], 0($4)
; CHECK: .size true_v2f64
}
diff --git a/test/CodeGen/X86/avx-blend.ll b/test/CodeGen/X86/avx-blend.ll
index e9bfce663f..5fcd5ff5f4 100644
--- a/test/CodeGen/X86/avx-blend.ll
+++ b/test/CodeGen/X86/avx-blend.ll
@@ -51,6 +51,7 @@ define <16 x i8> @vsel_i8(<16 x i8> %v1, <16 x i8> %v2) {
;CHECK-LABEL: vsel_float8:
+;CHECK-NOT: vinsertf128
;CHECK: vblendvps
;CHECK: ret
define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
@@ -59,8 +60,9 @@ define <8 x float> @vsel_float8(<8 x float> %v1, <8 x float> %v2) {
}
;CHECK-LABEL: vsel_i328:
+;CHECK-NOT: vinsertf128
;CHECK: vblendvps
-;CHECK: ret
+;CHECK-NEXT: ret
define <8 x i32> @vsel_i328(<8 x i32> %v1, <8 x i32> %v2) {
%vsel = select <8 x i1> <i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false>, <8 x i32> %v1, <8 x i32> %v2
ret <8 x i32> %vsel
@@ -82,6 +84,15 @@ define <8 x i64> @vsel_i648(<8 x i64> %v1, <8 x i64> %v2) {
ret <8 x i64> %vsel
}
+;CHECK-LABEL: vsel_double4:
+;CHECK-NOT: vinsertf128
+;CHECK: vblendvpd
+;CHECK-NEXT: ret
+define <4 x double> @vsel_double4(<4 x double> %v1, <4 x double> %v2) {
+ %vsel = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x double> %v1, <4 x double> %v2
+ ret <4 x double> %vsel
+}
+
;; TEST blend + compares
; CHECK: testa
define <2 x double> @testa(<2 x double> %x, <2 x double> %y) {
diff --git a/test/CodeGen/X86/fold-vector-sext-zext.ll b/test/CodeGen/X86/fold-vector-sext-zext.ll
new file mode 100644
index 0000000000..aeaab44790
--- /dev/null
+++ b/test/CodeGen/X86/fold-vector-sext-zext.ll
@@ -0,0 +1,291 @@
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=corei7-avx -mattr=+avx | FileCheck %s
+
+; Verify that the backend correctly folds a sign/zero extend of a vector whose
+; elements are all constant values or UNDEFs.
+; The backend should be able to optimize each of the test functions below into
+; a simple load of the result from the constant pool, because the resulting
+; vector is known at compile time.
+
+
+define <4 x i16> @test1() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test1
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i16> @test2() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test2
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test3() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test3
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test4() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test4
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+
+define <4 x i64> @test5() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test5
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i64> @test6() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = sext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test6
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test7() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test7
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test8() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test8
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test9() {
+ %1 = insertelement <8 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 undef, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 undef, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test9
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test10() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 undef, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 undef, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 undef, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 undef, i32 7
+ %9 = sext <8 x i8> %4 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test10
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+
+define <4 x i16> @test11() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test11
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test12() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test12
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i64> @test13() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test13
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i16> @test14() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <4 x i8> %3, i8 -3, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i16>
+ ret <4 x i16> %5
+}
+; CHECK-LABEL: test14
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i32> @test15() {
+ %1 = insertelement <4 x i8> undef, i8 0, i32 0
+ %2 = insertelement <4 x i8> %1, i8 undef, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 undef, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i32>
+ ret <4 x i32> %5
+}
+; CHECK-LABEL: test15
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <4 x i64> @test16() {
+ %1 = insertelement <4 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <4 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <4 x i8> %2, i8 2, i32 2
+ %4 = insertelement <4 x i8> %3, i8 undef, i32 3
+ %5 = zext <4 x i8> %4 to <4 x i64>
+ ret <4 x i64> %5
+}
+; CHECK-LABEL: test16
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test17() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test17
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test18() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test18
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i16> @test19() {
+ %1 = insertelement <8 x i8> undef, i8 undef, i32 0
+ %2 = insertelement <8 x i8> %1, i8 -1, i32 1
+ %3 = insertelement <8 x i8> %2, i8 undef, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 undef, i32 4
+ %6 = insertelement <8 x i8> %5, i8 -5, i32 5
+ %7 = insertelement <8 x i8> %6, i8 undef, i32 6
+ %8 = insertelement <8 x i8> %7, i8 -7, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i16>
+ ret <8 x i16> %9
+}
+; CHECK-LABEL: test19
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+
+define <8 x i32> @test20() {
+ %1 = insertelement <8 x i8> undef, i8 0, i32 0
+ %2 = insertelement <8 x i8> %1, i8 undef, i32 1
+ %3 = insertelement <8 x i8> %2, i8 2, i32 2
+ %4 = insertelement <8 x i8> %3, i8 -3, i32 3
+ %5 = insertelement <8 x i8> %4, i8 4, i32 4
+ %6 = insertelement <8 x i8> %5, i8 undef, i32 5
+ %7 = insertelement <8 x i8> %6, i8 6, i32 6
+ %8 = insertelement <8 x i8> %7, i8 undef, i32 7
+ %9 = zext <8 x i8> %8 to <8 x i32>
+ ret <8 x i32> %9
+}
+; CHECK-LABEL: test20
+; CHECK-NOT: vinsertf128
+; CHECK: vmovaps
+; CHECK-NEXT: ret
+