diff options
author | Benjamin Kramer <benny.kra@googlemail.com> | 2013-10-23 19:19:04 +0000 |
---|---|---|
committer | Benjamin Kramer <benny.kra@googlemail.com> | 2013-10-23 19:19:04 +0000 |
commit | 7377cff9e7641c75678fd5c80472942fd7ef869a (patch) | |
tree | 8ff95078dc08a96effbc21e3cb59dc2630005b96 | |
parent | 531f025361555e7a695eb559ec02645c054ee146 (diff) | |
download | llvm-7377cff9e7641c75678fd5c80472942fd7ef869a.tar.gz llvm-7377cff9e7641c75678fd5c80472942fd7ef869a.tar.bz2 llvm-7377cff9e7641c75678fd5c80472942fd7ef869a.tar.xz |
X86: Custom lower zext v16i8 to v16i16.
On Sandy Bridge (PR17654) we now get
vpxor %xmm1, %xmm1, %xmm1
vpunpckhbw %xmm1, %xmm0, %xmm2
vpunpcklbw %xmm1, %xmm0, %xmm0
vinsertf128 $1, %xmm2, %ymm0, %ymm0
On Haswell it's a simple
vpmovzxbw %xmm0, %ymm0
There is a maze of duplicated and dead transforms and patterns in this
area. Remove the dead custom lowering of zext v8i16 to v8i32; that's
already handled by LowerAVXExtend.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@193262 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/Target/X86/X86ISelLowering.cpp | 25 | ||||
-rw-r--r-- | lib/Target/X86/X86InstrSSE.td | 2 | ||||
-rw-r--r-- | test/CodeGen/X86/avx-zext.ll | 12 | ||||
-rw-r--r-- | test/CodeGen/X86/avx2-conversions.ll | 9 |
4 files changed, 29 insertions, 19 deletions
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 7dddb49bee..5f29f4f8ce 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1160,6 +1160,7 @@ void X86TargetLowering::resetOperationActions() { setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); + setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom); setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); @@ -8864,7 +8865,8 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG, // Concat upper and lower parts. // - if (((VT != MVT::v8i32) || (InVT != MVT::v8i16)) && + if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) && + ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) && ((VT != MVT::v4i64) || (InVT != MVT::v4i32))) return SDValue(); @@ -8944,24 +8946,9 @@ static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget, return Res; } - if (!VT.is256BitVector() || !SVT.is128BitVector() || - VT.getVectorNumElements() != SVT.getVectorNumElements()) - return SDValue(); - - assert(Subtarget->hasFp256() && "256-bit vector is observed without AVX!"); - - // AVX2 has better support of integer extending. 
- if (Subtarget->hasInt256()) - return DAG.getNode(X86ISD::VZEXT, DL, VT, In); - - SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); - static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; - SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, - DAG.getVectorShuffle(MVT::v8i16, DL, In, - DAG.getUNDEF(MVT::v8i16), - &Mask[0])); - - return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); + assert(!VT.is256BitVector() || !SVT.is128BitVector() || + VT.getVectorNumElements() != SVT.getVectorNumElements()); + return SDValue(); } SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index bf09191954..004710b67c 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -5596,6 +5596,8 @@ let Predicates = [HasAVX2] in { (VPMOVZXDQYrr VR128:$src)>; def : Pat<(v8i32 (X86vzmovly (v8i16 VR128:$src))), (VPMOVZXWDYrr VR128:$src)>; + def : Pat<(v16i16 (X86vzmovly (v16i8 VR128:$src))), + (VPMOVZXBWYrr VR128:$src)>; } def : Pat<(v4i64 (X86vsmovl (v4i32 VR128:$src))), (VPMOVSXDQYrr VR128:$src)>; diff --git a/test/CodeGen/X86/avx-zext.ll b/test/CodeGen/X86/avx-zext.ll index e2b6c552da..75117463bc 100644 --- a/test/CodeGen/X86/avx-zext.ll +++ b/test/CodeGen/X86/avx-zext.ll @@ -27,3 +27,15 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) { %t = zext <8 x i8> %z to <8 x i32> ret <8 x i32> %t } + +; PR17654 +define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %z) { +; CHECK-LABEL: zext_16i8_to_16i16: +; CHECK: vpxor +; CHECK: vpunpckhbw +; CHECK: vpunpcklbw +; CHECK: vinsertf128 +; CHECK: ret + %t = zext <16 x i8> %z to <16 x i16> + ret <16 x i16> %t +} diff --git a/test/CodeGen/X86/avx2-conversions.ll b/test/CodeGen/X86/avx2-conversions.ll index 3ce08dcc73..0143f18fe2 100644 --- a/test/CodeGen/X86/avx2-conversions.ll +++ b/test/CodeGen/X86/avx2-conversions.ll @@ -63,6 +63,15 @@ define <8 x i32> @zext_8i8_8i32(<8 x i8> %A) nounwind { ret <8 x 
i32>%B } +; CHECK-LABEL: zext_16i8_16i16: +; CHECK: vpmovzxbw +; CHECK-NOT: vinsert +; CHECK: ret +define <16 x i16> @zext_16i8_16i16(<16 x i8> %z) { + %t = zext <16 x i8> %z to <16 x i16> + ret <16 x i16> %t +} + ; CHECK: load_sext_test1 ; CHECK: vpmovsxdq (%r{{[^,]*}}), %ymm{{.*}} ; CHECK: ret |