diff options
author | Chandler Carruth <chandlerc@gmail.com> | 2014-01-20 08:18:01 +0000 |
---|---|---|
committer | Chandler Carruth <chandlerc@gmail.com> | 2014-01-20 08:18:01 +0000 |
commit | ce30a8106d7d33af9c18518c11e808eaeebc2cce (patch) | |
tree | ac7f59da1d9a8c228cbbf9ffef17cb626e80bb53 | |
parent | d15717170fbaaa24dacf2afbd8c6e9d8da4e8fa3 (diff) | |
download | llvm-ce30a8106d7d33af9c18518c11e808eaeebc2cce.tar.gz llvm-ce30a8106d7d33af9c18518c11e808eaeebc2cce.tar.bz2 llvm-ce30a8106d7d33af9c18518c11e808eaeebc2cce.tar.xz |
Revert r199628: "[AArch64 NEON] Fix a bug caused by undef lane when generating VEXT."
This patch fails the newly added regression tests.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@199631 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/Target/AArch64/AArch64ISelLowering.cpp | 36 | ||||
-rw-r--r-- | test/CodeGen/AArch64/neon-extract.ll | 32 |
2 files changed, 15 insertions, 53 deletions
diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 27277c47f3..e98fbe1d3a 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -4654,28 +4654,22 @@ AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, // it into NEON_VEXTRACT. if (V1EltNum == Length) { // Check if the shuffle mask is sequential. - int SkipUndef = 0; - while (ShuffleMask[SkipUndef] == -1) { - SkipUndef++; - } - int CurMask = ShuffleMask[SkipUndef]; - if (CurMask >= SkipUndef) { - bool IsSequential = true; - for (int I = SkipUndef; I < Length; ++I) { - if (ShuffleMask[I] != -1 && ShuffleMask[I] != CurMask) { - IsSequential = false; - break; - } - CurMask++; - } - if (IsSequential) { - assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect"); - unsigned VecSize = EltSize * V1EltNum; - unsigned Index = (EltSize / 8) * (ShuffleMask[SkipUndef] - SkipUndef); - if (VecSize == 64 || VecSize == 128) - return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2, - DAG.getConstant(Index, MVT::i64)); + bool IsSequential = true; + int CurMask = ShuffleMask[0]; + for (int I = 0; I < Length; ++I) { + if (ShuffleMask[I] != CurMask) { + IsSequential = false; + break; } + CurMask++; + } + if (IsSequential) { + assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect"); + unsigned VecSize = EltSize * V1EltNum; + unsigned Index = (EltSize/8) * ShuffleMask[0]; + if (VecSize == 64 || VecSize == 128) + return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2, + DAG.getConstant(Index, MVT::i64)); } } diff --git a/test/CodeGen/AArch64/neon-extract.ll b/test/CodeGen/AArch64/neon-extract.ll index ce1031e581..5c52cd3067 100644 --- a/test/CodeGen/AArch64/neon-extract.ll +++ b/test/CodeGen/AArch64/neon-extract.ll @@ -188,35 +188,3 @@ entry: %vext = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10> ret <8 x i16> %vext } - -define 
<8 x i8> @test_undef_vext_s8(<8 x i8> %a) { -; CHECK: test_undef_vext_s8: -; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2 -entry: - %vext = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 10, i32 10, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9> - ret <8 x i8> %vext -} - -define <16 x i8> @test_undef_vextq_s8(<16 x i8> %a) { -; CHECK: test_undef_vextq_s8: -; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6 -entry: - %vext = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 20, i32 20, i32 20, i32 20, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 20, i32 20, i32 20, i32 20, i32 20> - ret <16 x i8> %vext -} - -define <4 x i16> @test_undef_vext_s16(<4 x i16> %a) { -; CHECK: test_undef_vext_s16: -; CHECK: ext {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, #0x2 -entry: - %vext = shufflevector <4 x i16> %a, <4 x i16> undef, <4 x i32> <i32 4, i32 2, i32 3, i32 4> - ret <4 x i16> %vext -} - -define <8 x i16> @test_undef_vextq_s16(<8 x i16> %a) { -; CHECK: test_undef_vextq_s16: -; CHECK: ext {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, #0x6 -entry: - %vext = shufflevector <8 x i16> %a, <8 x i16> undef, <8 x i32> <i32 10, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10> - ret <8 x i16> %vext -} |