diff options
author | Jim Grosbach <grosbach@apple.com> | 2014-04-09 23:28:11 +0000 |
---|---|---|
committer | Jim Grosbach <grosbach@apple.com> | 2014-04-09 23:28:11 +0000 |
commit | e9915738beed0d8b4b50dca8c0ed8c76e7d320db (patch) | |
tree | 246222ddaa0805566f60a11dbe446b3b0828efca /test | |
parent | 34d14bb5343753b6418dd2302349497c48db95eb (diff) | |
download | llvm-e9915738beed0d8b4b50dca8c0ed8c76e7d320db.tar.gz llvm-e9915738beed0d8b4b50dca8c0ed8c76e7d320db.tar.bz2 llvm-e9915738beed0d8b4b50dca8c0ed8c76e7d320db.tar.xz |
SelectionDAG: Don't constant fold target-specific nodes.
FoldConstantArithmetic() only knows how to deal with a few target independent
ISD opcodes. Bail early if it sees a target-specific ISD node. These nodes do
funny things with operand types which may break the assumptions of the code
that follows, and there's no actual folding that can be done anyway. For example,
non-constant 256 bit vector shifts on X86 have a shift-amount operand that's a
128-bit v4i32 vector regardless of what the first operand type is and that breaks
the assumption that the operand types must match.
rdar://16530923
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@205937 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test')
-rw-r--r-- | test/CodeGen/X86/avx2-vector-shifts.ll | 10 |
1 files changed, 10 insertions, 0 deletions
diff --git a/test/CodeGen/X86/avx2-vector-shifts.ll b/test/CodeGen/X86/avx2-vector-shifts.ll index 4ae2905ef2..e355301dd0 100644 --- a/test/CodeGen/X86/avx2-vector-shifts.ll +++ b/test/CodeGen/X86/avx2-vector-shifts.ll @@ -52,6 +52,16 @@ entry: ; CHECK: vpaddd %ymm0, %ymm0, %ymm0 ; CHECK: ret +define <8 x i32> @test_vpslld_var(i32 %shift) { + %amt = insertelement <8 x i32> undef, i32 %shift, i32 0 + %tmp = shl <8 x i32> <i32 192, i32 193, i32 194, i32 195, i32 196, i32 197, i32 198, i32 199>, %amt + ret <8 x i32> %tmp +} + +; CHECK-LABEL: test_vpslld_var: +; CHECK: vpslld %xmm0, %ymm1, %ymm0 +; CHECK: ret + define <8 x i32> @test_slld_3(<8 x i32> %InVec) { entry: %shl = shl <8 x i32> %InVec, <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31, i32 31> |