| author | Nadav Rotem <nadav.rotem@intel.com> | 2011-02-24 21:01:34 +0000 |
|---|---|---|
| committer | Nadav Rotem <nadav.rotem@intel.com> | 2011-02-24 21:01:34 +0000 |
| commit | 8c20ec54d98176d31f310e4684d1d7f2ea0639bc (patch) | |
| tree | ac3c7cf68c4c21b8c368b8a758d6a2cf0ceae0f2 /test/CodeGen/X86/vec_anyext.ll | |
| parent | 0412d5b40a3bde02580db6e3050ada0598205716 (diff) | |
Enable support for vector sext and trunc:
- Limit the folding of any_ext and sext into the load operation to scalars.
- Limit the active-bits trunc optimization to scalars.
- Document vector trunc and vector sext in LangRef.

Similar to commit 126080, which enabled zext.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@126424 91177308-0d34-0410-b5e6-96231b3b80d8
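For readers unfamiliar with the casts being enabled: `trunc` and `sext` apply element-wise when given vector operands, mirroring their scalar semantics. A minimal sketch in LLVM IR (hypothetical function names, not part of the commit):

```llvm
; trunc discards the high bits of each lane; sext replicates each lane's
; sign bit into the wider element type.
define <4 x i16> @demo_trunc(<4 x i32> %v) {
  %t = trunc <4 x i32> %v to <4 x i16>
  ret <4 x i16> %t
}

define <4 x i32> @demo_sext(<4 x i16> %v) {
  %s = sext <4 x i16> %v to <4 x i32>
  ret <4 x i32> %s
}
```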
Diffstat (limited to 'test/CodeGen/X86/vec_anyext.ll')
| -rw-r--r-- | test/CodeGen/X86/vec_anyext.ll | 77 |

1 file changed, 77 insertions, 0 deletions
```diff
diff --git a/test/CodeGen/X86/vec_anyext.ll b/test/CodeGen/X86/vec_anyext.ll
new file mode 100644
index 0000000000..d2a4c7f60d
--- /dev/null
+++ b/test/CodeGen/X86/vec_anyext.ll
@@ -0,0 +1,77 @@
+; RUN: llc < %s -march=x86-64
+; PR 9267
+
+define<4 x i16> @func_16_32() {
+  %F = load <4 x i32>* undef
+  %G = trunc <4 x i32> %F to <4 x i16>
+  %H = load <4 x i32>* undef
+  %Y = trunc <4 x i32> %H to <4 x i16>
+  %T = add <4 x i16> %Y, %G
+  store <4 x i16>%T , <4 x i16>* undef
+  ret <4 x i16> %T
+}
+
+define<4 x i16> @func_16_64() {
+  %F = load <4 x i64>* undef
+  %G = trunc <4 x i64> %F to <4 x i16>
+  %H = load <4 x i64>* undef
+  %Y = trunc <4 x i64> %H to <4 x i16>
+  %T = xor <4 x i16> %Y, %G
+  store <4 x i16>%T , <4 x i16>* undef
+  ret <4 x i16> %T
+}
+
+define<4 x i32> @func_32_64() {
+  %F = load <4 x i64>* undef
+  %G = trunc <4 x i64> %F to <4 x i32>
+  %H = load <4 x i64>* undef
+  %Y = trunc <4 x i64> %H to <4 x i32>
+  %T = or <4 x i32> %Y, %G
+  ret <4 x i32> %T
+}
+
+define<4 x i8> @func_8_16() {
+  %F = load <4 x i16>* undef
+  %G = trunc <4 x i16> %F to <4 x i8>
+  %H = load <4 x i16>* undef
+  %Y = trunc <4 x i16> %H to <4 x i8>
+  %T = add <4 x i8> %Y, %G
+  ret <4 x i8> %T
+}
+
+define<4 x i8> @func_8_32() {
+  %F = load <4 x i32>* undef
+  %G = trunc <4 x i32> %F to <4 x i8>
+  %H = load <4 x i32>* undef
+  %Y = trunc <4 x i32> %H to <4 x i8>
+  %T = sub <4 x i8> %Y, %G
+  ret <4 x i8> %T
+}
+
+define<4 x i8> @func_8_64() {
+  %F = load <4 x i64>* undef
+  %G = trunc <4 x i64> %F to <4 x i8>
+  %H = load <4 x i64>* undef
+  %Y = trunc <4 x i64> %H to <4 x i8>
+  %T = add <4 x i8> %Y, %G
+  ret <4 x i8> %T
+}
+
+define<4 x i16> @const_16_32() {
+  %G = trunc <4 x i32> <i32 0, i32 3, i32 8, i32 7> to <4 x i16>
+  ret <4 x i16> %G
+}
+
+define<4 x i16> @const_16_64() {
+  %G = trunc <4 x i64> <i64 0, i64 3, i64 8, i64 7> to <4 x i16>
+  ret <4 x i16> %G
+}
+
+define void @bugOnTruncBitwidthReduce() nounwind {
+meh:
+  %0 = xor <4 x i64> zeroinitializer, zeroinitializer
+  %1 = trunc <4 x i64> %0 to <4 x i32>
+  %2 = lshr <4 x i32> %1, <i32 18, i32 18, i32 18, i32 18>
+  %3 = xor <4 x i32> %2, %1
+  ret void
+}
```
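The final test, `@bugOnTruncBitwidthReduce`, pins down the vector case of the active-bits optimization mentioned in the message. For contrast, a minimal scalar sketch of the same shape (hypothetical function name, not from the commit), where the combiner remains free to operate:

```llvm
; Scalar analogue of the guarded pattern: a trunc feeding a right shift.
; For scalars the combiner may still narrow this computation; the commit
; limits that rewrite to scalars rather than applying it to vectors.
define i32 @trunc_then_shift(i64 %x) {
  %t = trunc i64 %x to i32
  %s = lshr i32 %t, 18
  ret i32 %s
}
```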