summaryrefslogtreecommitdiff
path: root/test/CodeGen/X86/vec_zext.ll
diff options
context:
space:
mode:
authorNadav Rotem <nadav.rotem@intel.com>2011-02-20 12:37:50 +0000
committerNadav Rotem <nadav.rotem@intel.com>2011-02-20 12:37:50 +0000
commited9b934f65d82324506f03e2db2834682c7a8914 (patch)
treef9eb5310a278d45abf817090ac930efe6f9c42f2 /test/CodeGen/X86/vec_zext.ll
parent1a4021a2be4a59e9f9010776cb6f72107241aeb5 (diff)
downloadllvm-ed9b934f65d82324506f03e2db2834682c7a8914.tar.gz
llvm-ed9b934f65d82324506f03e2db2834682c7a8914.tar.bz2
llvm-ed9b934f65d82324506f03e2db2834682c7a8914.tar.xz
Fix 9267; Add vector zext support.
The DAGCombiner folds the zext into complex load instructions. This patch prevents this optimization on vectors since none of the supported targets knows how to perform load+vector_zext in one instruction. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@126080 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/X86/vec_zext.ll')
-rw-r--r--test/CodeGen/X86/vec_zext.ll69
1 files changed, 69 insertions, 0 deletions
diff --git a/test/CodeGen/X86/vec_zext.ll b/test/CodeGen/X86/vec_zext.ll
new file mode 100644
index 0000000000..615a50b7af
--- /dev/null
+++ b/test/CodeGen/X86/vec_zext.ll
@@ -0,0 +1,69 @@
+; RUN: llc < %s -march=x86-64
+; PR 9267
+
+; PR9267: two <4 x i16> loads each zero-extended to <4 x i32>, then added
+; and stored. Checks that the DAGCombiner keeps the vector zext as a
+; separate node instead of folding it into the load (no target supports a
+; combined load+vector_zext), so llc must not crash.
+define<4 x i32> @func_16_32() {
+ %F = load <4 x i16>* undef
+ %G = zext <4 x i16> %F to <4 x i32>
+ %H = load <4 x i16>* undef
+ %Y = zext <4 x i16> %H to <4 x i32>
+ %T = add <4 x i32> %Y, %G
+ store <4 x i32>%T , <4 x i32>* undef
+ ret <4 x i32> %T
+}
+
+; Same load+zext pattern, widening i16 elements all the way to i64
+; (<4 x i16> -> <4 x i64>), combined with xor and a store. Exercises a
+; larger extension factor than func_16_32.
+define<4 x i64> @func_16_64() {
+ %F = load <4 x i16>* undef
+ %G = zext <4 x i16> %F to <4 x i64>
+ %H = load <4 x i16>* undef
+ %Y = zext <4 x i16> %H to <4 x i64>
+ %T = xor <4 x i64> %Y, %G
+ store <4 x i64>%T , <4 x i64>* undef
+ ret <4 x i64> %T
+}
+
+; Vector zext from i32 to i64 elements (<4 x i32> -> <4 x i64>) fed by
+; loads and combined with or. No store here; the result is returned
+; directly.
+define<4 x i64> @func_32_64() {
+ %F = load <4 x i32>* undef
+ %G = zext <4 x i32> %F to <4 x i64>
+ %H = load <4 x i32>* undef
+ %Y = zext <4 x i32> %H to <4 x i64>
+ %T = or <4 x i64> %Y, %G
+ ret <4 x i64> %T
+}
+
+; Smallest widening case: <4 x i8> loads zero-extended to <4 x i16>,
+; then added and returned.
+define<4 x i16> @func_8_16() {
+ %F = load <4 x i8>* undef
+ %G = zext <4 x i8> %F to <4 x i16>
+ %H = load <4 x i8>* undef
+ %Y = zext <4 x i8> %H to <4 x i16>
+ %T = add <4 x i16> %Y, %G
+ ret <4 x i16> %T
+}
+
+; Zero-extend <4 x i8> loads to <4 x i32> and subtract; covers a 4x
+; element-width extension combined with a non-commutative op.
+define<4 x i32> @func_8_32() {
+ %F = load <4 x i8>* undef
+ %G = zext <4 x i8> %F to <4 x i32>
+ %H = load <4 x i8>* undef
+ %Y = zext <4 x i8> %H to <4 x i32>
+ %T = sub <4 x i32> %Y, %G
+ ret <4 x i32> %T
+}
+
+; Largest widening factor in this file: <4 x i8> loads zero-extended
+; 8x to <4 x i64>, then added and returned.
+define<4 x i64> @func_8_64() {
+ %F = load <4 x i8>* undef
+ %G = zext <4 x i8> %F to <4 x i64>
+ %H = load <4 x i8>* undef
+ %Y = zext <4 x i8> %H to <4 x i64>
+ %T = add <4 x i64> %Y, %G
+ ret <4 x i64> %T
+}
+
+; zext of a constant vector (no load involved): <4 x i16> immediate
+; widened to <4 x i32>. Checks the constant-operand path of vector zext.
+define<4 x i32> @const_16_32() {
+ %G = zext <4 x i16> <i16 0, i16 3, i16 8, i16 7> to <4 x i32>
+ ret <4 x i32> %G
+}
+
+; Same constant-vector zext, widening the i16 elements to i64 instead.
+define<4 x i64> @const_16_64() {
+ %G = zext <4 x i16> <i16 0, i16 3, i16 8, i16 7> to <4 x i64>
+ ret <4 x i64> %G
+}
+