author     Adhemerval Zanella <azanella@linux.vnet.ibm.com>  2012-11-30 13:05:44 +0000
committer  Adhemerval Zanella <azanella@linux.vnet.ibm.com>  2012-11-30 13:05:44 +0000
commit     375cbe414329ebade5b50cf3648611af58ae9de7 (patch)
tree       bd03bd50956bfa36ed1305fae79fdd8fe0544edb /test/CodeGen/PowerPC/buildvec_canonicalize.ll
parent     98c63d0e1c38942257e12c658f58ddc2a15d75a1 (diff)
This patch fixes the Altivec addend construction for the fused multiply-add
instruction (vmaddfp) to conform with IEEE, ensuring the correct sign of a zero
result when the product is -0.0. The -0.0 vector addend to vmaddfp is generated
by creating a vector with all bits set and then shifting each element left by
31 bits, resulting in a vector of 0x80000000 in each lane (i.e. -0.0 as float).

The 'buildvec_canonicalize.ll' test was adjusted to reflect this change, and
'vec_mul.ll' was complemented with a float vector multiplication test.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@168998 91177308-0d34-0410-b5e6-96231b3b80d8
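For illustration only (none of this code appears in the patch): a minimal scalar C sketch of the bit trick the message describes, assuming IEEE-754 single precision. Shifting an all-ones 32-bit value left by 31 bits leaves only the sign bit set (0x80000000), whose float interpretation is -0.0; the Altivec sequence does the same thing in every 32-bit lane of the vector register.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    uint32_t all_ones = 0xFFFFFFFFu;      /* per-lane result of splatting -1 (vspltisw) */
    uint32_t sign_bit = all_ones << 31;   /* per-lane result of the 31-bit left shift   */
    float neg_zero;
    memcpy(&neg_zero, &sign_bit, sizeof neg_zero);   /* reinterpret the bits as a float */
    printf("0x%08X -> %f\n", (unsigned)sign_bit, (double)neg_zero);  /* 0x80000000 -> -0.000000 */
    return 0;
}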
Diffstat (limited to 'test/CodeGen/PowerPC/buildvec_canonicalize.ll')
-rw-r--r--  test/CodeGen/PowerPC/buildvec_canonicalize.ll  16
1 file changed, 8 insertions, 8 deletions
diff --git a/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/test/CodeGen/PowerPC/buildvec_canonicalize.ll
index 0454c584bc..514ccdd6bd 100644
--- a/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ b/test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -1,10 +1,4 @@
-; There should be exactly one vxor here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN: grep vxor | count 1
-
-; There should be exactly one vsplti here.
-; RUN: llc < %s -march=ppc32 -mcpu=g5 --enable-unsafe-fp-math | \
-; RUN: grep vsplti | count 1
+; RUN: llc < %s -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s
define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
%tmp = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
@@ -15,10 +9,16 @@ define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
store <4 x i32> zeroinitializer, <4 x i32>* %P2
ret void
}
+; The fmul will spill a vspltisw to create a -0.0 vector used as the addend
+; to vmaddfp (so it would be IEEE compliant with zero sign propagation).
+; CHECK: @VXOR
+; CHECK: vsplti
+; CHECK: vxor
define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
store <8 x i16> < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >, <8 x i16>* %P3
ret void
}
-
+; CHECK: @VSPLTI
+; CHECK: vsplti
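As a rough sketch (my own illustration, not code from this commit or from the test), the addend construction described in the test comment above corresponds to the following Altivec intrinsic idiom in C; the function name mul_ieee and the exact intrinsic spelling are assumptions about the source-level equivalent. vec_splat_s32(-1) splats all-ones lanes (vspltisw), vec_sl shifts each lane left by 31 (shift counts are taken modulo 32, so an all-ones shift lane means 31), and vec_madd maps to vmaddfp with the resulting -0.0 lanes as the addend.

#include <altivec.h>

vector float mul_ieee(vector float a, vector float b) {
    vector signed int ones = vec_splat_s32(-1);                       /* vspltisw: all bits set  */
    vector signed int sign = vec_sl(ones, (vector unsigned int)ones); /* vslw: 0x80000000 lanes  */
    vector float neg_zero = (vector float)sign;                       /* reinterpret as -0.0f    */
    return vec_madd(a, b, neg_zero);                                  /* vmaddfp: a*b + (-0.0)   */
}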