summaryrefslogtreecommitdiff
path: root/lib/Target/PowerPC/PPCISelLowering.h
diff options
context:
space:
mode:
author	Bill Schmidt <wschmidt@linux.vnet.ibm.com>	2013-02-20 15:50:31 +0000
committer	Bill Schmidt <wschmidt@linux.vnet.ibm.com>	2013-02-20 15:50:31 +0000
commitb34c79e4bbe5accbb54d0291e8bef5d2bfef32e4 (patch)
treeb1fd629ad95c3cf90e10d668e4d94897d89bfbc5 /lib/Target/PowerPC/PPCISelLowering.h
parent94e9d0d491f76aaab13f8bc7764ad73de11fa2bf (diff)
downloadllvm-b34c79e4bbe5accbb54d0291e8bef5d2bfef32e4.tar.gz
llvm-b34c79e4bbe5accbb54d0291e8bef5d2bfef32e4.tar.bz2
llvm-b34c79e4bbe5accbb54d0291e8bef5d2bfef32e4.tar.xz
Fix PR15155: lost vadd/vsplat optimization.
During lowering of a BUILD_VECTOR, we look for opportunities to use a vector splat. When the splatted value fits in 5 signed bits, a single splat does the job. When it doesn't fit in 5 bits but does fit in 6, and is an even value, we can splat on half the value and add the result to itself. This last optimization hasn't been working recently because of improved constant folding. To circumvent this, create a pseudo VADD_SPLAT that can be expanded during instruction selection. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175632 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/PowerPC/PPCISelLowering.h')
-rw-r--r--	lib/Target/PowerPC/PPCISelLowering.h | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 12b3df7c9a..7cc2d1ac32 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -237,6 +237,11 @@ namespace llvm {
/// sym@got@dtprel@l.
ADDI_DTPREL_L,
+ /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
+ /// into an ADD of a VSPLTI with itself during instruction selection.
+ /// Necessary to avoid losing this optimization due to constant folds.
+ VADD_SPLAT,
+
/// STD_32 - This is the STD instruction for use with "32-bit" registers.
STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE,