diff options
author | Scott Michel <scottm@aero.org> | 2008-12-27 04:51:36 +0000 |
---|---|---|
committer | Scott Michel <scottm@aero.org> | 2008-12-27 04:51:36 +0000 |
commit | f0569be4a948c7ed816bfa2b8774a5a18458ee23 (patch) | |
tree | 541905fcbd5e64ef95599b1ca3c4182adc972688 /lib/Target/CellSPU/SPUISelLowering.h | |
parent | 1323e8bf6a7bec163c5d43006f5b3b78042cef61 (diff) | |
download | llvm-f0569be4a948c7ed816bfa2b8774a5a18458ee23.tar.gz llvm-f0569be4a948c7ed816bfa2b8774a5a18458ee23.tar.bz2 llvm-f0569be4a948c7ed816bfa2b8774a5a18458ee23.tar.xz |
- Remove Tilmann's custom truncate lowering: it completely hosed over
DAGCombine's ability to find reasons to remove truncates when they were not
needed. Consequently, the CellSPU backend would produce correct, but _really
slow and horrible_, code.
Replaced with instruction sequences that do the equivalent truncation in
SPUInstrInfo.td.
- Re-examine how unaligned loads and stores work. Generated unaligned
load code has been tested on the CellSPU hardware; see the i32operations.c
and i64operations.c in CodeGen/CellSPU/useful-harnesses. (While these may be
toy test programs, they do prove that some real-world code compiles
correctly.)
- Fix truncating stores in bug 3193 (note: unpack_df.ll will still make llc
fault because i64 ult is not yet implemented.)
- Added i64 eq and neq for setcc and select/setcc; started new instruction
information file for them in SPU64InstrInfo.td. Additional i64 operations
should be added to this file and not to SPUInstrInfo.td.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@61447 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/CellSPU/SPUISelLowering.h')
-rw-r--r-- | lib/Target/CellSPU/SPUISelLowering.h | 6 |
1 file changed, 5 insertions, 1 deletion
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h index dd1f97f8d3..8d2e994545 100644 --- a/lib/Target/CellSPU/SPUISelLowering.h +++ b/lib/Target/CellSPU/SPUISelLowering.h @@ -39,7 +39,7 @@ namespace llvm { SHUFB, ///< Vector shuffle (permute) SHUFFLE_MASK, ///< Shuffle mask CNTB, ///< Count leading ones in bytes - PROMOTE_SCALAR, ///< Promote scalar->vector + PREFSLOT2VEC, ///< Promote scalar->vector VEC2PREFSLOT, ///< Extract element 0 MPY, ///< 16-bit Multiply (low parts of a 32-bit) MPYU, ///< Multiply Unsigned @@ -58,6 +58,7 @@ namespace llvm { ROTBYTES_LEFT_BITS, ///< Rotate bytes left by bit shift count SELECT_MASK, ///< Select Mask (FSM, FSMB, FSMH, FSMBI) SELB, ///< Select bits -> (b & mask) | (a & ~mask) + GATHER_BITS, ///< Gather bits from bytes/words/halfwords ADD_EXTENDED, ///< Add extended, with carry CARRY_GENERATE, ///< Carry generate for ADD_EXTENDED SUB_EXTENDED, ///< Subtract extended, with borrow @@ -120,6 +121,9 @@ namespace llvm { const SelectionDAG &DAG, unsigned Depth = 0) const; + virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op, + unsigned Depth = 0) const; + ConstraintType getConstraintType(const std::string &ConstraintLetter) const; std::pair<unsigned, const TargetRegisterClass*> |