author     Kalle Raiskila <kalle.raiskila@nokia.com>  2010-11-12 10:14:03 +0000
committer  Kalle Raiskila <kalle.raiskila@nokia.com>  2010-11-12 10:14:03 +0000
commit     7ea1ab5f41299563eb648aed159cfaff09e774d8 (patch)
tree       9352a3ac97282d9786b31ae08a2d03057186984e /lib/Target/CellSPU/SPUISelLowering.h
parent     d0c82a683e965f326e36a2bcaa85c00e917f8282 (diff)
Fix memory access lowering on SPU, adding support for the case where alignment < value size.

These cases were silently miscompiled before this patch. The lowered code is now overly verbose (especially for stores), so front-ends should still avoid misaligned memory accesses as much as possible. The bit-juggling algorithm added here probably still has some room for improvement.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@118889 91177308-0d34-0410-b5e6-96231b3b80d8
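To make the approach concrete, here is a minimal host-side sketch of the general technique the message describes, not the actual SPUISelLowering code: on hardware whose memory port only transfers aligned 16-byte quadwords, a load whose alignment is smaller than its size is assembled from the one or two aligned quads that cover it, with byte shifts splicing the pieces together. The function names, the memcpy-based splicing, and the flat-array memory model are illustrative assumptions.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <cstdio>

// Fetch the aligned 16-byte quad containing 'addr' (the only access the
// hypothetical hardware supports).
static void loadAlignedQuad(const uint8_t *mem, size_t addr, uint8_t quad[16]) {
  std::memcpy(quad, mem + (addr & ~size_t(15)), 16);
}

// Load 'size' bytes (size <= 16) from an arbitrary, possibly misaligned
// 'addr', using only aligned quadword loads plus byte copies standing in
// for the SHL_BYTES / SRL_BYTES style shifts added in this patch.
static void misalignedLoad(const uint8_t *mem, size_t addr,
                           uint8_t *out, unsigned size) {
  uint8_t lo[16], hi[16];
  unsigned offset = addr & 15;            // position inside the first quad
  loadAlignedQuad(mem, addr, lo);         // first covering quad
  unsigned fromLo = 16 - offset;          // bytes available in the first quad
  if (fromLo >= size) {
    std::memcpy(out, lo + offset, size);            // value fits in one quad
  } else {
    loadAlignedQuad(mem, addr + 16, hi);            // value straddles two quads
    std::memcpy(out, lo + offset, fromLo);          // leading part from quad 0
    std::memcpy(out + fromLo, hi, size - fromLo);   // trailing part from quad 1
  }
}

int main() {
  uint8_t mem[64];
  for (unsigned i = 0; i < 64; ++i) mem[i] = uint8_t(i);
  uint8_t val[4];
  misalignedLoad(mem, 14, val, 4);        // 4-byte load at offset 14 crosses a quad boundary
  std::printf("%u %u %u %u\n", val[0], val[1], val[2], val[3]);  // prints: 14 15 16 17
  return 0;
}

The store path is the verbose one the message mentions: each covering quad has to be read, merged with the new bytes, and written back.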
Diffstat (limited to 'lib/Target/CellSPU/SPUISelLowering.h')
-rw-r--r--  lib/Target/CellSPU/SPUISelLowering.h  |  5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/lib/Target/CellSPU/SPUISelLowering.h b/lib/Target/CellSPU/SPUISelLowering.h
index 41d0826758..82f10270db 100644
--- a/lib/Target/CellSPU/SPUISelLowering.h
+++ b/lib/Target/CellSPU/SPUISelLowering.h
@@ -41,8 +41,9 @@ namespace llvm {
CNTB, ///< Count leading ones in bytes
PREFSLOT2VEC, ///< Promote scalar->vector
VEC2PREFSLOT, ///< Extract element 0
- SHLQUAD_L_BITS, ///< Rotate quad left, by bits
- SHLQUAD_L_BYTES, ///< Rotate quad left, by bytes
+ SHL_BITS, ///< Shift quad left, by bits
+ SHL_BYTES, ///< Shift quad left, by bytes
+ SRL_BYTES, ///< Shift quad right, by bytes. Insert zeros.
VEC_ROTL, ///< Vector rotate left
VEC_ROTR, ///< Vector rotate right
ROTBYTES_LEFT, ///< Rotate bytes (loads -> ROTQBYI)
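The renamed nodes distinguish true shifts from rotates. Below is a small sketch, assuming SPU's big-endian byte order and using hypothetical helper names, of the difference the doc comments describe: SRL_BYTES shifts the quad right by whole bytes and fills the vacated bytes with zeros, whereas a byte rotate wraps the shifted-out bytes around to the other end.

#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical model of a 128-bit quadword as 16 bytes, byte 0 being the
// most significant (big-endian), as on SPU.
struct Quad { uint8_t b[16]; };

// Shift right by whole bytes, inserting zeros on the left: the behaviour
// the SRL_BYTES comment describes ("Shift quad right, by bytes. Insert zeros.").
static Quad srlBytes(const Quad &q, unsigned n) {
  Quad r = {};                        // vacated bytes become zero
  if (n < 16)
    std::memcpy(r.b + n, q.b, 16 - n);
  return r;
}

// A byte rotate, by contrast, wraps the shifted-out bytes around instead of
// zero-filling; the rename in this patch keeps the two operations apart.
static Quad rotlBytes(const Quad &q, unsigned n) {
  Quad r;
  for (unsigned i = 0; i < 16; ++i)
    r.b[i] = q.b[(i + n) % 16];
  return r;
}

int main() {
  Quad q;
  for (unsigned i = 0; i < 16; ++i) q.b[i] = uint8_t(i + 1);
  Quad s = srlBytes(q, 3), t = rotlBytes(q, 3);
  std::printf("srl:  %u %u %u %u ...\n", s.b[0], s.b[1], s.b[2], s.b[3]);  // 0 0 0 1 ...
  std::printf("rotl: %u %u %u %u ...\n", t.b[0], t.b[1], t.b[2], t.b[3]);  // 4 5 6 7 ...
  return 0;
}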