author     Jim Grosbach <grosbach@apple.com>   2011-08-05 20:35:44 +0000
committer  Jim Grosbach <grosbach@apple.com>   2011-08-05 20:35:44 +0000
commit     19dec207fcc0f04902b7f097b7771ba7abba43fb (patch)
tree       3bfe008dcdc9fde53ca32f11f84be39625b8c370 /lib/Target/ARM/ARMLoadStoreOptimizer.cpp
parent     6fc1c08635a6bdd6caea234b756f0dd62581e73c (diff)
ARM refactor indexed store instructions.
Refactor STR[B] pre- and post-indexed instructions to use addressing modes for memory operands, which is necessary for assembly parsing and is more consistent with the rest of the memory instruction definitions. Make some incremental progress on refactoring away the mega-operand addrmode2 along the way, which is nice.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@136978 91177308-0d34-0410-b5e6-96231b3b80d8
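For context on the "mega-operand addrmode2" mentioned above: before this refactoring, a single immediate machine operand carried the add/sub direction, the 12-bit byte offset, and the shift kind, packed together by ARM_AM::getAM2Opc(). A minimal sketch of that packing, using the same call the (not yet refactored) load path in the diff below still relies on; the wrapper function name is made up for illustration:

// Illustrative sketch only. ARM_AM::getAM2Opc() is the real helper from
// ARMAddressingModes.h; buildCombinedAM2Offset() is a hypothetical wrapper.
#include "ARMAddressingModes.h"

static unsigned buildCombinedAM2Offset(ARM_AM::AddrOpc AddSub, unsigned Bytes) {
  // Direction (add/sub), 12-bit byte offset, and shift kind all end up in a
  // single immediate operand instead of separate operands.
  return ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
}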
Diffstat (limited to 'lib/Target/ARM/ARMLoadStoreOptimizer.cpp')
-rw-r--r--  lib/Target/ARM/ARMLoadStoreOptimizer.cpp  10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
index ee0028c2ea..d29ce3ab4e 100644
--- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
+++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
@@ -894,7 +894,10 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
return false;
unsigned Offset = 0;
- if (isAM2)
+ // FIXME: Loads still use a combined reg/imm offset operand. When
+ // AM2 refactoring is complete, this can go away and just always use
+ // the raw Offset value.
+ if (isAM2 && isLd)
Offset = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
else if (!isAM5)
Offset = AddSub == ARM_AM::sub ? -Bytes : Bytes;
@@ -924,7 +927,10 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
.addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
} else {
MachineOperand &MO = MI->getOperand(0);
- if (isAM2)
+ // FIXME: post-indexed stores use am2offset_imm, which still encodes
+ // the vestigial zero-reg offset register. When that's fixed, this clause
+ // can be removed entirely.
+ if (isAM2 && NewOpc == ARM::STR_POST_IMM)
// STR_PRE, STR_POST
BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
.addReg(MO.getReg(), getKillRegState(MO.isKill()))
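As the first FIXME above notes, loads still consume that combined reg/imm offset operand, so downstream code unpacks it with the ARM_AM accessors rather than reading a raw immediate. A sketch of the decode side, assuming the accessors from ARMAddressingModes.h; the helper name is again hypothetical:

// Illustrative sketch only: unpack a combined addrmode2 offset immediate.
static void splitCombinedAM2Offset(unsigned AM2Opc) {
  unsigned Bytes      = ARM_AM::getAM2Offset(AM2Opc);   // low 12 bits: magnitude
  ARM_AM::AddrOpc Dir = ARM_AM::getAM2Op(AM2Opc);       // add or sub
  ARM_AM::ShiftOpc SO = ARM_AM::getAM2ShiftOpc(AM2Opc); // shift kind
  (void)Bytes; (void)Dir; (void)SO;                     // placeholders for use
}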