author    Jakob Stoklund Olesen <stoklund@2pi.dk>    2012-01-10 22:32:14 +0000
committer Jakob Stoklund Olesen <stoklund@2pi.dk>    2012-01-10 22:32:14 +0000
commit    19d0bf3a9273d337b776ca33c284fd2b5da485ab (patch)
tree      4a52e45f1a517a6d6c51ffb88500c84ab3d44da1 /lib/Target/ARM/ARMConstantIslandPass.cpp
parent    75fda5dcaebf3daafdbd06f44e89c7683b1be770 (diff)
Consider unknown alignment caused by OptimizeThumb2Instructions().
This function runs after all constant islands have been placed, and may
shrink some instructions to their 2-byte forms. This can actually cause
some constant pool entries to move out of range because of growing
alignment padding.

Treat instructions that may be shrunk the same as inline asm - they
erode the known alignment bits.

Also reinstate an old assertion in verify(). It is correct now that
basic block offsets include alignments.

Add a single large test case that will hopefully exercise many parts of
the constant island pass.

<rdar://problem/10670199>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@147885 91177308-0d34-0410-b5e6-96231b3b80d8
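The arithmetic behind "eroding the known alignment bits" can be sketched as
follows. This is a self-contained illustration, not the pass's code:
worstCasePadding is an invented helper, though the pass performs an
equivalent worst-case computation when laying out basic blocks.

#include <cassert>
#include <cstdio>

// Worst-case padding needed to reach 2^LogAlign-byte alignment when only
// the low KnownBits bits of an offset are known to be zero.
// Invented helper for illustration.
static unsigned worstCasePadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits >= LogAlign)
    return 0; // Provably aligned already; no padding can appear.
  return (1u << LogAlign) - (1u << KnownBits);
}

int main() {
  const unsigned CPELogAlign = 2; // A constant island wants 4-byte alignment.

  // While every Thumb2 instruction keeps its 4-byte size, offsets are
  // known modulo 4 (KnownBits = 2) and no padding is ever required.
  unsigned Before = worstCasePadding(CPELogAlign, /*KnownBits=*/2);

  // If an instruction may later shrink to 2 bytes, only 2-byte alignment
  // survives (Unalign = 1), so up to 2 bytes of padding may appear in
  // front of the island.
  unsigned After = worstCasePadding(CPELogAlign, /*KnownBits=*/1);

  std::printf("worst-case padding: before %u, after %u\n", Before, After);
  assert(Before == 0 && After == 2);
  return 0;
}

Those extra worst-case bytes must be charged against every constant-pool
user's maximum displacement, which is what setting BBI.Unalign in
ComputeBlockSize() in the diff below achieves.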
Diffstat (limited to 'lib/Target/ARM/ARMConstantIslandPass.cpp')
-rw-r--r--  lib/Target/ARM/ARMConstantIslandPass.cpp  |  29 +++++++++++++++++++++++++----
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index e2ff4905e2..26cf8622f3 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -309,6 +309,7 @@ namespace {
bool FixUpConditionalBr(ImmBranch &Br);
bool FixUpUnconditionalBr(ImmBranch &Br);
bool UndoLRSpillRestore();
+ bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const;
bool OptimizeThumb2Instructions();
bool OptimizeThumb2Branches();
bool ReorderThumb2JumpTables();
@@ -347,10 +348,8 @@ void ARMConstantIslands::verify() {
for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
CPUser &U = CPUsers[i];
unsigned UserOffset = GetUserOffset(U);
- unsigned CPEOffset = GetOffsetOf(U.CPEMI);
- unsigned Disp = UserOffset < CPEOffset ? CPEOffset - UserOffset :
- UserOffset - CPEOffset;
- assert(Disp <= U.getMaxDisp() || "Constant pool entry out of range!");
+ assert(CPEIsInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp(), U.NegOk) &&
+ "Constant pool entry out of range!");
}
#endif
}
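The reinstated assertion delegates to CPEIsInRange() instead of recomputing
the displacement by hand. A simplified sketch of such a range test
(illustrative only; the real CPEIsInRange() also goes through
GetUserOffset() so that alignment padding is included in the offsets):

// Simplified stand-in for the range test: an entry behind the user is
// only acceptable when the instruction can encode a negative offset.
static bool cpeInRange(unsigned UserOffset, unsigned CPEOffset,
                       unsigned MaxDisp, bool NegOk) {
  if (UserOffset <= CPEOffset)
    return CPEOffset - UserOffset <= MaxDisp;
  return NegOk && UserOffset - CPEOffset <= MaxDisp;
}

Note that the removed form compared with ||, so the string literal made the
assertion vacuously true; the new form uses && and can actually fire.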
@@ -807,6 +806,9 @@ void ARMConstantIslands::ComputeBlockSize(MachineBasicBlock *MBB) {
// The actual size may be smaller, but still a multiple of the instr size.
if (I->isInlineAsm())
BBI.Unalign = isThumb ? 1 : 2;
+ // Also consider instructions that may be shrunk later.
+ else if (isThumb && mayOptimizeThumb2Instruction(I))
+ BBI.Unalign = 1;
}
// tBR_JTr contains a .align 2 directive.
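For context, the Unalign field set above feeds the pass's known-bits
bookkeeping roughly as follows (the names mirror BasicBlockInfo, but the
body is a paraphrase, not the pass's verbatim code):

// Rough paraphrase of how Unalign caps a block's known alignment bits.
struct BlockInfoSketch {
  unsigned KnownBits; // log2 of alignment provable from the block layout.
  unsigned Unalign;   // nonzero when some instruction's size is uncertain.

  // Inline asm, and now any shrinkable Thumb2 instruction, limits what
  // may be assumed about the offset past the block: with Unalign = 1,
  // only 2-byte alignment is still guaranteed.
  unsigned internalKnownBits() const {
    return Unalign ? Unalign : KnownBits;
  }
};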
@@ -1676,6 +1678,25 @@ bool ARMConstantIslands::UndoLRSpillRestore() {
return MadeChange;
}
+// mayOptimizeThumb2Instruction - Returns true if OptimizeThumb2Instructions
+// below may shrink MI.
+bool
+ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
+ switch(MI->getOpcode()) {
+ // OptimizeThumb2Instructions.
+ case ARM::t2LEApcrel:
+ case ARM::t2LDRpci:
+ // OptimizeThumb2Branches.
+ case ARM::t2B:
+ case ARM::t2Bcc:
+ case ARM::tBcc:
+ // OptimizeThumb2JumpTables.
+ case ARM::t2BR_JT:
+ return true;
+ }
+ return false;
+}
+
bool ARMConstantIslands::OptimizeThumb2Instructions() {
bool MadeChange = false;