author     Jakob Stoklund Olesen <stoklund@2pi.dk>  2012-04-27 22:58:38 +0000
committer  Jakob Stoklund Olesen <stoklund@2pi.dk>  2012-04-27 22:58:38 +0000
commit     456ff46e668f361f79e8e91f145152089cd7d933 (patch)
tree       fae7ca5096aa1013b9a8e2c1e9de77831baf76fb /lib/Target/ARM/ARMConstantIslandPass.cpp
parent     0e47cfd5b647e8480274c6b29c4e2d01c41a9e82 (diff)
Track worst case alignment padding more accurately.
Previously, ARMConstantIslandPass would conservatively compute the address of an aligned basic block as:

    RoundUpToAlignment(Offset + UnknownPadding)

This worked fine for the layout algorithm itself, but it could fool the verify() function because it accounts for alignment padding twice: once when adding the worst case UnknownPadding, and again by rounding up the fictional block offset. This meant that when optimizeThumb2Instructions would shrink an instruction, the conservative distance estimate could grow. That shouldn't be possible since the worst case alignment padding was already included.

This patch drops the use of RoundUpToAlignment, and depends only on worst case padding to compute conservative block offsets. This has the weird effect that the computed offset for an aligned block may not be aligned. The important difference is that shrinking an instruction can never cause the estimated distance between two instructions to grow. The estimated distance is always larger than the real distance that only the assembler knows.

<rdar://problem/11339352>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@155744 91177308-0d34-0410-b5e6-96231b3b80d8
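A minimal standalone sketch of the two schemes, with the helper bodies mirrored from the pass and made-up numbers for illustration (this is not the pass itself):

    #include <cstdio>

    // Mirrors the pass's UnknownPadding: worst case padding the unknown
    // offset bits can cause when aligning to 1 << LogAlign, given that
    // the low KnownBits bits of the offset are exact.
    static unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
      if (KnownBits < LogAlign)
        return (1u << LogAlign) - (1u << KnownBits);
      return 0;
    }

    static unsigned RoundUpToAlignment(unsigned Value, unsigned Align) {
      return (Value + Align - 1) / Align * Align;
    }

    int main() {
      // Made-up figures: block start estimate 4, 4-byte alignment
      // required (LogAlign = 2), low bit known to be zero (KnownBits = 1).
      unsigned Offset = 4, LogAlign = 2, KnownBits = 1;
      unsigned Pad = UnknownPadding(LogAlign, KnownBits);            // 2

      // Old scheme: pad, then round up -- the padding is counted twice.
      unsigned Old = RoundUpToAlignment(Offset + Pad, 1u << LogAlign); // 8

      // New scheme: worst case padding alone; the result need not be
      // aligned, but it moves in lockstep with Offset when code shrinks.
      unsigned New = Offset + Pad;                                   // 6

      std::printf("old = %u, new = %u\n", Old, New);
      return 0;
    }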
Diffstat (limited to 'lib/Target/ARM/ARMConstantIslandPass.cpp')
-rw-r--r--  lib/Target/ARM/ARMConstantIslandPass.cpp  55
1 file changed, 13 insertions(+), 42 deletions(-)
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index fc35c7cb02..896955170a 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -69,27 +69,6 @@ static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
return 0;
}
-/// WorstCaseAlign - Assuming only the low KnownBits bits in Offset are exact,
-/// add padding such that:
-///
-/// 1. The result is aligned to 1 << LogAlign.
-///
-/// 2. No other value of the unknown bits would require more padding.
-///
-/// This may add more padding than is required to satisfy just one of the
-/// constraints. It is necessary to compute alignment this way to guarantee
-/// that we don't underestimate the padding before an aligned block. If the
-/// real padding before a block is larger than we think, constant pool entries
-/// may go out of range.
-static inline unsigned WorstCaseAlign(unsigned Offset, unsigned LogAlign,
- unsigned KnownBits) {
- // Add the worst possible padding that the unknown bits could cause.
- Offset += UnknownPadding(LogAlign, KnownBits);
-
- // Then align the result.
- return RoundUpToAlignment(Offset, 1u << LogAlign);
-}
-
namespace {
/// ARMConstantIslands - Due to limited PC-relative displacements, ARM
/// requires constant pool entries to be scattered among the instructions
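Only the tail of UnknownPadding survives in the context above. A brute-force sketch of why worst case padding alone is a safe bound, under the simplifying assumption that the known low bits are zero (so the real offsets are exactly the multiples of 1 << KnownBits):

    #include <cassert>

    // UnknownPadding as in the pass (reproduced for the check).
    static unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
      if (KnownBits < LogAlign)
        return (1u << LogAlign) - (1u << KnownBits);
      return 0;
    }

    int main() {
      for (unsigned LogAlign = 0; LogAlign <= 4; ++LogAlign)
        for (unsigned KnownBits = 0; KnownBits <= LogAlign; ++KnownBits) {
          unsigned Align = 1u << LogAlign;
          // Every real offset consistent with KnownBits known-zero low
          // bits needs at most UnknownPadding bytes to reach alignment,
          // so the extra RoundUpToAlignment was pure overestimate.
          for (unsigned Real = 0; Real < 256; Real += 1u << KnownBits) {
            unsigned RealPad = (Align - Real % Align) % Align;
            assert(RealPad <= UnknownPadding(LogAlign, KnownBits));
          }
        }
      return 0;
    }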
@@ -109,7 +88,12 @@ namespace {
/// Offset - Distance from the beginning of the function to the beginning
/// of this basic block.
///
- /// The offset is always aligned as required by the basic block.
+ /// Offsets are computed assuming worst case padding before an aligned
+ /// block. This means that subtracting basic block offsets always gives a
+ /// conservative estimate of the real distance which may be smaller.
+ ///
+ /// Because worst case padding is used, the computed offset of an aligned
+ /// block may not actually be aligned.
unsigned Offset;
/// Size - Size of the basic block in bytes. If the block contains
@@ -152,7 +136,7 @@ namespace {
if (!LA)
return PO;
// Add alignment padding from the terminator.
- return WorstCaseAlign(PO, LA, internalKnownBits());
+ return PO + UnknownPadding(LA, internalKnownBits());
}
/// Compute the number of known low bits of postOffset. If this block
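A worked example of the new postOffset with made-up numbers: a block ending at PO = 10 whose successor needs 4-byte alignment (LA = 2), where inline asm has reduced the known low bits to zero:

    #include <cstdio>

    int main() {
      // Hypothetical values, not from the patch:
      unsigned PO = 10;        // Offset + Size of the block
      unsigned LA = 2;         // successor needs 1 << 2 = 4 byte alignment
      unsigned KnownBits = 0;  // inline asm left no known low bits

      unsigned Pad = (1u << LA) - (1u << KnownBits);  // UnknownPadding = 3
      unsigned PostOffset = PO + Pad;                 // 13, deliberately unaligned

      // The old WorstCaseAlign would have produced ((13 + 3) & ~3u) = 16,
      // overstating the worst case by another 3 bytes.
      std::printf("postOffset = %u\n", PostOffset);
      return 0;
    }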
@@ -342,9 +326,7 @@ void ARMConstantIslands::verify() {
for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
MBBI != E; ++MBBI) {
MachineBasicBlock *MBB = MBBI;
- unsigned Align = MBB->getAlignment();
unsigned MBBId = MBB->getNumber();
- assert(BBInfo[MBBId].Offset % (1u << Align) == 0);
assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
}
DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
@@ -1045,7 +1027,6 @@ bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
MachineInstr *CPEMI, unsigned MaxDisp,
bool NegOk, bool DoDump) {
unsigned CPEOffset = getOffsetOf(CPEMI);
- assert(CPEOffset % 4 == 0 && "Misaligned CPE");
if (DoDump) {
DEBUG({
@@ -1256,11 +1237,8 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
if (BBHasFallthrough(UserMBB)) {
// Size of branch to insert.
unsigned Delta = isThumb1 ? 2 : 4;
- // End of UserBlock after adding a branch.
- unsigned UserBlockEnd = UserBBI.postOffset() + Delta;
// Compute the offset where the CPE will begin.
- unsigned CPEOffset = WorstCaseAlign(UserBlockEnd, CPELogAlign,
- UserBBI.postKnownBits());
+ unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;
if (isOffsetInRange(UserOffset, CPEOffset, U)) {
DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
@@ -1299,20 +1277,16 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// up the insertion point.
// Try to split the block so it's fully aligned. Compute the latest split
- // point where we can add a 4-byte branch instruction, and then
- // WorstCaseAlign to LogAlign.
+ // point where we can add a 4-byte branch instruction, and then align to
+ // LogAlign which is the largest possible alignment in the function.
unsigned LogAlign = MF->getAlignment();
assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
unsigned KnownBits = UserBBI.internalKnownBits();
unsigned UPad = UnknownPadding(LogAlign, KnownBits);
- unsigned BaseInsertOffset = UserOffset + U.getMaxDisp();
+ unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
DEBUG(dbgs() << format("Split in middle of big block before %#x",
BaseInsertOffset));
- // Account for alignment and unknown padding.
- BaseInsertOffset &= ~((1u << LogAlign) - 1);
- BaseInsertOffset -= UPad;
-
// The 4 in the following is for the unconditional branch we'll be inserting
// (allows for long branch on Thumb1). Alignment of the island is handled
// inside isOffsetInRange.
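With made-up figures, the effect of reserving UPad below the maximum displacement instead of masking the offset afterwards: even if the full worst case padding materializes, the entry stays reachable:

    #include <cassert>

    int main() {
      // Made-up figures, not from the patch:
      unsigned UserOffset = 0x400;  // conservative offset of the CP user
      unsigned MaxDisp    = 0xFF4;  // user's maximum displacement
      unsigned UPad       = 6;      // worst case padding from unknown bits

      // New scheme: reserve the worst case padding up front.
      unsigned BaseInsertOffset = UserOffset + MaxDisp - UPad;

      // Even if all UPad bytes of padding show up, the entry is in range.
      assert(BaseInsertOffset + UPad - UserOffset <= MaxDisp);
      return 0;
    }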
@@ -1330,8 +1304,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
if (BaseInsertOffset >= BBInfo[UserMBB->getNumber()+1].Offset)
BaseInsertOffset = BBInfo[UserMBB->getNumber()+1].Offset -
(isThumb1 ? 6 : 8);
- unsigned EndInsertOffset =
- WorstCaseAlign(BaseInsertOffset + 4, LogAlign, KnownBits) +
+ unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
CPEMI->getOperand(2).getImm();
MachineBasicBlock::iterator MI = UserMI;
++MI;
@@ -1353,9 +1326,7 @@ void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
// reused within the block, but it doesn't matter much. Also assume CPEs
// are added in order with alignment padding. We may eventually be able
// to pack the aligned CPEs better.
- EndInsertOffset = RoundUpToAlignment(EndInsertOffset,
- 1u << getCPELogAlign(U.CPEMI)) +
- U.CPEMI->getOperand(2).getImm();
+ EndInsertOffset += U.CPEMI->getOperand(2).getImm();
CPUIndex++;
}