//===-- AArch64BranchFixupPass.cpp - AArch64 branch fixup -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that fixes AArch64 branches which have ended up out
// of range for their immediate operands.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-branch-fixup"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumSplit,      "Number of uncond branches inserted");
STATISTIC(NumCBrFixed,   "Number of cond branches fixed");

/// Return the worst case padding that could result from unknown offset bits.
/// This does not include alignment padding caused by known offset bits.
///
/// @param LogAlign log2(alignment)
/// @param KnownBits Number of known low offset bits.
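///
/// For example (illustrative values), with LogAlign == 3 (an 8-byte-aligned
/// successor) and KnownBits == 2, the result is (1u << 3) - (1u << 2) == 4
/// bytes of worst-case padding; when KnownBits >= LogAlign, the padding is
/// fully determined by the known bits and no worst-case slack is needed.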
static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0;
}

namespace {
  /// Due to limited PC-relative displacements, conditional branches to distant
  /// blocks may need converting into an unconditional equivalent. For example:
  ///     tbz w1, #0, far_away
  /// becomes
  ///     tbnz w1, #0, skip
  ///     b far_away
  ///   skip:
  class AArch64BranchFixup : public MachineFunctionPass {
    /// Information about the offset and size of a single basic block.
    struct BasicBlockInfo {
      /// Distance from the beginning of the function to the beginning of this
      /// basic block.
      ///
      /// Offsets are computed assuming worst case padding before an aligned
      /// block. This means that subtracting basic block offsets always gives a
      /// conservative estimate of the real distance which may be smaller.
      ///
      /// Because worst case padding is used, the computed offset of an aligned
      /// block may not actually be aligned.
      unsigned Offset;

      /// Size of the basic block in bytes.  If the block contains inline
      /// assembly, this is a worst case estimate.
      ///
      /// The size does not include any alignment padding whether from the
      /// beginning of the block, or from an aligned jump table at the end.
      unsigned Size;

      /// The number of low bits in Offset that are known to be exact.  The
      /// remaining bits of Offset are an upper bound.
      uint8_t KnownBits;

      /// When non-zero, the block contains instructions (inline asm) of unknown
      /// size.  The real size may be smaller than Size bytes by a multiple of 1
      /// << Unalign.
      uint8_t Unalign;

      BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0) {}

      /// Compute the number of known offset bits internally to this block.
      /// This number should be used to predict worst case padding when
      /// splitting the block.
      unsigned internalKnownBits() const {
        unsigned Bits = Unalign ? Unalign : KnownBits;
        // If the block size isn't a multiple of the known bits, assume the
        // worst case padding.
        if (Size & ((1u << Bits) - 1))
          Bits = countTrailingZeros(Size);
        return Bits;
      }

      /// Compute the offset immediately following this block.  If LogAlign is
      /// specified, return the offset the successor block will get if it has
      /// this alignment.
      unsigned postOffset(unsigned LogAlign = 0) const {
        unsigned PO = Offset + Size;
        if (!LogAlign)
          return PO;
        // Add alignment padding from the terminator.
        return PO + UnknownPadding(LogAlign, internalKnownBits());
      }

      /// Compute the number of known low bits of postOffset.  If this block
      /// contains inline asm, the number of known bits drops to the
      /// instruction alignment.  An aligned terminator may increase the number
      /// of known bits.
      /// If LogAlign is given, also consider the alignment of the next block.
      unsigned postKnownBits(unsigned LogAlign = 0) const {
        return std::max(LogAlign, internalKnownBits());
      }
    };
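
    // Worked example (illustrative values): a block with Offset == 0x100,
    // Size == 0x34, KnownBits == 4, and Unalign == 0 has
    // internalKnownBits() == 2, since 0x34 is a multiple of 4 but not of 16.
    // For a successor with LogAlign == 3, postOffset(3) == 0x134 +
    // UnknownPadding(3, 2) == 0x138, and postKnownBits(3) == max(3, 2) == 3.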

    std::vector<BasicBlockInfo> BBInfo;

    /// One per immediate branch: the machine instruction pointer (conditional
    /// or unconditional), the number of bits available for its displacement,
    /// and whether it is conditional.
    struct ImmBranch {
      MachineInstr *MI;
      unsigned OffsetBits : 31;
      bool IsCond : 1;
      ImmBranch(MachineInstr *mi, unsigned offsetbits, bool cond)
        : MI(mi), OffsetBits(offsetbits), IsCond(cond) {}
    };

    /// Keep track of all the immediate branch instructions.
    std::vector<ImmBranch> ImmBranches;

    MachineFunction *MF;
    const AArch64InstrInfo *TII;
  public:
    static char ID;
    AArch64BranchFixup() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "AArch64 branch fixup pass";
    }

  private:
    void initializeFunctionInfo();
    MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
    void adjustBBOffsetsAfter(MachineBasicBlock *BB);
    bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB,
                     unsigned OffsetBits);
    bool fixupImmediateBr(ImmBranch &Br);
    bool fixupConditionalBr(ImmBranch &Br);

    void computeBlockSize(MachineBasicBlock *MBB);
    unsigned getOffsetOf(MachineInstr *MI) const;
    void dumpBBs();
    void verify();
  };
  char AArch64BranchFixup::ID = 0;
}

/// Check that the computed basic block offsets are consistent.
void AArch64BranchFixup::verify() {
#ifndef NDEBUG
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    unsigned MBBId = MBB->getNumber();
    assert(!MBBId || BBInfo[MBBId - 1].postOffset() <= BBInfo[MBBId].Offset);
  }
#endif
}

/// Print block size and offset information (debugging).
void AArch64BranchFixup::dumpBBs() {
  DEBUG({
    for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
      const BasicBlockInfo &BBI = BBInfo[J];
      dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
             << " kb=" << unsigned(BBI.KnownBits)
             << " ua=" << unsigned(BBI.Unalign)
             << format(" size=%#x\n", BBInfo[J].Size);
    }
  });
}

/// Returns an instance of the branch fixup pass.
FunctionPass *llvm::createAArch64BranchFixupPass() {
  return new AArch64BranchFixup();
}

bool AArch64BranchFixup::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  DEBUG(dbgs() << "***** AArch64BranchFixup ******");
  TII = (const AArch64InstrInfo*)MF->getTarget().getInstrInfo();

  // This pass invalidates liveness information when it splits basic blocks.
  MF->getRegInfo().invalidateLiveness();

  // Renumber all of the machine basic blocks in the function, guaranteeing that
  // the numbers agree with the position of the block in the function.
  MF->RenumberBlocks();

  // Do the initial scan of the function, building up information about the
  // sizes of each block and location of each immediate branch.
  initializeFunctionInfo();

  // Iteratively fix up branches until there is no change.
  unsigned NoBRIters = 0;
  bool MadeChange = false;
  while (true) {
    DEBUG(dbgs() << "Beginning iteration #" << NoBRIters << '\n');
    bool BRChange = false;
    for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
      BRChange |= fixupImmediateBr(ImmBranches[i]);
    if (BRChange && ++NoBRIters > 30)
      report_fatal_error("Branch Fix Up pass failed to converge!");
    DEBUG(dumpBBs());

    if (!BRChange)
      break;
    MadeChange = true;
  }

  // After a while, this might be made debug-only, but it is not expensive.
  verify();

  DEBUG(dbgs() << '\n'; dumpBBs());

  BBInfo.clear();
  ImmBranches.clear();

  return MadeChange;
}

/// Return true if the specified basic block can fall through into the block
/// immediately after it.
static bool BBHasFallthrough(MachineBasicBlock *MBB) {
  // Get the next machine basic block in the function.
  MachineFunction::iterator MBBI = MBB;
  // Can't fall off end of function.
  if (llvm::next(MBBI) == MBB->getParent()->end())
    return false;

  MachineBasicBlock *NextBB = llvm::next(MBBI);
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I == NextBB)
      return true;

  return false;
}

/// Do the initial scan of the function, building up information about the
/// sizes of each block and the location of each immediate branch.
void AArch64BranchFixup::initializeFunctionInfo() {
  BBInfo.clear();
  BBInfo.resize(MF->getNumBlockIDs());

  // First, compute the size of all basic blocks, and see if the function
  // has any inline assembly in it. If so, we have to be conservative about
  // alignment assumptions, as we don't know for sure the size of any
  // instructions in the inline assembly.
  for (MachineFunction::iterator I = MF->begin(), E = MF->end(); I != E; ++I)
    computeBlockSize(I);

  // The known bits of the entry block offset are determined by the function
  // alignment.
  BBInfo.front().KnownBits = MF->getAlignment();

  // Compute block offsets and known bits.
  adjustBBOffsetsAfter(MF->begin());

  // Now go back through the instructions and build up our data structures.
  for (MachineFunction::iterator MBBI = MF->begin(), E = MF->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock &MBB = *MBBI;

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      if (I->isDebugValue())
        continue;

      int Opc = I->getOpcode();
      if (I->isBranch()) {
        bool IsCond = false;

        // The offsets encoded in these instructions are scaled by the
        // instruction size (4 bytes), which effectively adds 2 bits to their
        // range.
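        // For example, TBZ/TBNZ encode a 14-bit signed word offset, giving
        // roughly +/-32KiB of range; Bcc and CBZ/CBNZ encode 19 bits
        // (+/-1MiB); and the unconditional B encodes 26 bits (+/-128MiB).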
        unsigned Bits = 0;
        switch (Opc) {
        default:
          continue;  // Ignore other branches (no immediate destination)
        case AArch64::TBZxii:
        case AArch64::TBZwii:
        case AArch64::TBNZxii:
        case AArch64::TBNZwii:
          IsCond = true;
          Bits = 14 + 2;
          break;
        case AArch64::Bcc:
        case AArch64::CBZx:
        case AArch64::CBZw:
        case AArch64::CBNZx:
        case AArch64::CBNZw:
          IsCond = true;
          Bits = 19 + 2;
          break;
        case AArch64::Bimm:
          Bits = 26 + 2;
          break;
        }

        // Record this immediate branch.
        ImmBranches.push_back(ImmBranch(I, Bits, IsCond));
      }
    }
  }
}

/// Compute the size and some alignment information for MBB.  This function
/// updates BBInfo directly.
void AArch64BranchFixup::computeBlockSize(MachineBasicBlock *MBB) {
  BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
  BBI.Size = 0;
  BBI.Unalign = 0;

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
       ++I) {
    BBI.Size += TII->getInstSizeInBytes(*I);
    // For inline asm, getInstSizeInBytes returns a conservative estimate.
    // The actual size may be smaller, but still a multiple of the instr size.
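    // Unalign == 2 records that the real size may differ from the estimate
    // by a multiple of 4 bytes (1u << 2), the fixed AArch64 instruction size.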
    if (I->isInlineAsm())
      BBI.Unalign = 2;
  }
}

/// Return the current offset of the specified machine instruction from the
/// start of the function.  This offset changes as instructions and blocks are
/// moved around inside the function.
unsigned AArch64BranchFixup::getOffsetOf(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  // The offset is composed of two things: the sum of the sizes of all MBBs
  // before this instruction's block, and the offset from the start of the
  // block it is in.
  unsigned Offset = BBInfo[MBB->getNumber()].Offset;

  // Sum instructions before MI in MBB.
  for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
    assert(I != MBB->end() && "Didn't find MI in its own basic block?");
    Offset += TII->getInstSizeInBytes(*I);
  }
  return Offset;
}

/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch.  Update data structures and renumber blocks to
/// account for this change, and return the newly created block.
MachineBasicBlock *
AArch64BranchFixup::splitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
    MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = OrigBB; ++MBBI;
  MF->insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  BuildMI(OrigBB, DebugLoc(), TII->get(AArch64::Bimm)).addMBB(NewBB);
  ++NumSplit;

  // Update the CFG.  All succs of OrigBB are now succs of NewBB.
  NewBB->transferSuccessors(OrigBB);

  // OrigBB branches to NewBB.
  OrigBB->addSuccessor(NewBB);

  // Update internal data structures to account for the newly inserted MBB.
  MF->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Figure out how large OrigBB is now; its size includes the unconditional
  // branch we just added.  (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // executed.)
  computeBlockSize(OrigBB);

  // Figure out how large NewBB is.
  computeBlockSize(NewBB);

  // All BBOffsets following these blocks must be modified.
  adjustBBOffsetsAfter(OrigBB);

  return NewBB;
}

void AArch64BranchFixup::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
  unsigned BBNum = BB->getNumber();
  for (unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
    // Get the offset and known bits at the end of the layout predecessor.
    // Include the alignment of the current block.
    unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
    unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
    unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);

    // This is where block i begins.  Stop if the offset is already correct
    // and we have updated at least two blocks, the maximum number of blocks
    // whose size can have changed before this function is called.
    if (i > BBNum + 2 &&
        BBInfo[i].Offset == Offset &&
        BBInfo[i].KnownBits == KnownBits)
      break;

    BBInfo[i].Offset = Offset;
    BBInfo[i].KnownBits = KnownBits;
  }
}

/// Returns true if the distance between the specified MI and the specified BB
/// fits in MI's displacement field.
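///
/// For example, a Bcc branch records OffsetBits == 21 (19 encoded bits plus 2
/// implicit zero bits from the 4-byte instruction scaling), so isIntN accepts
/// byte displacements in [-2^20, 2^20 - 1], i.e. roughly +/-1MiB.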
bool AArch64BranchFixup::isBBInRange(MachineInstr *MI,
                                     MachineBasicBlock *DestBB,
                                     unsigned OffsetBits) {
  int64_t BrOffset   = getOffsetOf(MI);
  int64_t DestOffset = BBInfo[DestBB->getNumber()].Offset;

  DEBUG(dbgs() << "Branch of destination BB#" << DestBB->getNumber()
               << " from BB#" << MI->getParent()->getNumber()
               << " bits available=" << OffsetBits
               << " from " << getOffsetOf(MI) << " to " << DestOffset
               << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);

  return isIntN(OffsetBits, DestOffset - BrOffset);
}

/// Fix up an immediate branch whose destination is too far away to fit in its
/// displacement field.
bool AArch64BranchFixup::fixupImmediateBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MI->getOperand(i).isMBB()) {
      DestBB = MI->getOperand(i).getMBB();
      break;
    }
  }
  assert(DestBB && "Branch with no destination BB?");

  // Check to see if the DestBB is already in-range.
  if (isBBInRange(MI, DestBB, Br.OffsetBits))
    return false;

  assert(Br.IsCond && "Only conditional branches should need fixup");
  return fixupConditionalBr(Br);
}

/// Fix up a conditional branch whose destination is too far away to fit in its
/// displacement field. It is converted to an inverse conditional branch + an
/// unconditional branch to the destination.
bool
AArch64BranchFixup::fixupConditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *MBB = MI->getParent();
  unsigned CondBrMBBOperand = 0;

  // The general idea is to add an unconditional branch to the destination and
  // invert the conditional branch to jump over it. Complications occur around
  // fallthrough and unreachable ends to the block.
  //   b.lt L1
  //   =>
  //   b.ge L2
  //   b   L1
  // L2:

  // First we invert the conditional branch, by creating a replacement if
  // necessary. This if statement contains all the special handling of different
  // branch types.
  if (MI->getOpcode() == AArch64::Bcc) {
    // The basic block is operand number 1 for Bcc
    CondBrMBBOperand = 1;

    A64CC::CondCodes CC = (A64CC::CondCodes)MI->getOperand(0).getImm();
    CC = A64InvertCondCode(CC);
    MI->getOperand(0).setImm(CC);
  } else {
    MachineInstrBuilder InvertedMI;
    int InvertedOpcode;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("Unknown branch type");
    case AArch64::TBZxii: InvertedOpcode = AArch64::TBNZxii; break;
    case AArch64::TBZwii: InvertedOpcode = AArch64::TBNZwii; break;
    case AArch64::TBNZxii: InvertedOpcode = AArch64::TBZxii; break;
    case AArch64::TBNZwii: InvertedOpcode = AArch64::TBZwii; break;
    case AArch64::CBZx: InvertedOpcode = AArch64::CBNZx; break;
    case AArch64::CBZw: InvertedOpcode = AArch64::CBNZw; break;
    case AArch64::CBNZx: InvertedOpcode = AArch64::CBZx; break;
    case AArch64::CBNZw: InvertedOpcode = AArch64::CBZw; break;
    }

    InvertedMI = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(InvertedOpcode));
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      InvertedMI.addOperand(MI->getOperand(i));
      if (MI->getOperand(i).isMBB())
        CondBrMBBOperand = i;
    }

    MI->eraseFromParent();
    MI = Br.MI = InvertedMI;
  }

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through
  // block. Otherwise, split the MBB before the next instruction.
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  ++NumCBrFixed;
  if (BMI != MI) {
    if (llvm::next(MachineBasicBlock::iterator(MI)) == prior(MBB->end()) &&
        BMI->getOpcode() == AArch64::Bimm) {
      // Last MI in the BB is an unconditional branch. We can swap destinations:
      // b.eq L1 (temporarily b.ne L1 after first change)
      // b   L2
      // =>
      // b.ne L2
      // b   L1
      MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
      if (isBBInRange(MI, NewDest, Br.OffsetBits)) {
        DEBUG(dbgs() << "  Invert Bcc condition and swap its destination with "
                     << *BMI);
        MachineBasicBlock *DestBB = MI->getOperand(CondBrMBBOperand).getMBB();
        BMI->getOperand(0).setMBB(DestBB);
        MI->getOperand(CondBrMBBOperand).setMBB(NewDest);
        return true;
      }
    }
  }

  if (NeedSplit) {
    MachineBasicBlock::iterator MBBI = MI; ++MBBI;
    splitBlockBeforeInstr(MBBI);
    // No need for the branch to the next block. We're adding an unconditional
    // branch to the destination.
    int delta = TII->getInstSizeInBytes(MBB->back());
    BBInfo[MBB->getNumber()].Size -= delta;
    MBB->back().eraseFromParent();
    // BBInfo[SplitBB].Offset is wrong temporarily, fixed below
  }

  // After splitting and removing the unconditional branch from the original BB,
  // the structure is now:
  // oldbb:
  //   [things]
  //   b.invertedCC L1
  // splitbb/fallthroughbb:
  //   [old b L2/real continuation]
  //
  // We now have to change the conditional branch to point to splitbb and add an
  // unconditional branch after it to L1, giving the final structure:
  // oldbb:
  //   [things]
  //   b.invertedCC splitbb
  //   b L1
  // splitbb/fallthroughbb:
  //   [old b L2/real continuation]
  MachineBasicBlock *NextBB = llvm::next(MachineFunction::iterator(MBB));

  DEBUG(dbgs() << "  Insert B to BB#"
               << MI->getOperand(CondBrMBBOperand).getMBB()->getNumber()
               << " also invert condition and change dest. to BB#"
               << NextBB->getNumber() << "\n");

  // Insert a new unconditional branch and fixup the destination of the
  // conditional one.  Also update the ImmBranch as well as adding a new entry
  // for the new branch.
  BuildMI(MBB, DebugLoc(), TII->get(AArch64::Bimm))
    .addMBB(MI->getOperand(CondBrMBBOperand).getMBB());
  MI->getOperand(CondBrMBBOperand).setMBB(NextBB);

  BBInfo[MBB->getNumber()].Size += TII->getInstSizeInBytes(MBB->back());

  // Bimm encodes a 26-bit immediate which is scaled by 4, giving 26 + 2 bits.
  unsigned OffsetBits = 26 + 2;
  ImmBranches.push_back(ImmBranch(&MBB->back(), OffsetBits, false));

  adjustBBOffsetsAfter(MBB);
  return true;
}