author    Jakob Stoklund Olesen <stoklund@2pi.dk>  2012-08-27 23:58:52 +0000
committer Jakob Stoklund Olesen <stoklund@2pi.dk>  2012-08-27 23:58:52 +0000
commit    dd364419ee64cd5bb234af006ce0cb285e4a84ca (patch)
tree      39b3a87aca20346bf316390cb68c42812ce683ab /lib/Target
parent    94a935f072452d00207b1e8c1da75c31bb2a5f9b (diff)
Add ATOMIC_LDR* pseudo-instructions to model atomic_load on ARM.
It is not safe to use normal LDR instructions because they may be reordered by the scheduler. The ATOMIC_LDR pseudos have a mayStore flag that prevents reordering. Atomic loads are also prevented from participating in rematerialization and load folding.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@162713 91177308-0d34-0410-b5e6-96231b3b80d8
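For context, a minimal C++ sketch of the kind of source that exercises these patterns; it is not part of the patch, and the function and variable names are illustrative only. A relaxed std::atomic load becomes an atomic_load node, which the patterns below now select as an ATOMIC_LDR* pseudo instead of a plain load, so the scheduler cannot move other memory operations across it before the pseudo is expanded back to the real LDR.

// A minimal sketch, not from this patch: a relaxed atomic load in C++11.
#include <atomic>

std::atomic<int> Flag{0};

int readFlag() {
  // Matches the atomic_load_32 patterns, which after this change select
  // ATOMIC_LDRi12 / ATOMIC_LDRrs (mayLoad + mayStore) rather than the bare
  // LDRi12 / LDRrs instructions.
  return Flag.load(std::memory_order_relaxed);
}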
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMBaseInstrInfo.cpp    | 20
-rw-r--r--  lib/Target/ARM/ARMBaseRegisterInfo.cpp | 10
-rw-r--r--  lib/Target/ARM/ARMInstrInfo.td         | 41
-rw-r--r--  lib/Target/ARM/ARMInstrThumb.td        | 47
-rw-r--r--  lib/Target/ARM/ARMInstrThumb2.td       | 68
-rw-r--r--  lib/Target/ARM/Thumb2SizeReduction.cpp | 26
6 files changed, 169 insertions, 43 deletions
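The commit message also keeps atomic loads out of rematerialization and load folding. A self-contained toy model follows (this is not LLVM code; the struct and helper names are invented for illustration) showing why a single mayStore flag is enough to block reordering, folding, and rematerialization alike. In the backend itself, the corresponding queries read the instruction flags set by the 'mayLoad = 1, mayStore = 1' lines in the TableGen definitions below.

// A toy model, not LLVM code: passes that move or fold plain loads must treat
// any instruction flagged as a possible store as immovable, which is exactly
// the effect the ATOMIC_LDR* pseudos rely on.
#include <cstdio>

struct Instr {
  const char *Name;
  bool MayLoad;
  bool MayStore;  // set on the atomic-load pseudos even though they never store
};

// The scheduler's question: may a plain load be moved across Barrier?
bool canReorderLoadAcross(const Instr &Barrier) {
  return !Barrier.MayStore;  // anything that may store blocks code motion
}

// Load folding / rematerialization: only a pure, side-effect-free load qualifies.
bool canFoldOrRemat(const Instr &I) {
  return I.MayLoad && !I.MayStore;
}

int main() {
  Instr PlainLdr  = {"LDRi12", true, false};
  Instr AtomicLdr = {"ATOMIC_LDRi12", true, true};
  std::printf("reorder across %s: %d\n", PlainLdr.Name, canReorderLoadAcross(PlainLdr));
  std::printf("reorder across %s: %d\n", AtomicLdr.Name, canReorderLoadAcross(AtomicLdr));
  std::printf("fold/remat %s: %d\n", AtomicLdr.Name, canFoldOrRemat(AtomicLdr));
  return 0;
}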
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 2112992dd8..378331f382 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -2778,8 +2778,8 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
// variants are one cycle cheaper.
switch (DefMCID->getOpcode()) {
default: break;
- case ARM::LDRrs:
- case ARM::LDRBrs: {
+ case ARM::LDRrs: case ARM::ATOMIC_LDRrs:
+ case ARM::LDRBrs: case ARM::ATOMIC_LDRBrs: {
unsigned ShOpVal = DefMI->getOperand(3).getImm();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
if (ShImm == 0 ||
@@ -2787,9 +2787,9 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget,
--Adjust;
break;
}
- case ARM::t2LDRs:
- case ARM::t2LDRBs:
- case ARM::t2LDRHs:
+ case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
+ case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
+ case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
unsigned ShAmt = DefMI->getOperand(3).getImm();
@@ -3046,8 +3046,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// variants are one cycle cheaper.
switch (DefMCID.getOpcode()) {
default: break;
- case ARM::LDRrs:
- case ARM::LDRBrs: {
+ case ARM::LDRrs: case ARM::ATOMIC_LDRrs:
+ case ARM::LDRBrs: case ARM::ATOMIC_LDRBrs: {
unsigned ShOpVal =
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
@@ -3056,9 +3056,9 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
--Latency;
break;
}
- case ARM::t2LDRs:
- case ARM::t2LDRBs:
- case ARM::t2LDRHs:
+ case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
+ case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
+ case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
case ARM::t2LDRSHs: {
// Thumb2 mode: lsl only.
unsigned ShAmt =
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 9deb96ea9e..828e7d4e95 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -870,10 +870,14 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
// return false for everything else.
unsigned Opc = MI->getOpcode();
switch (Opc) {
- case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
+ case ARM::LDRi12: case ARM::ATOMIC_LDRi12:
+ case ARM::LDRH: case ARM::ATOMIC_LDRH:
+ case ARM::LDRBi12: case ARM::ATOMIC_LDRBi12:
case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
- case ARM::t2LDRi12: case ARM::t2LDRi8:
- case ARM::t2STRi12: case ARM::t2STRi8:
+ case ARM::t2LDRi12: case ARM::ATOMIC_t2LDRi12:
+ case ARM::t2LDRi8: case ARM::ATOMIC_t2LDRi8:
+ case ARM::t2STRi12:
+ case ARM::t2STRi8:
case ARM::VLDRS: case ARM::VLDRD:
case ARM::VSTRS: case ARM::VSTRD:
case ARM::tSTRspi: case ARM::tLDRspi:
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 6d701ea5d5..ae51972604 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -4199,6 +4199,37 @@ let usesCustomInserter = 1 in {
}
}
+// Pseudo-instructions for atomic loads.
+// These are marked with mayStore so they can't be reordered.
+let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
+def ATOMIC_LDRBrs : ARMPseudoExpand<(outs GPRnopc:$Rt),
+ (ins ldst_so_reg:$shift, pred:$p),
+ 4, IIC_iLoad_bh_r, [],
+ (LDRBrs GPRnopc:$Rt, ldst_so_reg:$shift, pred:$p)>;
+def ATOMIC_LDRBi12 : ARMPseudoExpand<(outs GPRnopc:$Rt),
+ (ins addrmode_imm12:$addr, pred:$p),
+ 4, IIC_iLoad_bh_si, [],
+ (LDRBi12 GPRnopc:$Rt, addrmode_imm12:$addr, pred:$p)> {
+ let AM = AddrMode_i12;
+}
+def ATOMIC_LDRH : ARMPseudoExpand<(outs GPR:$Rt),
+ (ins addrmode3:$addr, pred:$p),
+ 4, IIC_iLoad_bh_r, [],
+ (LDRH GPR:$Rt, addrmode3:$addr, pred:$p)> {
+ let AM = AddrMode3;
+}
+def ATOMIC_LDRi12 : ARMPseudoExpand<(outs GPR:$Rt),
+ (ins addrmode_imm12:$addr, pred:$p),
+ 4, IIC_iLoad_si, [],
+ (LDRi12 GPR:$Rt, addrmode_imm12:$addr, pred:$p)> {
+ let AM = AddrMode_i12;
+}
+def ATOMIC_LDRrs : ARMPseudoExpand<(outs GPR:$Rt),
+ (ins ldst_so_reg:$shift, pred:$p),
+ 4, IIC_iLoad_r, [],
+ (LDRrs GPR:$Rt, ldst_so_reg:$shift, pred:$p)>;
+}
+
let usesCustomInserter = 1 in {
def COPY_STRUCT_BYVAL_I32 : PseudoInst<
(outs), (ins GPR:$dst, GPR:$src, i32imm:$size, i32imm:$alignment),
@@ -4902,15 +4933,15 @@ def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPRnopc:$Rm, i16)),
// Atomic load/store patterns
def : ARMPat<(atomic_load_8 ldst_so_reg:$src),
- (LDRBrs ldst_so_reg:$src)>;
+ (ATOMIC_LDRBrs ldst_so_reg:$src)>;
def : ARMPat<(atomic_load_8 addrmode_imm12:$src),
- (LDRBi12 addrmode_imm12:$src)>;
+ (ATOMIC_LDRBi12 addrmode_imm12:$src)>;
def : ARMPat<(atomic_load_16 addrmode3:$src),
- (LDRH addrmode3:$src)>;
+ (ATOMIC_LDRH addrmode3:$src)>;
def : ARMPat<(atomic_load_32 ldst_so_reg:$src),
- (LDRrs ldst_so_reg:$src)>;
+ (ATOMIC_LDRrs ldst_so_reg:$src)>;
def : ARMPat<(atomic_load_32 addrmode_imm12:$src),
- (LDRi12 addrmode_imm12:$src)>;
+ (ATOMIC_LDRi12 addrmode_imm12:$src)>;
def : ARMPat<(atomic_store_8 ldst_so_reg:$ptr, GPR:$val),
(STRBrs GPR:$val, ldst_so_reg:$ptr)>;
def : ARMPat<(atomic_store_8 addrmode_imm12:$ptr, GPR:$val),
diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td
index e171f8b092..bad7740c72 100644
--- a/lib/Target/ARM/ARMInstrThumb.td
+++ b/lib/Target/ARM/ARMInstrThumb.td
@@ -681,6 +681,41 @@ def tSTRspi : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i,
let Inst{7-0} = addr;
}
+// Atomic loads. These pseudos expand to the loads above, but they have mayStore
+// = 1 so they can't be reordered.
+let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
+let AM = AddrModeT1_1 in {
+def ATOMIC_tLDRBi : tPseudoExpand<(outs tGPR:$Rt),
+ (ins t_addrmode_is1:$addr, pred:$p),
+ 2, IIC_iLoad_bh_i, [],
+ (tLDRBi tGPR:$Rt, t_addrmode_is1:$addr, pred:$p)>;
+def ATOMIC_tLDRBr : tPseudoExpand<(outs tGPR:$Rt),
+ (ins t_addrmode_rrs1:$addr, pred:$p),
+ 2, IIC_iLoad_bh_r, [],
+ (tLDRBr tGPR:$Rt, t_addrmode_rrs1:$addr, pred:$p)>;
+}
+let AM = AddrModeT1_2 in {
+def ATOMIC_tLDRHi : tPseudoExpand<(outs tGPR:$Rt),
+ (ins t_addrmode_is2:$addr, pred:$p),
+ 2, IIC_iLoad_bh_i, [],
+ (tLDRHi tGPR:$Rt, t_addrmode_is2:$addr, pred:$p)>;
+def ATOMIC_tLDRHr : tPseudoExpand<(outs tGPR:$Rt),
+ (ins t_addrmode_rrs2:$addr, pred:$p),
+ 2, IIC_iLoad_bh_r, [],
+ (tLDRHr tGPR:$Rt, t_addrmode_rrs2:$addr, pred:$p)>;
+}
+let AM = AddrModeT1_4 in {
+def ATOMIC_tLDRi : tPseudoExpand<(outs tGPR:$Rt),
+ (ins t_addrmode_is4:$addr, pred:$p),
+ 2, IIC_iLoad_i, [],
+ (tLDRi tGPR:$Rt, t_addrmode_is4:$addr, pred:$p)>;
+def ATOMIC_tLDRr : tPseudoExpand<(outs tGPR:$Rt),
+ (ins t_addrmode_rrs4:$addr, pred:$p),
+ 2, IIC_iLoad_r, [],
+ (tLDRr tGPR:$Rt, t_addrmode_rrs4:$addr, pred:$p)>;
+}
+}
+
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
@@ -1334,17 +1369,17 @@ def : T1Pat<(sextloadi16 t_addrmode_is2:$addr),
(tASRri (tLSLri (tLDRHi t_addrmode_is2:$addr), 16), 16)>;
def : T1Pat<(atomic_load_8 t_addrmode_is1:$src),
- (tLDRBi t_addrmode_is1:$src)>;
+ (ATOMIC_tLDRBi t_addrmode_is1:$src)>;
def : T1Pat<(atomic_load_8 t_addrmode_rrs1:$src),
- (tLDRBr t_addrmode_rrs1:$src)>;
+ (ATOMIC_tLDRBr t_addrmode_rrs1:$src)>;
def : T1Pat<(atomic_load_16 t_addrmode_is2:$src),
- (tLDRHi t_addrmode_is2:$src)>;
+ (ATOMIC_tLDRHi t_addrmode_is2:$src)>;
def : T1Pat<(atomic_load_16 t_addrmode_rrs2:$src),
- (tLDRHr t_addrmode_rrs2:$src)>;
+ (ATOMIC_tLDRHr t_addrmode_rrs2:$src)>;
def : T1Pat<(atomic_load_32 t_addrmode_is4:$src),
- (tLDRi t_addrmode_is4:$src)>;
+ (ATOMIC_tLDRi t_addrmode_is4:$src)>;
def : T1Pat<(atomic_load_32 t_addrmode_rrs4:$src),
- (tLDRr t_addrmode_rrs4:$src)>;
+ (ATOMIC_tLDRr t_addrmode_rrs4:$src)>;
def : T1Pat<(atomic_store_8 t_addrmode_is1:$ptr, tGPR:$val),
(tSTRBi tGPR:$val, t_addrmode_is1:$ptr)>;
def : T1Pat<(atomic_store_8 t_addrmode_rrs1:$ptr, tGPR:$val),
diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td
index dbb8ffcb08..c039caa16b 100644
--- a/lib/Target/ARM/ARMInstrThumb2.td
+++ b/lib/Target/ARM/ARMInstrThumb2.td
@@ -1590,6 +1590,46 @@ defm t2PLD : T2Ipl<0, 0, "pld">, Requires<[IsThumb2]>;
defm t2PLDW : T2Ipl<1, 0, "pldw">, Requires<[IsThumb2,HasV7,HasMP]>;
defm t2PLI : T2Ipl<0, 1, "pli">, Requires<[IsThumb2,HasV7]>;
+// Pseudos for atomic loads. Setting mayStore prevents reordering.
+let mayLoad = 1, mayStore = 1, hasSideEffects = 0 in {
+def ATOMIC_t2LDRBi12 : t2PseudoExpand<(outs rGPR:$Rt),
+ (ins t2addrmode_imm12:$addr, pred:$p),
+ 4, IIC_iLoad_bh_i, [],
+ (t2LDRBi12 rGPR:$Rt, t2addrmode_imm12:$addr, pred:$p)>;
+def ATOMIC_t2LDRBi8 : t2PseudoExpand<(outs rGPR:$Rt),
+ (ins t2addrmode_negimm8:$addr, pred:$p),
+ 4, IIC_iLoad_bh_i, [],
+ (t2LDRBi8 rGPR:$Rt, t2addrmode_negimm8:$addr, pred:$p)>;
+def ATOMIC_t2LDRBs : t2PseudoExpand<(outs rGPR:$Rt),
+ (ins t2addrmode_so_reg:$addr, pred:$p),
+ 4, IIC_iLoad_bh_si, [],
+ (t2LDRBs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
+def ATOMIC_t2LDRHi12 : t2PseudoExpand<(outs rGPR:$Rt),
+ (ins t2addrmode_imm12:$addr, pred:$p),
+ 4, IIC_iLoad_bh_i, [],
+ (t2LDRHi12 rGPR:$Rt, t2addrmode_imm12:$addr, pred:$p)>;
+def ATOMIC_t2LDRHi8 : t2PseudoExpand<(outs rGPR:$Rt),
+ (ins t2addrmode_negimm8:$addr, pred:$p),
+ 4, IIC_iLoad_bh_i, [],
+ (t2LDRHi8 rGPR:$Rt, t2addrmode_negimm8:$addr, pred:$p)>;
+def ATOMIC_t2LDRHs : t2PseudoExpand<(outs rGPR:$Rt),
+ (ins t2addrmode_so_reg:$addr, pred:$p),
+ 4, IIC_iLoad_bh_si, [],
+ (t2LDRHs rGPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
+def ATOMIC_t2LDRi12 : t2PseudoExpand<(outs GPR:$Rt),
+ (ins t2addrmode_imm12:$addr, pred:$p),
+ 4, IIC_iLoad_i, [],
+ (t2LDRi12 GPR:$Rt, t2addrmode_imm12:$addr, pred:$p)>;
+def ATOMIC_t2LDRi8 : t2PseudoExpand<(outs GPR:$Rt),
+ (ins t2addrmode_negimm8:$addr, pred:$p),
+ 4, IIC_iLoad_i, [],
+ (t2LDRi8 GPR:$Rt, t2addrmode_negimm8:$addr, pred:$p)>;
+def ATOMIC_t2LDRs : t2PseudoExpand<(outs GPR:$Rt),
+ (ins t2addrmode_so_reg:$addr, pred:$p),
+ 4, IIC_iLoad_si, [],
+ (t2LDRs GPR:$Rt, t2addrmode_so_reg:$addr, pred:$p)>;
+}
+
//===----------------------------------------------------------------------===//
// Load / store multiple Instructions.
//
@@ -3968,24 +4008,24 @@ def : T2Pat<(add rGPR:$Rn, (sext_inreg rGPR:$Rm, i16)),
Requires<[HasT2ExtractPack, IsThumb2]>;
// Atomic load/store patterns
-def : T2Pat<(atomic_load_8 t2addrmode_imm12:$addr),
- (t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_8 t2addrmode_negimm8:$addr),
- (t2LDRBi8 t2addrmode_negimm8:$addr)>;
+def : T2Pat<(atomic_load_8 t2addrmode_imm12:$addr),
+ (ATOMIC_t2LDRBi12 t2addrmode_imm12:$addr)>;
+def : T2Pat<(atomic_load_8 t2addrmode_negimm8:$addr),
+ (ATOMIC_t2LDRBi8 t2addrmode_negimm8:$addr)>;
def : T2Pat<(atomic_load_8 t2addrmode_so_reg:$addr),
- (t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_imm12:$addr),
- (t2LDRHi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_negimm8:$addr),
- (t2LDRHi8 t2addrmode_negimm8:$addr)>;
+ (ATOMIC_t2LDRBs t2addrmode_so_reg:$addr)>;
+def : T2Pat<(atomic_load_16 t2addrmode_imm12:$addr),
+ (ATOMIC_t2LDRHi12 t2addrmode_imm12:$addr)>;
+def : T2Pat<(atomic_load_16 t2addrmode_negimm8:$addr),
+ (ATOMIC_t2LDRHi8 t2addrmode_negimm8:$addr)>;
def : T2Pat<(atomic_load_16 t2addrmode_so_reg:$addr),
- (t2LDRHs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(atomic_load_32 t2addrmode_imm12:$addr),
- (t2LDRi12 t2addrmode_imm12:$addr)>;
+ (ATOMIC_t2LDRHs t2addrmode_so_reg:$addr)>;
+def : T2Pat<(atomic_load_32 t2addrmode_imm12:$addr),
+ (ATOMIC_t2LDRi12 t2addrmode_imm12:$addr)>;
def : T2Pat<(atomic_load_32 t2addrmode_negimm8:$addr),
- (t2LDRi8 t2addrmode_negimm8:$addr)>;
+ (ATOMIC_t2LDRi8 t2addrmode_negimm8:$addr)>;
def : T2Pat<(atomic_load_32 t2addrmode_so_reg:$addr),
- (t2LDRs t2addrmode_so_reg:$addr)>;
+ (ATOMIC_t2LDRs t2addrmode_so_reg:$addr)>;
def : T2Pat<(atomic_store_8 t2addrmode_imm12:$addr, GPR:$val),
(t2STRBi12 GPR:$val, t2addrmode_imm12:$addr)>;
def : T2Pat<(atomic_store_8 t2addrmode_negimm8:$addr, GPR:$val),
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index f18f491f49..796927cac5 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -114,6 +114,22 @@ namespace {
{ ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1 },
{ ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1 },
{ ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1 },
+
+ // At this point it is safe to translate acquire loads to normal loads.
+ // There is no risk of reordering loads.
+ { ARM::ATOMIC_t2LDRi12,
+ ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1 },
+ { ARM::ATOMIC_t2LDRs,
+ ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1 },
+ { ARM::ATOMIC_t2LDRBi12,
+ ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1 },
+ { ARM::ATOMIC_t2LDRBs,
+ ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1 },
+ { ARM::ATOMIC_t2LDRHi12,
+ ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1 },
+ { ARM::ATOMIC_t2LDRHs,
+ ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1 },
+
{ ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1 },
{ ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1 },
{ ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1 },
@@ -341,7 +357,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
switch (Entry.WideOpc) {
default:
llvm_unreachable("Unexpected Thumb2 load / store opcode!");
- case ARM::t2LDRi12:
+ case ARM::t2LDRi12: case ARM::ATOMIC_t2LDRi12:
case ARM::t2STRi12:
if (MI->getOperand(1).getReg() == ARM::SP) {
Opc = Entry.NarrowOpc2;
@@ -353,7 +369,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
HasImmOffset = true;
HasOffReg = false;
break;
- case ARM::t2LDRBi12:
+ case ARM::t2LDRBi12: case ARM::ATOMIC_t2LDRBi12:
case ARM::t2STRBi12:
HasImmOffset = true;
HasOffReg = false;
@@ -364,9 +380,9 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
HasImmOffset = true;
HasOffReg = false;
break;
- case ARM::t2LDRs:
- case ARM::t2LDRBs:
- case ARM::t2LDRHs:
+ case ARM::t2LDRs: case ARM::ATOMIC_t2LDRs:
+ case ARM::t2LDRBs: case ARM::ATOMIC_t2LDRBs:
+ case ARM::t2LDRHs: case ARM::ATOMIC_t2LDRHs:
case ARM::t2LDRSBs:
case ARM::t2LDRSHs:
case ARM::t2STRs: