Diffstat (limited to 'lib')
 lib/Target/ARM/ARMFastISel.cpp     | 12 ++++++++----
 lib/Target/ARM/ARMISelLowering.cpp |  3 ++-
 2 files changed, 10 insertions(+), 5 deletions(-)
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 6611862ca0..7527c8496d 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -1028,7 +1028,8 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
       RC = &ARM::GPRRegClass;
       break;
     case MVT::i16:
-      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
@@ -1043,7 +1044,8 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
       RC = &ARM::GPRRegClass;
       break;
     case MVT::i32:
-      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
@@ -1152,7 +1154,8 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
       }
       break;
     case MVT::i16:
-      if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
@@ -1166,7 +1169,8 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
       }
       break;
     case MVT::i32:
-      if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+      if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
+                                         TM.Options.StrictAlign))
         return false;
 
       if (isThumb2) {
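
The four ARMFastISel hunks above add the same guard to the i16 and i32 load/store cases. A minimal sketch of that guard as a standalone predicate, using the names from the diff and assuming the TargetOptions::StrictAlign field the patch references; the helper itself is hypothetical and not part of the change:

#include "ARMSubtarget.h"               // ARMSubtarget::allowsUnalignedMem()
#include "llvm/Target/TargetOptions.h"  // assumed to carry StrictAlign, as in the diff

// Sketch only: the condition repeated in ARMEmitLoad/ARMEmitStore above.
// A load/store whose known alignment is below its natural alignment is
// rejected by fast-isel (returning false, so it falls back to SelectionDAG)
// unless the subtarget supports unaligned accesses and strict alignment was
// not requested.
static bool fastISelAllowsMisaligned(unsigned Alignment, unsigned NaturalAlign,
                                     const llvm::ARMSubtarget *Subtarget,
                                     const llvm::TargetOptions &Options) {
  if (Alignment == 0 || Alignment >= NaturalAlign)
    return true; // unknown or sufficient alignment is always acceptable
  return Subtarget->allowsUnalignedMem() && !Options.StrictAlign;
}

In the hunks, NaturalAlign is 2 for MVT::i16 and 4 for MVT::i32, and the bail-out (return false) happens exactly when this predicate would be false.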
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 3b9558bc2a..65cc49e1c3 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9119,7 +9119,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
   // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
-  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
+  bool AllowsUnaligned = Subtarget->allowsUnalignedMem() &&
+                         !getTargetMachine().Options.StrictAlign;
   switch (VT.getSimpleVT().SimpleTy) {
   default:
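
The ARMISelLowering hunk folds the same policy into the AllowsUnaligned flag consulted by the SelectionDAG path. A hedged sketch of how a driver could request strict alignment when creating the target machine, again assuming the TargetOptions::StrictAlign field used by the patch and an LLVM API of roughly this vintage; the triple, CPU, and error handling are illustrative only:

#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <string>

// Sketch only, not part of the patch: build an ARM TargetMachine with strict
// alignment requested, so both the fast-isel checks and
// allowsUnalignedMemoryAccesses() above will reject misaligned accesses.
llvm::TargetMachine *createStrictAlignARMTargetMachine() {
  llvm::InitializeAllTargets();
  llvm::InitializeAllTargetMCs();

  std::string Error;
  const std::string Triple = "armv7-none-linux-gnueabi"; // illustrative triple
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(Triple, Error);
  if (!T)
    return 0;

  llvm::TargetOptions Options;
  Options.StrictAlign = true; // assumed field, as referenced by the patch

  return T->createTargetMachine(Triple, "cortex-a9", "", Options);
}

With StrictAlign set, AllowsUnaligned is false even on cores whose SCTLR.A setting would otherwise permit unaligned accesses, so the SelectionDAG path and the fast-isel path above apply the same policy.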