author     Chad Rosier <mcrosier@apple.com>    2012-11-09 17:29:38 +0000
committer  Chad Rosier <mcrosier@apple.com>    2012-11-09 17:29:38 +0000
commit     d054eda44114df411a2749e7b6b85d27509a0af1 (patch)
tree       8744edd554eca7b5cdd98a3a285432ae748fea26
parent     b754687fd7391213f455ffa52d1bcfbe11052bc0 (diff)
Add support for -mstrict-align compiler option for ARM targets.
rdar://12340498

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167620 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  include/llvm/Target/TargetOptions.h    12
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp          12
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp       3
3 files changed, 18 insertions, 9 deletions
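
As a rough sketch (not part of this patch), the new TargetOptions field could be populated by whatever tool constructs the ARM target machine once the driver sees -mstrict-align. The helper name makeStrictAlignOptions and the parameter WantStrictAlign below are invented for illustration:

#include "llvm/Target/TargetOptions.h"

// Hypothetical helper: request that the ARM backend reject unaligned
// loads/stores. Only the StrictAlign bit-field comes from this commit;
// every other field keeps its TargetOptions default.
static llvm::TargetOptions makeStrictAlignOptions(bool WantStrictAlign) {
  llvm::TargetOptions Opts;
  Opts.StrictAlign = WantStrictAlign ? 1 : 0;
  return Opts;
}

With StrictAlign set, ARMFastISel in the diff below refuses to fast-select i16/i32 loads and stores whose alignment is under 2 and 4 bytes respectively, and ARMTargetLowering::allowsUnalignedMemoryAccesses reports false even when the subtarget's SCTLR.A modeling would otherwise permit unaligned access.
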
diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h
index 68ca567836..75cfa8c59c 100644
--- a/include/llvm/Target/TargetOptions.h
+++ b/include/llvm/Target/TargetOptions.h
@@ -48,10 +48,10 @@ namespace llvm {
UseSoftFloat(false), NoZerosInBSS(false), JITExceptionHandling(false),
JITEmitDebugInfo(false), JITEmitDebugInfoToDisk(false),
GuaranteedTailCallOpt(false), DisableTailCalls(false),
- StackAlignmentOverride(0), RealignStack(true), EnableFastISel(false),
- PositionIndependentExecutable(false), EnableSegmentedStacks(false),
- UseInitArray(false), TrapFuncName(""), FloatABIType(FloatABI::Default),
- AllowFPOpFusion(FPOpFusion::Standard)
+ StackAlignmentOverride(0), RealignStack(true), StrictAlign(false),
+ EnableFastISel(false), PositionIndependentExecutable(false),
+ EnableSegmentedStacks(false), UseInitArray(false), TrapFuncName(""),
+ FloatABIType(FloatABI::Default), AllowFPOpFusion(FPOpFusion::Standard)
{}
/// PrintMachineCode - This flag is enabled when the -print-machineinstrs
@@ -155,6 +155,10 @@ namespace llvm {
/// automatically realigned, if needed.
unsigned RealignStack : 1;
+ /// StrictAlign - This flag indicates that all memory accesses must be
+ /// aligned. (ARM only)
+ unsigned StrictAlign : 1;
+
/// SSPBufferSize - The minimum size of buffers that will receive stack
/// smashing protection when -fstack-protection is used.
unsigned SSPBufferSize;
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 6611862ca0..7527c8496d 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -1028,7 +1028,8 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
RC = &ARM::GPRRegClass;
break;
case MVT::i16:
- if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+ if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
+ TM.Options.StrictAlign))
return false;
if (isThumb2) {
@@ -1043,7 +1044,8 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr,
RC = &ARM::GPRRegClass;
break;
case MVT::i32:
- if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+ if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
+ TM.Options.StrictAlign))
return false;
if (isThumb2) {
@@ -1152,7 +1154,8 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
}
break;
case MVT::i16:
- if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem())
+ if (Alignment && Alignment < 2 && (!Subtarget->allowsUnalignedMem() ||
+ TM.Options.StrictAlign))
return false;
if (isThumb2) {
@@ -1166,7 +1169,8 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr,
}
break;
case MVT::i32:
- if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem())
+ if (Alignment && Alignment < 4 && (!Subtarget->allowsUnalignedMem() ||
+ TM.Options.StrictAlign))
return false;
if (isThumb2) {
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 3b9558bc2a..65cc49e1c3 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -9119,7 +9119,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
// The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus
- bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
+ bool AllowsUnaligned = Subtarget->allowsUnalignedMem() &&
+ !getTargetMachine().Options.StrictAlign;
switch (VT.getSimpleVT().SimpleTy) {
default: