author     Chad Rosier <mcrosier@apple.com>   2011-11-14 04:09:28 +0000
committer  Chad Rosier <mcrosier@apple.com>   2011-11-14 04:09:28 +0000
commit     dc9205d9c29171f1ddcf2de7eb172a583cadbe63
tree       17d30e2043fd9be771f4691cbc78ec2838d55d18
parent     194eb71a11a77c7fb576780783a77e64924dfb10
Add support for ARM halfword load/stores and signed byte loads with negative
offsets.

rdar://10412592

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144518 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/Target/ARM/ARMFastISel.cpp                23
-rw-r--r--  test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll   55
2 files changed, 48 insertions(+), 30 deletions(-)
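For context before the diff: the patch folds a negative offset straight into the addrmode3 immediate operand, where bit 8 marks subtraction and bits 0-7 hold the magnitude, so the printer can emit forms like ldrh r0, [r0, #-16]. Below is a minimal standalone sketch of that packing; packAM3Offset and the small driver are illustrative names only, not LLVM code (the patch itself inlines the expression in AddLoadStoreOperands).

#include <cassert>
#include <cstdio>

// Pack a halfword/signed-byte offset in [-255, 255] into the addrmode3
// immediate: bit 8 selects subtraction, bits 0-7 carry the magnitude.
static unsigned packAM3Offset(int Offset) {
  assert(Offset >= -255 && Offset <= 255 && "offset must fit in +/-imm8");
  return Offset < 0 ? (0x100u | static_cast<unsigned>(-Offset))
                    : static_cast<unsigned>(Offset);
}

int main() {
  std::printf("%#x\n", packAM3Offset(-16)); // 0x110 -> printed as [rN, #-16]
  std::printf("%#x\n", packAM3Offset(255)); // 0xff  -> printed as [rN, #255]
  return 0;
}

Offsets outside that range (see t15 below, offset -256) still fall back to materializing the address with mvn/add.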
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index d330367125..432abb500c 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -875,8 +875,7 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
         needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
       else
         // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
-        // FIXME: Negative offsets require special handling.
-        needsLowering = (Addr.Offset > 255 || Addr.Offset < 0);
+        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
       break;
     case MVT::f32:
     case MVT::f64:
@@ -933,18 +932,26 @@ void ARMFastISel::AddLoadStoreOperands(EVT VT, Address &Addr,
     MIB.addFrameIndex(FI);
 
     // ARM halfword load/stores and signed byte loads need an additional operand.
-    if (useAM3) MIB.addReg(0);
-
-    MIB.addImm(Addr.Offset);
+    if (useAM3) {
+      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+      MIB.addReg(0);
+      MIB.addImm(Imm);
+    } else {
+      MIB.addImm(Addr.Offset);
+    }
     MIB.addMemOperand(MMO);
   } else {
     // Now add the rest of the operands.
     MIB.addReg(Addr.Base.Reg);
 
     // ARM halfword load/stores and signed byte loads need an additional operand.
-    if (useAM3) MIB.addReg(0);
-
-    MIB.addImm(Addr.Offset);
+    if (useAM3) {
+      signed Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
+      MIB.addReg(0);
+      MIB.addImm(Imm);
+    } else {
+      MIB.addImm(Addr.Offset);
+    }
   }
   AddOptionalDefs(MIB);
 }
diff --git a/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll b/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
index 62c6e0ce4d..dcfc9d0ea5 100644
--- a/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
+++ b/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
@@ -1,48 +1,33 @@
 ; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-darwin | FileCheck %s --check-prefix=ARM
 ; rdar://10418009
 
-; TODO: We currently don't support ldrh/strh for negative offsets. Likely a
-; rare case, but possibly worth pursuing. Comments above the test case show
-; what could be selected.
-
-; ldrh r0, [r0, #-16]
 define zeroext i16 @t1(i16* nocapture %a) nounwind uwtable readonly ssp {
 entry:
 ; ARM: t1
   %add.ptr = getelementptr inbounds i16* %a, i64 -8
   %0 = load i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #15
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: ldrh r0, [r0]
+; ARM: ldrh r0, [r0, #-16]
   ret i16 %0
 }
 
-; ldrh r0, [r0, #-32]
 define zeroext i16 @t2(i16* nocapture %a) nounwind uwtable readonly ssp {
 entry:
 ; ARM: t2
   %add.ptr = getelementptr inbounds i16* %a, i64 -16
   %0 = load i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #31
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: ldrh r0, [r0]
+; ARM: ldrh r0, [r0, #-32]
   ret i16 %0
 }
 
-; ldrh r0, [r0, #-254]
 define zeroext i16 @t3(i16* nocapture %a) nounwind uwtable readonly ssp {
 entry:
 ; ARM: t3
   %add.ptr = getelementptr inbounds i16* %a, i64 -127
   %0 = load i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #253
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: ldrh r0, [r0]
+; ARM: ldrh r0, [r0, #-254]
   ret i16 %0
 }
 
-; mvn r1, #255
-; ldrh r0, [r0, r1]
 define zeroext i16 @t4(i16* nocapture %a) nounwind uwtable readonly ssp {
 entry:
 ; ARM: t4
@@ -91,15 +76,12 @@ entry:
   ret i16 %0
 }
 
-; strh r1, [r0, #-16]
 define void @t9(i16* nocapture %a) nounwind uwtable ssp {
 entry:
 ; ARM: t9
   %add.ptr = getelementptr inbounds i16* %a, i64 -8
   store i16 0, i16* %add.ptr, align 2
-; ARM: mvn r{{[1-9]}}, #15
-; ARM: add r0, r0, r{{[1-9]}}
-; ARM: strh r{{[1-9]}}, [r0]
+; ARM: strh r1, [r0, #-16]
   ret void
 }
 
@@ -136,3 +118,32 @@ entry:
 ; ARM: strh r{{[1-9]}}, [r0]
   ret void
 }
+
+define signext i8 @t13(i8* nocapture %a) nounwind uwtable readonly ssp {
+entry:
+; ARM: t13
+  %add.ptr = getelementptr inbounds i8* %a, i64 -8
+  %0 = load i8* %add.ptr, align 2
+; ARM: ldrsb r0, [r0, #-8]
+  ret i8 %0
+}
+
+define signext i8 @t14(i8* nocapture %a) nounwind uwtable readonly ssp {
+entry:
+; ARM: t14
+  %add.ptr = getelementptr inbounds i8* %a, i64 -255
+  %0 = load i8* %add.ptr, align 2
+; ARM: ldrsb r0, [r0, #-255]
+  ret i8 %0
+}
+
+define signext i8 @t15(i8* nocapture %a) nounwind uwtable readonly ssp {
+entry:
+; ARM: t15
+  %add.ptr = getelementptr inbounds i8* %a, i64 -256
+  %0 = load i8* %add.ptr, align 2
+; ARM: mvn r{{[1-9]}}, #255
+; ARM: add r0, r0, r{{[1-9]}}
+; ARM: ldrsb r0, [r0]
+  ret i8 %0
+}
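
The t15 case deliberately sits one past the +/-imm8 range, so fast-isel still materializes the offset with mvn/add before a plain ldrsb. For reference, a minimal sketch of that range decision, mirroring the ARMSimplifyAddress hunk above; offsetNeedsLowering is a hypothetical name, not the LLVM API.

// Returns true when the offset cannot be encoded directly and the address
// must be lowered into a register first.
static bool offsetNeedsLowering(int Offset, bool useAM3) {
  if (!useAM3)
    return (Offset & 0xfff) != Offset;   // plain integer loads/stores: 12-bit offsets
  return Offset > 255 || Offset < -255;  // ldrh/strh/ldrsb: +/-imm8 offsets
}
// offsetNeedsLowering(-255, true) == false -> t14 folds the offset into the ldrsb
// offsetNeedsLowering(-256, true) == true  -> t15 keeps the mvn/add sequence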