summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChad Rosier <mcrosier@apple.com>2011-11-14 22:34:48 +0000
committerChad Rosier <mcrosier@apple.com>2011-11-14 22:34:48 +0000
commite489af8dce12249be26ac0c8e371557378886bc2 (patch)
treef37a03ac917deddae57256542e1e9deeee89872f
parent02e3d9268fe456ebe4fe6ae277507bb7933ec3df (diff)
downloadllvm-e489af8dce12249be26ac0c8e371557378886bc2.tar.gz
llvm-e489af8dce12249be26ac0c8e371557378886bc2.tar.bz2
llvm-e489af8dce12249be26ac0c8e371557378886bc2.tar.xz
Fix a performance regression from r144565. Positive offsets were being lowered
into registers, rather than encoded directly in the load/store. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@144576 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--lib/Target/ARM/ARMFastISel.cpp6
-rw-r--r--test/CodeGen/ARM/fast-isel-ldr-str-arm.ll55
2 files changed, 58 insertions, 3 deletions
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 81a93b1ed3..0b728a94fa 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -874,9 +874,9 @@ void ARMFastISel::ARMSimplifyAddress(Address &Addr, EVT VT, bool useAM3) {
// Integer loads/stores handle 12-bit offsets.
needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
// Handle negative offsets.
- if (isThumb2)
- needsLowering = !(needsLowering && Subtarget->hasV6T2Ops() &&
- Addr.Offset < 0 && Addr.Offset > -256);
+ if (needsLowering && isThumb2)
+ needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
+ Addr.Offset > -256);
} else {
// ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
diff --git a/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll b/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
new file mode 100644
index 0000000000..e9532d61bd
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
@@ -0,0 +1,55 @@
+; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-darwin | FileCheck %s --check-prefix=ARM
+
+define i32 @t1(i32* nocapture %ptr) nounwind readonly {
+entry:
+; ARM: t1
+ %add.ptr = getelementptr inbounds i32* %ptr, i32 1
+ %0 = load i32* %add.ptr, align 4
+; ARM: ldr r{{[0-9]}}, [r0, #4]
+ ret i32 %0
+}
+
+define i32 @t2(i32* nocapture %ptr) nounwind readonly {
+entry:
+; ARM: t2
+ %add.ptr = getelementptr inbounds i32* %ptr, i32 63
+ %0 = load i32* %add.ptr, align 4
+; ARM: ldr.w r{{[0-9]}}, [r0, #252]
+ ret i32 %0
+}
+
+define zeroext i16 @t3(i16* nocapture %ptr) nounwind readonly {
+entry:
+; ARM: t3
+ %add.ptr = getelementptr inbounds i16* %ptr, i16 1
+ %0 = load i16* %add.ptr, align 4
+; ARM: ldrh r{{[0-9]}}, [r0, #2]
+ ret i16 %0
+}
+
+define zeroext i16 @t4(i16* nocapture %ptr) nounwind readonly {
+entry:
+; ARM: t4
+ %add.ptr = getelementptr inbounds i16* %ptr, i16 63
+ %0 = load i16* %add.ptr, align 4
+; ARM: ldrh.w r{{[0-9]}}, [r0, #126]
+ ret i16 %0
+}
+
+define zeroext i8 @t5(i8* nocapture %ptr) nounwind readonly {
+entry:
+; ARM: t5
+ %add.ptr = getelementptr inbounds i8* %ptr, i8 1
+ %0 = load i8* %add.ptr, align 4
+; ARM: ldrb r{{[0-9]}}, [r0, #1]
+ ret i8 %0
+}
+
+define zeroext i8 @t6(i8* nocapture %ptr) nounwind readonly {
+entry:
+; ARM: t6
+ %add.ptr = getelementptr inbounds i8* %ptr, i8 63
+ %0 = load i8* %add.ptr, align 4
+; ARM: ldrb.w r{{[0-9]}}, [r0, #63]
+ ret i8 %0
+} \ No newline at end of file