author     JF Bastien <jfb@google.com>    2013-06-07 20:10:37 +0000
committer  JF Bastien <jfb@google.com>    2013-06-07 20:10:37 +0000
commit     8fc760cbe8d42e788f29b4a21537bc5e25d5ffa3 (patch)
tree       dcecbe4ed5eb38c277f470c5ecbdbad3ff116f2b /test/CodeGen
parent     1983a4cbf112c4f97fb332a4068aa42a9284cad1 (diff)
ARM FastISel integer sext/zext improvements
My recent ARM FastISel patch exposed this bug: http://llvm.org/bugs/show_bug.cgi?id=16178

The root cause is that FastISel can't select integer sext/zext pre-ARMv6 and asserts out. The current integer sext/zext code doesn't handle other cases gracefully either, so this patch makes it handle all sext and zext from i1/i8/i16 to i8/i16/i32, with and without ARMv6, in both Thumb and ARM mode. This should fix the bug as well as make FastISel faster, because it bails to SelectionDAG less often. See fastisel-ext.patch for this.

fastisel-ext-tests.patch changes the current tests to always use a reg-imm AND for 8-bit zext instead of UXTB. This simplifies the code since AND is supported on ARMv4t and later, and at least on A15 both should perform exactly the same (both have exec 1, uop 1, type I).

2013-05-31-char-shift-crash.ll is a bitcode version of the bug 16178 repro above. fast-isel-ext.ll tests all sext/zext combinations that ARM FastISel should now handle.

Note that my ARM FastISel enabling patch was reverted due to a separate failure when dealing with MCJIT; I'll fix that second failure and then turn FastISel on again for non-iOS ARM targets.

I've tested "make check-all" on my x86 box and "lnt test-suite" on A15 hardware.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@183551 91177308-0d34-0410-b5e6-96231b3b80d8
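As an aid to reading the FileCheck updates below, the lowering strategy that the new fast-isel-ext.ll tests encode can be sketched roughly as follows. This is an illustrative C++ sketch only, not the actual ARMFastISel::ARMEmitIntExt change from fastisel-ext.patch: the chooseIntExt helper, the ExtLowering struct, and the HasV6Ops flag are hypothetical names, and the real code also has to deal with Thumb encodings, register classes, and instruction emission.

// Illustrative sketch only -- not the real ARMFastISel::ARMEmitIntExt.
// It mirrors what the new fast-isel-ext.ll tests expect:
//   zext i1/i8  -> single reg-imm AND (#1 / #255), available since ARMv4t
//   zext i16    -> UXTH on ARMv6+, otherwise LSL #16 then LSR #16
//   sext i1     -> LSL #31 then ASR #31 on every architecture
//   sext i8/i16 -> SXTB/SXTH on ARMv6+, otherwise LSL then ASR
struct ExtLowering {
  bool UseShiftPair;  // emit shift-left followed by shift-right
  unsigned ShiftAmt;  // shift amount for both shifts when UseShiftPair is set
  unsigned AndImm;    // nonzero: emit a single reg-imm AND with this mask
};

static ExtLowering chooseIntExt(unsigned SrcBits, bool IsZExt, bool HasV6Ops) {
  ExtLowering L = {false, 0, 0};
  if (IsZExt && SrcBits <= 8) {
    L.AndImm = (1u << SrcBits) - 1;       // #1 for i1, #255 for i8
  } else if (IsZExt) {                    // zext i16
    if (!HasV6Ops) {                      // pre-ARMv6 has no UXTH
      L.UseShiftPair = true;
      L.ShiftAmt = 16;
    }                                     // else: single UXTH
  } else if (SrcBits == 1 || !HasV6Ops) { // sext i1, or any sext pre-ARMv6
    L.UseShiftPair = true;
    L.ShiftAmt = 32 - SrcBits;            // #31 for i1, #24 for i8, #16 for i16
  }                                       // else: single SXTB or SXTH
  return L;
}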
Diffstat (limited to 'test/CodeGen')
-rw-r--r--  test/CodeGen/ARM/2013-05-31-char-shift-crash.ll    21
-rw-r--r--  test/CodeGen/ARM/fast-isel-call.ll                 28
-rw-r--r--  test/CodeGen/ARM/fast-isel-conversion.ll            8
-rw-r--r--  test/CodeGen/ARM/fast-isel-ext.ll                 134
-rw-r--r--  test/CodeGen/ARM/fast-isel-fold.ll                  4
-rw-r--r--  test/CodeGen/ARM/fast-isel-icmp.ll                  8
-rw-r--r--  test/CodeGen/ARM/fast-isel-intrinsic.ll             4
-rw-r--r--  test/CodeGen/ARM/fast-isel-ret.ll                   2
-rw-r--r--  test/CodeGen/ARM/fast-isel.ll                       8
9 files changed, 188 insertions, 29 deletions
diff --git a/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll b/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll
new file mode 100644
index 0000000000..0130f7ab68
--- /dev/null
+++ b/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll
@@ -0,0 +1,21 @@
+; RUN: llc < %s -O0 -mtriple=armv4t--linux-eabi-android
+; RUN: llc < %s -O0 -mtriple=armv4t-unknown-linux
+; RUN: llc < %s -O0 -mtriple=armv5-unknown-linux
+
+; See http://llvm.org/bugs/show_bug.cgi?id=16178
+; ARMFastISel used to fail emitting sext/zext in pre-ARMv6.
+
+; Function Attrs: nounwind
+define arm_aapcscc void @f2(i8 signext %a) #0 {
+entry:
+ %a.addr = alloca i8, align 1
+ store i8 %a, i8* %a.addr, align 1
+ %0 = load i8* %a.addr, align 1
+ %conv = sext i8 %0 to i32
+ %shr = ashr i32 %conv, 56
+ %conv1 = trunc i32 %shr to i8
+ call arm_aapcscc void @f1(i8 signext %conv1)
+ ret void
+}
+
+declare arm_aapcscc void @f1(i8 signext) #1
diff --git a/test/CodeGen/ARM/fast-isel-call.ll b/test/CodeGen/ARM/fast-isel-call.ll
index 6ee2c349ab..f4a5d99c15 100644
--- a/test/CodeGen/ARM/fast-isel-call.ll
+++ b/test/CodeGen/ARM/fast-isel-call.ll
@@ -48,9 +48,9 @@ define void @foo(i8 %a, i16 %b) nounwind {
; THUMB: sxtb r2, r1
; THUMB: mov r0, r2
%2 = call i32 @t1(i8 signext %a)
-; ARM: uxtb r2, r1
+; ARM: and r2, r1, #255
; ARM: mov r0, r2
-; THUMB: uxtb r2, r1
+; THUMB: and r2, r1, #255
; THUMB: mov r0, r2
%3 = call i32 @t2(i8 zeroext %a)
; ARM: sxth r2, r1
@@ -98,13 +98,13 @@ entry:
; ARM: movw [[R3:l?r[0-9]*]], #28
; ARM: movw [[R4:l?r[0-9]*]], #40
; ARM: movw [[R5:l?r[0-9]*]], #186
-; ARM: uxtb [[R0]], [[R0]]
-; ARM: uxtb [[R1]], [[R1]]
-; ARM: uxtb [[R2]], [[R2]]
-; ARM: uxtb [[R3]], [[R3]]
-; ARM: uxtb [[R4]], [[R4]]
+; ARM: and [[R0]], [[R0]], #255
+; ARM: and [[R1]], [[R1]], #255
+; ARM: and [[R2]], [[R2]], #255
+; ARM: and [[R3]], [[R3]], #255
+; ARM: and [[R4]], [[R4]], #255
; ARM: str [[R4]], [sp]
-; ARM: uxtb [[R4]], [[R5]]
+; ARM: and [[R4]], [[R5]], #255
; ARM: str [[R4]], [sp, #4]
; ARM: bl {{_?}}bar
; ARM-LONG: @t10
@@ -125,13 +125,13 @@ entry:
; THUMB: movt [[R4]], #0
; THUMB: movw [[R5:l?r[0-9]*]], #186
; THUMB: movt [[R5]], #0
-; THUMB: uxtb [[R0]], [[R0]]
-; THUMB: uxtb [[R1]], [[R1]]
-; THUMB: uxtb [[R2]], [[R2]]
-; THUMB: uxtb [[R3]], [[R3]]
-; THUMB: uxtb.w [[R4]], [[R4]]
+; THUMB: and [[R0]], [[R0]], #255
+; THUMB: and [[R1]], [[R1]], #255
+; THUMB: and [[R2]], [[R2]], #255
+; THUMB: and [[R3]], [[R3]], #255
+; THUMB: and [[R4]], [[R4]], #255
; THUMB: str.w [[R4]], [sp]
-; THUMB: uxtb.w [[R4]], [[R5]]
+; THUMB: and [[R4]], [[R5]], #255
; THUMB: str.w [[R4]], [sp, #4]
; THUMB: bl {{_?}}bar
; THUMB-LONG: @t10
diff --git a/test/CodeGen/ARM/fast-isel-conversion.ll b/test/CodeGen/ARM/fast-isel-conversion.ll
index 686ccad029..94654f39d1 100644
--- a/test/CodeGen/ARM/fast-isel-conversion.ll
+++ b/test/CodeGen/ARM/fast-isel-conversion.ll
@@ -130,11 +130,11 @@ entry:
define void @uitofp_single_i8(i8 %a) nounwind ssp {
entry:
; ARM: uitofp_single_i8
-; ARM: uxtb r0, r0
+; ARM: and r0, r0, #255
; ARM: vmov s0, r0
; ARM: vcvt.f32.u32 s0, s0
; THUMB: uitofp_single_i8
-; THUMB: uxtb r0, r0
+; THUMB: and r0, r0, #255
; THUMB: vmov s0, r0
; THUMB: vcvt.f32.u32 s0, s0
%b.addr = alloca float, align 4
@@ -176,11 +176,11 @@ entry:
define void @uitofp_double_i8(i8 %a, double %b) nounwind ssp {
entry:
; ARM: uitofp_double_i8
-; ARM: uxtb r0, r0
+; ARM: and r0, r0, #255
; ARM: vmov s0, r0
; ARM: vcvt.f64.u32 d16, s0
; THUMB: uitofp_double_i8
-; THUMB: uxtb r0, r0
+; THUMB: and r0, r0, #255
; THUMB: vmov s0, r0
; THUMB: vcvt.f64.u32 d16, s0
%b.addr = alloca double, align 8
diff --git a/test/CodeGen/ARM/fast-isel-ext.ll b/test/CodeGen/ARM/fast-isel-ext.ll
new file mode 100644
index 0000000000..cb6e9ba1a1
--- /dev/null
+++ b/test/CodeGen/ARM/fast-isel-ext.ll
@@ -0,0 +1,134 @@
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=v7
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv4t-apple-ios | FileCheck %s --check-prefix=prev6
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=armv5-apple-ios | FileCheck %s --check-prefix=prev6
+; RUN: llc < %s -O0 -fast-isel-abort -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=v7
+
+; Can't test pre-ARMv6 Thumb because ARM FastISel currently only supports
+; Thumb2. The ARMFastISel::ARMEmitIntExt code should work for Thumb by always
+; using two shifts.
+
+; Note that lsl, asr and lsr in Thumb are all encoded as 16-bit instructions
+; and therefore must set flags. {{s?}} below denotes this, instead of
+; duplicating tests.
+
+; zext
+
+define i8 @zext_1_8(i1 %a) nounwind ssp {
+; v7: zext_1_8:
+; v7: and r0, r0, #1
+; prev6: zext_1_8:
+; prev6: and r0, r0, #1
+ %r = zext i1 %a to i8
+ ret i8 %r
+}
+
+define i16 @zext_1_16(i1 %a) nounwind ssp {
+; v7: zext_1_16:
+; v7: and r0, r0, #1
+; prev6: zext_1_16:
+; prev6: and r0, r0, #1
+ %r = zext i1 %a to i16
+ ret i16 %r
+}
+
+define i32 @zext_1_32(i1 %a) nounwind ssp {
+; v7: zext_1_32:
+; v7: and r0, r0, #1
+; prev6: zext_1_32:
+; prev6: and r0, r0, #1
+ %r = zext i1 %a to i32
+ ret i32 %r
+}
+
+define i16 @zext_8_16(i8 %a) nounwind ssp {
+; v7: zext_8_16:
+; v7: and r0, r0, #255
+; prev6: zext_8_16:
+; prev6: and r0, r0, #255
+ %r = zext i8 %a to i16
+ ret i16 %r
+}
+
+define i32 @zext_8_32(i8 %a) nounwind ssp {
+; v7: zext_8_32:
+; v7: and r0, r0, #255
+; prev6: zext_8_32:
+; prev6: and r0, r0, #255
+ %r = zext i8 %a to i32
+ ret i32 %r
+}
+
+define i32 @zext_16_32(i16 %a) nounwind ssp {
+; v7: zext_16_32:
+; v7: uxth r0, r0
+; prev6: zext_16_32:
+; prev6: lsl{{s?}} r0, r0, #16
+; prev6: lsr{{s?}} r0, r0, #16
+ %r = zext i16 %a to i32
+ ret i32 %r
+}
+
+; sext
+
+define i8 @sext_1_8(i1 %a) nounwind ssp {
+; v7: sext_1_8:
+; v7: lsl{{s?}} r0, r0, #31
+; v7: asr{{s?}} r0, r0, #31
+; prev6: sext_1_8:
+; prev6: lsl{{s?}} r0, r0, #31
+; prev6: asr{{s?}} r0, r0, #31
+ %r = sext i1 %a to i8
+ ret i8 %r
+}
+
+define i16 @sext_1_16(i1 %a) nounwind ssp {
+; v7: sext_1_16:
+; v7: lsl{{s?}} r0, r0, #31
+; v7: asr{{s?}} r0, r0, #31
+; prev6: sext_1_16:
+; prev6: lsl{{s?}} r0, r0, #31
+; prev6: asr{{s?}} r0, r0, #31
+ %r = sext i1 %a to i16
+ ret i16 %r
+}
+
+define i32 @sext_1_32(i1 %a) nounwind ssp {
+; v7: sext_1_32:
+; v7: lsl{{s?}} r0, r0, #31
+; v7: asr{{s?}} r0, r0, #31
+; prev6: sext_1_32:
+; prev6: lsl{{s?}} r0, r0, #31
+; prev6: asr{{s?}} r0, r0, #31
+ %r = sext i1 %a to i32
+ ret i32 %r
+}
+
+define i16 @sext_8_16(i8 %a) nounwind ssp {
+; v7: sext_8_16:
+; v7: sxtb r0, r0
+; prev6: sext_8_16:
+; prev6: lsl{{s?}} r0, r0, #24
+; prev6: asr{{s?}} r0, r0, #24
+ %r = sext i8 %a to i16
+ ret i16 %r
+}
+
+define i32 @sext_8_32(i8 %a) nounwind ssp {
+; v7: sext_8_32:
+; v7: sxtb r0, r0
+; prev6: sext_8_32:
+; prev6: lsl{{s?}} r0, r0, #24
+; prev6: asr{{s?}} r0, r0, #24
+ %r = sext i8 %a to i32
+ ret i32 %r
+}
+
+define i32 @sext_16_32(i16 %a) nounwind ssp {
+; v7: sext_16_32:
+; v7: sxth r0, r0
+; prev6: sext_16_32:
+; prev6: lsl{{s?}} r0, r0, #16
+; prev6: asr{{s?}} r0, r0, #16
+ %r = sext i16 %a to i32
+ ret i32 %r
+}
diff --git a/test/CodeGen/ARM/fast-isel-fold.ll b/test/CodeGen/ARM/fast-isel-fold.ll
index 7a65295f01..c920f9b5e7 100644
--- a/test/CodeGen/ARM/fast-isel-fold.ll
+++ b/test/CodeGen/ARM/fast-isel-fold.ll
@@ -8,9 +8,11 @@ define void @t1() nounwind uwtable ssp {
; ARM: t1
; ARM: ldrb
; ARM-NOT: uxtb
+; ARM-NOT: and{{.*}}, #255
; THUMB: t1
; THUMB: ldrb
; THUMB-NOT: uxtb
+; THUMB-NOT: and{{.*}}, #255
%1 = load i8* @a, align 1
call void @foo1(i8 zeroext %1)
ret void
@@ -35,9 +37,11 @@ define i32 @t3() nounwind uwtable ssp {
; ARM: t3
; ARM: ldrb
; ARM-NOT: uxtb
+; ARM-NOT: and{{.*}}, #255
; THUMB: t3
; THUMB: ldrb
; THUMB-NOT: uxtb
+; THUMB-NOT: and{{.*}}, #255
%1 = load i8* @a, align 1
%2 = zext i8 %1 to i32
ret i32 %2
diff --git a/test/CodeGen/ARM/fast-isel-icmp.ll b/test/CodeGen/ARM/fast-isel-icmp.ll
index 8357ed5c54..9d4beb6eeb 100644
--- a/test/CodeGen/ARM/fast-isel-icmp.ll
+++ b/test/CodeGen/ARM/fast-isel-icmp.ll
@@ -49,12 +49,12 @@ entry:
define i32 @icmp_i8_unsigned(i8 %a, i8 %b) nounwind {
entry:
; ARM: icmp_i8_unsigned
-; ARM: uxtb r0, r0
-; ARM: uxtb r1, r1
+; ARM: and r0, r0, #255
+; ARM: and r1, r1, #255
; ARM: cmp r0, r1
; THUMB: icmp_i8_unsigned
-; THUMB: uxtb r0, r0
-; THUMB: uxtb r1, r1
+; THUMB: and r0, r0, #255
+; THUMB: and r1, r1, #255
; THUMB: cmp r0, r1
%cmp = icmp ugt i8 %a, %b
%conv2 = zext i1 %cmp to i32
diff --git a/test/CodeGen/ARM/fast-isel-intrinsic.ll b/test/CodeGen/ARM/fast-isel-intrinsic.ll
index bc9769a537..d2d208b3cf 100644
--- a/test/CodeGen/ARM/fast-isel-intrinsic.ll
+++ b/test/CodeGen/ARM/fast-isel-intrinsic.ll
@@ -17,7 +17,7 @@ define void @t1() nounwind ssp {
; ARM: add r0, r0, #5
; ARM: movw r1, #64
; ARM: movw r2, #10
-; ARM: uxtb r1, r1
+; ARM: and r1, r1, #255
; ARM: bl {{_?}}memset
; ARM-LONG: t1
; ARM-LONG: movw r3, :lower16:L_memset$non_lazy_ptr
@@ -32,7 +32,7 @@ define void @t1() nounwind ssp {
; THUMB: movt r1, #0
; THUMB: movs r2, #10
; THUMB: movt r2, #0
-; THUMB: uxtb r1, r1
+; THUMB: and r1, r1, #255
; THUMB: bl {{_?}}memset
; THUMB-LONG: t1
; THUMB-LONG: movw r3, :lower16:L_memset$non_lazy_ptr
diff --git a/test/CodeGen/ARM/fast-isel-ret.ll b/test/CodeGen/ARM/fast-isel-ret.ll
index 689b169ee3..4091dc6f6b 100644
--- a/test/CodeGen/ARM/fast-isel-ret.ll
+++ b/test/CodeGen/ARM/fast-isel-ret.ll
@@ -26,7 +26,7 @@ entry:
define zeroext i8 @ret3(i8 signext %a) nounwind uwtable ssp {
entry:
; CHECK: ret3
-; CHECK: uxtb r0, r0
+; CHECK: and r0, r0, #255
; CHECK: bx lr
ret i8 %a
}
diff --git a/test/CodeGen/ARM/fast-isel.ll b/test/CodeGen/ARM/fast-isel.ll
index c4274c5eb5..f877e78c6e 100644
--- a/test/CodeGen/ARM/fast-isel.ll
+++ b/test/CodeGen/ARM/fast-isel.ll
@@ -80,12 +80,12 @@ bb1:
; THUMB: and
; THUMB: strb
-; THUMB: uxtb
+; THUMB: and{{.*}}, #255
; THUMB: strh
; THUMB: uxth
; ARM: and
; ARM: strb
-; ARM: uxtb
+; ARM: and{{.*}}, #255
; ARM: strh
; ARM: uxth
@@ -121,13 +121,13 @@ bb3:
; THUMB: ldrb
; THUMB: ldrh
-; THUMB: uxtb
+; THUMB: and{{.*}}, #255
; THUMB: sxth
; THUMB: add
; THUMB: sub
; ARM: ldrb
; ARM: ldrh
-; ARM: uxtb
+; ARM: and{{.*}}, #255
; ARM: sxth
; ARM: add
; ARM: sub