path: root/test/CodeGen/AArch64
author    Tim Northover <Tim.Northover@arm.com>    2013-02-15 09:33:43 +0000
committer Tim Northover <Tim.Northover@arm.com>    2013-02-15 09:33:43 +0000
commit    1e8839302b70d77de63844332bdee9ce7d06f2c9 (patch)
tree      13c018c5dfc7095a77e603f5a5de9db46e28204a /test/CodeGen/AArch64
parent    148ac534fc5592ed7031efde9a577890f078068b (diff)
AArch64: remove ConstantIsland pass & put literals in separate section.
This implements the review suggestion to simplify the AArch64 backend. If we
later discover that we *really* need the extra complexity of the
ConstantIslands pass for performance reasons, it can be resurrected.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@175258 91177308-0d34-0410-b5e6-96231b3b80d8
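In codegen terms, the change swaps the pc-relative literal load, whose limited range is what ConstantIslands existed to manage, for an ADRP plus a :lo12:-offset load against a constant section. A sketch of the before/after pattern, with the register and label chosen purely for illustration:

    ; Before: LDR (literal) addresses the pool pc-relatively, with a
    ; +/-1MiB reach the ConstantIslands pass had to keep within range.
    ldr     q0, .LCPI0_0

    ; After: ADRP materializes the constant's 4KiB page (+/-4GiB reach),
    ; and the load supplies the low 12 bits of its address.
    adrp    x8, .LCPI0_0
    ldr     q0, [x8, #:lo12:.LCPI0_0]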
Diffstat (limited to 'test/CodeGen/AArch64')
-rw-r--r--  test/CodeGen/AArch64/adrp-relocation.ll  22
-rw-r--r--  test/CodeGen/AArch64/extern-weak.ll        5
-rw-r--r--  test/CodeGen/AArch64/fp-cond-sel.ll        4
-rw-r--r--  test/CodeGen/AArch64/fp128-folding.ll      2
-rw-r--r--  test/CodeGen/AArch64/fp128.ll             10
-rw-r--r--  test/CodeGen/AArch64/fpimm.ll              4
-rw-r--r--  test/CodeGen/AArch64/func-argpassing.ll    2
-rw-r--r--  test/CodeGen/AArch64/func-calls.ll         2
-rw-r--r--  test/CodeGen/AArch64/literal_pools.ll     18
9 files changed, 38 insertions(+), 31 deletions(-)
diff --git a/test/CodeGen/AArch64/adrp-relocation.ll b/test/CodeGen/AArch64/adrp-relocation.ll
index 3eeb53e329..c33b442624 100644
--- a/test/CodeGen/AArch64/adrp-relocation.ll
+++ b/test/CodeGen/AArch64/adrp-relocation.ll
@@ -1,16 +1,16 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs -filetype=obj < %s | elf-dump | FileCheck %s
-define fp128 @testfn() nounwind {
+define i64 @testfn() nounwind {
entry:
- ret fp128 0xL00000000000000004004500000000000
+ ret i64 0
}
-define fp128 @foo() nounwind {
+define i64 @foo() nounwind {
entry:
- %bar = alloca fp128 ()*, align 8
- store fp128 ()* @testfn, fp128 ()** %bar, align 8
- %call = call fp128 @testfn()
- ret fp128 %call
+ %bar = alloca i64 ()*, align 8
+ store i64 ()* @testfn, i64 ()** %bar, align 8
+ %call = call i64 @testfn()
+ ret i64 %call
}
; The above should produce an ADRP/ADD pair to calculate the address of
@@ -22,14 +22,14 @@ entry:
; CHECK: .rela.text
; CHECK: # Relocation 0
-; CHECK-NEXT: (('r_offset', 0x0000000000000028)
-; CHECK-NEXT: ('r_sym', 0x00000009)
+; CHECK-NEXT: (('r_offset', 0x0000000000000010)
+; CHECK-NEXT: ('r_sym', 0x00000007)
; CHECK-NEXT: ('r_type', 0x00000113)
; CHECK-NEXT: ('r_addend', 0x0000000000000000)
; CHECK-NEXT: ),
; CHECK-NEXT: Relocation 1
-; CHECK-NEXT: (('r_offset', 0x000000000000002c)
-; CHECK-NEXT: ('r_sym', 0x00000009)
+; CHECK-NEXT: (('r_offset', 0x0000000000000014)
+; CHECK-NEXT: ('r_sym', 0x00000007)
; CHECK-NEXT: ('r_type', 0x00000115)
; CHECK-NEXT: ('r_addend', 0x0000000000000000)
; CHECK-NEXT: ),
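For reference, the two r_type values checked above are the ELF AArch64 relocations for the two halves of that ADRP/ADD pair: 0x113 is R_AARCH64_ADR_PREL_PG_HI21 on the ADRP, and 0x115 is R_AARCH64_ADD_ABS_LO12_NC on the ADD. Schematically:

    adrp    x0, testfn              ; r_type 0x113 (R_AARCH64_ADR_PREL_PG_HI21)
    add     x0, x0, #:lo12:testfn   ; r_type 0x115 (R_AARCH64_ADD_ABS_LO12_NC)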
diff --git a/test/CodeGen/AArch64/extern-weak.ll b/test/CodeGen/AArch64/extern-weak.ll
index 54baab2200..298977682b 100644
--- a/test/CodeGen/AArch64/extern-weak.ll
+++ b/test/CodeGen/AArch64/extern-weak.ll
@@ -6,8 +6,9 @@ define i32()* @foo() {
; The usual ADRP/ADD pair can't be used for a weak reference because it must
; evaluate to 0 if the symbol is undefined. We use a litpool entry.
ret i32()* @var
-; CHECK: ldr x0, .LCPI0_0
-
; CHECK: .LCPI0_0:
; CHECK-NEXT: .xword var
+
+; CHECK: ldr x0, [{{x[0-9]+}}, #:lo12:.LCPI0_0]
+
}
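The comment's constraint, made concrete: an ADRP/ADD pair always computes some valid nearby address and has no relocation that resolves to zero, whereas a literal-pool word is plain data the linker can fill with 0 when the weak symbol is undefined. A sketch of what the updated checks describe (register illustrative):

    adrp    x8, .LCPI0_0
    ldr     x0, [x8, #:lo12:.LCPI0_0]   ; yields 0 if var is undefined
    ...
.LCPI0_0:
    .xword  var                         ; linker writes var's address, or 0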
diff --git a/test/CodeGen/AArch64/fp-cond-sel.ll b/test/CodeGen/AArch64/fp-cond-sel.ll
index 0d5882b3f5..56e8f16f9b 100644
--- a/test/CodeGen/AArch64/fp-cond-sel.ll
+++ b/test/CodeGen/AArch64/fp-cond-sel.ll
@@ -9,15 +9,15 @@ define void @test_csel(i32 %lhs32, i32 %rhs32, i64 %lhs64) {
%tst1 = icmp ugt i32 %lhs32, %rhs32
%val1 = select i1 %tst1, float 0.0, float 1.0
store float %val1, float* @varfloat
+; CHECK: ldr [[FLT0:s[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
; CHECK: fmov [[FLT1:s[0-9]+]], #1.0
-; CHECK: ldr [[FLT0:s[0-9]+]], .LCPI
; CHECK: fcsel {{s[0-9]+}}, [[FLT0]], [[FLT1]], hi
%rhs64 = sext i32 %rhs32 to i64
%tst2 = icmp sle i64 %lhs64, %rhs64
%val2 = select i1 %tst2, double 1.0, double 0.0
store double %val2, double* @vardouble
-; CHECK: ldr [[FLT0:d[0-9]+]], .LCPI
+; CHECK: ldr [[FLT0:d[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
; CHECK: fmov [[FLT1:d[0-9]+]], #1.0
; CHECK: fcsel {{d[0-9]+}}, [[FLT1]], [[FLT0]], le
diff --git a/test/CodeGen/AArch64/fp128-folding.ll b/test/CodeGen/AArch64/fp128-folding.ll
index b2c3040b6d..b5bdcf4f37 100644
--- a/test/CodeGen/AArch64/fp128-folding.ll
+++ b/test/CodeGen/AArch64/fp128-folding.ll
@@ -12,6 +12,6 @@ define fp128 @test_folding() {
%fpval = sitofp i32 %val to fp128
; If the value is loaded from a constant pool into an fp128, it's been folded
; successfully.
-; CHECK: ldr {{q[0-9]+}}, .LCPI
+; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI
ret fp128 %fpval
}
\ No newline at end of file
diff --git a/test/CodeGen/AArch64/fp128.ll b/test/CodeGen/AArch64/fp128.ll
index 8fd8a30597..258d34b8f8 100644
--- a/test/CodeGen/AArch64/fp128.ll
+++ b/test/CodeGen/AArch64/fp128.ll
@@ -261,6 +261,10 @@ define void @test_extend() {
}
define fp128 @test_neg(fp128 %in) {
+; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
+; Make sure the weird hex constant below *is* -0.0
+; CHECK-NEXT: fp128 -0
+
; CHECK: test_neg:
; Could in principle be optimized to fneg which we can't select, this makes
@@ -268,13 +272,9 @@ define fp128 @test_neg(fp128 %in) {
%ret = fsub fp128 0xL00000000000000008000000000000000, %in
; CHECK: str q0, [sp, #-16]
; CHECK-NEXT: ldr q1, [sp], #16
-; CHECK: ldr q0, [[MINUS0:.LCPI[0-9]+_0]]
+; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:[[MINUS0]]]
; CHECK: bl __subtf3
ret fp128 %ret
; CHECK: ret
-
-; CHECK: [[MINUS0]]:
-; Make sure the weird hex constant below *is* -0.0
-; CHECK-NEXT: fp128 -0
}
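Sketching what the reordered checks cover: with no selectable fp128 fneg, the negation is lowered as (-0.0) - %in, and the -0.0 is now fetched through the ADRP/:lo12: idiom before the __subtf3 libcall. Roughly, with a hypothetical label:

    adrp    x8, .LCPI11_0                ; page of the fp128 -0.0 constant
    ldr     q0, [x8, #:lo12:.LCPI11_0]   ; first __subtf3 operand: -0.0
    bl      __subtf3                     ; computes -0.0 - %in, i.e. -%in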
diff --git a/test/CodeGen/AArch64/fpimm.ll b/test/CodeGen/AArch64/fpimm.ll
index fad2151e51..fd28aeef92 100644
--- a/test/CodeGen/AArch64/fpimm.ll
+++ b/test/CodeGen/AArch64/fpimm.ll
@@ -13,7 +13,7 @@ define void @check_float() {
%newval2 = fadd float %val, 128.0
store volatile float %newval2, float* @varf32
-; CHECK: ldr {{s[0-9]+}}, .LCPI0_0
+; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI0_0
ret void
}
@@ -28,7 +28,7 @@ define void @check_double() {
%newval2 = fadd double %val, 128.0
store volatile double %newval2, double* @varf64
-; CHECK: ldr {{d[0-9]+}}, .LCPI1_0
+; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, #:lo12:.LCPI1_0
ret void
}
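Both hunks here are the same case: 128.0 falls outside what FMOV's 8-bit immediate can encode (values of the form +/-(16..31)/16 * 2^n for n in -3..4, roughly +/-0.125 to +/-31), so it has to come from the constant pool, now addressed via ADRP plus :lo12: instead of a pc-relative LDR. Roughly (register illustrative):

    fmov    s1, #2.0                    ; 2.0 fits the FMOV immediate encoding
    adrp    x8, .LCPI0_0                ; 128.0 does not, so it is pooled
    ldr     s0, [x8, #:lo12:.LCPI0_0]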
diff --git a/test/CodeGen/AArch64/func-argpassing.ll b/test/CodeGen/AArch64/func-argpassing.ll
index 760c8d49d0..5675e5a1f9 100644
--- a/test/CodeGen/AArch64/func-argpassing.ll
+++ b/test/CodeGen/AArch64/func-argpassing.ll
@@ -83,7 +83,7 @@ define i32 @return_int() {
define double @return_double() {
; CHECK: return_double:
ret double 3.14
-; CHECK: ldr d0, .LCPI
+; CHECK: ldr d0, [{{x[0-9]+}}, #:lo12:.LCPI
}
; This is the kind of IR clang will produce for returning a struct
diff --git a/test/CodeGen/AArch64/func-calls.ll b/test/CodeGen/AArch64/func-calls.ll
index 8810d1ca56..abb09a5e53 100644
--- a/test/CodeGen/AArch64/func-calls.ll
+++ b/test/CodeGen/AArch64/func-calls.ll
@@ -90,7 +90,7 @@ define void @check_stack_args() {
call void @stacked_fpu(float -1.0, double 1.0, float 4.0, float 2.0,
float -2.0, float -8.0, float 16.0, float 1.0,
float 64.0)
-; CHECK: ldr s[[STACKEDREG:[0-9]+]], .LCPI
+; CHECK: ldr s[[STACKEDREG:[0-9]+]], [{{x[0-9]+}}, #:lo12:.LCPI
; CHECK: mov x0, sp
; CHECK: str d[[STACKEDREG]], [x0]
; CHECK: bl stacked_fpu
diff --git a/test/CodeGen/AArch64/literal_pools.ll b/test/CodeGen/AArch64/literal_pools.ll
index a14dfc1919..e09084148f 100644
--- a/test/CodeGen/AArch64/literal_pools.ll
+++ b/test/CodeGen/AArch64/literal_pools.ll
@@ -10,19 +10,23 @@ define void @foo() {
%val32_lit32 = and i32 %val32, 123456785
store volatile i32 %val32_lit32, i32* @var32
-; CHECK: ldr {{w[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
%val64_lit32 = and i64 %val64, 305402420
store volatile i64 %val64_lit32, i64* @var64
-; CHECK: ldr {{w[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldr {{w[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
%val64_lit32signed = and i64 %val64, -12345678
store volatile i64 %val64_lit32signed, i64* @var64
-; CHECK: ldrsw {{x[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldrsw {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
%val64_lit64 = and i64 %val64, 1234567898765432
store volatile i64 %val64_lit64, i64* @var64
-; CHECK: ldr {{x[0-9]+}}, .LCPI0
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI0_[0-9]+]]
+; CHECK: ldr {{x[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
ret void
}
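One detail in the hunk above: the -12345678 mask is checked with LDRSW rather than LDR, i.e. the pool entry is a 32-bit word that gets sign-extended to 64 bits on load, while the 64-bit-only literal after it takes a plain 64-bit LDR; the addressing is the same ADRP/:lo12: pair in every case. Roughly (register and label illustrative):

    adrp    x8, .LCPI0_2
    ldrsw   x0, [x8, #:lo12:.LCPI0_2]   ; 32-bit entry, sign-extended to x0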
@@ -35,13 +39,15 @@ define void @floating_lits() {
%floatval = load float* @varfloat
%newfloat = fadd float %floatval, 128.0
-; CHECK: ldr {{s[0-9]+}}, .LCPI1
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
+; CHECK: ldr {{s[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
; CHECK: fadd
store float %newfloat, float* @varfloat
%doubleval = load double* @vardouble
%newdouble = fadd double %doubleval, 129.0
-; CHECK: ldr {{d[0-9]+}}, .LCPI1
+; CHECK: adrp x[[LITBASE:[0-9]+]], [[CURLIT:.LCPI1_[0-9]+]]
+; CHECK: ldr {{d[0-9]+}}, [x[[LITBASE]], #:lo12:[[CURLIT]]]
; CHECK: fadd
store double %newdouble, double* @vardouble