author     James Molloy <james.molloy@arm.com>  2011-10-19 14:11:07 +0000
committer  James Molloy <james.molloy@arm.com>  2011-10-19 14:11:07 +0000
commit     cdd8e46bec4e975d00a5abea808d8eb4138515c5 (patch)
tree       5feaecc08b9dae81374c129cfe5d28552a998c25 /test
parent     40230c4c06e0ad1afbef217229363cab077368b3 (diff)
Use literal pool loads instead of MOVW/MOVT for materializing global addresses when optimizing for size.
On spec/gcc, this gave a code-size improvement of ~1.9% for ARM mode and ~4.9% for Thumb(2) mode, measured including the literal pools themselves. The pools doubled in size for ARM mode and quintupled for Thumb mode, suggesting there is still some redundancy in LLVM's use of constant pools that could be reduced by sharing entries. Fixes PR11087. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@142530 91177308-0d34-0410-b5e6-96231b3b80d8
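For illustration only (not output from this commit), materializing a 32-bit address such as 0x12345678 the two ways compares roughly as follows; the register choice and the .LCPI label name are assumptions:

    @ MOVW/MOVT pair: two 4-byte instructions, no constant pool entry
    movw  r0, #0x5678        @ low 16 bits of the address
    movt  r0, #0x1234        @ high 16 bits of the address

    @ Literal pool load: one pc-relative load plus a 4-byte pool entry
    @ emitted nearby; pool entries can in principle be shared
    ldr   r0, .LCPI0_0
    ...
.LCPI0_0:
    .long 0x12345678

The pool form trades an extra memory access for a shorter instruction sequence (especially in Thumb), which is why it is only chosen when optimizing for size.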
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/ARM/2011-10-18-DisableMovtSize.ll  27
1 file changed, 27 insertions, 0 deletions
diff --git a/test/CodeGen/ARM/2011-10-18-DisableMovtSize.ll b/test/CodeGen/ARM/2011-10-18-DisableMovtSize.ll
new file mode 100644
index 0000000000..6dae75be91
--- /dev/null
+++ b/test/CodeGen/ARM/2011-10-18-DisableMovtSize.ll
@@ -0,0 +1,27 @@
+; RUN: llc < %s -mtriple=armv7-apple-darwin | FileCheck %s
+; RUN: llc < %s -mtriple=armv7-unknown-linux-eabi | FileCheck %s
+
+; Check that when optimizing for size, a literal pool load is used
+; instead of the (potentially faster) movw/movt pair when loading
+; a large constant.
+
+@x = global i32* inttoptr (i32 305419888 to i32*), align 4
+
+define i32 @f() optsize {
+ ; CHECK: f:
+ ; CHECK: ldr r{{.}}, {{.?}}LCPI{{.}}_{{.}}
+ ; CHECK: ldr r{{.}}, [{{(pc, )?}}r{{.}}]
+ ; CHECK: ldr r{{.}}, [r{{.}}]
+ %1 = load i32** @x, align 4
+ %2 = load i32* %1
+ ret i32 %2
+}
+
+define i32 @g() {
+ ; CHECK: g:
+ ; CHECK: movw
+ ; CHECK: movt
+ %1 = load i32** @x, align 4
+ %2 = load i32* %1
+ ret i32 %2
+}
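As in the RUN lines above, the checks can be reproduced locally (assuming llc and FileCheck from the same LLVM build are on PATH and the file sits at the path shown in the diff):

    llc < test/CodeGen/ARM/2011-10-18-DisableMovtSize.ll -mtriple=armv7-apple-darwin | FileCheck test/CodeGen/ARM/2011-10-18-DisableMovtSize.ll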