summaryrefslogtreecommitdiff
path: root/test/CodeGen/Thumb2/thumb2-ldr.ll
diff options
context:
space:
mode:
authorEvan Cheng <evan.cheng@apple.com>2009-06-29 07:51:04 +0000
committerEvan Cheng <evan.cheng@apple.com>2009-06-29 07:51:04 +0000
commit055b0310f862b91f33699037ce67d3ab8137c20c (patch)
tree6924272e732298bf99146b69c12f9029a5e1adc5 /test/CodeGen/Thumb2/thumb2-ldr.ll
parent95c1f5ba64e7ad505781235839b65a2a8f64a733 (diff)
downloadllvm-055b0310f862b91f33699037ce67d3ab8137c20c.tar.gz
llvm-055b0310f862b91f33699037ce67d3ab8137c20c.tar.bz2
llvm-055b0310f862b91f33699037ce67d3ab8137c20c.tar.xz
Implement Thumb2 ldr.
After much back and forth, I decided to deviate from ARM design and split LDR into 4 instructions (r + imm12, r + imm8, r + r << imm12, constantpool). The advantage of this is 1) it follows the latest ARM technical manual, and 2) makes it easier to reduce the width of the instruction later. The down side is this creates more inconsistency between the two sub-targets. We should split ARM LDR instruction in a similar fashion later. I've added a README entry for this. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@74420 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/Thumb2/thumb2-ldr.ll')
-rw-r--r-- test/CodeGen/Thumb2/thumb2-ldr.ll | 59
1 files changed, 59 insertions, 0 deletions
diff --git a/test/CodeGen/Thumb2/thumb2-ldr.ll b/test/CodeGen/Thumb2/thumb2-ldr.ll
new file mode 100644
index 0000000000..19c75849e1
--- /dev/null
+++ b/test/CodeGen/Thumb2/thumb2-ldr.ll
@@ -0,0 +1,59 @@
+; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep {ldr r0} | count 7
+; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mov | grep 1
+; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | not grep mvn
+; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep ldr | grep lsl
+; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep lsr | not grep ldr
+
+define i32 @f1(i32* %v) {
+entry:
+ %tmp = load i32* %v
+ ret i32 %tmp
+}
+
+define i32 @f2(i32* %v) {
+entry:
+ %tmp2 = getelementptr i32* %v, i32 1023
+ %tmp = load i32* %tmp2
+ ret i32 %tmp
+}
+
+define i32 @f3(i32* %v) {
+entry:
+ %tmp2 = getelementptr i32* %v, i32 1024
+ %tmp = load i32* %tmp2
+ ret i32 %tmp
+}
+
+define i32 @f4(i32 %base) {
+entry:
+ %tmp1 = sub i32 %base, 128
+ %tmp2 = inttoptr i32 %tmp1 to i32*
+ %tmp3 = load i32* %tmp2
+ ret i32 %tmp3
+}
+
+define i32 @f5(i32 %base, i32 %offset) {
+entry:
+ %tmp1 = add i32 %base, %offset
+ %tmp2 = inttoptr i32 %tmp1 to i32*
+ %tmp3 = load i32* %tmp2
+ ret i32 %tmp3
+}
+
+define i32 @f6(i32 %base, i32 %offset) {
+entry:
+ %tmp1 = shl i32 %offset, 2
+ %tmp2 = add i32 %base, %tmp1
+ %tmp3 = inttoptr i32 %tmp2 to i32*
+ %tmp4 = load i32* %tmp3
+ ret i32 %tmp4
+}
+
+define i32 @f7(i32 %base, i32 %offset) {
+entry:
+ %tmp1 = lshr i32 %offset, 2
+ %tmp2 = add i32 %base, %tmp1
+ %tmp3 = inttoptr i32 %tmp2 to i32*
+ %tmp4 = load i32* %tmp3
+ ret i32 %tmp4
+}