summaryrefslogtreecommitdiff
path: root/test/CodeGen/XCore
diff options
context:
space:
mode:
author    Richard Osborne <richard@xmos.com>  2009-07-16 10:42:35 +0000
committer Richard Osborne <richard@xmos.com>  2009-07-16 10:42:35 +0000
commit    ccb7e96ef0ef743c822ce19aac324b429198bff2 (patch)
tree      fe61e988b5308bc5c137f6148e43bbed11adf4de /test/CodeGen/XCore
parent    7f47ce966219b8dbc37cf8c289660dd83923289f (diff)
download  llvm-ccb7e96ef0ef743c822ce19aac324b429198bff2.tar.gz
          llvm-ccb7e96ef0ef743c822ce19aac324b429198bff2.tar.bz2
          llvm-ccb7e96ef0ef743c822ce19aac324b429198bff2.tar.xz
Expand unaligned 32 bit loads from an address which is a constant
offset from a 32 bit aligned base as follows:

  ldw low, base[offset >> 2]
  ldw high, base[(offset >> 2) + 1]
  shr low_shifted, low, (offset & 0x3) * 8
  shl high_shifted, high, 32 - (offset & 0x3) * 8
  or result, low_shifted, high_shifted

Expand 32 bit loads / stores with 16 bit alignment into two 16 bit
loads / stores.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@75902 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/XCore')
-rw-r--r--  test/CodeGen/XCore/unaligned_load.ll   | 22
-rw-r--r--  test/CodeGen/XCore/unaligned_store.ll  |  9
2 files changed, 31 insertions, 0 deletions
diff --git a/test/CodeGen/XCore/unaligned_load.ll b/test/CodeGen/XCore/unaligned_load.ll
index a6a50893b9..c1372ed42f 100644
--- a/test/CodeGen/XCore/unaligned_load.ll
+++ b/test/CodeGen/XCore/unaligned_load.ll
@@ -1,5 +1,11 @@
; RUN: llvm-as < %s | llc -march=xcore > %t1.s
; RUN: grep "bl __misaligned_load" %t1.s | count 1
+; RUN: grep ld16s %t1.s | count 2
+; RUN: grep ldw %t1.s | count 2
+; RUN: grep shl %t1.s | count 2
+; RUN: grep shr %t1.s | count 1
+; RUN: grep zext %t1.s | count 1
+; RUN: grep "or " %t1.s | count 2
; Byte aligned load. Expands to call to __misaligned_load.
define i32 @align1(i32* %p) nounwind {
@@ -7,3 +13,19 @@ entry:
%0 = load i32* %p, align 1 ; <i32> [#uses=1]
ret i32 %0
}
+
+; Half word aligned load. Expands to two 16bit loads.
+define i32 @align2(i32* %p) nounwind {
+entry:
+ %0 = load i32* %p, align 2 ; <i32> [#uses=1]
+ ret i32 %0
+}
+
+@a = global [5 x i8] zeroinitializer, align 4
+
+; Constant offset from word aligned base. Expands to two 32bit loads.
+define i32 @align3() nounwind {
+entry:
+ %0 = load i32* bitcast (i8* getelementptr ([5 x i8]* @a, i32 0, i32 1) to i32*), align 1
+ ret i32 %0
+}
diff --git a/test/CodeGen/XCore/unaligned_store.ll b/test/CodeGen/XCore/unaligned_store.ll
index b7a519299f..120d6529ec 100644
--- a/test/CodeGen/XCore/unaligned_store.ll
+++ b/test/CodeGen/XCore/unaligned_store.ll
@@ -1,5 +1,7 @@
; RUN: llvm-as < %s | llc -march=xcore > %t1.s
; RUN: grep "bl __misaligned_store" %t1.s | count 1
+; RUN: grep st16 %t1.s | count 2
+; RUN: grep shr %t1.s | count 1
; Byte aligned store. Expands to call to __misaligned_store.
define void @align1(i32* %p, i32 %val) nounwind {
@@ -7,3 +9,10 @@ entry:
store i32 %val, i32* %p, align 1
ret void
}
+
+; Half word aligned store. Expands to two 16bit stores.
+define void @align2(i32* %p, i32 %val) nounwind {
+entry:
+ store i32 %val, i32* %p, align 2
+ ret void
+}