author    Hal Finkel <hfinkel@anl.gov>    2013-03-15 15:27:13 +0000
committer Hal Finkel <hfinkel@anl.gov>    2013-03-15 15:27:13 +0000
commit  2d37f7b979a24930c444f9783173a90a6e548118 (patch)
tree    a30c316b5d17f708c2c44d5d0461baa8452e1622 /test/CodeGen/PowerPC/unaligned.ll
parent  c0d8dc0eb6e1df872affadba01f60e42275e2863 (diff)
Enable unaligned memory access on PPC for scalar types
Unaligned access is supported on PPC for non-vector types, and is generally more efficient than manually expanding the loads and stores. A few of the existing test cases were using expanded unaligned loads and stores to test other features (like load/store with update), and for these test cases, unaligned access remains disabled.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177160 91177308-0d34-0410-b5e6-96231b3b80d8
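For context, SelectionDAG legalization asks the target whether an unaligned access of a given type is legal via the allowsUnalignedMemoryAccesses hook; below is a minimal sketch of the kind of override this change enables in lib/Target/PowerPC/PPCISelLowering.cpp. This is not the verbatim patch: the exact body (including the ppcf128 carve-out) is an assumption for illustration.

// Sketch of the TargetLowering hook (LLVM ~3.3-era signature); not the exact patch text.
bool PPCTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
  // Only simple, non-vector types take the unaligned path; vector types
  // (as in @foo6 below) are still legalized through aligned stack slots.
  if (!VT.isSimple() || VT.getSimpleVT().isVector())
    return false;

  // Assumed for this sketch: keep ppcf128 on the expanded path.
  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = true; // Unaligned scalar access is cheaper than manual expansion.
  return true;
}

With the hook returning true for scalar types, the expanded byte-wise sequences disappear and the plain lhz/sth, lwz/stw, ld/std, lfs/stfs, and lfd/stfd forms checked in the test below are selected directly.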
Diffstat (limited to 'test/CodeGen/PowerPC/unaligned.ll')
-rw-r--r--  test/CodeGen/PowerPC/unaligned.ll  73
1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/test/CodeGen/PowerPC/unaligned.ll b/test/CodeGen/PowerPC/unaligned.ll
new file mode 100644
index 0000000000..d05080338f
--- /dev/null
+++ b/test/CodeGen/PowerPC/unaligned.ll
@@ -0,0 +1,73 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck %s
+target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128-n32"
+
+define void @foo1(i16* %p, i16* %r) nounwind {
+entry:
+ %v = load i16* %p, align 1
+ store i16 %v, i16* %r, align 1
+ ret void
+
+; CHECK: @foo1
+; CHECK: lhz
+; CHECK: sth
+}
+
+define void @foo2(i32* %p, i32* %r) nounwind {
+entry:
+ %v = load i32* %p, align 1
+ store i32 %v, i32* %r, align 1
+ ret void
+
+; CHECK: @foo2
+; CHECK: lwz
+; CHECK: stw
+}
+
+define void @foo3(i64* %p, i64* %r) nounwind {
+entry:
+ %v = load i64* %p, align 1
+ store i64 %v, i64* %r, align 1
+ ret void
+
+; CHECK: @foo3
+; CHECK: ld
+; CHECK: std
+}
+
+define void @foo4(float* %p, float* %r) nounwind {
+entry:
+ %v = load float* %p, align 1
+ store float %v, float* %r, align 1
+ ret void
+
+; CHECK: @foo4
+; CHECK: lfs
+; CHECK: stfs
+}
+
+define void @foo5(double* %p, double* %r) nounwind {
+entry:
+ %v = load double* %p, align 1
+ store double %v, double* %r, align 1
+ ret void
+
+; CHECK: @foo5
+; CHECK: lfd
+; CHECK: stfd
+}
+
+define void @foo6(<4 x float>* %p, <4 x float>* %r) nounwind {
+entry:
+ %v = load <4 x float>* %p, align 1
+ store <4 x float> %v, <4 x float>* %r, align 1
+ ret void
+
+; These loads and stores are legalized into aligned loads and stores
+; using aligned stack slots.
+; CHECK: @foo6
+; CHECK: ld
+; CHECK: ld
+; CHECK: std
+; CHECK: std
+}
+