author     Chris Lattner <sabre@nondot.org>   2006-04-12 19:07:14 +0000
committer  Chris Lattner <sabre@nondot.org>   2006-04-12 19:07:14 +0000
commit     ac225ca0514b75e29347933d75e86d03ab8a28e1 (patch)
tree       15ca45d7603b8fb726224f0d82ccb4acfc9a53e7 /lib/Target/PowerPC/README_ALTIVEC.txt
parent     d853fd6b6d6ae9733c8af4194c55e2a6f81a97e9 (diff)
Add a new way to match vector constants, which makes it easier to bang bits of
different types.

Codegen spltw(0x7FFFFFFF) and spltw(0x80000000) without a constant pool load,
implementing PowerPC/vec_constants.ll:test1. This compiles:

typedef float vf __attribute__ ((vector_size (16)));
typedef int vi __attribute__ ((vector_size (16)));

void test(vi *P1, vi *P2, vf *P3) {
  *P1 &= (vi){0x80000000,0x80000000,0x80000000,0x80000000};
  *P2 &= (vi){0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF};
  *P3 = vec_abs((vector float)*P3);
}

to:

_test:
        mfspr r2, 256
        oris r6, r2, 49152
        mtspr 256, r6
        vspltisw v0, -1
        vslw v0, v0, v0
        lvx v1, 0, r3
        vand v1, v1, v0
        stvx v1, 0, r3
        lvx v1, 0, r4
        vandc v1, v1, v0
        stvx v1, 0, r4
        lvx v1, 0, r5
        vandc v0, v1, v0
        stvx v0, 0, r5
        mtspr 256, r2
        blr

instead of (with two constant pool entries):

_test:
        mfspr r2, 256
        oris r6, r2, 49152
        mtspr 256, r6
        li r6, lo16(LCPI1_0)
        lis r7, ha16(LCPI1_0)
        li r8, lo16(LCPI1_1)
        lis r9, ha16(LCPI1_1)
        lvx v0, r7, r6
        lvx v1, 0, r3
        vand v0, v1, v0
        stvx v0, 0, r3
        lvx v0, r9, r8
        lvx v1, 0, r4
        vand v1, v1, v0
        stvx v1, 0, r4
        lvx v1, 0, r5
        vand v0, v1, v0
        stvx v0, 0, r5
        mtspr 256, r2
        blr

GCC produces (with 2 cp entries):

_test:
        mfspr r0,256
        stw r0,-4(r1)
        oris r0,r0,0xc00c
        mtspr 256,r0
        lis r2,ha16(LC0)
        lis r9,ha16(LC1)
        la r2,lo16(LC0)(r2)
        lvx v0,0,r3
        lvx v1,0,r5
        la r9,lo16(LC1)(r9)
        lwz r12,-4(r1)
        lvx v12,0,r2
        lvx v13,0,r9
        vand v0,v0,v12
        stvx v0,0,r3
        vspltisw v0,-1
        vslw v12,v0,v0
        vandc v1,v1,v12
        stvx v1,0,r5
        lvx v0,0,r4
        vand v0,v0,v13
        stvx v0,0,r4
        mtspr 256,r12
        blr

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27624 91177308-0d34-0410-b5e6-96231b3b80d8
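The two-instruction sequence works because vspltisw v0, -1 splats the
sign-extended immediate -1 (0xFFFFFFFF) into every word element, and vslw
shifts each element left by only the low-order 5 bits of the corresponding
shift-vector element, so every lane is shifted by 0xFFFFFFFF & 31 == 31,
leaving 0x80000000. A minimal scalar check of that identity (plain C++,
written for this note, not part of the commit):

#include <cstdint>
#include <cstdio>

int main() {
    // vspltisw v0,-1: splat 0xFFFFFFFF into every 32-bit element.
    uint32_t all_ones = 0xFFFFFFFFu;

    // vslw v0,v0,v0: each element is shifted left by the low 5 bits of
    // the corresponding shift amount, i.e. by 0xFFFFFFFF & 31 == 31.
    uint32_t sign_bit = all_ones << (all_ones & 31u);

    // 0x7FFFFFFF is just the complement, which is why the generated code
    // reaches it with vandc (AND with complement) rather than a second splat.
    std::printf("0x%08X 0x%08X\n", (unsigned)sign_bit, (unsigned)~sign_bit);
    return 0;
}

This prints 0x80000000 0x7FFFFFFF, matching the two masks used in the test.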
Diffstat (limited to 'lib/Target/PowerPC/README_ALTIVEC.txt')
-rw-r--r--  lib/Target/PowerPC/README_ALTIVEC.txt | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/lib/Target/PowerPC/README_ALTIVEC.txt b/lib/Target/PowerPC/README_ALTIVEC.txt
index 7e92f0b788..78ea2cd879 100644
--- a/lib/Target/PowerPC/README_ALTIVEC.txt
+++ b/lib/Target/PowerPC/README_ALTIVEC.txt
@@ -43,7 +43,8 @@ There are a wide range of vector constants we can generate with combinations of
altivec instructions. Examples
GCC does: "t=vsplti*, r = t+t" for constants it can't generate with one vsplti
- -0.0 (sign bit): vspltisw v0,-1 / vslw v0,v0,v0
+This should be added to the ISD::BUILD_VECTOR case in
+PPCTargetLowering::LowerOperation.
//===----------------------------------------------------------------------===//
@@ -110,8 +111,10 @@ e.g. x86 cmov (not supported on bytes).
This would fix two problems:
1. Writing patterns multiple times.
-2. Identical operations in different types are not getting CSE'd (e.g.
- { 0U, 0U, 0U, 0U } and {0.0, 0.0, 0.0, 0.0}.
+2. Identical operations in different types are not getting CSE'd.
+
+We already do this for shuffle and build_vector. We need load,undef,and,or,xor,
+etc.
//===----------------------------------------------------------------------===//
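The first hunk's note (match these splats in the ISD::BUILD_VECTOR case of
PPCTargetLowering::LowerOperation) amounts to classifying the splatted value
before falling back to the constant pool. Below is a self-contained sketch of
that decision for a 4 x i32 splat; SplatLowering and classifySplat are
hypothetical names invented for illustration, not the PPCTargetLowering API,
and the real lowering would inspect SelectionDAG nodes rather than raw
integers:

#include <cstdint>
#include <cstdio>

enum class SplatLowering { VSpltisw, AllOnesShift, ConstantPool };

// How could a 4 x i32 splat of v be materialized?
//  - vspltisw covers small sign-extended immediates in [-16, 15];
//  - vspltisw v0,-1 / vslw v0,v0,v0 covers 0x80000000 (and uses of
//    0x7FFFFFFF can be rewritten as vandc against that value);
//  - everything else falls back to a constant pool load.
SplatLowering classifySplat(uint32_t v) {
    int32_t s = static_cast<int32_t>(v);
    if (s >= -16 && s < 16)
        return SplatLowering::VSpltisw;
    if (v == 0x80000000u)
        return SplatLowering::AllOnesShift;
    return SplatLowering::ConstantPool;
}

int main() {
    const uint32_t tests[] = {0u, 0xFFFFFFFFu, 0x80000000u, 0x12345678u};
    for (uint32_t v : tests)
        std::printf("0x%08X -> %d\n", (unsigned)v, (int)classifySplat(v));
    return 0;
}

The second hunk makes the related point: once such operations are written
once in a type-independent way (as already done for shuffle and
build_vector), identical nodes of different types can be CSE'd instead of
being rebuilt per type.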