author     Chad Rosier <mcrosier@apple.com>    2011-06-16 01:21:54 +0000
committer  Chad Rosier <mcrosier@apple.com>    2011-06-16 01:21:54 +0000
commit     689edc8b2845f7a6cf9403722de2000598f68489 (patch)
tree       396e45022d75ee68d919f41a76a1ed5a6c8c804e /test
parent     c06b5bf34004a9b01048905c8750761146094586 (diff)
Revision r128665 added an optimization to make use of NEON multiplier
accumulator forwarding. Specifically (from the SVN log entry):

    Distribute (A + B) * C to (A * C) + (B * C) to make use of
    NEON multiplier accumulator forwarding:
        vadd d3, d0, d1
        vmul d3, d3, d2
    =>
        vmul d3, d0, d2
        vmla d3, d1, d2

Make sure it catches cases where operand 1 is add/fadd/sub/fsub, which
was intended in the original revision.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@133127 91177308-0d34-0410-b5e6-96231b3b80d8
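For illustration, this is the source-level shape the optimization targets, written with ARM NEON intrinsics. The function name and the choice of 8x8-bit lanes are assumptions for this sketch (they match the vmul.i8/vmla.i8 case tested below), not code from the commit.

    #include <arm_neon.h>

    /* Sketch (assumed name): computes (a + b) * c on 8x8-bit lanes.
     * Without the distribution the backend emits vadd then vmul; with it,
     * the same expression can become vmul followed by vmla, letting the
     * multiply-accumulate forward its accumulator instead of stalling
     * on the separate add. */
    uint8x8_t mul_of_sum(uint8x8_t a, uint8x8_t b, uint8x8_t c) {
        return vmul_u8(vadd_u8(a, b), c); /* (a + b) * c */
    }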
Diffstat (limited to 'test')
-rw-r--r--  test/CodeGen/ARM/vmul.ll | 22
1 file changed, 22 insertions, 0 deletions
diff --git a/test/CodeGen/ARM/vmul.ll b/test/CodeGen/ARM/vmul.ll
index 1fd6581ae0..d62b5d415f 100644
--- a/test/CodeGen/ARM/vmul.ll
+++ b/test/CodeGen/ARM/vmul.ll
@@ -492,3 +492,25 @@ entry:
store <8 x i8> %10, <8 x i8>* %11, align 8
ret void
}
+
+define void @distribute2_commutative(%struct.uint8x8_t* nocapture %dst, i8* %src, i32 %mul) nounwind {
+entry:
+; CHECK: distribute2_commutative
+; CHECK-NOT: vadd.i8
+; CHECK: vmul.i8
+; CHECK: vmla.i8
+ %0 = trunc i32 %mul to i8
+ %1 = insertelement <8 x i8> undef, i8 %0, i32 0
+ %2 = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
+ %3 = tail call <16 x i8> @llvm.arm.neon.vld1.v16i8(i8* %src, i32 1)
+ %4 = bitcast <16 x i8> %3 to <2 x double>
+ %5 = extractelement <2 x double> %4, i32 1
+ %6 = bitcast double %5 to <8 x i8>
+ %7 = extractelement <2 x double> %4, i32 0
+ %8 = bitcast double %7 to <8 x i8>
+ %9 = add <8 x i8> %6, %8
+ %10 = mul <8 x i8> %2, %9
+ %11 = getelementptr inbounds %struct.uint8x8_t* %dst, i32 0, i32 0
+ store <8 x i8> %10, <8 x i8>* %11, align 8
+ ret void
+}
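For context, the new test exercises the commuted form: the splatted scalar multiplier is the first operand of the mul and the add is the second (operand 1), the case the original revision was meant to catch. A source-level sketch of that shape, again using NEON intrinsics with assumed names:

    #include <arm_neon.h>

    /* Sketch (assumed name): the add is the second operand of the multiply,
     * mirroring %10 = mul <8 x i8> %2, %9 in the test above. The test's
     * CHECK-NOT: vadd.i8 asserts the standalone add disappears once the
     * expression is distributed into vmul.i8 + vmla.i8. */
    uint8x8_t mul_scalar_by_sum(uint8x8_t c, uint8x8_t a, uint8x8_t b) {
        return vmul_u8(c, vadd_u8(a, b)); /* c * (a + b) */
    }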