author | Jiangning Liu <jiangning.liu@arm.com> | 2013-10-05 08:22:10 +0000 |
---|---|---|
committer | Jiangning Liu <jiangning.liu@arm.com> | 2013-10-05 08:22:10 +0000 |
commit | beb6afa84397a27e48a9d72ac1d588bc6fcaf564 (patch) | |
tree | e4c47d31248bdeca916aa69eb24edf9cdcf6685a /test/CodeGen/AArch64/neon-across.ll | |
parent | 936910d9293f7118056498c75c7bca79a7fc579c (diff) | |
download | llvm-beb6afa84397a27e48a9d72ac1d588bc6fcaf564.tar.gz llvm-beb6afa84397a27e48a9d72ac1d588bc6fcaf564.tar.bz2 llvm-beb6afa84397a27e48a9d72ac1d588bc6fcaf564.tar.xz |
Implement aarch64 neon instruction set AdvSIMD (Across).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192028 91177308-0d34-0410-b5e6-96231b3b80d8
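For readers browsing this commit, the AdvSIMD "across lanes" class reduces all lanes of one vector register to a single scalar (sum, widening sum, signed/unsigned max and min, and the floating-point variants). The C sketch below is not part of the patch; it assumes a toolchain whose arm_neon.h exposes the ACLE across-lanes intrinsics (vaddvq_u32, vmaxvq_u32), which should compile down to the ADDV/UMAXV instructions this patch adds selection patterns for.

```c
/* Illustration only -- not from this commit. Assumes an AArch64 compiler
 * whose arm_neon.h provides the ACLE across-lanes intrinsics. */
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
    uint32_t in[4] = {1, 2, 3, 4};
    uint32x4_t v = vld1q_u32(in);   /* load all four lanes */

    /* Across-lanes sum: the four lanes reduced to one scalar (ADDV). */
    uint32_t sum = vaddvq_u32(v);   /* 1 + 2 + 3 + 4 = 10 */

    /* Across-lanes unsigned maximum (UMAXV). */
    uint32_t max = vmaxvq_u32(v);   /* 4 */

    printf("sum=%u max=%u\n", sum, max);
    return 0;
}
```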
Diffstat (limited to 'test/CodeGen/AArch64/neon-across.ll')
-rw-r--r-- | test/CodeGen/AArch64/neon-across.ll | 476 |
1 file changed, 476 insertions, 0 deletions
diff --git a/test/CodeGen/AArch64/neon-across.ll b/test/CodeGen/AArch64/neon-across.ll
new file mode 100644
index 0000000000..733db970cf
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-across.ll
@@ -0,0 +1,476 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float>)
+
+declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
+
+define i16 @test_vaddlv_s8(<8 x i8> %a) {
+; CHECK: test_vaddlv_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i16> %saddlv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vaddlv_s16(<4 x i16> %a) {
+; CHECK: test_vaddlv_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i32> %saddlv.i, i32 0
+  ret i32 %0
+}
+
+define i16 @test_vaddlv_u8(<8 x i8> %a) {
+; CHECK: test_vaddlv_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vaddlv_u16(<4 x i16> %a) {
+; CHECK: test_vaddlv_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+  ret i32 %0
+}
+
+define i16 @test_vaddlvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i16> %saddlv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vaddlvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i32> %saddlv.i, i32 0
+  ret i32 %0
+}
+
+define i64 @test_vaddlvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_s32:
+; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %saddlv.i = tail call <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i64> %saddlv.i, i32 0
+  ret i64 %0
+}
+
+define i16 @test_vaddlvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vaddlvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+  ret i32 %0
+}
+
+define i64 @test_vaddlvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_u32:
+; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %uaddlv.i = tail call <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i64> %uaddlv.i, i32 0
+  ret i64 %0
+}
+
+define i8 @test_vmaxv_s8(<8 x i8> %a) {
+; CHECK: test_vmaxv_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i8> %smaxv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vmaxv_s16(<4 x i16> %a) {
+; CHECK: test_vmaxv_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i16> %smaxv.i, i32 0
+  ret i16 %0
+}
+
+define i8 @test_vmaxv_u8(<8 x i8> %a) {
+; CHECK: test_vmaxv_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i8> %umaxv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vmaxv_u16(<4 x i16> %a) {
+; CHECK: test_vmaxv_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i16> %umaxv.i, i32 0
+  ret i16 %0
+}
+
+define i8 @test_vmaxvq_s8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i8> %smaxv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vmaxvq_s16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i16> %smaxv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vmaxvq_s32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_s32:
+; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %smaxv.i = tail call <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i32> %smaxv.i, i32 0
+  ret i32 %0
+}
+
+define i8 @test_vmaxvq_u8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i8> %umaxv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vmaxvq_u16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i16> %umaxv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vmaxvq_u32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_u32:
+; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %umaxv.i = tail call <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i32> %umaxv.i, i32 0
+  ret i32 %0
+}
+
+define i8 @test_vminv_s8(<8 x i8> %a) {
+; CHECK: test_vminv_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i8> %sminv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vminv_s16(<4 x i16> %a) {
+; CHECK: test_vminv_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i16> %sminv.i, i32 0
+  ret i16 %0
+}
+
+define i8 @test_vminv_u8(<8 x i8> %a) {
+; CHECK: test_vminv_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i8> %uminv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vminv_u16(<4 x i16> %a) {
+; CHECK: test_vminv_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i16> %uminv.i, i32 0
+  ret i16 %0
+}
+
+define i8 @test_vminvq_s8(<16 x i8> %a) {
+; CHECK: test_vminvq_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i8> %sminv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vminvq_s16(<8 x i16> %a) {
+; CHECK: test_vminvq_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i16> %sminv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vminvq_s32(<4 x i32> %a) {
+; CHECK: test_vminvq_s32:
+; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %sminv.i = tail call <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i32> %sminv.i, i32 0
+  ret i32 %0
+}
+
+define i8 @test_vminvq_u8(<16 x i8> %a) {
+; CHECK: test_vminvq_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i8> %uminv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vminvq_u16(<8 x i16> %a) {
+; CHECK: test_vminvq_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i16> %uminv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vminvq_u32(<4 x i32> %a) {
+; CHECK: test_vminvq_u32:
+; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %uminv.i = tail call <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i32> %uminv.i, i32 0
+  ret i32 %0
+}
+
+define i8 @test_vaddv_s8(<8 x i8> %a) {
+; CHECK: test_vaddv_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i8> %vaddv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vaddv_s16(<4 x i16> %a) {
+; CHECK: test_vaddv_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i16> %vaddv.i, i32 0
+  ret i16 %0
+}
+
+define i8 @test_vaddv_u8(<8 x i8> %a) {
+; CHECK: test_vaddv_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+  %0 = extractelement <1 x i8> %vaddv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vaddv_u16(<4 x i16> %a) {
+; CHECK: test_vaddv_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+  %0 = extractelement <1 x i16> %vaddv.i, i32 0
+  ret i16 %0
+}
+
+define i8 @test_vaddvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddvq_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i8> %vaddv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vaddvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddvq_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i16> %vaddv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddvq_s32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i32> %vaddv.i, i32 0
+  ret i32 %0
+}
+
+define i8 @test_vaddvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddvq_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+  %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+  %0 = extractelement <1 x i8> %vaddv.i, i32 0
+  ret i8 %0
+}
+
+define i16 @test_vaddvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddvq_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+  %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+  %0 = extractelement <1 x i16> %vaddv.i, i32 0
+  ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddvq_u32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+  %0 = extractelement <1 x i32> %vaddv.i, i32 0
+  ret i32 %0
+}
+
+define float @test_vmaxvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxvq_f32:
+; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %vmaxv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
+  %0 = extractelement <1 x float> %vmaxv.i, i32 0
+  ret float %0
+}
+
+define float @test_vminvq_f32(<4 x float> %a) {
+; CHECK: test_vminvq_f32:
+; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %vminv.i = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float> %a)
+  %0 = extractelement <1 x float> %vminv.i, i32 0
+  ret float %0
+}
+
+define float @test_vmaxnmvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxnmvq_f32:
+; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %vmaxnmv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float> %a)
+  %0 = extractelement <1 x float> %vmaxnmv.i, i32 0
+  ret float %0
+}
+
+define float @test_vminnmvq_f32(<4 x float> %a) {
+; CHECK: test_vminnmvq_f32:
+; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+  %vminnmv.i = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float> %a)
+  %0 = extractelement <1 x float> %vminnmv.i, i32 0
+  ret float %0
+}
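The last four tests pin the floating-point reductions to distinct opcodes: FMAXV/FMINV versus FMAXNMV/FMINNMV. The difference is NaN handling: the "NM" forms follow the IEEE 754-2008 maxNum/minNum rules and return the numeric operand when a quiet NaN is present, while FMAXV/FMINV propagate the NaN. A short C sketch of that behaviour, again assuming ACLE intrinsics from arm_neon.h rather than anything in this patch:

```c
/* Illustration only -- not from this commit. Shows the NaN behaviour that
 * separates FMAXV from FMAXNMV; assumes ACLE arm_neon.h intrinsics. */
#include <arm_neon.h>
#include <math.h>
#include <stdio.h>

int main(void) {
    float in[4] = {1.0f, NAN, 3.0f, 2.0f};
    float32x4_t v = vld1q_f32(in);

    float m   = vmaxvq_f32(v);    /* FMAXV: a NaN lane makes the result NaN  */
    float mnm = vmaxnmvq_f32(v);  /* FMAXNMV: quiet NaN ignored, result 3.0  */

    printf("fmaxv=%f fmaxnmv=%f\n", m, mnm);
    return 0;
}
```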