 include/llvm/CodeGen/ValueTypes.td        |   2 +-
 include/llvm/IR/IntrinsicsAArch64.td      |  19 +
 lib/Target/AArch64/AArch64InstrFormats.td |  19 +
 lib/Target/AArch64/AArch64InstrNEON.td    | 125 +
 test/CodeGen/AArch64/neon-across.ll       | 476 +
 test/MC/AArch64/neon-across.s             | 101 +
 test/MC/AArch64/neon-diagnostics.s        | 163 +
 7 files changed, 904 insertions(+), 1 deletion(-)
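The patch adds the AdvSIMD across-lanes instruction class to the AArch64 backend: overloaded intrinsic definitions, a new instruction format, selection patterns for SADDLV/UADDLV, SMAXV/UMAXV, SMINV/UMINV, ADDV and the four floating-point reductions, plus CodeGen and MC tests. At the IR level each reduction consumes a whole vector and produces a one-element vector from which the scalar is extracted; a minimal example mirroring the CodeGen tests below (the function name is illustrative):

    declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)

    define i16 @widening_sum(<8 x i8> %v) {
      %r = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %v)
      %s = extractelement <1 x i16> %r, i32 0
      ret i16 %s                            ; selects to: saddlv h0, v0.8b
    }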
diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td
index 5fc98bf220..b5fa0e8c6a 100644
--- a/include/llvm/CodeGen/ValueTypes.td
+++ b/include/llvm/CodeGen/ValueTypes.td
@@ -26,7 +26,7 @@ def i16 : ValueType<16 , 3>; // 16-bit integer value
def i32 : ValueType<32 , 4>; // 32-bit integer value
def i64 : ValueType<64 , 5>; // 64-bit integer value
def i128 : ValueType<128, 6>; // 128-bit integer value
-def f16 : ValueType<16 , 7>; // 32-bit floating point value
+def f16 : ValueType<16 , 7>; // 16-bit floating point value
def f32 : ValueType<32 , 8>; // 32-bit floating point value
def f64 : ValueType<64 , 9>; // 64-bit floating point value
def f80 : ValueType<80 , 10>; // 80-bit floating point value
diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 4f7252d920..526ebefbf8 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -65,6 +65,25 @@ def int_aarch64_neon_vuqshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqrshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vuqrshrn : Neon_N2V_Narrow_Intrinsic;

+// Vector across
+class Neon_Across_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+class Neon_2Arg_Across_Float_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+def int_aarch64_neon_saddlv : Neon_Across_Intrinsic;
+def int_aarch64_neon_uaddlv : Neon_Across_Intrinsic;
+def int_aarch64_neon_smaxv : Neon_Across_Intrinsic;
+def int_aarch64_neon_umaxv : Neon_Across_Intrinsic;
+def int_aarch64_neon_sminv : Neon_Across_Intrinsic;
+def int_aarch64_neon_uminv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vaddv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vmaxv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vminv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vmaxnmv : Neon_Across_Intrinsic;
+def int_aarch64_neon_vminnmv : Neon_Across_Intrinsic;
+
// Scalar Add
def int_aarch64_neon_vaddds :
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
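Because Neon_Across_Intrinsic declares both the result and the operand as llvm_anyvector_ty, every call site mangles both types into the intrinsic name, so the single TableGen class above covers all of the legal element/length combinations, for example:

    declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
    declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
    declare <1 x i8>  @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)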
diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 9a7a0bb793..fb87db605d 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -1159,5 +1159,24 @@ class NeonI_ScalarPair<bit u, bits<2> size, bits<5> opcode,
// Inherit Rd in 4-0
}

+// Format AdvSIMD 2 vector across lanes
+class NeonI_2VAcross<bit q, bit u, bits<2> size, bits<5> opcode,
+ dag outs, dag ins, string asmstr,
+ list<dag> patterns, InstrItinClass itin>
+ : A64InstRdn<outs, ins, asmstr, patterns, itin>
+{
+ let Inst{31} = 0b0;
+ let Inst{30} = q;
+ let Inst{29} = u;
+ let Inst{28-24} = 0b01110;
+ let Inst{23-22} = size;
+ let Inst{21-17} = 0b11000;
+ let Inst{16-12} = opcode;
+ let Inst{11-10} = 0b10;
+
+ // Inherit Rn in 9-5
+ // Inherit Rd in 4-0
+}
+
}
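The fixed fields above combine with the q, u, size and opcode values supplied by the multiclasses in AArch64InstrNEON.td to give the encodings checked in the MC test; working through saddlv h0, v1.8b (q=0, u=0, size=0b00, opcode=0b00011, Rn=1, Rd=0):

    31 30 29 28-24 23-22 21-17 16-12 11-10 9-5   4-0
    0  0  0  01110 00    11000 00011 10    00001 00000

    = 0000 1110 0011 0000 0011 1000 0010 0000 = 0x0e303820,

i.e. the little-endian bytes [0x20,0x38,0x30,0x0e] expected in neon-across.s below.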
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 9ea0ad6acd..c780f3acd9 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -2202,6 +2202,131 @@ multiclass Neon_sshll2_0<SDNode ext>

defm NI_sext_high : Neon_sshll2_0<sext>;
defm NI_zext_high : Neon_sshll2_0<zext>;
+
+//===----------------------------------------------------------------------===//
+// Multiclasses for NeonI_Across
+//===----------------------------------------------------------------------===//
+
+// Variant 1
+
+multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+{
+ def _1h8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
+ (outs FPR16:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.8b",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
+ (outs FPR16:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.16b",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def _1s4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
+ (outs FPR32:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.4h",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1s8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
+ (outs FPR32:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.8h",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ // _1d2s doesn't exist!
+
+ def _1d4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
+ (outs FPR64:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.4s",
+ [(set (v1i64 FPR64:$Rd),
+ (v1i64 (opnode (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+}
+
+defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
+defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
+
+// Variant 2
+
+multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
+ string asmop, SDPatternOperator opnode>
+{
+ def _1b8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
+ (outs FPR8:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.8b",
+ [(set (v1i8 FPR8:$Rd),
+ (v1i8 (opnode (v8i8 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
+ (outs FPR8:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.16b",
+ [(set (v1i8 FPR8:$Rd),
+ (v1i8 (opnode (v16i8 VPR128:$Rn))))],
+ NoItinerary>;
+
+ def _1h4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
+ (outs FPR16:$Rd), (ins VPR64:$Rn),
+ asmop # "\t$Rd, $Rn.4h",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v4i16 VPR64:$Rn))))],
+ NoItinerary>;
+
+ def _1h8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
+ (outs FPR16:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.8h",
+ [(set (v1i16 FPR16:$Rd),
+ (v1i16 (opnode (v8i16 VPR128:$Rn))))],
+ NoItinerary>;
+
+ // _1s2s doesn't exist!
+
+ def _1s4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
+ (outs FPR32:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.4s",
+ [(set (v1i32 FPR32:$Rd),
+ (v1i32 (opnode (v4i32 VPR128:$Rn))))],
+ NoItinerary>;
+}
+
+defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
+defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
+
+defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
+defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
+
+defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
+
+// Variant 3
+
+multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
+ string asmop, SDPatternOperator opnode>
+{
+ def _1s4s: NeonI_2VAcross<0b1, u, size, opcode,
+ (outs FPR32:$Rd), (ins VPR128:$Rn),
+ asmop # "\t$Rd, $Rn.4s",
+ [(set (v1f32 FPR32:$Rd),
+ (v1f32 (opnode (v4f32 VPR128:$Rn))))],
+ NoItinerary>;
+}
+
+defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
+ int_aarch64_neon_vmaxnmv>;
+defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
+ int_aarch64_neon_vminnmv>;
+
+defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
+ int_aarch64_neon_vmaxv>;
+defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
+ int_aarch64_neon_vminv>;
+
// The followings are for instruction class (3V Diff)
// normal long/long2 pattern
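The "_1d2s doesn't exist!" and "_1s2s doesn't exist!" gaps in the multiclasses above are deliberate: the ISA defines no across-lanes form that would reduce only two lanes (those cases are already reachable with the pairwise instructions), so the assembler has to reject them, e.g.:

    saddlv d0, v1.4s    // valid: widening reduction over four lanes
    saddlv d0, v1.2s    // invalid operand, rejected in neon-diagnostics.s below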
diff --git a/test/CodeGen/AArch64/neon-across.ll b/test/CodeGen/AArch64/neon-across.ll
new file mode 100644
index 0000000000..733db970cf
--- /dev/null
+++ b/test/CodeGen/AArch64/neon-across.ll
@@ -0,0 +1,476 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float>)
+
+declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float>)
+
+declare <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16>)
+
+declare <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8>)
+
+declare <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16>)
+
+declare <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8>)
+
+define i16 @test_vaddlv_s8(<8 x i8> %a) {
+; CHECK: test_vaddlv_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i16> %saddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_s16(<4 x i16> %a) {
+; CHECK: test_vaddlv_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i32> %saddlv.i, i32 0
+ ret i32 %0
+}
+
+define i16 @test_vaddlv_u8(<8 x i8> %a) {
+; CHECK: test_vaddlv_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlv_u16(<4 x i16> %a) {
+; CHECK: test_vaddlv_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+ ret i32 %0
+}
+
+define i16 @test_vaddlvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_s8:
+; CHECK: saddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %saddlv.i = tail call <1 x i16> @llvm.aarch64.neon.saddlv.v1i16.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i16> %saddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_s16:
+; CHECK: saddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %saddlv.i = tail call <1 x i32> @llvm.aarch64.neon.saddlv.v1i32.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i32> %saddlv.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vaddlvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_s32:
+; CHECK: saddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %saddlv.i = tail call <1 x i64> @llvm.aarch64.neon.saddlv.v1i64.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i64> %saddlv.i, i32 0
+ ret i64 %0
+}
+
+define i16 @test_vaddlvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddlvq_u8:
+; CHECK: uaddlv h{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uaddlv.i = tail call <1 x i16> @llvm.aarch64.neon.uaddlv.v1i16.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i16> %uaddlv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddlvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddlvq_u16:
+; CHECK: uaddlv s{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uaddlv.i = tail call <1 x i32> @llvm.aarch64.neon.uaddlv.v1i32.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i32> %uaddlv.i, i32 0
+ ret i32 %0
+}
+
+define i64 @test_vaddlvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddlvq_u32:
+; CHECK: uaddlv d{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uaddlv.i = tail call <1 x i64> @llvm.aarch64.neon.uaddlv.v1i64.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i64> %uaddlv.i, i32 0
+ ret i64 %0
+}
+
+define i8 @test_vmaxv_s8(<8 x i8> %a) {
+; CHECK: test_vmaxv_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %smaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_s16(<4 x i16> %a) {
+; CHECK: test_vmaxv_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %smaxv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vmaxv_u8(<8 x i8> %a) {
+; CHECK: test_vmaxv_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %umaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxv_u16(<4 x i16> %a) {
+; CHECK: test_vmaxv_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %umaxv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vmaxvq_s8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_s8:
+; CHECK: smaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %smaxv.i = tail call <1 x i8> @llvm.aarch64.neon.smaxv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %smaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_s16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_s16:
+; CHECK: smaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %smaxv.i = tail call <1 x i16> @llvm.aarch64.neon.smaxv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %smaxv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_s32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_s32:
+; CHECK: smaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %smaxv.i = tail call <1 x i32> @llvm.aarch64.neon.smaxv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %smaxv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vmaxvq_u8(<16 x i8> %a) {
+; CHECK: test_vmaxvq_u8:
+; CHECK: umaxv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %umaxv.i = tail call <1 x i8> @llvm.aarch64.neon.umaxv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %umaxv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vmaxvq_u16(<8 x i16> %a) {
+; CHECK: test_vmaxvq_u16:
+; CHECK: umaxv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %umaxv.i = tail call <1 x i16> @llvm.aarch64.neon.umaxv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %umaxv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vmaxvq_u32(<4 x i32> %a) {
+; CHECK: test_vmaxvq_u32:
+; CHECK: umaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %umaxv.i = tail call <1 x i32> @llvm.aarch64.neon.umaxv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %umaxv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vminv_s8(<8 x i8> %a) {
+; CHECK: test_vminv_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %sminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminv_s16(<4 x i16> %a) {
+; CHECK: test_vminv_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %sminv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vminv_u8(<8 x i8> %a) {
+; CHECK: test_vminv_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %uminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminv_u16(<4 x i16> %a) {
+; CHECK: test_vminv_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %uminv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vminvq_s8(<16 x i8> %a) {
+; CHECK: test_vminvq_s8:
+; CHECK: sminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %sminv.i = tail call <1 x i8> @llvm.aarch64.neon.sminv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %sminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminvq_s16(<8 x i16> %a) {
+; CHECK: test_vminvq_s16:
+; CHECK: sminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %sminv.i = tail call <1 x i16> @llvm.aarch64.neon.sminv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %sminv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vminvq_s32(<4 x i32> %a) {
+; CHECK: test_vminvq_s32:
+; CHECK: sminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %sminv.i = tail call <1 x i32> @llvm.aarch64.neon.sminv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %sminv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vminvq_u8(<16 x i8> %a) {
+; CHECK: test_vminvq_u8:
+; CHECK: uminv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %uminv.i = tail call <1 x i8> @llvm.aarch64.neon.uminv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %uminv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vminvq_u16(<8 x i16> %a) {
+; CHECK: test_vminvq_u16:
+; CHECK: uminv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %uminv.i = tail call <1 x i16> @llvm.aarch64.neon.uminv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %uminv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vminvq_u32(<4 x i32> %a) {
+; CHECK: test_vminvq_u32:
+; CHECK: uminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %uminv.i = tail call <1 x i32> @llvm.aarch64.neon.uminv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %uminv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vaddv_s8(<8 x i8> %a) {
+; CHECK: test_vaddv_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddv_s16(<4 x i16> %a) {
+; CHECK: test_vaddv_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vaddv_u8(<8 x i8> %a) {
+; CHECK: test_vaddv_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.8b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v8i8(<8 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddv_u16(<4 x i16> %a) {
+; CHECK: test_vaddv_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.4h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v4i16(<4 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i8 @test_vaddvq_s8(<16 x i8> %a) {
+; CHECK: test_vaddvq_s8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_s16(<8 x i16> %a) {
+; CHECK: test_vaddvq_s16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_s32(<4 x i32> %a) {
+; CHECK: test_vaddvq_s32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %vaddv.i, i32 0
+ ret i32 %0
+}
+
+define i8 @test_vaddvq_u8(<16 x i8> %a) {
+; CHECK: test_vaddvq_u8:
+; CHECK: addv b{{[0-9]+}}, {{v[0-9]+}}.16b
+entry:
+ %vaddv.i = tail call <1 x i8> @llvm.aarch64.neon.vaddv.v1i8.v16i8(<16 x i8> %a)
+ %0 = extractelement <1 x i8> %vaddv.i, i32 0
+ ret i8 %0
+}
+
+define i16 @test_vaddvq_u16(<8 x i16> %a) {
+; CHECK: test_vaddvq_u16:
+; CHECK: addv h{{[0-9]+}}, {{v[0-9]+}}.8h
+entry:
+ %vaddv.i = tail call <1 x i16> @llvm.aarch64.neon.vaddv.v1i16.v8i16(<8 x i16> %a)
+ %0 = extractelement <1 x i16> %vaddv.i, i32 0
+ ret i16 %0
+}
+
+define i32 @test_vaddvq_u32(<4 x i32> %a) {
+; CHECK: test_vaddvq_u32:
+; CHECK: addv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vaddv.i = tail call <1 x i32> @llvm.aarch64.neon.vaddv.v1i32.v4i32(<4 x i32> %a)
+ %0 = extractelement <1 x i32> %vaddv.i, i32 0
+ ret i32 %0
+}
+
+define float @test_vmaxvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxvq_f32:
+; CHECK: fmaxv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vmaxv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vmaxv.i, i32 0
+ ret float %0
+}
+
+define float @test_vminvq_f32(<4 x float> %a) {
+; CHECK: test_vminvq_f32:
+; CHECK: fminv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vminv.i = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vminv.i, i32 0
+ ret float %0
+}
+
+define float @test_vmaxnmvq_f32(<4 x float> %a) {
+; CHECK: test_vmaxnmvq_f32:
+; CHECK: fmaxnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vmaxnmv.i = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vmaxnmv.i, i32 0
+ ret float %0
+}
+
+define float @test_vminnmvq_f32(<4 x float> %a) {
+; CHECK: test_vminnmvq_f32:
+; CHECK: fminnmv s{{[0-9]+}}, {{v[0-9]+}}.4s
+entry:
+ %vminnmv.i = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v4f32(<4 x float> %a)
+ %0 = extractelement <1 x float> %vminnmv.i, i32 0
+ ret float %0
+}
+
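All of the integer tests share one shape: the intrinsic yields a <1 x iN> value, and the extractelement of lane 0 costs nothing on the FPR side because the patterns above already tie the result to the matching b/h/s/d register (the integer returns still need whatever FPR-to-GPR copy the ABI requires, which the CHECK lines do not pin down). The float cases collapse entirely into one instruction:

    %vmaxv = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v4f32(<4 x float> %a)
    %0 = extractelement <1 x float> %vmaxv, i32 0
    ; both lines together select to the single instruction: fmaxv s0, v0.4s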
diff --git a/test/MC/AArch64/neon-across.s b/test/MC/AArch64/neon-across.s
new file mode 100644
index 0000000000..8b1c2d421b
--- /dev/null
+++ b/test/MC/AArch64/neon-across.s
@@ -0,0 +1,101 @@
+// RUN: llvm-mc -triple=aarch64 -mattr=+neon -show-encoding < %s | FileCheck %s
+
+// Check that the assembler can handle the documented syntax for AArch64
+
+//------------------------------------------------------------------------------
+// Instructions across vector registers
+//------------------------------------------------------------------------------
+
+ saddlv h0, v1.8b
+ saddlv h0, v1.16b
+ saddlv s0, v1.4h
+ saddlv s0, v1.8h
+ saddlv d0, v1.4s
+
+// CHECK: saddlv h0, v1.8b // encoding: [0x20,0x38,0x30,0x0e]
+// CHECK: saddlv h0, v1.16b // encoding: [0x20,0x38,0x30,0x4e]
+// CHECK: saddlv s0, v1.4h // encoding: [0x20,0x38,0x70,0x0e]
+// CHECK: saddlv s0, v1.8h // encoding: [0x20,0x38,0x70,0x4e]
+// CHECK: saddlv d0, v1.4s // encoding: [0x20,0x38,0xb0,0x4e]
+
+ uaddlv h0, v1.8b
+ uaddlv h0, v1.16b
+ uaddlv s0, v1.4h
+ uaddlv s0, v1.8h
+ uaddlv d0, v1.4s
+
+// CHECK: uaddlv h0, v1.8b // encoding: [0x20,0x38,0x30,0x2e]
+// CHECK: uaddlv h0, v1.16b // encoding: [0x20,0x38,0x30,0x6e]
+// CHECK: uaddlv s0, v1.4h // encoding: [0x20,0x38,0x70,0x2e]
+// CHECK: uaddlv s0, v1.8h // encoding: [0x20,0x38,0x70,0x6e]
+// CHECK: uaddlv d0, v1.4s // encoding: [0x20,0x38,0xb0,0x6e]
+
+ smaxv b0, v1.8b
+ smaxv b0, v1.16b
+ smaxv h0, v1.4h
+ smaxv h0, v1.8h
+ smaxv s0, v1.4s
+
+// CHECK: smaxv b0, v1.8b // encoding: [0x20,0xa8,0x30,0x0e]
+// CHECK: smaxv b0, v1.16b // encoding: [0x20,0xa8,0x30,0x4e]
+// CHECK: smaxv h0, v1.4h // encoding: [0x20,0xa8,0x70,0x0e]
+// CHECK: smaxv h0, v1.8h // encoding: [0x20,0xa8,0x70,0x4e]
+// CHECK: smaxv s0, v1.4s // encoding: [0x20,0xa8,0xb0,0x4e]
+
+ sminv b0, v1.8b
+ sminv b0, v1.16b
+ sminv h0, v1.4h
+ sminv h0, v1.8h
+ sminv s0, v1.4s
+
+// CHECK: sminv b0, v1.8b // encoding: [0x20,0xa8,0x31,0x0e]
+// CHECK: sminv b0, v1.16b // encoding: [0x20,0xa8,0x31,0x4e]
+// CHECK: sminv h0, v1.4h // encoding: [0x20,0xa8,0x71,0x0e]
+// CHECK: sminv h0, v1.8h // encoding: [0x20,0xa8,0x71,0x4e]
+// CHECK: sminv s0, v1.4s // encoding: [0x20,0xa8,0xb1,0x4e]
+
+ umaxv b0, v1.8b
+ umaxv b0, v1.16b
+ umaxv h0, v1.4h
+ umaxv h0, v1.8h
+ umaxv s0, v1.4s
+
+// CHECK: umaxv b0, v1.8b // encoding: [0x20,0xa8,0x30,0x2e]
+// CHECK: umaxv b0, v1.16b // encoding: [0x20,0xa8,0x30,0x6e]
+// CHECK: umaxv h0, v1.4h // encoding: [0x20,0xa8,0x70,0x2e]
+// CHECK: umaxv h0, v1.8h // encoding: [0x20,0xa8,0x70,0x6e]
+// CHECK: umaxv s0, v1.4s // encoding: [0x20,0xa8,0xb0,0x6e]
+
+ uminv b0, v1.8b
+ uminv b0, v1.16b
+ uminv h0, v1.4h
+ uminv h0, v1.8h
+ uminv s0, v1.4s
+
+// CHECK: uminv b0, v1.8b // encoding: [0x20,0xa8,0x31,0x2e]
+// CHECK: uminv b0, v1.16b // encoding: [0x20,0xa8,0x31,0x6e]
+// CHECK: uminv h0, v1.4h // encoding: [0x20,0xa8,0x71,0x2e]
+// CHECK: uminv h0, v1.8h // encoding: [0x20,0xa8,0x71,0x6e]
+// CHECK: uminv s0, v1.4s // encoding: [0x20,0xa8,0xb1,0x6e]
+
+ addv b0, v1.8b
+ addv b0, v1.16b
+ addv h0, v1.4h
+ addv h0, v1.8h
+ addv s0, v1.4s
+
+// CHECK: addv b0, v1.8b // encoding: [0x20,0xb8,0x31,0x0e]
+// CHECK: addv b0, v1.16b // encoding: [0x20,0xb8,0x31,0x4e]
+// CHECK: addv h0, v1.4h // encoding: [0x20,0xb8,0x71,0x0e]
+// CHECK: addv h0, v1.8h // encoding: [0x20,0xb8,0x71,0x4e]
+// CHECK: addv s0, v1.4s // encoding: [0x20,0xb8,0xb1,0x4e]
+
+ fmaxnmv s0, v1.4s
+ fminnmv s0, v1.4s
+ fmaxv s0, v1.4s
+ fminv s0, v1.4s
+
+// CHECK: fmaxnmv s0, v1.4s // encoding: [0x20,0xc8,0x30,0x6e]
+// CHECK: fminnmv s0, v1.4s // encoding: [0x20,0xc8,0xb0,0x6e]
+// CHECK: fmaxv s0, v1.4s // encoding: [0x20,0xf8,0x30,0x6e]
+// CHECK: fminv s0, v1.4s // encoding: [0x20,0xf8,0xb0,0x6e]
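Note how the four floating-point reductions differ pairwise in exactly one field each: the min variants flip size<1> relative to the max variants (bit 23, visible as 0x30 vs. 0xb0 in the third byte), while the NM vs. plain variants change only the opcode (0b01100 vs. 0b01111, 0xc8 vs. 0xf8 in the second byte):

    fmaxnmv s0, v1.4s   opcode=01100 size=00 -> [0x20,0xc8,0x30,0x6e]
    fminnmv s0, v1.4s   opcode=01100 size=10 -> [0x20,0xc8,0xb0,0x6e]
    fmaxv   s0, v1.4s   opcode=01111 size=00 -> [0x20,0xf8,0x30,0x6e]
    fminv   s0, v1.4s   opcode=01111 size=10 -> [0x20,0xf8,0xb0,0x6e]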
diff --git a/test/MC/AArch64/neon-diagnostics.s b/test/MC/AArch64/neon-diagnostics.s
index 211bc9aa5c..a86796ff2a 100644
--- a/test/MC/AArch64/neon-diagnostics.s
+++ b/test/MC/AArch64/neon-diagnostics.s
@@ -3608,3 +3608,166 @@
// CHECK-ERROR: error: invalid operand for instruction
// CHECK-ERROR: sqrdmulh v0.2d, v1.2d, v22.d[1]
// CHECK-ERROR: ^
+
+//----------------------------------------------------------------------
+// Across vectors
+//----------------------------------------------------------------------
+
+ saddlv b0, v1.8b
+ saddlv b0, v1.16b
+ saddlv h0, v1.4h
+ saddlv h0, v1.8h
+ saddlv s0, v1.2s
+ saddlv s0, v1.4s
+ saddlv d0, v1.2s
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv b0, v1.8b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv h0, v1.4h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv s0, v1.4s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: saddlv d0, v1.2s
+// CHECK-ERROR: ^
+
+ uaddlv b0, v1.8b
+ uaddlv b0, v1.16b
+ uaddlv h0, v1.4h
+ uaddlv h0, v1.8h
+ uaddlv s0, v1.2s
+ uaddlv s0, v1.4s
+ uaddlv d0, v1.2s
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv b0, v1.8b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv h0, v1.4h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv s0, v1.4s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uaddlv d0, v1.2s
+// CHECK-ERROR: ^
+
+ smaxv s0, v1.2s
+ sminv s0, v1.2s
+ umaxv s0, v1.2s
+ uminv s0, v1.2s
+ addv s0, v1.2s
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: smaxv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sminv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: umaxv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uminv s0, v1.2s
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: addv s0, v1.2s
+// CHECK-ERROR: ^
+
+ smaxv d0, v1.2d
+ sminv d0, v1.2d
+ umaxv d0, v1.2d
+ uminv d0, v1.2d
+ addv d0, v1.2d
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: smaxv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: sminv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: umaxv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: uminv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: addv d0, v1.2d
+// CHECK-ERROR: ^
+
+ fmaxnmv b0, v1.16b
+ fminnmv b0, v1.16b
+ fmaxv b0, v1.16b
+ fminv b0, v1.16b
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxnmv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminnmv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxv b0, v1.16b
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminv b0, v1.16b
+// CHECK-ERROR: ^
+
+ fmaxnmv h0, v1.8h
+ fminnmv h0, v1.8h
+ fmaxv h0, v1.8h
+ fminv h0, v1.8h
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxnmv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminnmv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxv h0, v1.8h
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminv h0, v1.8h
+// CHECK-ERROR: ^
+
+ fmaxnmv d0, v1.2d
+ fminnmv d0, v1.2d
+ fmaxv d0, v1.2d
+ fminv d0, v1.2d
+
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxnmv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminnmv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fmaxv d0, v1.2d
+// CHECK-ERROR: ^
+// CHECK-ERROR: error: invalid operand for instruction
+// CHECK-ERROR: fminv d0, v1.2d
+// CHECK-ERROR: ^
+