-rw-r--r--  include/llvm/IR/IntrinsicsAArch64.td      4
-rw-r--r--  lib/Target/AArch64/AArch64InstrNEON.td   36
-rw-r--r--  test/CodeGen/AArch64/neon-scalar-mul.ll  44
3 files changed, 56 insertions(+), 28 deletions(-)
diff --git a/include/llvm/IR/IntrinsicsAArch64.td b/include/llvm/IR/IntrinsicsAArch64.td
index 7fa5a06288..944c1449eb 100644
--- a/include/llvm/IR/IntrinsicsAArch64.td
+++ b/include/llvm/IR/IntrinsicsAArch64.td
@@ -206,10 +206,10 @@ def int_aarch64_neon_vneg :
Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;
// Signed Saturating Doubling Multiply-Add Long
-def int_aarch64_neon_vqdmlal : Neon_2Arg_Long_Intrinsic;
+def int_aarch64_neon_vqdmlal : Neon_3Arg_Long_Intrinsic;
// Signed Saturating Doubling Multiply-Subtract Long
-def int_aarch64_neon_vqdmlsl : Neon_2Arg_Long_Intrinsic;
+def int_aarch64_neon_vqdmlsl : Neon_3Arg_Long_Intrinsic;
// Signed Saturating Doubling Multiply Long
def int_aarch64_neon_vqdmull : Neon_2Arg_Long_Intrinsic;
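For reference, moving from Neon_2Arg_Long_Intrinsic to Neon_3Arg_Long_Intrinsic gives each
intrinsic an accumulator operand whose type matches the widened result. The class definitions
themselves are not part of this diff; a minimal sketch of what they presumably look like
elsewhere in IntrinsicsAArch64.td (an assumption for illustration):

    class Neon_2Arg_Long_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMTruncatedElementVectorType<0>,
                   LLVMTruncatedElementVectorType<0>],
                  [IntrNoMem]>;

    class Neon_3Arg_Long_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
                  [LLVMMatchType<0>,                  // accumulator, same type as the result
                   LLVMTruncatedElementVectorType<0>, // multiplicands use the narrow element type
                   LLVMTruncatedElementVectorType<0>],
                  [IntrNoMem]>;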
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 361909a398..9efbb0d34f 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -3246,6 +3246,21 @@ multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
NoItinerary>;
}
+multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
+ let Constraints = "$Src = $Rd" in {
+ def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
+ (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
+ !strconcat(asmop, " $Rd, $Rn, $Rm"),
+ [],
+ NoItinerary>;
+ def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
+ (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
+ !strconcat(asmop, " $Rd, $Rn, $Rm"),
+ [],
+ NoItinerary>;
+ }
+}
+
multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
Instruction INSTH,
Instruction INSTS> {
@@ -3255,6 +3270,15 @@ multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
(INSTS FPR32:$Rn, FPR32:$Rm)>;
}
+multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
+ Instruction INSTH,
+ Instruction INSTS> {
+ def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
+ (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
+ def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
+ (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
+}
+
// Scalar Two Registers Miscellaneous
multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
@@ -3553,14 +3577,14 @@ defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
// Signed Saturating Doubling Multiply-Add Long
-defm SQDMLAL : NeonI_Scalar3Diff_HS_size<0b0, 0b1001, "sqdmlal">;
-defm : Neon_Scalar3Diff_HS_size_patterns<int_aarch64_neon_vqdmlal,
- SQDMLALshh, SQDMLALdss>;
+defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
+defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
+ SQDMLALshh, SQDMLALdss>;
// Signed Saturating Doubling Multiply-Subtract Long
-defm SQDMLSL : NeonI_Scalar3Diff_HS_size<0b0, 0b1011, "sqdmlsl">;
-defm : Neon_Scalar3Diff_HS_size_patterns<int_aarch64_neon_vqdmlsl,
- SQDMLSLshh, SQDMLSLdss>;
+defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
+defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
+ SQDMLSLshh, SQDMLSLdss>;
// Signed Saturating Doubling Multiply Long
defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
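The new NeonI_Scalar3Diff_ml_HS_size multiclass ties the accumulator input to the destination
($Src = $Rd) because sqdmlal/sqdmlsl are destructive, read-modify-write instructions: the
register holding the running sum must also receive the result. A sketch of the intended
semantics in informal notation (an illustration, not quoted from this commit):

    // sqdmlal Sd, Hn, Hm  ==>  Sd = SQADD(Sd, SQDMULL(Hn, Hm))
    // sqdmlsl Dd, Sn, Sm  ==>  Dd = SQSUB(Dd, SQDMULL(Sn, Sm))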
diff --git a/test/CodeGen/AArch64/neon-scalar-mul.ll b/test/CodeGen/AArch64/neon-scalar-mul.ll
index 3129df20f0..a58294b209 100644
--- a/test/CodeGen/AArch64/neon-scalar-mul.ll
+++ b/test/CodeGen/AArch64/neon-scalar-mul.ll
@@ -69,55 +69,59 @@ define double @test_vmulxd_f64(double %a, double %b) {
declare <1 x float> @llvm.aarch64.neon.vmulx.v1f32(<1 x float>, <1 x float>)
declare <1 x double> @llvm.aarch64.neon.vmulx.v1f64(<1 x double>, <1 x double>)
-define i32 @test_vqdmlalh_s16(i16 %a, i16 %b) {
+define i32 @test_vqdmlalh_s16(i32 %a, i16 %b, i16 %c) {
; CHECK: test_vqdmlalh_s16
; CHECK: sqdmlal {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
entry:
- %vqdmlal.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqdmlal.i = insertelement <1 x i32> undef, i32 %a, i32 0
%vqdmlal1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vqdmlal2.i = call <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i16> %vqdmlal.i, <1 x i16> %vqdmlal1.i)
- %0 = extractelement <1 x i32> %vqdmlal2.i, i32 0
+ %vqdmlal2.i = insertelement <1 x i16> undef, i16 %c, i32 0
+ %vqdmlal3.i = call <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i32> %vqdmlal.i, <1 x i16> %vqdmlal1.i, <1 x i16> %vqdmlal2.i)
+ %0 = extractelement <1 x i32> %vqdmlal3.i, i32 0
ret i32 %0
}
-define i64 @test_vqdmlals_s32(i32 %a, i32 %b) {
+define i64 @test_vqdmlals_s32(i64 %a, i32 %b, i32 %c) {
; CHECK: test_vqdmlals_s32
; CHECK: sqdmlal {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vqdmlal.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqdmlal.i = insertelement <1 x i64> undef, i64 %a, i32 0
%vqdmlal1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vqdmlal2.i = call <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i32> %vqdmlal.i, <1 x i32> %vqdmlal1.i)
- %0 = extractelement <1 x i64> %vqdmlal2.i, i32 0
+ %vqdmlal2.i = insertelement <1 x i32> undef, i32 %c, i32 0
+ %vqdmlal3.i = call <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i64> %vqdmlal.i, <1 x i32> %vqdmlal1.i, <1 x i32> %vqdmlal2.i)
+ %0 = extractelement <1 x i64> %vqdmlal3.i, i32 0
ret i64 %0
}
-declare <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i16>, <1 x i16>)
-declare <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i32>, <1 x i32>)
+declare <1 x i32> @llvm.aarch64.neon.vqdmlal.v1i32(<1 x i32>, <1 x i16>, <1 x i16>)
+declare <1 x i64> @llvm.aarch64.neon.vqdmlal.v1i64(<1 x i64>, <1 x i32>, <1 x i32>)
-define i32 @test_vqdmlslh_s16(i16 %a, i16 %b) {
+define i32 @test_vqdmlslh_s16(i32 %a, i16 %b, i16 %c) {
; CHECK: test_vqdmlslh_s16
; CHECK: sqdmlsl {{s[0-9]+}}, {{h[0-9]+}}, {{h[0-9]+}}
entry:
- %vqdmlsl.i = insertelement <1 x i16> undef, i16 %a, i32 0
+ %vqdmlsl.i = insertelement <1 x i32> undef, i32 %a, i32 0
%vqdmlsl1.i = insertelement <1 x i16> undef, i16 %b, i32 0
- %vqdmlsl2.i = call <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i16> %vqdmlsl.i, <1 x i16> %vqdmlsl1.i)
- %0 = extractelement <1 x i32> %vqdmlsl2.i, i32 0
+ %vqdmlsl2.i = insertelement <1 x i16> undef, i16 %c, i32 0
+ %vqdmlsl3.i = call <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i32> %vqdmlsl.i, <1 x i16> %vqdmlsl1.i, <1 x i16> %vqdmlsl2.i)
+ %0 = extractelement <1 x i32> %vqdmlsl3.i, i32 0
ret i32 %0
}
-define i64 @test_vqdmlsls_s32(i32 %a, i32 %b) {
+define i64 @test_vqdmlsls_s32(i64 %a, i32 %b, i32 %c) {
; CHECK: test_vqdmlsls_s32
; CHECK: sqdmlsl {{d[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
entry:
- %vqdmlsl.i = insertelement <1 x i32> undef, i32 %a, i32 0
+ %vqdmlsl.i = insertelement <1 x i64> undef, i64 %a, i32 0
%vqdmlsl1.i = insertelement <1 x i32> undef, i32 %b, i32 0
- %vqdmlsl2.i = call <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i32> %vqdmlsl.i, <1 x i32> %vqdmlsl1.i)
- %0 = extractelement <1 x i64> %vqdmlsl2.i, i32 0
+ %vqdmlsl2.i = insertelement <1 x i32> undef, i32 %c, i32 0
+ %vqdmlsl3.i = call <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i64> %vqdmlsl.i, <1 x i32> %vqdmlsl1.i, <1 x i32> %vqdmlsl2.i)
+ %0 = extractelement <1 x i64> %vqdmlsl3.i, i32 0
ret i64 %0
}
-declare <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i16>, <1 x i16>)
-declare <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i32>, <1 x i32>)
+declare <1 x i32> @llvm.aarch64.neon.vqdmlsl.v1i32(<1 x i32>, <1 x i16>, <1 x i16>)
+declare <1 x i64> @llvm.aarch64.neon.vqdmlsl.v1i64(<1 x i64>, <1 x i32>, <1 x i32>)
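; The updated tests model the ACLE scalar intrinsics, which pass the accumulator
; as the first argument. Sketch of the corresponding C prototypes (an assumption
; about arm_neon.h, not something shown in this diff):
;   int32_t vqdmlalh_s16(int32_t a, int16_t b, int16_t c);
;   int64_t vqdmlsls_s32(int64_t a, int32_t b, int32_t c);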
define i32 @test_vqdmullh_s16(i16 %a, i16 %b) {
; CHECK: test_vqdmullh_s16