From 7582d8d76f225ecbe879d6346741e925b06d1d4d Mon Sep 17 00:00:00 2001
From: Kevin Qin <Kevin.Qin@arm.com>
Date: Mon, 20 Jan 2014 02:14:05 +0000
Subject: [AArch64 NEON] Accept both #0.0 and #0 for comparing with floating
 point zero in asm parser.

For FCMEQ, FCMGE, FCMGT, FCMLE and FCMLT, floating point zero will be
printed as #0.0 instead of #0. To support the history codes using #0,
we consider to let asm parser accept both #0.0 and #0.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@199621 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AArch64/AArch64InstrNEON.td            | 40 ++++++++++++-------
 lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp | 48 +++++++++++++++++++++++
 2 files changed, 74 insertions(+), 14 deletions(-)

(limited to 'lib')

diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 41c6c86f36..3f9743989b 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -813,29 +813,41 @@ def FCMLTvvv_2S : NeonI_compare_aliases<"fcmlt", ".2s", FCMGTvvv_2S, VPR64>;
 def FCMLTvvv_4S : NeonI_compare_aliases<"fcmlt", ".4s", FCMGTvvv_4S, VPR128>;
 def FCMLTvvv_2D : NeonI_compare_aliases<"fcmlt", ".2d", FCMGTvvv_2D, VPR128>;
 
+def fpzero_izero_asmoperand : AsmOperandClass {
+  let Name = "FPZeroIZero";
+  let ParserMethod = "ParseFPImm0AndImm0Operand";
+  let DiagnosticType = "FPZero";
+}
+
+def fpzz32 : Operand<f32>,
+             ComplexPattern<f32, 1, "SelectFPZeroOperand", [fpimm]> {
+  let ParserMatchClass = fpzero_izero_asmoperand;
+  let PrintMethod = "printFPZeroOperand";
+  let DecoderMethod = "DecodeFPZeroOperand";
+}
+
 multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
                               string asmop, CondCode CC>
 {
   def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
-           (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
+           (outs VPR64:$Rd), (ins VPR64:$Rn, fpzz32:$FPImm),
            asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
            [(set (v2i32 VPR64:$Rd),
-              (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpz32:$FPImm), CC)))],
+              (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpzz32:$FPImm), CC)))],
            NoItinerary>;
 
   def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
-           (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
+           (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
            asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
            [(set (v4i32 VPR128:$Rd),
-              (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpz32:$FPImm), CC)))],
+              (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
            NoItinerary>;
 
   def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
-           (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
+           (outs VPR128:$Rd), (ins VPR128:$Rn, fpzz32:$FPImm),
            asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
            [(set (v2i64 VPR128:$Rd),
-              (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpz32:$FPImm), CC)))],
+              (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpzz32:$FPImm), CC)))],
            NoItinerary>;
 }
 
@@ -4512,12 +4524,12 @@ class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
 multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
                                               string asmop> {
   def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
-                                  (outs FPR32:$Rd), (ins FPR32:$Rn, fpz32:$FPImm),
+                                  (outs FPR32:$Rd), (ins FPR32:$Rn, fpzz32:$FPImm),
                                   !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
                                   [], NoItinerary>;
   def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
-                                  (outs FPR64:$Rd), (ins FPR64:$Rn, fpz32:$FPImm),
+                                  (outs FPR64:$Rd), (ins FPR64:$Rn, fpzz32:$FPImm),
                                   !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
                                   [], NoItinerary>;
 }
 
@@ -4539,12 +4551,12 @@
 multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
                                                       Instruction INSTS,
                                                       Instruction INSTD, CondCode CC> {
-  def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 fpz32:$FPImm))),
-            (INSTS FPR32:$Rn, fpz32:$FPImm)>;
-  def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f32 fpz32:$FPImm))),
-            (INSTD FPR64:$Rn, fpz32:$FPImm)>;
-  def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpz32:$FPImm), CC)),
-            (INSTD FPR64:$Rn, fpz32:$FPImm)>;
+  def : Pat<(v1i32 (opnode (f32 FPR32:$Rn), (f32 fpzz32:$FPImm))),
+            (INSTS FPR32:$Rn, fpzz32:$FPImm)>;
+  def : Pat<(v1i64 (opnode (f64 FPR64:$Rn), (f32 fpzz32:$FPImm))),
+            (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
+  def : Pat<(v1i64 (Neon_cmpz (v1f64 FPR64:$Rn), (f32 fpzz32:$FPImm), CC)),
+            (INSTD FPR64:$Rn, fpzz32:$FPImm)>;
 }
 
 multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
diff --git a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
--- a/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -67,6 +67,9 @@ class AArch64AsmParser : public MCTargetAsmParser {
   OperandMatchResultTy
   ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
 
+  OperandMatchResultTy
+  ParseFPImm0AndImm0Operand( SmallVectorImpl<MCParsedAsmOperand*> &Operands);
+
   template<typename SomeNamedImmMapper> OperandMatchResultTy
   ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
     return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
@@ -826,6 +829,10 @@ public:
     return CE->getValue() == N;
   }
 
+  bool isFPZeroIZero() const {
+    return isFPZero();
+  }
+
   static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                           unsigned ShiftAmount,
                                           bool ImplicitAmount,
@@ -965,6 +972,10 @@ public:
     Inst.addOperand(MCOperand::CreateImm(0));
   }
 
+  void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
+    addFPZeroOperands(Inst, N);
+  }
+
   void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     unsigned Encoded = A64InvertCondCode(getCondCode());
@@ -1605,6 +1616,43 @@ AArch64AsmParser::ParseFPImmOperand(
   return MatchOperand_Success;
 }
 
+AArch64AsmParser::OperandMatchResultTy
+AArch64AsmParser::ParseFPImm0AndImm0Operand(
+                                      SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
+  // FIXME?: I want to live in a world where immediates must start with
+  // #. Please don't dash my hopes (well, do if you have a good reason).
+
+  //This function is only used in floating compare with zero instructions to get
+  //those instructions accept both #0.0 and #0.
+  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
+
+  SMLoc S = Parser.getTok().getLoc();
+  Parser.Lex(); // Eat '#'
+
+  APFloat RealVal(0.0);
+  if (Parser.getTok().is(AsmToken::Real)) {
+    if(Parser.getTok().getString() != "0.0") {
+      Error(S, "only #0.0 is acceptable as immediate");
+      return MatchOperand_ParseFail;
+    }
+  }
+  else if (Parser.getTok().is(AsmToken::Integer)) {
+    if(Parser.getTok().getIntVal() != 0) {
+      Error(S, "only #0.0 is acceptable as immediate");
+      return MatchOperand_ParseFail;
+    }
+  }
+  else {
+    Error(S, "only #0.0 is acceptable as immediate");
+    return MatchOperand_ParseFail;
+  }
+
+  Parser.Lex(); // Eat real number
+  SMLoc E = Parser.getTok().getLoc();
+
+  Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
+  return MatchOperand_Success;
+}
 
 // Automatically generated
 static unsigned MatchRegisterName(StringRef Name);
--
cgit v1.2.3