author     Johnny Chen <johnny.chen@apple.com>    2010-03-17 17:52:21 +0000
committer  Johnny Chen <johnny.chen@apple.com>    2010-03-17 17:52:21 +0000
commit     9e08876a2ae329feb7a76dbfe33666cb58033c00 (patch)
tree       436813e36b90abc43c404faaf6db8bdd8d05acb7
parent     b0a72ec2eb7843b5feca7cd22450903988f0e368 (diff)
Added sub-formats to the NeonI/NeonXI instructions to further refine the
NEONFrm instructions to help disassembly.

We also changed the output of the addressing modes to omit the '+' from the
assembler syntax #+/-<imm> or +/-<Rm>.  See, for example, A8.6.57/58/60.

And modified test cases to not expect '+' in +reg or #+num.  For example,

    ; CHECK: ldr.w r9, [r7, #28]

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@98745 91177308-0d34-0410-b5e6-96231b3b80d8
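For illustration, the addressing-mode half of this change amounts to printing nothing in place of the old '+' sign while keeping '-' for negative offsets. Below is a minimal standalone C++ sketch of the getAddrOpcStr helper this patch adds to ARMAddressingModes.h (the enum copy and the main() driver are illustrative only, not the in-tree printer code):

#include <cassert>
#include <sstream>
#include <string>

// Mirrors ARM_AM::AddrOpc from ARMAddressingModes.h.
enum AddrOpc { add = '+', sub = '-' };

// The new helper: empty string for the add case, "-" for the sub case.
// The old printers streamed (char)Op instead, yielding "#+28"-style offsets.
static inline const char *getAddrOpcStr(AddrOpc Op) {
  return Op == sub ? "-" : "";
}

int main() {
  std::ostringstream O;
  O << "ldr.w r9, [r7, #" << getAddrOpcStr(add) << 28 << "]";
  assert(O.str() == "ldr.w r9, [r7, #28]");  // no '+' before the immediate
  O.str("");
  O << "ldr r0, [r1, #" << getAddrOpcStr(sub) << 4 << "]";
  assert(O.str() == "ldr r0, [r1, #-4]");    // '-' is still printed
  return 0;
}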
-rw-r--r--  lib/Target/ARM/ARMAddressingModes.h            20
-rw-r--r--  lib/Target/ARM/ARMInstrFormats.td              43
-rw-r--r--  lib/Target/ARM/ARMInstrNEON.td                495
-rw-r--r--  lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp    46
-rw-r--r--  lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp  427
-rw-r--r--  lib/Target/ARM/AsmPrinter/ARMInstPrinter.h     40
-rw-r--r--  test/CodeGen/ARM/2009-10-27-double-align.ll     4
-rw-r--r--  test/CodeGen/ARM/2009-10-30.ll                  2
-rw-r--r--  test/CodeGen/ARM/arm-negative-stride.ll         2
-rw-r--r--  test/CodeGen/ARM/globals.ll                     4
-rw-r--r--  test/CodeGen/ARM/ldrd.ll                        4
-rw-r--r--  test/CodeGen/ARM/str_pre-2.ll                   2
-rw-r--r--  test/CodeGen/ARM/tls2.ll                        4
-rw-r--r--  test/CodeGen/Thumb2/ldr-str-imm12.ll            8
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ldr.ll               2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-ldrh.ll              2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-str.ll               2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-str_pre.ll           4
-rw-r--r--  test/CodeGen/Thumb2/thumb2-strb.ll              2
-rw-r--r--  test/CodeGen/Thumb2/thumb2-strh.ll              2
20 files changed, 952 insertions, 163 deletions
diff --git a/lib/Target/ARM/ARMAddressingModes.h b/lib/Target/ARM/ARMAddressingModes.h
index abdd938cc1..0b3a7d4192 100644
--- a/lib/Target/ARM/ARMAddressingModes.h
+++ b/lib/Target/ARM/ARMAddressingModes.h
@@ -35,6 +35,10 @@ namespace ARM_AM {
add = '+', sub = '-'
};
+ static inline const char *getAddrOpcStr(AddrOpc Op) {
+ return Op == sub ? "-" : "";
+ }
+
static inline const char *getShiftOpcStr(ShiftOpc Op) {
switch (Op) {
default: assert(0 && "Unknown shift opc!");
@@ -127,6 +131,20 @@ namespace ARM_AM {
return (Imm >> 8) * 2;
}
+ /// getSOImmValOneRotate - Try to handle Imm with an immediate shifter
+ /// operand, computing the rotate amount to use. If this immediate value
+ /// cannot be handled with a single shifter-op, return 0.
+ static inline unsigned getSOImmValOneRotate(unsigned Imm) {
+ // A5.2.4 Constants with multiple encodings
+ // The lowest unsigned value of rotation wins!
+ for (unsigned R = 1; R <= 15; ++R)
+ if ((Imm & rotr32(~255U, 2*R)) == 0)
+ return 2*R;
+
+ // Failed to find a suitable rotate amount.
+ return 0;
+ }
+
/// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
/// computing the rotate amount to use. If this immediate value cannot be
/// handled with a single shifter-op, determine a good rotate amount that will
@@ -179,7 +197,7 @@ namespace ARM_AM {
// of zero.
if ((Arg & ~255U) == 0) return Arg;
- unsigned RotAmt = getSOImmValRotate(Arg);
+ unsigned RotAmt = getSOImmValOneRotate(Arg);
// If this cannot be handled with a single shifter_op, bail out.
if (rotr32(~255U, RotAmt) & Arg)
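To make the rotation rule concrete, here is a self-contained C++ sketch of the check performed by the getSOImmValOneRotate helper added above (rotr32 is re-stated for illustration and the sample immediates in main() are hypothetical). An ARM modified immediate is an 8-bit value rotated right by an even amount, and per A5.2.4 the smallest such rotation is the canonical one:

#include <cassert>

// Rotate Val right by Amt bits (same idea as ARM_AM::rotr32).
static unsigned rotr32(unsigned Val, unsigned Amt) {
  return (Val >> Amt) | (Val << ((32 - Amt) & 31));
}

// Smallest even rotation that leaves only the low 8 bits populated,
// or 0 if no single rotation works (mirrors getSOImmValOneRotate).
static unsigned getSOImmValOneRotate(unsigned Imm) {
  for (unsigned R = 1; R <= 15; ++R)
    if ((Imm & rotr32(~255U, 2 * R)) == 0)
      return 2 * R;
  return 0;
}

int main() {
  assert(getSOImmValOneRotate(0xF000000FU) == 4);  // 0xFF rotated right by 4
  assert(getSOImmValOneRotate(0x00000FF0U) == 28); // 0xFF rotated right by 28
  assert(getSOImmValOneRotate(0x00000101U) == 0);  // not a single shifter-op
  return 0;
}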
diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td
index 258a96b921..52553f5477 100644
--- a/lib/Target/ARM/ARMInstrFormats.td
+++ b/lib/Target/ARM/ARMInstrFormats.td
@@ -1464,6 +1464,29 @@ class AVConv5I<bits<8> opcod1, bits<4> opcod2, dag oops, dag iops,
// ARM NEON Instruction templates.
//
+// NSFormat specifies further details of a NEON instruction. This is used by
+// the disassembler to classify NEONFrm instructions for disassembly purposes.
+class NSFormat<bits<5> val> {
+ bits<5> Value = val;
+}
+def NSFormatNone : NSFormat<0>;
+def VLDSTLaneFrm : NSFormat<1>;
+def VLDSTLaneDblFrm : NSFormat<2>;
+def VLDSTRQFrm : NSFormat<3>;
+def NVdImmFrm : NSFormat<4>;
+def NVdVmImmFrm : NSFormat<5>;
+def NVdVmImmVCVTFrm : NSFormat<6>;
+def NVdVmImmVDupLaneFrm : NSFormat<7>;
+def NVdVmImmVSHLLFrm : NSFormat<8>;
+def NVectorShuffleFrm : NSFormat<9>;
+def NVectorShiftFrm : NSFormat<10>;
+def NVectorShift2Frm : NSFormat<11>;
+def NVdVnVmImmFrm : NSFormat<12>;
+def NVdVnVmImmVectorShiftFrm : NSFormat<13>;
+def NVdVnVmImmVectorExtractFrm : NSFormat<14>;
+def NVdVnVmImmMulScalarFrm : NSFormat<15>;
+def VTBLFrm : NSFormat<16>;
+
class NeonI<dag oops, dag iops, AddrMode am, IndexMode im, InstrItinClass itin,
string opc, string dt, string asm, string cstr, list<dag> pattern>
: InstARM<am, Size4Bytes, im, NEONFrm, NeonDomain, cstr, itin> {
@@ -1474,6 +1497,8 @@ class NeonI<dag oops, dag iops, AddrMode am, IndexMode im, InstrItinClass itin,
!strconcat("\t", asm));
let Pattern = pattern;
list<Predicate> Predicates = [HasNEON];
+ NSFormat NSF = NSFormatNone; // For disassembly.
+ bits<5> NSForm = NSFormatNone.Value; // For disassembly.
}
// Same as NeonI except it does not have a "data type" specifier.
@@ -1485,6 +1510,8 @@ class NeonXI<dag oops, dag iops, AddrMode am, IndexMode im, InstrItinClass itin,
let AsmString = !strconcat(!strconcat(opc, "${p}"), !strconcat("\t", asm));
let Pattern = pattern;
list<Predicate> Predicates = [HasNEON];
+ NSFormat NSF = NSFormatNone; // For disassembly.
+ bits<5> NSForm = NSFormatNone.Value; // For disassembly.
}
class NI<dag oops, dag iops, InstrItinClass itin, string opc, string asm,
@@ -1497,6 +1524,8 @@ class NI4<dag oops, dag iops, InstrItinClass itin, string opc,
string asm, list<dag> pattern>
: NeonXI<oops, iops, AddrMode4, IndexModeNone, itin, opc, asm, "",
pattern> {
+ let NSF = VLDSTRQFrm; // For disassembly.
+ let NSForm = VLDSTRQFrm.Value; // For disassembly.
}
class NLdSt<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4,
@@ -1509,6 +1538,8 @@ class NLdSt<bit op23, bits<2> op21_20, bits<4> op11_8, bits<4> op7_4,
let Inst{21-20} = op21_20;
let Inst{11-8} = op11_8;
let Inst{7-4} = op7_4;
+ let NSF = VLDSTLaneFrm; // For disassembly.
+ let NSForm = VLDSTLaneFrm.Value; // For disassembly.
}
class NDataI<dag oops, dag iops, InstrItinClass itin,
@@ -1538,6 +1569,8 @@ class N1ModImm<bit op23, bits<3> op21_19, bits<4> op11_8, bit op7, bit op6,
let Inst{6} = op6;
let Inst{5} = op5;
let Inst{4} = op4;
+ let NSF = NVdImmFrm; // For disassembly.
+ let NSForm = NVdImmFrm.Value; // For disassembly.
}
// NEON 2 vector register format.
@@ -1553,6 +1586,8 @@ class N2V<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
let Inst{11-7} = op11_7;
let Inst{6} = op6;
let Inst{4} = op4;
+ let NSF = NVdVmImmFrm; // For disassembly.
+ let NSForm = NVdVmImmFrm.Value; // For disassembly.
}
// Same as N2V except it doesn't have a datatype suffix.
@@ -1568,6 +1603,8 @@ class N2VX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18, bits<2> op17_16,
let Inst{11-7} = op11_7;
let Inst{6} = op6;
let Inst{4} = op4;
+ let NSF = NVdVmImmFrm; // For disassembly.
+ let NSForm = NVdVmImmFrm.Value; // For disassembly.
}
// NEON 2 vector register with immediate.
@@ -1581,6 +1618,8 @@ class N2VImm<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
let Inst{7} = op7;
let Inst{6} = op6;
let Inst{4} = op4;
+ let NSF = NVdVmImmFrm; // For disassembly.
+ let NSForm = NVdVmImmFrm.Value; // For disassembly.
}
// NEON 3 vector register format.
@@ -1594,6 +1633,8 @@ class N3V<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4,
let Inst{11-8} = op11_8;
let Inst{6} = op6;
let Inst{4} = op4;
+ let NSF = NVdVnVmImmFrm; // For disassembly.
+ let NSForm = NVdVnVmImmFrm.Value; // For disassembly.
}
// Same as N3VX except it doesn't have a data type suffix.
@@ -1607,6 +1648,8 @@ class N3VX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op6, bit op4
let Inst{11-8} = op11_8;
let Inst{6} = op6;
let Inst{4} = op4;
+ let NSF = NVdVnVmImmFrm; // For disassembly.
+ let NSForm = NVdVnVmImmFrm.Value; // For disassembly.
}
// NEON VMOVs between scalar and core registers.
diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 8fee6fa952..da2babf05d 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -213,7 +213,10 @@ def VLD2q32 : VLD2Q<0b1000, "vld2", "32">;
class VLD2Ddbl<bits<4> op7_4, string OpcodeStr, string Dt>
: NLdSt<0,0b10,0b1001,op7_4, (outs DPR:$dst1, DPR:$dst2),
(ins addrmode6:$addr), IIC_VLD2,
- OpcodeStr, Dt, "\\{$dst1, $dst2\\}, $addr", "", []>;
+ OpcodeStr, Dt, "\\{$dst1, $dst2\\}, $addr", "", []> {
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VLD2d8D : VLD2Ddbl<0b0000, "vld2", "8">;
def VLD2d16D : VLD2Ddbl<0b0100, "vld2", "16">;
@@ -228,7 +231,10 @@ class VLD3WB<bits<4> op7_4, string OpcodeStr, string Dt>
: NLdSt<0,0b10,0b0101,op7_4, (outs DPR:$dst1, DPR:$dst2, DPR:$dst3, GPR:$wb),
(ins addrmode6:$addr), IIC_VLD3,
OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3\\}, $addr",
- "$addr.addr = $wb", []>;
+ "$addr.addr = $wb", []> {
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VLD3d8 : VLD3D<0b0000, "vld3", "8">;
def VLD3d16 : VLD3D<0b0100, "vld3", "16">;
@@ -260,7 +266,10 @@ class VLD4WB<bits<4> op7_4, string OpcodeStr, string Dt>
(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
(ins addrmode6:$addr), IIC_VLD4,
OpcodeStr, Dt, "\\{$dst1, $dst2, $dst3, $dst4\\}, $addr",
- "$addr.addr = $wb", []>;
+ "$addr.addr = $wb", []> {
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VLD4d8 : VLD4D<0b0000, "vld4", "8">;
def VLD4d16 : VLD4D<0b0100, "vld4", "16">;
@@ -297,12 +306,28 @@ def VLD2LNd16 : VLD2LN<0b0101, "vld2", "16"> { let Inst{5} = 0; }
def VLD2LNd32 : VLD2LN<0b1001, "vld2", "32"> { let Inst{6} = 0; }
// vld2 to double-spaced even registers.
-def VLD2LNq16a: VLD2LN<0b0101, "vld2", "16"> { let Inst{5} = 1; }
-def VLD2LNq32a: VLD2LN<0b1001, "vld2", "32"> { let Inst{6} = 1; }
+def VLD2LNq16a: VLD2LN<0b0101, "vld2", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VLD2LNq32a: VLD2LN<0b1001, "vld2", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// vld2 to double-spaced odd registers.
-def VLD2LNq16b: VLD2LN<0b0101, "vld2", "16"> { let Inst{5} = 1; }
-def VLD2LNq32b: VLD2LN<0b1001, "vld2", "32"> { let Inst{6} = 1; }
+def VLD2LNq16b: VLD2LN<0b0101, "vld2", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VLD2LNq32b: VLD2LN<0b1001, "vld2", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// VLD3LN : Vector Load (single 3-element structure to one lane)
class VLD3LN<bits<4> op11_8, string OpcodeStr, string Dt>
@@ -318,7 +343,11 @@ def VLD3LNd16 : VLD3LN<0b0110, "vld3", "16"> { let Inst{5-4} = 0b00; }
def VLD3LNd32 : VLD3LN<0b1010, "vld3", "32"> { let Inst{6-4} = 0b000; }
// vld3 to double-spaced even registers.
-def VLD3LNq16a: VLD3LN<0b0110, "vld3", "16"> { let Inst{5-4} = 0b10; }
+def VLD3LNq16a: VLD3LN<0b0110, "vld3", "16"> {
+ let Inst{5-4} = 0b10;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VLD3LNq32a: VLD3LN<0b1010, "vld3", "32"> { let Inst{6-4} = 0b100; }
// vld3 to double-spaced odd registers.
@@ -340,12 +369,28 @@ def VLD4LNd16 : VLD4LN<0b0111, "vld4", "16"> { let Inst{5} = 0; }
def VLD4LNd32 : VLD4LN<0b1011, "vld4", "32"> { let Inst{6} = 0; }
// vld4 to double-spaced even registers.
-def VLD4LNq16a: VLD4LN<0b0111, "vld4", "16"> { let Inst{5} = 1; }
-def VLD4LNq32a: VLD4LN<0b1011, "vld4", "32"> { let Inst{6} = 1; }
+def VLD4LNq16a: VLD4LN<0b0111, "vld4", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VLD4LNq32a: VLD4LN<0b1011, "vld4", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// vld4 to double-spaced odd registers.
-def VLD4LNq16b: VLD4LN<0b0111, "vld4", "16"> { let Inst{5} = 1; }
-def VLD4LNq32b: VLD4LN<0b1011, "vld4", "32"> { let Inst{6} = 1; }
+def VLD4LNq16b: VLD4LN<0b0111, "vld4", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VLD4LNq32b: VLD4LN<0b1011, "vld4", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// VLD1DUP : Vector Load (single element to all lanes)
// VLD2DUP : Vector Load (single 2-element structure to all lanes)
@@ -433,7 +478,10 @@ def VST2q32 : VST2Q<0b1000, "vst2", "32">;
class VST2Ddbl<bits<4> op7_4, string OpcodeStr, string Dt>
: NLdSt<0, 0b00, 0b1001, op7_4, (outs),
(ins addrmode6:$addr, DPR:$src1, DPR:$src2), IIC_VST,
- OpcodeStr, Dt, "\\{$src1, $src2\\}, $addr", "", []>;
+ OpcodeStr, Dt, "\\{$src1, $src2\\}, $addr", "", []> {
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VST2d8D : VST2Ddbl<0b0000, "vst2", "8">;
def VST2d16D : VST2Ddbl<0b0100, "vst2", "16">;
@@ -448,7 +496,10 @@ class VST3WB<bits<4> op7_4, string OpcodeStr, string Dt>
: NLdSt<0,0b00,0b0101,op7_4, (outs GPR:$wb),
(ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3), IIC_VST,
OpcodeStr, Dt, "\\{$src1, $src2, $src3\\}, $addr",
- "$addr.addr = $wb", []>;
+ "$addr.addr = $wb", []> {
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VST3d8 : VST3D<0b0000, "vst3", "8">;
def VST3d16 : VST3D<0b0100, "vst3", "16">;
@@ -478,7 +529,10 @@ class VST4WB<bits<4> op7_4, string OpcodeStr, string Dt>
: NLdSt<0,0b00,0b0001,op7_4, (outs GPR:$wb),
(ins addrmode6:$addr, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4),
IIC_VST, OpcodeStr, Dt, "\\{$src1, $src2, $src3, $src4\\}, $addr",
- "$addr.addr = $wb", []>;
+ "$addr.addr = $wb", []> {
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
def VST4d8 : VST4D<0b0000, "vst4", "8">;
def VST4d16 : VST4D<0b0100, "vst4", "16">;
@@ -515,12 +569,28 @@ def VST2LNd16 : VST2LN<0b0101, "vst2", "16"> { let Inst{5} = 0; }
def VST2LNd32 : VST2LN<0b1001, "vst2", "32"> { let Inst{6} = 0; }
// vst2 to double-spaced even registers.
-def VST2LNq16a: VST2LN<0b0101, "vst2", "16"> { let Inst{5} = 1; }
-def VST2LNq32a: VST2LN<0b1001, "vst2", "32"> { let Inst{6} = 1; }
+def VST2LNq16a: VST2LN<0b0101, "vst2", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VST2LNq32a: VST2LN<0b1001, "vst2", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// vst2 to double-spaced odd registers.
-def VST2LNq16b: VST2LN<0b0101, "vst2", "16"> { let Inst{5} = 1; }
-def VST2LNq32b: VST2LN<0b1001, "vst2", "32"> { let Inst{6} = 1; }
+def VST2LNq16b: VST2LN<0b0101, "vst2", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VST2LNq32b: VST2LN<0b1001, "vst2", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// VST3LN : Vector Store (single 3-element structure from one lane)
class VST3LN<bits<4> op11_8, string OpcodeStr, string Dt>
@@ -535,12 +605,28 @@ def VST3LNd16 : VST3LN<0b0110, "vst3", "16"> { let Inst{5-4} = 0b00; }
def VST3LNd32 : VST3LN<0b1010, "vst3", "32"> { let Inst{6-4} = 0b000; }
// vst3 to double-spaced even registers.
-def VST3LNq16a: VST3LN<0b0110, "vst3", "16"> { let Inst{5-4} = 0b10; }
-def VST3LNq32a: VST3LN<0b1010, "vst3", "32"> { let Inst{6-4} = 0b100; }
+def VST3LNq16a: VST3LN<0b0110, "vst3", "16"> {
+ let Inst{5-4} = 0b10;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VST3LNq32a: VST3LN<0b1010, "vst3", "32"> {
+ let Inst{6-4} = 0b100;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// vst3 to double-spaced odd registers.
-def VST3LNq16b: VST3LN<0b0110, "vst3", "16"> { let Inst{5-4} = 0b10; }
-def VST3LNq32b: VST3LN<0b1010, "vst3", "32"> { let Inst{6-4} = 0b100; }
+def VST3LNq16b: VST3LN<0b0110, "vst3", "16"> {
+ let Inst{5-4} = 0b10;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VST3LNq32b: VST3LN<0b1010, "vst3", "32"> {
+ let Inst{6-4} = 0b100;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// VST4LN : Vector Store (single 4-element structure from one lane)
class VST4LN<bits<4> op11_8, string OpcodeStr, string Dt>
@@ -556,12 +642,28 @@ def VST4LNd16 : VST4LN<0b0111, "vst4", "16"> { let Inst{5} = 0; }
def VST4LNd32 : VST4LN<0b1011, "vst4", "32"> { let Inst{6} = 0; }
// vst4 to double-spaced even registers.
-def VST4LNq16a: VST4LN<0b0111, "vst4", "16"> { let Inst{5} = 1; }
-def VST4LNq32a: VST4LN<0b1011, "vst4", "32"> { let Inst{6} = 1; }
+def VST4LNq16a: VST4LN<0b0111, "vst4", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VST4LNq32a: VST4LN<0b1011, "vst4", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
// vst4 to double-spaced odd registers.
-def VST4LNq16b: VST4LN<0b0111, "vst4", "16"> { let Inst{5} = 1; }
-def VST4LNq32b: VST4LN<0b1011, "vst4", "32"> { let Inst{6} = 1; }
+def VST4LNq16b: VST4LN<0b0111, "vst4", "16"> {
+ let Inst{5} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
+def VST4LNq32b: VST4LN<0b1011, "vst4", "32"> {
+ let Inst{6} = 1;
+ let NSF = VLDSTLaneDblFrm; // For disassembly.
+ let NSForm = VLDSTLaneDblFrm.Value; // For disassembly.
+}
} // mayStore = 1, hasExtraSrcRegAllocReq = 1
@@ -668,12 +770,18 @@ class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr, string Dt>
: N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$dst1, DPR:$dst2),
(ins DPR:$src1, DPR:$src2), IIC_VPERMD,
OpcodeStr, Dt, "$dst1, $dst2",
- "$src1 = $dst1, $src2 = $dst2", []>;
+ "$src1 = $dst1, $src2 = $dst2", []> {
+ let NSF = NVectorShuffleFrm; // For disassembly.
+ let NSForm = NVectorShuffleFrm.Value; // For disassembly.
+}
class N2VQShuffle<bits<2> op19_18, bits<5> op11_7,
InstrItinClass itin, string OpcodeStr, string Dt>
: N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$dst1, QPR:$dst2),
(ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$dst1, $dst2",
- "$src1 = $dst1, $src2 = $dst2", []>;
+ "$src1 = $dst1, $src2 = $dst2", []> {
+ let NSF = NVectorShuffleFrm; // For disassembly.
+ let NSForm = NVectorShuffleFrm.Value; // For disassembly.
+}
// Basic 3-register operations: single-, double- and quad-register.
class N3VS<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
@@ -715,6 +823,8 @@ class N3VDSL<bits<2> op21_20, bits<4> op11_8,
(Ty (ShOp (Ty DPR:$src1),
(Ty (NEONvduplane (Ty DPR_VFP2:$src2), imm:$lane)))))]>{
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
@@ -725,6 +835,8 @@ class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
(Ty (ShOp (Ty DPR:$src1),
(Ty (NEONvduplane (Ty DPR_8:$src2), imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
@@ -756,6 +868,8 @@ class N3VQSL<bits<2> op21_20, bits<4> op11_8,
(ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, SDNode ShOp>
@@ -767,6 +881,8 @@ class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
(ResTy (NEONvduplane (OpTy DPR_8:$src2),
imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
// Basic 3-register intrinsics, both double- and quad-register.
@@ -789,6 +905,8 @@ class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(Ty (NEONvduplane (Ty DPR_VFP2:$src2),
imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
@@ -800,6 +918,8 @@ class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(Ty (NEONvduplane (Ty DPR_8:$src2),
imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
@@ -822,6 +942,8 @@ class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(ResTy (NEONvduplane (OpTy DPR_VFP2:$src2),
imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
@@ -834,6 +956,8 @@ class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(ResTy (NEONvduplane (OpTy DPR_8:$src2),
imm:$lane)))))]> {
let isCommutable = 0;
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
}
// Multiply-Add/Sub operations: single-, double- and quad-register.
@@ -864,7 +988,10 @@ class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(Ty (ShOp (Ty DPR:$src1),
(Ty (MulOp DPR:$src2,
(Ty (NEONvduplane (Ty DPR_VFP2:$src3),
- imm:$lane)))))))]>;
+ imm:$lane)))))))]> {
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
+}
class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType Ty, SDNode MulOp, SDNode ShOp>
@@ -876,7 +1003,10 @@ class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(Ty (ShOp (Ty DPR:$src1),
(Ty (MulOp DPR:$src2,
(Ty (NEONvduplane (Ty DPR_8:$src3),
- imm:$lane)))))))]>;
+ imm:$lane)))))))]> {
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
+}
class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty,
@@ -897,7 +1027,10 @@ class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(ResTy (ShOp (ResTy QPR:$src1),
(ResTy (MulOp QPR:$src2,
(ResTy (NEONvduplane (OpTy DPR_VFP2:$src3),
- imm:$lane)))))))]>;
+ imm:$lane)))))))]> {
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
+}
class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy,
@@ -910,7 +1043,10 @@ class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
(ResTy (ShOp (ResTy QPR:$src1),
(ResTy (MulOp QPR:$src2,
(ResTy (NEONvduplane (OpTy DPR_8:$src3),
- imm:$lane)))))))]>;
+ imm:$lane)))))))]> {
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
+}
// Neon 3-argument intrinsics, both double- and quad-register.
// The destination register is also used as the first source operand register.
@@ -996,7 +1132,10 @@ class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
[(set (ResTy QPR:$dst),
(ResTy (IntOp (OpTy DPR:$src1),
(OpTy (NEONvduplane (OpTy DPR_VFP2:$src2),
- imm:$lane)))))]>;
+ imm:$lane)))))]> {
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
+}
class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
InstrItinClass itin, string OpcodeStr, string Dt,
ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
@@ -1006,7 +1145,10 @@ class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
[(set (ResTy QPR:$dst),
(ResTy (IntOp (OpTy DPR:$src1),
(OpTy (NEONvduplane (OpTy DPR_8:$src2),
- imm:$lane)))))]>;
+ imm:$lane)))))]> {
+ let NSF = NVdVnVmImmMulScalarFrm; // For disassembly.
+ let NSForm = NVdVnVmImmMulScalarFrm.Value; // For disassembly.
+}
// Wide 3-register intrinsics.
class N3VWInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
@@ -1055,6 +1197,10 @@ class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
OpcodeStr, Dt, "$dst, $src2", "$src1 = $dst",
[(set QPR:$dst, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$src2))))]>;
+// This is a big 'let ... in' block marking these instructions as
+// NVectorShiftFrm to help the disassembler.
+let NSF = NVectorShiftFrm, NSForm = NVectorShiftFrm.Value in {
+
// Shift by immediate,
// both double- and quad-register.
class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
@@ -1072,16 +1218,6 @@ class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
OpcodeStr, Dt, "$dst, $src, $SIMM", "",
[(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;
-// Long shift by immediate.
-class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
- string OpcodeStr, string Dt,
- ValueType ResTy, ValueType OpTy, SDNode OpNode>
- : N2VImm<op24, op23, op11_8, op7, op6, op4,
- (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM), IIC_VSHLiD,
- OpcodeStr, Dt, "$dst, $src, $SIMM", "",
- [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
- (i32 imm:$SIMM))))]>;
-
// Narrow shift by immediate.
class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
InstrItinClass itin, string OpcodeStr, string Dt,
@@ -1124,8 +1260,26 @@ class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
OpcodeStr, Dt, "$dst, $src2, $SIMM", "$src1 = $dst",
[(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;
+} // End of "let NSF = NVectorShiftFrm, ..."
+
+// Long shift by immediate.
+class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
+ string OpcodeStr, string Dt,
+ ValueType ResTy, ValueType OpTy, SDNode OpNode>
+ : N2VImm<op24, op23, op11_8, op7, op6, op4,
+ (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM), IIC_VSHLiD,
+ OpcodeStr, Dt, "$dst, $src, $SIMM", "",
+ [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
+ (i32 imm:$SIMM))))]> {
+ // This has a different interpretation of the shift amount encoding than
+ // NVectorShiftFrm.
+ let NSF = NVectorShift2Frm; // For disassembly.
+ let NSForm = NVectorShift2Frm.Value; // For disassembly.
+}
+
// Convert, with fractional bits immediate,
// both double- and quad-register.
+let NSF = NVdVmImmVCVTFrm, NSForm = NVdVmImmVCVTFrm.Value in {
class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
Intrinsic IntOp>
@@ -1140,6 +1294,7 @@ class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
(outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM), IIC_VUNAQ,
OpcodeStr, Dt, "$dst, $src, $SIMM", "",
[(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src), (i32 imm:$SIMM))))]>;
+}
//===----------------------------------------------------------------------===//
// Multiclasses
@@ -1350,6 +1505,60 @@ multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
v2i64, v2i64, IntOp, Commutable>;
}
+// Same as N3VInt_QHSD, except they're for Vector Shift (Register) Instructions.
+// D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
+// This helps the disassembler.
+let NSF = NVdVnVmImmVectorShiftFrm, NSForm = NVdVnVmImmVectorShiftFrm.Value in {
+multiclass N3VInt_HS2<bit op24, bit op23, bits<4> op11_8, bit op4,
+ InstrItinClass itinD16, InstrItinClass itinD32,
+ InstrItinClass itinQ16, InstrItinClass itinQ32,
+ string OpcodeStr, string Dt,
+ Intrinsic IntOp, bit Commutable = 0> {
+ // 64-bit vector types.
+ def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, itinD16,
+ OpcodeStr, !strconcat(Dt, "16"),
+ v4i16, v4i16, IntOp, Commutable>;
+ def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, itinD32,
+ OpcodeStr, !strconcat(Dt, "32"),
+ v2i32, v2i32, IntOp, Commutable>;
+
+ // 128-bit vector types.
+ def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, itinQ16,
+ OpcodeStr, !strconcat(Dt, "16"),
+ v8i16, v8i16, IntOp, Commutable>;
+ def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, itinQ32,
+ OpcodeStr, !strconcat(Dt, "32"),
+ v4i32, v4i32, IntOp, Commutable>;
+}
+multiclass N3VInt_QHS2<bit op24, bit op23, bits<4> op11_8, bit op4,
+ InstrItinClass itinD16, InstrItinClass itinD32,
+ InstrItinClass itinQ16, InstrItinClass itinQ32,
+ string OpcodeStr, string Dt,
+ Intrinsic IntOp, bit Commutable = 0>
+ : N3VInt_HS2<op24, op23, op11_8, op4, itinD16, itinD32, itinQ16, itinQ32,
+ OpcodeStr, Dt, IntOp, Commutable> {
+ def v8i8 : N3VDInt<op24, op23, 0b00, op11_8, op4, itinD16,
+ OpcodeStr, !strconcat(Dt, "8"),
+ v8i8, v8i8, IntOp, Commutable>;
+ def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, itinQ16,
+ OpcodeStr, !strconcat(Dt, "8"),
+ v16i8, v16i8, IntOp, Commutable>;
+}
+multiclass N3VInt_QHSD2<bit op24, bit op23, bits<4> op11_8, bit op4,
+ InstrItinClass itinD16, InstrItinClass itinD32,
+ InstrItinClass itinQ16, InstrItinClass itinQ32,
+ string OpcodeStr, string Dt,
+ Intrinsic IntOp, bit Commutable = 0>
+ : N3VInt_QHS2<op24, op23, op11_8, op4, itinD16, itinD32, itinQ16, itinQ32,
+ OpcodeStr, Dt, IntOp, Commutable> {
+ def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, itinD32,
+ OpcodeStr, !strconcat(Dt, "64"),
+ v1i64, v1i64, IntOp, Commutable>;
+ def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, itinQ32,
+ OpcodeStr, !strconcat(Dt, "64"),
+ v2i64, v2i64, IntOp, Commutable>;
+}
+}
// Neon Narrowing 3-register vector intrinsics,
// source operand element sizes of 16, 32 and 64 bits:
@@ -1619,6 +1828,47 @@ multiclass N2VSh_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
// imm6 = xxxxxx
}
+// Same as N2VSh_QHSD, except the instructions have a different interpretation of
+// the shift amount. This helps the disassembler.
+let NSF = NVectorShift2Frm, NSForm = NVectorShift2Frm.Value in {
+multiclass N2VSh_QHSD2<bit op24, bit op23, bits<4> op11_8, bit op4,
+ InstrItinClass itin, string OpcodeStr, string Dt,
+ SDNode OpNode> {
+ // 64-bit vector types.
+ def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, itin,
+ OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
+ let Inst{21-19} = 0b001; // imm6 = 001xxx
+ }
+ def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, itin,
+ OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
+ let Inst{21-20} = 0b01; // imm6 = 01xxxx
+ }
+ def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, itin,
+ OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
+ let Inst{21} = 0b1; // imm6 = 1xxxxx
+ }
+ def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, itin,
+ OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
+ // imm6 = xxxxxx
+
+ // 128-bit vector types.
+ def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, itin,
+ OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
+ let Inst{21-19} = 0b001; // imm6 = 001xxx
+ }
+ def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, itin,
+ OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
+ let Inst{21-20} = 0b01; // imm6 = 01xxxx
+ }
+ def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, itin,
+ OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
+ let Inst{21} = 0b1; // imm6 = 1xxxxx
+ }
+ def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, itin,
+ OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
+ // imm6 = xxxxxx
+}
+}
// Neon Shift-Accumulate vector operations,
// element sizes of 8, 16, 32 and 64 bits:
@@ -1699,6 +1949,47 @@ multiclass N2VShIns_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
// imm6 = xxxxxx
}
+// Same as N2VShIns_QHSD, except the instructions have a different interpretation
+// of the shift amount. This helps the disassembler.
+let NSF = NVectorShift2Frm, NSForm = NVectorShift2Frm.Value in {
+multiclass N2VShIns_QHSD2<bit op24, bit op23, bits<4> op11_8, bit op4,
+ string OpcodeStr, SDNode ShOp> {
+ // 64-bit vector types.
+ def v8i8 : N2VDShIns<op24, op23, op11_8, 0, op4,
+ OpcodeStr, "8", v8i8, ShOp> {
+ let Inst{21-19} = 0b001; // imm6 = 001xxx
+ }
+ def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4,
+ OpcodeStr, "16", v4i16, ShOp> {
+ let Inst{21-20} = 0b01; // imm6 = 01xxxx
+ }
+ def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4,
+ OpcodeStr, "32", v2i32, ShOp> {
+ let Inst{21} = 0b1; // imm6 = 1xxxxx
+ }
+ def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4,
+ OpcodeStr, "64", v1i64, ShOp>;
+ // imm6 = xxxxxx
+
+ // 128-bit vector types.
+ def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4,
+ OpcodeStr, "8", v16i8, ShOp> {
+ let Inst{21-19} = 0b001; // imm6 = 001xxx
+ }
+ def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4,
+ OpcodeStr, "16", v8i16, ShOp> {
+ let Inst{21-20} = 0b01; // imm6 = 01xxxx
+ }
+ def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4,
+ OpcodeStr, "32", v4i32, ShOp> {
+ let Inst{21} = 0b1; // imm6 = 1xxxxx
+ }
+ def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4,
+ OpcodeStr, "64", v2i64, ShOp>;
+ // imm6 = xxxxxx
+}
+}
+
// Neon Shift Long operations,
// element sizes of 8, 16, 32 bits:
multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
@@ -2329,18 +2620,21 @@ def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1,
// Vector Shifts.
// VSHL : Vector Shift
-defm VSHLs : N3VInt_QHSD<0, 0, 0b0100, 0, IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ,
- IIC_VSHLiQ, "vshl", "s", int_arm_neon_vshifts, 0>;
-defm VSHLu : N3VInt_QHSD<1, 0, 0b0100, 0, IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ,
- IIC_VSHLiQ, "vshl", "u", int_arm_neon_vshiftu, 0>;
+defm VSHLs : N3VInt_QHSD2<0,0, 0b0100, 0, IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ,
+ IIC_VSHLiQ, "vshl", "s", int_arm_neon_vshifts, 0>;
+defm VSHLu : N3VInt_QHSD2<1,0, 0b0100, 0, IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ,
+ IIC_VSHLiQ, "vshl", "u", int_arm_neon_vshiftu, 0>;
// VSHL : Vector Shift Left (Immediate)
-defm VSHLi : N2VSh_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
+// (disassembly note: this has a different interpretation of the shift amount)
+defm VSHLi : N2VSh_QHSD2<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
// VSHR : Vector Shift Right (Immediate)
defm VSHRs : N2VSh_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s", NEONvshrs>;
defm VSHRu : N2VSh_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u", NEONvshru>;
// VSHLL : Vector Shift Left Long
+// (disassembly note: this has a different interpretation of the shift amount)
defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
+// (disassembly note: this has a different interpretation of the shift amount)
defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
// VSHLL : Vector Shift Left Long (with maximum shift count)
@@ -2350,6 +2644,8 @@ class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
: N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
ResTy, OpTy, OpNode> {
let Inst{21-16} = op21_16;
+ let NSF = NVdVmImmVSHLLFrm; // For disassembly.
+ let NSForm = NVdVmImmVSHLLFrm.Value; // For disassembly.
}
def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
v8i16, v8i8, NEONvshlli>;
@@ -2363,10 +2659,10 @@ defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
NEONvshrn>;
// VRSHL : Vector Rounding Shift
-defm VRSHLs : N3VInt_QHSD<0,0,0b0101,0, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vrshl", "s", int_arm_neon_vrshifts,0>;
-defm VRSHLu : N3VInt_QHSD<1,0,0b0101,0, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vrshl", "u", int_arm_neon_vrshiftu,0>;
+defm VRSHLs : N3VInt_QHSD2<0,0,0b0101,0,IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
+ IIC_VSHLi4Q,"vrshl", "s", int_arm_neon_vrshifts,0>;
+defm VRSHLu : N3VInt_QHSD2<1,0,0b0101,0,IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
+ IIC_VSHLi4Q,"vrshl", "u", int_arm_neon_vrshiftu,0>;
// VRSHR : Vector Rounding Shift Right
defm VRSHRs : N2VSh_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s", NEONvrshrs>;
defm VRSHRu : N2VSh_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u", NEONvrshru>;
@@ -2376,15 +2672,18 @@ defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
NEONvrshrn>;
// VQSHL : Vector Saturating Shift
-defm VQSHLs : N3VInt_QHSD<0,0,0b0100,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqshl", "s", int_arm_neon_vqshifts,0>;
-defm VQSHLu : N3VInt_QHSD<1,0,0b0100,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqshl", "u", int_arm_neon_vqshiftu,0>;
+defm VQSHLs : N3VInt_QHSD2<0,0,0b0100,1,IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
+ IIC_VSHLi4Q, "vqshl", "s", int_arm_neon_vqshifts,0>;
+defm VQSHLu : N3VInt_QHSD2<1,0,0b0100,1,IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
+ IIC_VSHLi4Q, "vqshl", "u", int_arm_neon_vqshiftu,0>;
// VQSHL : Vector Saturating Shift Left (Immediate)
-defm VQSHLsi : N2VSh_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s", NEONvqshls>;
-defm VQSHLui : N2VSh_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u", NEONvqshlu>;
+// (disassembly note: this has a different interpretation of the shift amount)
+defm VQSHLsi : N2VSh_QHSD2<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s", NEONvqshls>;
+// (disassembly note: this has a different interpretation of the shift amount)
+defm VQSHLui : N2VSh_QHSD2<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u", NEONvqshlu>;
// VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
-defm VQSHLsu : N2VSh_QHSD<1,1,0b0110,1, IIC_VSHLi4D, "vqshlu","s",NEONvqshlsu>;
+// (disassembly note: this has a different interpretation of the shift amount)
+defm VQSHLsu : N2VSh_QHSD2<1,1,0b0110,1, IIC_VSHLi4D, "vqshlu","s",NEONvqshlsu>;
// VQSHRN : Vector Saturating Shift Right and Narrow
defm VQSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "s",
@@ -2397,12 +2696,12 @@ defm VQSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 0, 1, IIC_VSHLi4D, "vqshrun", "s",
NEONvqshrnsu>;
// VQRSHL : Vector Saturating Rounding Shift
-defm VQRSHLs : N3VInt_QHSD<0,0,0b0101,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqrshl", "s",
- int_arm_neon_vqrshifts, 0>;
-defm VQRSHLu : N3VInt_QHSD<1,0,0b0101,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
- IIC_VSHLi4Q, "vqrshl", "u",
- int_arm_neon_vqrshiftu, 0>;
+defm VQRSHLs : N3VInt_QHSD2<0,0,0b0101,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
+ IIC_VSHLi4Q, "vqrshl", "s",
+ int_arm_neon_vqrshifts, 0>;
+defm VQRSHLu : N3VInt_QHSD2<1,0,0b0101,1, IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q,
+ IIC_VSHLi4Q, "vqrshl", "u",
+ int_arm_neon_vqrshiftu, 0>;
// VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
defm VQRSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "s",
@@ -2422,7 +2721,8 @@ defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra", "s", NEONvrshrs>;
defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra", "u", NEONvrshru>;
// VSLI : Vector Shift Left and Insert
-defm VSLI : N2VShIns_QHSD<1, 1, 0b0101, 1, "vsli", NEONvsli>;
+// (disassembly note: this has a different interpretation of the shift amount)
+defm VSLI : N2VShIns_QHSD2<1, 1, 0b0101, 1, "vsli", NEONvsli>;
// VSRI : Vector Shift Right and Insert
defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri", NEONvsri>;
@@ -2518,10 +2818,13 @@ def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
// VMOV : Vector Move (Register)
+// Mark these instructions as 2-register instructions to help the disassembler.
+let NSF = NVdVmImmFrm, NSForm = NVdVmImmFrm.Value in {
def VMOVDneon: N3VX<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$dst), (ins DPR:$src),
IIC_VMOVD, "vmov", "$dst, $src", "", []>;
def VMOVQ : N3VX<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$dst), (ins QPR:$src),
IIC_VMOVD, "vmov", "$dst, $src", "", []>;
+}
// VMOV : Vector Move (Immediate)
@@ -2762,6 +3065,7 @@ def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
// VDUP : Vector Duplicate Lane (from scalar to all elements)
+let NSF = NVdVmImmVDupLaneFrm, NSForm = NVdVmImmVDupLaneFrm.Value in {
class VDUPLND<bits<2> op19_18, bits<2> op17_16,
string OpcodeStr, string Dt, ValueType Ty>
: N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 0, 0,
@@ -2775,6 +3079,7 @@ class VDUPLNQ<bits<2> op19_18, bits<2> op17_16, string OpcodeStr, string Dt,
(outs QPR:$dst), (ins DPR:$src, nohash_imm:$lane), IIC_VMOVD,
OpcodeStr, Dt, "$dst, $src[$lane]", "",
[(set QPR:$dst, (ResTy (NEONvduplane (OpTy DPR:$src), imm:$lane)))]>;
+}
// Inst{19-16} is partially specified depending on the element size.
@@ -2843,24 +3148,37 @@ defm VMOVLu : N2VLInt_QHS<0b11,0b10100,0,1, "vmovl", "u",
// Vector Conversions.
+let NSF = NVdVmImmVCVTFrm, NSForm = NVdVmImmVCVTFrm.Value in {
+class N2VDX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
+ bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
+ : N2VD<op24_23, op21_20, op19_18, op17_16, op11_7, op4, OpcodeStr, Dt,
+ ResTy, OpTy, OpNode>;
+class N2VQX<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
+ bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
+ string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
+ : N2VQ<op24_23, op21_20, op19_18, op17_16, op11_7, op4, OpcodeStr, Dt,
+ ResTy, OpTy, OpNode>;
+}
+
// VCVT : Vector Convert Between Floating-Point and Integers
-def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
- v2i32, v2f32, fp_to_sint>;
-def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
- v2i32, v2f32, fp_to_uint>;
-def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
- v2f32, v2i32, sint_to_fp>;
-def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
- v2f32, v2i32, uint_to_fp>;
-
-def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
- v4i32, v4f32, fp_to_sint>;
-def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
- v4i32, v4f32, fp_to_uint>;
-def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
- v4f32, v4i32, sint_to_fp>;
-def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
- v4f32, v4i32, uint_to_fp>;
+def VCVTf2sd : N2VDX<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
+ v2i32, v2f32, fp_to_sint>;
+def VCVTf2ud : N2VDX<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
+ v2i32, v2f32, fp_to_uint>;
+def VCVTs2fd : N2VDX<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
+ v2f32, v2i32, sint_to_fp>;
+def VCVTu2fd : N2VDX<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
+ v2f32, v2i32, uint_to_fp>;
+
+def VCVTf2sq : N2VQX<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
+ v4i32, v4f32, fp_to_sint>;
+def VCVTf2uq : N2VQX<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
+ v4i32, v4f32, fp_to_uint>;
+def VCVTs2fq : N2VQX<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
+ v4f32, v4i32, sint_to_fp>;
+def VCVTu2fq : N2VQX<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
+ v4f32, v4i32, uint_to_fp>;
// VCVT : Vector Convert Between Floating-Point and Fixed-Point.
def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
@@ -2945,6 +3263,8 @@ def VREV16q8 : VREV16Q<0b00, "vrev16", "8", v16i8>;
// VEXT : Vector Extract
+let NSF = NVdVnVmImmVectorExtractFrm,
+ NSForm = NVdVnVmImmVectorExtractFrm.Value in {
class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
: N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$dst),
(ins DPR:$lhs, DPR:$rhs, i32imm:$index), IIC_VEXTD,
@@ -2958,6 +3278,7 @@ class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
[(set QPR:$dst, (Ty (NEONvext (Ty QPR:$lhs),
(Ty QPR:$rhs), imm:$index)))]>;
+}
def VEXTd8 : VEXTd<"vext", "8", v8i8>;
def VEXTd16 : VEXTd<"vext", "16", v4i16>;
@@ -3001,6 +3322,8 @@ def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
// Vector Table Lookup and Table Extension.
+let NSF = VTBLFrm, NSForm = VTBLFrm.Value in {
+
// VTBL : Vector Table Lookup
def VTBL1
: N3V<1,1,0b11,0b1000,0,0, (outs DPR:$dst),
@@ -3057,6 +3380,8 @@ def VTBX4
DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$src)))]>;
} // hasExtraSrcRegAllocReq = 1
+} // End of "let NSF = VTBLFrm, ..."
+
//===----------------------------------------------------------------------===//
// NEON instructions for single-precision FP math
//===----------------------------------------------------------------------===//
diff --git a/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp b/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
index 447afdd4fa..bf7e919235 100644
--- a/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
+++ b/lib/Target/ARM/AsmPrinter/ARMAsmPrinter.cpp
@@ -120,7 +120,7 @@ namespace {
void printT2AddrModeImm8Operand(const MachineInstr *MI, int OpNum);
void printT2AddrModeImm8s4Operand(const MachineInstr *MI, int OpNum);
void printT2AddrModeImm8OffsetOperand(const MachineInstr *MI, int OpNum);
- void printT2AddrModeImm8s4OffsetOperand(const MachineInstr *MI, int OpNum) {}
+ void printT2AddrModeImm8s4OffsetOperand(const MachineInstr *MI, int OpNum);
void printT2AddrModeSoRegOperand(const MachineInstr *MI, int OpNum);
void printCPSOptionOperand(const MachineInstr *MI, int OpNum) {}
@@ -431,16 +431,16 @@ void ARMAsmPrinter::printAddrMode2Operand(const MachineInstr *MI, int Op) {
O << "[" << getRegisterName(MO1.getReg());
if (!MO2.getReg()) {
- if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
+ if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
O << ", #"
- << (char)ARM_AM::getAM2Op(MO3.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
<< ARM_AM::getAM2Offset(MO3.getImm());
O << "]";
return;
}
O << ", "
- << (char)ARM_AM::getAM2Op(MO3.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
<< getRegisterName(MO2.getReg());
if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
@@ -458,12 +458,12 @@ void ARMAsmPrinter::printAddrMode2OffsetOperand(const MachineInstr *MI, int Op){
unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
assert(ImmOffs && "Malformed indexed load / store!");
O << "#"
- << (char)ARM_AM::getAM2Op(MO2.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
<< ImmOffs;
return;
}
- O << (char)ARM_AM::getAM2Op(MO2.getImm())
+ O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
<< getRegisterName(MO1.getReg());
if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
@@ -490,7 +490,7 @@ void ARMAsmPrinter::printAddrMode3Operand(const MachineInstr *MI, int Op) {
if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
O << ", #"
- << (char)ARM_AM::getAM3Op(MO3.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
<< ImmOffs;
O << "]";
}
@@ -508,7 +508,7 @@ void ARMAsmPrinter::printAddrMode3OffsetOperand(const MachineInstr *MI, int Op){
unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
assert(ImmOffs && "Malformed indexed load / store!");
O << "#"
- << (char)ARM_AM::getAM3Op(MO2.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()))
<< ImmOffs;
}
@@ -553,7 +553,7 @@ void ARMAsmPrinter::printAddrMode5Operand(const MachineInstr *MI, int Op,
if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
O << ", #"
- << (char)ARM_AM::getAM5Op(MO2.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM5Op(MO2.getImm()))
<< ImmOffs*4;
}
O << "]";
@@ -589,7 +589,7 @@ void ARMAsmPrinter::printAddrModePCOperand(const MachineInstr *MI, int Op,
const MachineOperand &MO1 = MI->getOperand(Op);
assert(TargetRegisterInfo::isPhysicalRegister(MO1.getReg()));
- O << "[pc, +" << getRegisterName(MO1.getReg()) << "]";
+ O << "[pc, " << getRegisterName(MO1.getReg()) << "]";
}
void
@@ -612,10 +612,11 @@ void
ARMAsmPrinter::printThumbITMask(const MachineInstr *MI, int Op) {
// (3 - the number of trailing zeros) is the number of then / else.
unsigned Mask = MI->getOperand(Op).getImm();
+ unsigned CondBit0 = Mask >> 4 & 1;
unsigned NumTZ = CountTrailingZeros_32(Mask);
assert(NumTZ <= 3 && "Invalid IT mask!");
for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
- bool T = (Mask & (1 << Pos)) == 0;
+ bool T = ((Mask >> Pos) & 1) == CondBit0;
if (T)
O << 't';
else
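To check the corrected then/else decoding, here is a small standalone C++ sketch (the operand layout -- IT mask in bits 3:0 and the condition's low bit in bit 4 -- is read off the code above; decodeITMask and the sample values are hypothetical, and __builtin_ctz stands in for LLVM's CountTrailingZeros_32):

#include <cassert>
#include <string>

static std::string decodeITMask(unsigned Imm) {
  unsigned CondBit0 = (Imm >> 4) & 1;           // firstcond[0]
  assert((Imm & 0xF) && "IT mask needs its terminator bit");
  unsigned NumTZ = __builtin_ctz(Imm & 0xF);    // position of the terminator
  std::string Suffix;
  // A mask bit equal to firstcond[0] means 'then', otherwise 'else'.
  for (unsigned Pos = 3; Pos > NumTZ; --Pos)
    Suffix += (((Imm >> Pos) & 1) == CondBit0) ? 't' : 'e';
  return Suffix;
}

int main() {
  // "itte ne": firstcond = 0b0001 (ne), mask = 0b1010 -> suffix "te".
  assert(decodeITMask(0x1A) == "te");
  // "it eq": firstcond = 0b0000 (eq), mask = 0b1000 -> no extra suffix.
  assert(decodeITMask(0x08) == "");
  return 0;
}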
@@ -647,7 +648,7 @@ ARMAsmPrinter::printThumbAddrModeRI5Operand(const MachineInstr *MI, int Op,
if (MO3.getReg())
O << ", " << getRegisterName(MO3.getReg());
else if (unsigned ImmOffs = MO2.getImm())
- O << ", #+" << ImmOffs * Scale;
+ O << ", #" << ImmOffs * Scale;
O << "]";
}
@@ -669,7 +670,7 @@ void ARMAsmPrinter::printThumbAddrModeSPOperand(const MachineInstr *MI,int Op) {
const MachineOperand &MO2 = MI->getOperand(Op+1);
O << "[" << getRegisterName(MO1.getReg());
if (unsigned ImmOffs = MO2.getImm())
- O << ", #+" << ImmOffs*4;
+ O << ", #" << ImmOffs*4;
O << "]";
}
@@ -705,7 +706,7 @@ void ARMAsmPrinter::printT2AddrModeImm12Operand(const MachineInstr *MI,
unsigned OffImm = MO2.getImm();
if (OffImm) // Don't print +0.
- O << ", #+" << OffImm;
+ O << ", #" << OffImm;
O << "]";
}
@@ -721,7 +722,7 @@ void ARMAsmPrinter::printT2AddrModeImm8Operand(const MachineInstr *MI,
if (OffImm < 0)
O << ", #-" << -OffImm;
else if (OffImm > 0)
- O << ", #+" << OffImm;
+ O << ", #" << OffImm;
O << "]";
}
@@ -737,7 +738,7 @@ void ARMAsmPrinter::printT2AddrModeImm8s4Operand(const MachineInstr *MI,
if (OffImm < 0)
O << ", #-" << -OffImm * 4;
else if (OffImm > 0)
- O << ", #+" << OffImm * 4;
+ O << ", #" << OffImm * 4;
O << "]";
}
@@ -749,7 +750,18 @@ void ARMAsmPrinter::printT2AddrModeImm8OffsetOperand(const MachineInstr *MI,
if (OffImm < 0)
O << "#-" << -OffImm;
else if (OffImm > 0)
- O << "#+" << OffImm;
+ O << "#" << OffImm;
+}
+
+void ARMAsmPrinter::printT2AddrModeImm8s4OffsetOperand(const MachineInstr *MI,
+ int OpNum) {
+ const MachineOperand &MO1 = MI->getOperand(OpNum);
+ int32_t OffImm = (int32_t)MO1.getImm() / 4;
+ // Don't print +0.
+ if (OffImm < 0)
+ O << "#-" << -OffImm * 4;
+ else if (OffImm > 0)
+ O << "#" << OffImm * 4;
}
void ARMAsmPrinter::printT2AddrModeSoRegOperand(const MachineInstr *MI,
diff --git a/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
index c1d68fbdf2..fba380df28 100644
--- a/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
+++ b/lib/Target/ARM/AsmPrinter/ARMInstPrinter.cpp
@@ -28,7 +28,159 @@ using namespace llvm;
#undef MachineInstr
#undef ARMAsmPrinter
-void ARMInstPrinter::printInst(const MCInst *MI) { printInstruction(MI); }
+static unsigned NextReg(unsigned Reg) {
+ switch (Reg) {
+ case ARM::D0:
+ return ARM::D1;
+ case ARM::D1:
+ return ARM::D2;
+ case ARM::D2:
+ return ARM::D3;
+ case ARM::D3:
+ return ARM::D4;
+ case ARM::D4:
+ return ARM::D5;
+ case ARM::D5:
+ return ARM::D6;
+ case ARM::D6:
+ return ARM::D7;
+ case ARM::D7:
+ return ARM::D8;
+ case ARM::D8:
+ return ARM::D9;
+ case ARM::D9:
+ return ARM::D10;
+ case ARM::D10:
+ return ARM::D11;
+ case ARM::D11:
+ return ARM::D12;
+ case ARM::D12:
+ return ARM::D13;
+ case ARM::D13:
+ return ARM::D14;
+ case ARM::D14:
+ return ARM::D15;
+ case ARM::D15:
+ return ARM::D16;
+ case ARM::D16:
+ return ARM::D17;
+ case ARM::D17:
+ return ARM::D18;
+ case ARM::D18:
+ return ARM::D19;
+ case ARM::D19:
+ return ARM::D20;
+ case ARM::D20:
+ return ARM::D21;
+ case ARM::D21:
+ return ARM::D22;
+ case ARM::D22:
+ return ARM::D23;
+ case ARM::D23:
+ return ARM::D24;
+ case ARM::D24:
+ return ARM::D25;
+ case ARM::D25:
+ return ARM::D26;
+ case ARM::D26:
+ return ARM::D27;
+ case ARM::D27:
+ return ARM::D28;
+ case ARM::D28:
+ return ARM::D29;
+ case ARM::D29:
+ return ARM::D30;
+ case ARM::D30:
+ return ARM::D31;
+
+ default:
+ assert(0 && "Unexpected register enum");
+ }
+}
+
+void ARMInstPrinter::printInst(const MCInst *MI) {
+ // Check for MOVs and print canonical forms, instead.
+ if (MI->getOpcode() == ARM::MOVs) {
+ const MCOperand &Dst = MI->getOperand(0);
+ const MCOperand &MO1 = MI->getOperand(1);
+ const MCOperand &MO2 = MI->getOperand(2);
+ const MCOperand &MO3 = MI->getOperand(3);
+
+ O << '\t' << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO3.getImm()));
+ printSBitModifierOperand(MI, 6);
+ printPredicateOperand(MI, 4);
+
+ O << '\t' << getRegisterName(Dst.getReg())
+ << ", " << getRegisterName(MO1.getReg());
+
+ if (ARM_AM::getSORegShOp(MO3.getImm()) == ARM_AM::rrx)
+ return;
+
+ O << ", ";
+
+ if (MO2.getReg()) {
+ O << getRegisterName(MO2.getReg());
+ assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
+ } else {
+ O << "#" << ARM_AM::getSORegOffset(MO3.getImm());
+ }
+ return;
+ }
+
+ // A8.6.123 PUSH
+ if ((MI->getOpcode() == ARM::STM_UPD || MI->getOpcode() == ARM::t2STM_UPD) &&
+ MI->getOperand(0).getReg() == ARM::SP) {
+ const MCOperand &MO1 = MI->getOperand(2);
+ if (ARM_AM::getAM4SubMode(MO1.getImm()) == ARM_AM::db) {
+ O << '\t' << "push";
+ printPredicateOperand(MI, 3);
+ O << '\t';
+ printRegisterList(MI, 5);
+ return;
+ }
+ }
+
+ // A8.6.122 POP
+ if ((MI->getOpcode() == ARM::LDM_UPD || MI->getOpcode() == ARM::t2LDM_UPD) &&
+ MI->getOperand(0).getReg() == ARM::SP) {
+ const MCOperand &MO1 = MI->getOperand(2);
+ if (ARM_AM::getAM4SubMode(MO1.getImm()) == ARM_AM::ia) {
+ O << '\t' << "pop";
+ printPredicateOperand(MI, 3);
+ O << '\t';
+ printRegisterList(MI, 5);
+ return;
+ }
+ }
+
+ // A8.6.355 VPUSH
+ if ((MI->getOpcode() == ARM::VSTMS_UPD || MI->getOpcode() ==ARM::VSTMD_UPD) &&
+ MI->getOperand(0).getReg() == ARM::SP) {
+ const MCOperand &MO1 = MI->getOperand(2);
+ if (ARM_AM::getAM5SubMode(MO1.getImm()) == ARM_AM::db) {
+ O << '\t' << "vpush";
+ printPredicateOperand(MI, 3);
+ O << '\t';
+ printRegisterList(MI, 5);
+ return;
+ }
+ }
+
+ // A8.6.354 VPOP
+ if ((MI->getOpcode() == ARM::VLDMS_UPD || MI->getOpcode() ==ARM::VLDMD_UPD) &&
+ MI->getOperand(0).getReg() == ARM::SP) {
+ const MCOperand &MO1 = MI->getOperand(2);
+ if (ARM_AM::getAM5SubMode(MO1.getImm()) == ARM_AM::ia) {
+ O << '\t' << "vpop";
+ printPredicateOperand(MI, 3);
+ O << '\t';
+ printRegisterList(MI, 5);
+ return;
+ }
+ }
+
+ printInstruction(MI);
+ }
void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const char *Modifier) {
@@ -36,6 +188,9 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
if (Op.isReg()) {
unsigned Reg = Op.getReg();
if (Modifier && strcmp(Modifier, "dregpair") == 0) {
+ O << '{' << getRegisterName(Reg) << ", "
+ << getRegisterName(NextReg(Reg)) << '}';
+#if 0
// FIXME: Breaks e.g. ARM/vmul.ll.
assert(0);
/*
@@ -44,6 +199,7 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
O << '{'
<< getRegisterName(DRegLo) << ',' << getRegisterName(DRegHi)
<< '}';*/
+#endif
} else if (Modifier && strcmp(Modifier, "lane") == 0) {
assert(0);
/*
@@ -56,7 +212,9 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
O << getRegisterName(Reg);
}
} else if (Op.isImm()) {
- assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
+ bool isCallOp = Modifier && !strcmp(Modifier, "call");
+ assert(isCallOp ||
+ ((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported"));
O << '#' << Op.getImm();
} else {
assert((Modifier == 0 || Modifier[0] == 0) && "No modifiers supported");
@@ -142,17 +300,17 @@ void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op) {
O << "[" << getRegisterName(MO1.getReg());
if (!MO2.getReg()) {
- if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
+ if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
O << ", #"
- << (char)ARM_AM::getAM2Op(MO3.getImm())
- << ARM_AM::getAM2Offset(MO3.getImm());
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
+ << ARM_AM::getAM2Offset(MO3.getImm());
O << "]";
return;
}
O << ", "
- << (char)ARM_AM::getAM2Op(MO3.getImm())
- << getRegisterName(MO2.getReg());
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm()))
+ << getRegisterName(MO2.getReg());
if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
O << ", "
@@ -169,11 +327,14 @@ void ARMInstPrinter::printAddrMode2OffsetOperand(const MCInst *MI,
if (!MO1.getReg()) {
unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm());
assert(ImmOffs && "Malformed indexed load / store!");
- O << '#' << (char)ARM_AM::getAM2Op(MO2.getImm()) << ImmOffs;
+ O << '#'
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
+ << ImmOffs;
return;
}
- O << (char)ARM_AM::getAM2Op(MO2.getImm()) << getRegisterName(MO1.getReg());
+ O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm()))
+ << getRegisterName(MO1.getReg());
if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
O << ", "
@@ -196,8 +357,8 @@ void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned OpNum) {
if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
O << ", #"
- << (char)ARM_AM::getAM3Op(MO3.getImm())
- << ImmOffs;
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO3.getImm()))
+ << ImmOffs;
O << ']';
}
@@ -214,9 +375,9 @@ void ARMInstPrinter::printAddrMode3OffsetOperand(const MCInst *MI,
unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
assert(ImmOffs && "Malformed indexed load / store!");
- O << "#"
- << (char)ARM_AM::getAM3Op(MO2.getImm())
- << ImmOffs;
+ O << '#'
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm()))
+ << ImmOffs;
}
@@ -259,7 +420,7 @@ void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum,
if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
O << ", #"
- << (char)ARM_AM::getAM5Op(MO2.getImm())
+ << ARM_AM::getAddrOpcStr(ARM_AM::getAM5Op(MO2.getImm()))
<< ImmOffs*4;
}
O << "]";
@@ -298,14 +459,56 @@ void ARMInstPrinter::printBitfieldInvMaskImmOperand (const MCInst *MI,
void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum) {
O << "{";
- // Always skip the first operand, it's the optional (and implicit writeback).
- for (unsigned i = OpNum+1, e = MI->getNumOperands(); i != e; ++i) {
- if (i != OpNum+1) O << ", ";
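+  // OpNum is the first register of the list; callers (e.g. the push / pop
+  // handling above) skip any writeback, mode, and predicate operands.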
+ for (unsigned i = OpNum, e = MI->getNumOperands(); i != e; ++i) {
+ if (i != OpNum) O << ", ";
O << getRegisterName(MI->getOperand(i).getReg());
}
O << "}";
}
+void ARMInstPrinter::printCPSOptionOperand(const MCInst *MI, unsigned OpNum) {
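+  // Decode the packed CPS option operand: bits [4:0] are the mode, bit 5 is
+  // the change-mode flag, bits [8:6] are the A/I/F flags, and bits [10:9]
+  // are imod (2 prints "ie", 3 prints "id").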
+ const MCOperand &Op = MI->getOperand(OpNum);
+ unsigned option = Op.getImm();
+ unsigned mode = option & 31;
+ bool changemode = option >> 5 & 1;
+ unsigned AIF = option >> 6 & 7;
+ unsigned imod = option >> 9 & 3;
+ if (imod == 2)
+ O << "ie";
+ else if (imod == 3)
+ O << "id";
+ O << '\t';
+ if (imod > 1) {
+ if (AIF & 4) O << 'a';
+ if (AIF & 2) O << 'i';
+ if (AIF & 1) O << 'f';
+ if (AIF > 0 && changemode) O << ", ";
+ }
+ if (changemode)
+ O << '#' << mode;
+}
+
+void ARMInstPrinter::printMSRMaskOperand(const MCInst *MI, unsigned OpNum) {
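+  // Print the MSR/MRS field-mask suffix: each set bit appends one of
+  // f, s, x, c (e.g. a mask of 9 prints "_fc").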
+ const MCOperand &Op = MI->getOperand(OpNum);
+ unsigned Mask = Op.getImm();
+ if (Mask) {
+ O << '_';
+ if (Mask & 8) O << 'f';
+ if (Mask & 4) O << 's';
+ if (Mask & 2) O << 'x';
+ if (Mask & 1) O << 'c';
+ }
+}
+
+void ARMInstPrinter::printNegZeroOperand(const MCInst *MI, unsigned OpNum)
+{
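+  // Negative values are biased by one so that -1 prints as #-0.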
+ const MCOperand &Op = MI->getOperand(OpNum);
+ O << '#';
+ if (Op.getImm() < 0)
+ O << '-' << (-Op.getImm() - 1);
+ else
+ O << Op.getImm();
+}
+
void ARMInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNum) {
ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(OpNum).getImm();
if (CC != ARMCC::AL)
@@ -347,3 +550,191 @@ void ARMInstPrinter::printPCLabel(const MCInst *MI, unsigned OpNum) {
void ARMInstPrinter::printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum) {
O << "#" << MI->getOperand(OpNum).getImm() * 4;
}
+
+void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum) {
+  // (3 - the number of trailing zeros) is the number of then / else suffixes
+  // to print.
+ unsigned Mask = MI->getOperand(OpNum).getImm();
+ unsigned CondBit0 = Mask >> 4 & 1;
+ unsigned NumTZ = CountTrailingZeros_32(Mask);
+ assert(NumTZ <= 3 && "Invalid IT mask!");
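+  // Each remaining mask bit equal to the condition's LSB prints 't' (then);
+  // otherwise it prints 'e' (else).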
+ for (unsigned Pos = 3, e = NumTZ; Pos > e; --Pos) {
+ bool T = ((Mask >> Pos) & 1) == CondBit0;
+ if (T)
+ O << 't';
+ else
+ O << 'e';
+ }
+}
+
+void ARMInstPrinter::printThumbAddrModeRROperand(const MCInst *MI, unsigned Op)
+{
+ const MCOperand &MO1 = MI->getOperand(Op);
+ const MCOperand &MO2 = MI->getOperand(Op+1);
+ O << "[" << getRegisterName(MO1.getReg());
+ O << ", " << getRegisterName(MO2.getReg()) << "]";
+}
+
+void ARMInstPrinter::printThumbAddrModeRI5Operand(const MCInst *MI, unsigned Op,
+ unsigned Scale) {
+ const MCOperand &MO1 = MI->getOperand(Op);
+ const MCOperand &MO2 = MI->getOperand(Op+1);
+ const MCOperand &MO3 = MI->getOperand(Op+2);
+
+ if (!MO1.isReg()) { // FIXME: This is for CP entries, but isn't right.
+ printOperand(MI, Op);
+ return;
+ }
+
+ O << "[" << getRegisterName(MO1.getReg());
+ if (MO3.getReg())
+ O << ", " << getRegisterName(MO3.getReg());
+ else if (unsigned ImmOffs = MO2.getImm())
+ O << ", #" << ImmOffs * Scale;
+ O << "]";
+}
+
+void ARMInstPrinter::printThumbAddrModeS1Operand(const MCInst *MI, unsigned Op)
+{
+ printThumbAddrModeRI5Operand(MI, Op, 1);
+}
+
+void ARMInstPrinter::printThumbAddrModeS2Operand(const MCInst *MI, unsigned Op)
+{
+ printThumbAddrModeRI5Operand(MI, Op, 2);
+}
+
+void ARMInstPrinter::printThumbAddrModeS4Operand(const MCInst *MI, unsigned Op)
+{
+ printThumbAddrModeRI5Operand(MI, Op, 4);
+}
+
+void ARMInstPrinter::printThumbAddrModeSPOperand(const MCInst *MI, unsigned Op)
+{
+ const MCOperand &MO1 = MI->getOperand(Op);
+ const MCOperand &MO2 = MI->getOperand(Op+1);
+ O << "[" << getRegisterName(MO1.getReg());
+ if (unsigned ImmOffs = MO2.getImm())
+ O << ", #" << ImmOffs*4;
+ O << "]";
+}
+
+void ARMInstPrinter::printTBAddrMode(const MCInst *MI, unsigned OpNum) {
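+  // Table branches index from pc; t2TBH scales the index register by 2.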
+ O << "[pc, " << getRegisterName(MI->getOperand(OpNum).getReg());
+ if (MI->getOpcode() == ARM::t2TBH)
+ O << ", lsl #1";
+ O << ']';
+}
+
+// Constant shifts: t2_so_reg is a 2-operand unit corresponding to the Thumb2
+// register-with-shift forms:
+// REG 0 0 - e.g. R5
+// REG IMM, SH_OPC - e.g. R5, LSL #3
+void ARMInstPrinter::printT2SOOperand(const MCInst *MI, unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ const MCOperand &MO2 = MI->getOperand(OpNum+1);
+
+ unsigned Reg = MO1.getReg();
+ O << getRegisterName(Reg);
+
+  assert(MO2.isImm() && "Not a valid t2_so_reg value!");
+
+  // Print the shift opc.
+  O << ", "
+    << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO2.getImm()))
+    << " ";
+
+  O << "#" << ARM_AM::getSORegOffset(MO2.getImm());
+}
+
+void ARMInstPrinter::printT2AddrModeImm12Operand(const MCInst *MI,
+ unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ const MCOperand &MO2 = MI->getOperand(OpNum+1);
+
+ O << "[" << getRegisterName(MO1.getReg());
+
+ unsigned OffImm = MO2.getImm();
+ if (OffImm) // Don't print +0.
+ O << ", #" << OffImm;
+ O << "]";
+}
+
+void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI,
+ unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ const MCOperand &MO2 = MI->getOperand(OpNum+1);
+
+ O << "[" << getRegisterName(MO1.getReg());
+
+ int32_t OffImm = (int32_t)MO2.getImm();
+ // Don't print +0.
+ if (OffImm < 0)
+ O << ", #-" << -OffImm;
+ else if (OffImm > 0)
+ O << ", #" << OffImm;
+ O << "]";
+}
+
+void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI,
+ unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ const MCOperand &MO2 = MI->getOperand(OpNum+1);
+
+ O << "[" << getRegisterName(MO1.getReg());
+
+ int32_t OffImm = (int32_t)MO2.getImm() / 4;
+ // Don't print +0.
+ if (OffImm < 0)
+ O << ", #-" << -OffImm * 4;
+ else if (OffImm > 0)
+ O << ", #" << OffImm * 4;
+ O << "]";
+}
+
+void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI,
+ unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ int32_t OffImm = (int32_t)MO1.getImm();
+ // Don't print +0.
+ if (OffImm < 0)
+ O << "#-" << -OffImm;
+ else if (OffImm > 0)
+ O << "#" << OffImm;
+}
+
+void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI,
+ unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ int32_t OffImm = (int32_t)MO1.getImm() / 4;
+ // Don't print +0.
+ if (OffImm < 0)
+ O << "#-" << -OffImm * 4;
+ else if (OffImm > 0)
+ O << "#" << OffImm * 4;
+}
+
+void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI,
+ unsigned OpNum) {
+ const MCOperand &MO1 = MI->getOperand(OpNum);
+ const MCOperand &MO2 = MI->getOperand(OpNum+1);
+ const MCOperand &MO3 = MI->getOperand(OpNum+2);
+
+ O << "[" << getRegisterName(MO1.getReg());
+
+ assert(MO2.getReg() && "Invalid so_reg load / store address!");
+ O << ", " << getRegisterName(MO2.getReg());
+
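+  // Only an LSL of 0-3 is valid here; a shift of 0 is not printed.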
+ unsigned ShAmt = MO3.getImm();
+ if (ShAmt) {
+ assert(ShAmt <= 3 && "Not a valid Thumb2 addressing mode!");
+ O << ", lsl #" << ShAmt;
+ }
+ O << "]";
+}
+
+void ARMInstPrinter::printVFPf32ImmOperand(const MCInst *MI, unsigned OpNum) {
+ O << '#' << MI->getOperand(OpNum).getImm();
+}
+
+void ARMInstPrinter::printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum) {
+ O << '#' << MI->getOperand(OpNum).getImm();
+}
+
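Note: the hunks above and the updated FileCheck lines below rely on a sign-prefix convention in which a subtracted offset prints a leading '-' and an added offset prints no prefix, so an immediate offset renders as "#28" or "#-28" rather than "#+28". A minimal, self-contained C++ sketch of that convention follows; the helper names (signPrefix, formatImmOffset) are hypothetical and not LLVM API.

    #include <iostream>
    #include <string>

    enum class AddrOp { Add, Sub };

    // No prefix for additions, '-' for subtractions (hypothetical helper,
    // mirroring the convention the updated tests expect).
    static const char *signPrefix(AddrOp Op) {
      return Op == AddrOp::Sub ? "-" : "";
    }

    // Format a base register plus immediate offset; a zero offset is omitted.
    static std::string formatImmOffset(const std::string &Base, AddrOp Op,
                                       unsigned Offset) {
      if (Offset == 0)
        return "[" + Base + "]";
      return "[" + Base + ", #" + signPrefix(Op) + std::to_string(Offset) + "]";
    }

    int main() {
      std::cout << formatImmOffset("r7", AddrOp::Add, 28) << "\n"; // [r7, #28]
      std::cout << formatImmOffset("sp", AddrOp::Sub, 4) << "\n";  // [sp, #-4]
      std::cout << formatImmOffset("sp", AddrOp::Add, 0) << "\n";  // [sp]
    }
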
diff --git a/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h b/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
index 9a3cbc3f5d..ceb641d95b 100644
--- a/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
+++ b/lib/Target/ARM/AsmPrinter/ARMInstPrinter.h
@@ -54,26 +54,26 @@ public:
void printBitfieldInvMaskImmOperand(const MCInst *MI, unsigned OpNum);
void printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum);
- void printThumbITMask(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeRROperand(const MCInst *MI, unsigned OpNum) {}
+ void printThumbITMask(const MCInst *MI, unsigned OpNum);
+ void printThumbAddrModeRROperand(const MCInst *MI, unsigned OpNum);
void printThumbAddrModeRI5Operand(const MCInst *MI, unsigned OpNum,
- unsigned Scale) {}
- void printThumbAddrModeS1Operand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeS2Operand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeS4Operand(const MCInst *MI, unsigned OpNum) {}
- void printThumbAddrModeSPOperand(const MCInst *MI, unsigned OpNum) {}
+ unsigned Scale);
+ void printThumbAddrModeS1Operand(const MCInst *MI, unsigned OpNum);
+ void printThumbAddrModeS2Operand(const MCInst *MI, unsigned OpNum);
+ void printThumbAddrModeS4Operand(const MCInst *MI, unsigned OpNum);
+ void printThumbAddrModeSPOperand(const MCInst *MI, unsigned OpNum);
- void printT2SOOperand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm12Operand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8OffsetOperand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeImm8s4OffsetOperand(const MCInst *MI, unsigned OpNum) {}
- void printT2AddrModeSoRegOperand(const MCInst *MI, unsigned OpNum) {}
+ void printT2SOOperand(const MCInst *MI, unsigned OpNum);
+ void printT2AddrModeImm12Operand(const MCInst *MI, unsigned OpNum);
+ void printT2AddrModeImm8Operand(const MCInst *MI, unsigned OpNum);
+ void printT2AddrModeImm8s4Operand(const MCInst *MI, unsigned OpNum);
+ void printT2AddrModeImm8OffsetOperand(const MCInst *MI, unsigned OpNum);
+ void printT2AddrModeImm8s4OffsetOperand(const MCInst *MI, unsigned OpNum);
+ void printT2AddrModeSoRegOperand(const MCInst *MI, unsigned OpNum);
- void printCPSOptionOperand(const MCInst *MI, unsigned OpNum) {}
- void printMSRMaskOperand(const MCInst *MI, unsigned OpNum) {}
- void printNegZeroOperand(const MCInst *MI, unsigned OpNum) {}
+ void printCPSOptionOperand(const MCInst *MI, unsigned OpNum);
+ void printMSRMaskOperand(const MCInst *MI, unsigned OpNum);
+ void printNegZeroOperand(const MCInst *MI, unsigned OpNum);
void printPredicateOperand(const MCInst *MI, unsigned OpNum);
void printMandatoryPredicateOperand(const MCInst *MI, unsigned OpNum);
void printSBitModifierOperand(const MCInst *MI, unsigned OpNum);
@@ -82,10 +82,10 @@ public:
const char *Modifier);
void printJTBlockOperand(const MCInst *MI, unsigned OpNum) {}
void printJT2BlockOperand(const MCInst *MI, unsigned OpNum) {}
- void printTBAddrMode(const MCInst *MI, unsigned OpNum) {}
+ void printTBAddrMode(const MCInst *MI, unsigned OpNum);
void printNoHashImmediate(const MCInst *MI, unsigned OpNum);
- void printVFPf32ImmOperand(const MCInst *MI, int OpNum) {}
- void printVFPf64ImmOperand(const MCInst *MI, int OpNum) {}
+ void printVFPf32ImmOperand(const MCInst *MI, unsigned OpNum);
+ void printVFPf64ImmOperand(const MCInst *MI, unsigned OpNum);
void printHex8ImmOperand(const MCInst *MI, int OpNum) {}
void printHex16ImmOperand(const MCInst *MI, int OpNum) {}
void printHex32ImmOperand(const MCInst *MI, int OpNum) {}
diff --git a/test/CodeGen/ARM/2009-10-27-double-align.ll b/test/CodeGen/ARM/2009-10-27-double-align.ll
index a4e76859d1..f17d059ed0 100644
--- a/test/CodeGen/ARM/2009-10-27-double-align.ll
+++ b/test/CodeGen/ARM/2009-10-27-double-align.ll
@@ -4,8 +4,8 @@
define arm_aapcscc void @g() {
entry:
-;CHECK: [sp, #+8]
-;CHECK: [sp, #+12]
+;CHECK: [sp, #8]
+;CHECK: [sp, #12]
;CHECK: [sp]
tail call arm_aapcscc void (i8*, ...)* @f(i8* getelementptr ([1 x i8]* @.str, i32 0, i32 0), i32 1, double 2.000000e+00, i32 3, double 4.000000e+00)
ret void
diff --git a/test/CodeGen/ARM/2009-10-30.ll b/test/CodeGen/ARM/2009-10-30.ll
index 90a5bd2a75..87d1a8b9e9 100644
--- a/test/CodeGen/ARM/2009-10-30.ll
+++ b/test/CodeGen/ARM/2009-10-30.ll
@@ -6,7 +6,7 @@ define void @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, ...) {
entry:
;CHECK: sub sp, sp, #4
;CHECK: add r{{[0-9]+}}, sp, #8
-;CHECK: str r{{[0-9]+}}, [sp], #+4
+;CHECK: str r{{[0-9]+}}, [sp], #4
;CHECK: bx lr
%ap = alloca i8*, align 4
%ap1 = bitcast i8** %ap to i8*
diff --git a/test/CodeGen/ARM/arm-negative-stride.ll b/test/CodeGen/ARM/arm-negative-stride.ll
index 52ab8717c1..fb0f8ff879 100644
--- a/test/CodeGen/ARM/arm-negative-stride.ll
+++ b/test/CodeGen/ARM/arm-negative-stride.ll
@@ -5,7 +5,7 @@
define void @test(i32* %P, i32 %A, i32 %i) nounwind {
entry:
-; CHECK: str r1, [{{r.*}}, +{{r.*}}, lsl #2]
+; CHECK: str r1, [{{r.*}}, {{r.*}}, lsl #2]
icmp eq i32 %i, 0 ; <i1>:0 [#uses=1]
br i1 %0, label %return, label %bb
diff --git a/test/CodeGen/ARM/globals.ll b/test/CodeGen/ARM/globals.ll
index 886c0d55cf..adb4497397 100644
--- a/test/CodeGen/ARM/globals.ll
+++ b/test/CodeGen/ARM/globals.ll
@@ -41,7 +41,7 @@ define i32 @test1() {
; DarwinPIC: _test1:
; DarwinPIC: ldr r0, LCPI1_0
; DarwinPIC: LPC1_0:
-; DarwinPIC: ldr r0, [pc, +r0]
+; DarwinPIC: ldr r0, [pc, r0]
; DarwinPIC: ldr r0, [r0]
; DarwinPIC: bx lr
@@ -63,7 +63,7 @@ define i32 @test1() {
; LinuxPIC: .LPC1_0:
; LinuxPIC: add r0, pc, r0
-; LinuxPIC: ldr r0, [r1, +r0]
+; LinuxPIC: ldr r0, [r1, r0]
; LinuxPIC: ldr r0, [r0]
; LinuxPIC: bx lr
diff --git a/test/CodeGen/ARM/ldrd.ll b/test/CodeGen/ARM/ldrd.ll
index c366e2dca5..895562a1d3 100644
--- a/test/CodeGen/ARM/ldrd.ll
+++ b/test/CodeGen/ARM/ldrd.ll
@@ -10,10 +10,10 @@ entry:
;V6: ldrd r2, [r2]
;V5: ldr r3, [r2]
-;V5: ldr r2, [r2, #+4]
+;V5: ldr r2, [r2, #4]
;EABI: ldr r3, [r2]
-;EABI: ldr r2, [r2, #+4]
+;EABI: ldr r2, [r2, #4]
%0 = load i64** @b, align 4
%1 = load i64* %0, align 4
diff --git a/test/CodeGen/ARM/str_pre-2.ll b/test/CodeGen/ARM/str_pre-2.ll
index f8d3df29c4..553cd64fce 100644
--- a/test/CodeGen/ARM/str_pre-2.ll
+++ b/test/CodeGen/ARM/str_pre-2.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=arm-linux-gnu | grep {str.*\\!}
-; RUN: llc < %s -mtriple=arm-linux-gnu | grep {ldr.*\\\[.*\], #+4}
+; RUN: llc < %s -mtriple=arm-linux-gnu | grep {ldr.*\\\[.*\], #4}
@b = external global i64*
diff --git a/test/CodeGen/ARM/tls2.ll b/test/CodeGen/ARM/tls2.ll
index d932f90e4c..57370c4de1 100644
--- a/test/CodeGen/ARM/tls2.ll
+++ b/test/CodeGen/ARM/tls2.ll
@@ -7,7 +7,7 @@
define i32 @f() {
; CHECK-NONPIC: f:
-; CHECK-NONPIC: ldr {{r.}}, [pc, +{{r.}}]
+; CHECK-NONPIC: ldr {{r.}}, [pc, {{r.}}]
; CHECK-NONPIC: i(gottpoff)
; CHECK-PIC: f:
; CHECK-PIC: __tls_get_addr
@@ -18,7 +18,7 @@ entry:
define i32* @g() {
; CHECK-NONPIC: g:
-; CHECK-NONPIC: ldr {{r.}}, [pc, +{{r.}}]
+; CHECK-NONPIC: ldr {{r.}}, [pc, {{r.}}]
; CHECK-NONPIC: i(gottpoff)
; CHECK-PIC: g:
; CHECK-PIC: __tls_get_addr
diff --git a/test/CodeGen/Thumb2/ldr-str-imm12.ll b/test/CodeGen/Thumb2/ldr-str-imm12.ll
index f007b5c697..55cdac983b 100644
--- a/test/CodeGen/Thumb2/ldr-str-imm12.ll
+++ b/test/CodeGen/Thumb2/ldr-str-imm12.ll
@@ -22,7 +22,7 @@
define arm_apcscc %union.rec* @Manifest(%union.rec* %x, %union.rec* %env, %struct.STYLE* %style, %union.rec** %bthr, %union.rec** %fthr, %union.rec** %target, %union.rec** %crs, i32 %ok, i32 %need_expand, %union.rec** %enclose, i32 %fcr) nounwind {
entry:
-; CHECK: ldr.w r9, [r7, #+28]
+; CHECK: ldr.w r9, [r7, #28]
%xgaps.i = alloca [32 x %union.rec*], align 4 ; <[32 x %union.rec*]*> [#uses=0]
%ycomp.i = alloca [32 x %union.rec*], align 4 ; <[32 x %union.rec*]*> [#uses=0]
br i1 false, label %bb, label %bb20
@@ -50,9 +50,9 @@ bb119: ; preds = %bb20, %bb20
bb420: ; preds = %bb20, %bb20
; CHECK: bb420
; CHECK: str r{{[0-7]}}, [sp]
-; CHECK: str r{{[0-7]}}, [sp, #+4]
-; CHECK: str r{{[0-7]}}, [sp, #+8]
-; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp, #+24]
+; CHECK: str r{{[0-7]}}, [sp, #4]
+; CHECK: str r{{[0-7]}}, [sp, #8]
+; CHECK: str{{(.w)?}} r{{[0-9]+}}, [sp, #24]
store %union.rec* null, %union.rec** @zz_hold, align 4
store %union.rec* null, %union.rec** @zz_res, align 4
store %union.rec* %x, %union.rec** @zz_hold, align 4
diff --git a/test/CodeGen/Thumb2/thumb2-ldr.ll b/test/CodeGen/Thumb2/thumb2-ldr.ll
index 94888fd940..88434f1c7d 100644
--- a/test/CodeGen/Thumb2/thumb2-ldr.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldr.ll
@@ -11,7 +11,7 @@ entry:
define i32 @f2(i32* %v) {
entry:
; CHECK: f2:
-; CHECK: ldr.w r0, [r0, #+4092]
+; CHECK: ldr.w r0, [r0, #4092]
%tmp2 = getelementptr i32* %v, i32 1023
%tmp = load i32* %tmp2
ret i32 %tmp
diff --git a/test/CodeGen/Thumb2/thumb2-ldrh.ll b/test/CodeGen/Thumb2/thumb2-ldrh.ll
index f1fb79c35e..fee97bf689 100644
--- a/test/CodeGen/Thumb2/thumb2-ldrh.ll
+++ b/test/CodeGen/Thumb2/thumb2-ldrh.ll
@@ -11,7 +11,7 @@ entry:
define i16 @f2(i16* %v) {
entry:
; CHECK: f2:
-; CHECK: ldrh.w r0, [r0, #+2046]
+; CHECK: ldrh.w r0, [r0, #2046]
%tmp2 = getelementptr i16* %v, i16 1023
%tmp = load i16* %tmp2
ret i16 %tmp
diff --git a/test/CodeGen/Thumb2/thumb2-str.ll b/test/CodeGen/Thumb2/thumb2-str.ll
index 3eeec8c385..11bb936d1e 100644
--- a/test/CodeGen/Thumb2/thumb2-str.ll
+++ b/test/CodeGen/Thumb2/thumb2-str.ll
@@ -9,7 +9,7 @@ define i32 @f1(i32 %a, i32* %v) {
define i32 @f2(i32 %a, i32* %v) {
; CHECK: f2:
-; CHECK: str.w r0, [r1, #+4092]
+; CHECK: str.w r0, [r1, #4092]
%tmp2 = getelementptr i32* %v, i32 1023
store i32 %a, i32* %tmp2
ret i32 %a
diff --git a/test/CodeGen/Thumb2/thumb2-str_pre.ll b/test/CodeGen/Thumb2/thumb2-str_pre.ll
index 9af960bbeb..1e6616a91c 100644
--- a/test/CodeGen/Thumb2/thumb2-str_pre.ll
+++ b/test/CodeGen/Thumb2/thumb2-str_pre.ll
@@ -2,7 +2,7 @@
define void @test1(i32* %X, i32* %A, i32** %dest) {
; CHECK: test1
-; CHECK: str r1, [r0, #+16]!
+; CHECK: str r1, [r0, #16]!
%B = load i32* %A ; <i32> [#uses=1]
%Y = getelementptr i32* %X, i32 4 ; <i32*> [#uses=2]
store i32 %B, i32* %Y
@@ -12,7 +12,7 @@ define void @test1(i32* %X, i32* %A, i32** %dest) {
define i16* @test2(i16* %X, i32* %A) {
; CHECK: test2
-; CHECK: strh r1, [r0, #+8]!
+; CHECK: strh r1, [r0, #8]!
%B = load i32* %A ; <i32> [#uses=1]
%Y = getelementptr i16* %X, i32 4 ; <i16*> [#uses=2]
%tmp = trunc i32 %B to i16 ; <i16> [#uses=1]
diff --git a/test/CodeGen/Thumb2/thumb2-strb.ll b/test/CodeGen/Thumb2/thumb2-strb.ll
index 1ebb938b1a..7978e7fa91 100644
--- a/test/CodeGen/Thumb2/thumb2-strb.ll
+++ b/test/CodeGen/Thumb2/thumb2-strb.ll
@@ -9,7 +9,7 @@ define i8 @f1(i8 %a, i8* %v) {
define i8 @f2(i8 %a, i8* %v) {
; CHECK: f2:
-; CHECK: strb.w r0, [r1, #+4092]
+; CHECK: strb.w r0, [r1, #4092]
%tmp2 = getelementptr i8* %v, i32 4092
store i8 %a, i8* %tmp2
ret i8 %a
diff --git a/test/CodeGen/Thumb2/thumb2-strh.ll b/test/CodeGen/Thumb2/thumb2-strh.ll
index b0eb8c12f5..97110a726f 100644
--- a/test/CodeGen/Thumb2/thumb2-strh.ll
+++ b/test/CodeGen/Thumb2/thumb2-strh.ll
@@ -9,7 +9,7 @@ define i16 @f1(i16 %a, i16* %v) {
define i16 @f2(i16 %a, i16* %v) {
; CHECK: f2:
-; CHECK: strh.w r0, [r1, #+4092]
+; CHECK: strh.w r0, [r1, #4092]
%tmp2 = getelementptr i16* %v, i32 2046
store i16 %a, i16* %tmp2
ret i16 %a