diff options
author | Hao Liu <Hao.Liu@arm.com> | 2013-12-30 02:38:12 +0000 |
---|---|---|
committer | Hao Liu <Hao.Liu@arm.com> | 2013-12-30 02:38:12 +0000 |
commit | afcdbf7400c28c5b3605c8c3672e127900f4eff0 (patch) | |
tree | 5d3220967f14e8d7dcc30b650414f2f6f085a341 /lib/Target/AArch64 | |
parent | 43ffcc571cfd300b6a84d0ce0ca42cb783d5c8d9 (diff) | |
download | llvm-afcdbf7400c28c5b3605c8c3672e127900f4eff0.tar.gz llvm-afcdbf7400c28c5b3605c8c3672e127900f4eff0.tar.bz2 llvm-afcdbf7400c28c5b3605c8c3672e127900f4eff0.tar.xz |
[AArch64]Add code to spill/fill Q register tuples such as QPair/QTriple/QQuad.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@198193 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Target/AArch64')
-rw-r--r-- | lib/Target/AArch64/AArch64InstrInfo.cpp | 58 | ||||
-rw-r--r-- | lib/Target/AArch64/AArch64RegisterInfo.cpp | 27 |
2 files changed, 69 insertions, 16 deletions
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp index 180110a84d..1e19eb0c74 100644 --- a/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -418,10 +418,8 @@ AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, default: llvm_unreachable("Unknown size for regclass"); } - } else { - assert((RC->hasType(MVT::f32) || RC->hasType(MVT::f64) || - RC->hasType(MVT::f128)) - && "Expected integer or floating type for store"); + } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) || + RC->hasType(MVT::f128)) { switch (RC->getSize()) { case 4: StoreOp = AArch64::LSFP32_STR; break; case 8: StoreOp = AArch64::LSFP64_STR; break; @@ -429,6 +427,22 @@ AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, default: llvm_unreachable("Unknown size for regclass"); } + } else { // The spill of D tuples is implemented by Q tuples + if (RC == &AArch64::QPairRegClass) + StoreOp = AArch64::ST1x2_16B; + else if (RC == &AArch64::QTripleRegClass) + StoreOp = AArch64::ST1x3_16B; + else if (RC == &AArch64::QQuadRegClass) + StoreOp = AArch64::ST1x4_16B; + else + llvm_unreachable("Unknown reg class"); + + MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp)); + // Vector store has different operands from other store instructions.
+ NewMI.addFrameIndex(FrameIdx) + .addReg(SrcReg, getKillRegState(isKill)) + .addMemOperand(MMO); + return; } MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(StoreOp)); @@ -464,10 +478,8 @@ AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, default: llvm_unreachable("Unknown size for regclass"); } - } else { - assert((RC->hasType(MVT::f32) || RC->hasType(MVT::f64) - || RC->hasType(MVT::f128)) - && "Expected integer or floating type for store"); + } else if (RC->hasType(MVT::f32) || RC->hasType(MVT::f64) || + RC->hasType(MVT::f128)) { switch (RC->getSize()) { case 4: LoadOp = AArch64::LSFP32_LDR; break; case 8: LoadOp = AArch64::LSFP64_LDR; break; @@ -475,6 +487,21 @@ AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, default: llvm_unreachable("Unknown size for regclass"); } + } else { // The spill of D tuples is implemented by Q tuples + if (RC == &AArch64::QPairRegClass) + LoadOp = AArch64::LD1x2_16B; + else if (RC == &AArch64::QTripleRegClass) + LoadOp = AArch64::LD1x3_16B; + else if (RC == &AArch64::QQuadRegClass) + LoadOp = AArch64::LD1x4_16B; + else + llvm_unreachable("Unknown reg class"); + + MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg); + // Vector load has different operands from other load instructions.
+ NewMI.addFrameIndex(FrameIdx) + .addMemOperand(MMO); + return; } MachineInstrBuilder NewMI = BuildMI(MBB, MBBI, DL, get(LoadOp), DestReg); @@ -572,6 +599,21 @@ void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI, MinOffset = -0x40 * AccessScale; MaxOffset = 0x3f * AccessScale; return; + case AArch64::LD1x2_16B: case AArch64::ST1x2_16B: + AccessScale = 32; + MinOffset = 0; + MaxOffset = 0xfff * AccessScale; + return; + case AArch64::LD1x3_16B: case AArch64::ST1x3_16B: + AccessScale = 48; + MinOffset = 0; + MaxOffset = 0xfff * AccessScale; + return; + case AArch64::LD1x4_16B: case AArch64::ST1x4_16B: + AccessScale = 64; + MinOffset = 0; + MaxOffset = 0xfff * AccessScale; + return; } } diff --git a/lib/Target/AArch64/AArch64RegisterInfo.cpp b/lib/Target/AArch64/AArch64RegisterInfo.cpp index 75ec44f3fe..618f6fb928 100644 --- a/lib/Target/AArch64/AArch64RegisterInfo.cpp +++ b/lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -76,6 +76,12 @@ AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const { return Reserved; } +static bool hasFrameOffset(int opcode) { + return opcode != AArch64::LD1x2_16B && opcode != AArch64::LD1x3_16B && + opcode != AArch64::LD1x4_16B && opcode != AArch64::ST1x2_16B && + opcode != AArch64::ST1x3_16B && opcode != AArch64::ST1x4_16B; +} + void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI, int SPAdj, @@ -110,8 +116,10 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI, int64_t Offset; Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj, IsCalleeSaveOp); - - Offset += MI.getOperand(FIOperandNum + 1).getImm(); + // A vector load/store instruction doesn't have an offset operand. + bool HasOffsetOp = hasFrameOffset(MI.getOpcode()); + if (HasOffsetOp) + Offset += MI.getOperand(FIOperandNum + 1).getImm(); // DBG_VALUE instructions have no real restrictions so they can be handled // easily.
@@ -124,7 +132,7 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI, const AArch64InstrInfo &TII = *static_cast<const AArch64InstrInfo*>(MF.getTarget().getInstrInfo()); int MinOffset, MaxOffset, OffsetScale; - if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s) { + if (MI.getOpcode() == AArch64::ADDxxi_lsl0_s || !HasOffsetOp) { MinOffset = 0; MaxOffset = 0xfff; OffsetScale = 1; @@ -133,10 +141,12 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI, TII.getAddressConstraints(MI, OffsetScale, MinOffset, MaxOffset); } - // The frame lowering has told us a base and offset it thinks we should use to - // access this variable, but it's still up to us to make sure the values are - // legal for the instruction in question. - if (Offset % OffsetScale != 0 || Offset < MinOffset || Offset > MaxOffset) { + // There are two situations we don't use frame + offset directly in the + // instruction: + // (1) The offset can't really be scaled + // (2) Can't encode offset as it doesn't have an offset operand + if ((Offset % OffsetScale != 0 || Offset < MinOffset || Offset > MaxOffset) || + (!HasOffsetOp && Offset != 0)) { unsigned BaseReg = MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass); emitRegUpdate(MBB, MBBI, MBBI->getDebugLoc(), TII, @@ -150,7 +160,8 @@ AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MBBI, assert(Offset >= 0 && "Unexpected negative offset from SP"); MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, true); - MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset / OffsetScale); + if (HasOffsetOp) + MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset / OffsetScale); } unsigned |