author:    Matt Arsenault <Matthew.Arsenault@amd.com>  2014-03-24 20:08:13 +0000
committer: Matt Arsenault <Matthew.Arsenault@amd.com>  2014-03-24 20:08:13 +0000
commit:    add2e2ec8fcb21cc8a51387b2095fd1b4abc2f50
tree:      431987fb01a2f08b3695eac4c484e756bd78a6f3
parent:    3a96e61469fd80bbb2c5bcf2b4dcee89e3a68ab3
R600/SI: Fix extra mov from legalizing 64-bit SALU ops.
Check the register class of each operand individually to avoid an extra copy to a VGPR.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@204662 91177308-0d34-0410-b5e6-96231b3b80d8
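The heart of the change, pulled out of the diff below as a standalone sketch: each source operand gets its own register-class lookup, and an immediate operand, which has no register class, is treated as SGPR_32-compatible instead of being forced through a VGPR. The helper name here is hypothetical (the committed code inlines this logic for Src0 and Src1 separately), and AMDGPU::SGPR_32RegClass assumes the target's generated register definitions are in scope.

```cpp
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Hypothetical helper illustrating the per-operand lookup this patch
// introduces. An immediate has no register class, so fall back to
// SGPR_32; that lets a 64-bit SALU op be split into two 32-bit halves
// without first copying the constant into a VGPR.
static const TargetRegisterClass *
operandRegClass(const MachineRegisterInfo &MRI, const MachineOperand &MO) {
  return MO.isReg() ? MRI.getRegClass(MO.getReg())
                    : &AMDGPU::SGPR_32RegClass;
}
```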
 lib/Target/R600/SIInstrInfo.cpp | 40 ++++++++++++++++++++++++++++++--------------
 test/CodeGen/R600/or.ll         | 10 +++++-----
 2 files changed, 31 insertions(+), 19 deletions(-)
diff --git a/lib/Target/R600/SIInstrInfo.cpp b/lib/Target/R600/SIInstrInfo.cpp
index eb5172c896..336f6aa566 100644
--- a/lib/Target/R600/SIInstrInfo.cpp
+++ b/lib/Target/R600/SIInstrInfo.cpp
@@ -1028,29 +1028,41 @@ void SIInstrInfo::splitScalar64BitOp(SmallVectorImpl<MachineInstr *> &Worklist,
   MachineBasicBlock::iterator MII = Inst;
   const MCInstrDesc &InstDesc = get(Opcode);
-  const TargetRegisterClass *RC = MRI.getRegClass(Src0.getReg());
-  const TargetRegisterClass *SubRC = RI.getSubRegClass(RC, AMDGPU::sub0);
-  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, RC,
-                                                       AMDGPU::sub0, SubRC);
-  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, RC,
-                                                       AMDGPU::sub0, SubRC);
-
-  unsigned DestSub0 = MRI.createVirtualRegister(SubRC);
+  const TargetRegisterClass *Src0RC = Src0.isReg() ?
+    MRI.getRegClass(Src0.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
+
+  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
+  const TargetRegisterClass *Src1RC = Src1.isReg() ?
+    MRI.getRegClass(Src1.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
+
+  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
+
+  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+                                                       AMDGPU::sub0, Src0SubRC);
+  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+                                                       AMDGPU::sub0, Src1SubRC);
+
+  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
+  const TargetRegisterClass *DestSubRC = RI.getSubRegClass(DestRC, AMDGPU::sub0);
+
+  unsigned DestSub0 = MRI.createVirtualRegister(DestRC);
   MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
     .addOperand(SrcReg0Sub0)
     .addOperand(SrcReg1Sub0);
-  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, RC,
-                                                       AMDGPU::sub1, SubRC);
-  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, RC,
-                                                       AMDGPU::sub1, SubRC);
+  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
+                                                       AMDGPU::sub1, Src0SubRC);
+  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
+                                                       AMDGPU::sub1, Src1SubRC);
-  unsigned DestSub1 = MRI.createVirtualRegister(SubRC);
+  unsigned DestSub1 = MRI.createVirtualRegister(DestSubRC);
   MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
     .addOperand(SrcReg0Sub1)
     .addOperand(SrcReg1Sub1);
-  unsigned FullDestReg = MRI.createVirtualRegister(RC);
+  unsigned FullDestReg = MRI.createVirtualRegister(DestRC);
   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
     .addReg(DestSub0)
     .addImm(AMDGPU::sub0)
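The hunk above is cut off mid-statement. Assuming the REG_SEQUENCE build finishes with the usual idiom (a sketch, not the verbatim committed lines), the two 32-bit halves are glued back into one 64-bit register like so:

```cpp
// Assumed completion of the truncated REG_SEQUENCE above: attach the
// high half as well, so FullDestReg holds {DestSub0, DestSub1} as one
// 64-bit virtual register of the destination's own class (DestRC).
BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)   // low 32 bits
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);  // high 32 bits
```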
diff --git a/test/CodeGen/R600/or.ll b/test/CodeGen/R600/or.ll
index 8e985c75cb..be984b2712 100644
--- a/test/CodeGen/R600/or.ll
+++ b/test/CodeGen/R600/or.ll
@@ -89,11 +89,11 @@ define void @scalar_vector_or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %a,
 }
 
 ; SI-LABEL: @vector_or_i64_loadimm
-; SI-DAG: S_MOV_B32
-; SI-DAG: S_MOV_B32
-; SI-DAG: BUFFER_LOAD_DWORDX2
-; SI: V_OR_B32_e32
-; SI: V_OR_B32_e32
+; SI-DAG: S_MOV_B32 [[LO_S_IMM:s[0-9]+]], -545810305
+; SI-DAG: S_MOV_B32 [[HI_S_IMM:s[0-9]+]], 5231
+; SI-DAG: BUFFER_LOAD_DWORDX2 v{{\[}}[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]{{\]}},
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[LO_S_IMM]], v[[LO_VREG]]
+; SI-DAG: V_OR_B32_e32 {{v[0-9]+}}, [[HI_S_IMM]], v[[HI_VREG]]
 ; SI: S_ENDPGM
 define void @vector_or_i64_loadimm(i64 addrspace(1)* %out, i64 addrspace(1)* %a, i64 addrspace(1)* %b) {
   %loada = load i64 addrspace(1)* %a, align 8
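As a sanity check on the new CHECK lines: the two S_MOV_B32 immediates are the low and high halves of a single 64-bit constant, presumably the literal ORed in by the test body (which is cut off above). A quick standalone sketch verifying the split:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Values taken from the S_MOV_B32 CHECK lines above.
  uint32_t Lo = static_cast<uint32_t>(-545810305); // bit pattern 0xDF77987F
  uint32_t Hi = 5231;                              // 0x0000146F
  uint64_t Full = (static_cast<uint64_t>(Hi) << 32) | Lo;
  // Prints 0x0000146fdf77987f (= 22470723082367).
  std::printf("0x%016llx\n", static_cast<unsigned long long>(Full));
  return 0;
}
```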