author    Tom Stellard <thomas.stellard@amd.com>    2013-08-06 23:08:28 +0000
committer Tom Stellard <thomas.stellard@amd.com>    2013-08-06 23:08:28 +0000
commit    3492eefa4b2509c87598678a6977074a3f6a50e6 (patch)
tree      6e479880fecb17ad357750d49759395f7cf2dd0a
parent    3406d882c02a6cd1e16f4636351c23dcb68d785f (diff)
R600/SI: Use VSrc_* register classes as the default classes for types
Since the VSrc_* register classes contain both VGPRs and SGPRs, copies that
used to be emitted by isel like this:

SGPR = COPY VGPR

will now be emitted like this:

VSrc = COPY VGPR

This patch also adds a pass that tries to identify and fix situations where
a VGPR to SGPR copy may occur. Hopefully, these changes will make it
impossible for the compiler to generate illegal VGPR to SGPR copies.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@187831 91177308-0d34-0410-b5e6-96231b3b80d8
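As a quick illustration of the idea (an editor's sketch, not LLVM code:
RegClass, commonSubClass, and the class values below are illustrative
stand-ins loosely modeled on TargetRegisterInfo::getCommonSubClass),
register classes behave like sets, VSrc is the union of the SGPR and VGPR
classes, and a register's class is narrowed by intersecting it with whatever
its users require:

    #include <iostream>
    #include <set>
    #include <string>

    // A register class modeled as the set of register "banks" it may use.
    using RegClass = std::set<std::string>;

    // Intersection of two classes, loosely mirroring getCommonSubClass().
    RegClass commonSubClass(const RegClass &A, const RegClass &B) {
      RegClass R;
      for (const std::string &Bank : A)
        if (B.count(Bank))
          R.insert(Bank);
      return R;
    }

    int main() {
      const RegClass VGPR = {"vgpr"};
      const RegClass VSrc = {"sgpr", "vgpr"}; // the union class isel now uses

      // A <vsrc> PHI feeding a vector instruction must be narrowed to VGPR;
      // otherwise the coalescer may leave an SGPR def behind and produce an
      // illegal VGPR -> SGPR copy.
      RegClass Phi = commonSubClass(VSrc, VGPR);
      std::cout << (Phi == VGPR ? "constrained to <vgpr>\n" : "unchanged\n");
    }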
-rw-r--r--  lib/Target/R600/AMDGPU.h                |   1
-rw-r--r--  lib/Target/R600/AMDGPUISelDAGToDAG.cpp  |  24
-rw-r--r--  lib/Target/R600/AMDGPUTargetMachine.cpp |   2
-rw-r--r--  lib/Target/R600/SIFixSGPRCopies.cpp     | 152
-rw-r--r--  lib/Target/R600/SIISelLowering.cpp      |  28
-rw-r--r--  test/CodeGen/R600/sgpr-copy.ll          |  84
6 files changed, 247 insertions, 44 deletions
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index 51d0d3c072..6b374cbfff 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -36,6 +36,7 @@ FunctionPass *createAMDGPUCFGStructurizerPass(TargetMachine &tm);
// SI Passes
FunctionPass *createSIAnnotateControlFlowPass();
FunctionPass *createSILowerControlFlowPass(TargetMachine &tm);
+FunctionPass *createSIFixSGPRCopiesPass(TargetMachine &tm);
FunctionPass *createSICodeEmitterPass(formatted_raw_ostream &OS);
FunctionPass *createSIInsertWaits(TargetMachine &tm);
diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 38a5f24178..f222901b8f 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -302,7 +302,7 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
} else if (N->getValueType(0) == MVT::i64) {
- RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
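+ // Build i64 values in the VGPR/SGPR union class rather than forcing them
+ // into SGPRs; see the SIFixSGPRCopies pass added by this patch.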
+ RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
} else {
@@ -816,28 +816,6 @@ void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
E = CurDAG->allnodes_end(); I != E; ++I) {
SDNode *Node = I;
- switch (Node->getOpcode()) {
- // Fix the register class in copy to CopyToReg nodes - ISel will always
- // use SReg classes for 64-bit copies, but this is not always what we want.
- case ISD::CopyToReg: {
- unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
- SDValue Val = Node->getOperand(2);
- const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
- if (RC != &AMDGPU::SReg_64RegClass) {
- continue;
- }
-
- if (!Val.getNode()->isMachineOpcode() ||
- Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
- continue;
- }
-
- const MCInstrDesc Desc = TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
- const TargetRegisterInfo *TRI = TM.getRegisterInfo();
- RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
- continue;
- }
- }
MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
if (!MachineNode)
diff --git a/lib/Target/R600/AMDGPUTargetMachine.cpp b/lib/Target/R600/AMDGPUTargetMachine.cpp
index 1a304962e1..5ebc5f27dc 100644
--- a/lib/Target/R600/AMDGPUTargetMachine.cpp
+++ b/lib/Target/R600/AMDGPUTargetMachine.cpp
@@ -146,6 +146,8 @@ bool AMDGPUPassConfig::addPreRegAlloc() {
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
addPass(createR600VectorRegMerger(*TM));
+ } else {
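+ // Generations after Northern Islands (i.e. SI) use the unified VSrc
+ // register classes, so run the SGPR copy fixup before register allocation.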
+ addPass(createSIFixSGPRCopiesPass(*TM));
}
return false;
}
diff --git a/lib/Target/R600/SIFixSGPRCopies.cpp b/lib/Target/R600/SIFixSGPRCopies.cpp
new file mode 100644
index 0000000000..435172a08e
--- /dev/null
+++ b/lib/Target/R600/SIFixSGPRCopies.cpp
@@ -0,0 +1,152 @@
+//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// Copies from VGPR to SGPR registers are illegal and the register coalescer
+/// will sometimes generate these illegal copies in situations like this:
+///
+/// Register Class <vsrc> is the union of <vgpr> and <sgpr>
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// %vreg1 <vsrc> = COPY %vreg0 <sgpr>
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
+///
+///
+/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
+/// code will look like this:
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <vsrc> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now that the result of the PHI instruction is an SGPR, the register
+/// allocator is forced to constrain the register class of %vreg3 to
+/// <sgpr>, so we end up with final code like this:
+///
+/// BB0:
+/// %vreg0 <sgpr> = SCALAR_INST
+/// ...
+/// BRANCH %cond BB1, BB2
+/// BB1:
+/// %vreg2 <vgpr> = VECTOR_INST
+/// %vreg3 <sgpr> = COPY %vreg2 <vgpr>
+/// BB2:
+/// %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
+/// %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
+///
+/// Now this code contains an illegal copy from a VGPR to an SGPR.
+///
+/// In order to avoid this problem, this pass searches for PHI instructions
+/// that define a <vsrc> register and constrains their definition class to
+/// <vgpr> when a user of the PHI's result is a vector instruction. With the
+/// PHI's definition class constrained to <vgpr>, the coalescer can no longer
+/// perform the COPY removal from the example above, which is what ultimately
+/// led to the creation of the illegal COPY.
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "SIInstrInfo.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+class SIFixSGPRCopies : public MachineFunctionPass {
+
+private:
+ static char ID;
+ const TargetRegisterClass *inferRegClass(const TargetRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg) const;
+
+public:
+ SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ const char *getPassName() const {
+ return "SI Fix SGPR copies";
+ }
+
+};
+
+} // End anonymous namespace
+
+char SIFixSGPRCopies::ID = 0;
+
+FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
+ return new SIFixSGPRCopies(tm);
+}
+
+/// This function walks the use/def chains starting with the definition of
+/// \p Reg until it finds an instruction that isn't a COPY, and returns the
+/// register class of that instruction.
+const TargetRegisterClass *SIFixSGPRCopies::inferRegClass(
+ const TargetRegisterInfo *TRI,
+ const MachineRegisterInfo &MRI,
+ unsigned Reg) const {
+ // The Reg parameter to the function must always be defined by either a PHI
+ // or a COPY, therefore it cannot be a physical register.
+ assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
+ "Reg cannot be a physical register");
+
+ const TargetRegisterClass *RC = MRI.getRegClass(Reg);
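+ // Walk every user of Reg. Each COPY forwards the value into another
+ // virtual register, so narrow RC to the common subclass of RC and the
+ // class inferred for that copy's destination; other users are ignored.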
+ for (MachineRegisterInfo::use_iterator I = MRI.use_begin(Reg),
+ E = MRI.use_end(); I != E; ++I) {
+ switch (I->getOpcode()) {
+ case AMDGPU::COPY:
+ RC = TRI->getCommonSubClass(RC, inferRegClass(TRI, MRI,
+ I->getOperand(0).getReg()));
+ break;
+ }
+ }
+
+ return RC;
+}
+
+bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
+ BI != BE; ++BI) {
+
+ MachineBasicBlock &MBB = *BI;
+ for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
+ I != E; ++I) {
+ MachineInstr &MI = *I;
+ if (MI.getOpcode() != AMDGPU::PHI) {
+ continue;
+ }
+ unsigned Reg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = inferRegClass(TRI, MRI, Reg);
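+ // Only PHIs whose inferred class is still the 32-bit VGPR/SGPR union
+ // are rewritten; pinning them to VGPRs keeps the coalescer from
+ // propagating an SGPR definition into the PHI.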
+ if (RC == &AMDGPU::VSrc_32RegClass) {
+ MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
+ }
+ }
+ }
+ return false;
+}
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index a53e0b9ae3..c64027f7de 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -32,7 +32,7 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
AMDGPUTargetLowering(TM) {
addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
- addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);
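+ // Default to the VGPR/SGPR union classes for scalar and small vector
+ // types; the new SIFixSGPRCopies pass narrows them where necessary.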
+ addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass);
addRegisterClass(MVT::v2i1, &AMDGPU::VReg_64RegClass);
addRegisterClass(MVT::v4i1, &AMDGPU::VReg_128RegClass);
@@ -41,14 +41,14 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);
- addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
- addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass);
+ addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass);
- addRegisterClass(MVT::v1i32, &AMDGPU::VReg_32RegClass);
+ addRegisterClass(MVT::v1i32, &AMDGPU::VSrc_32RegClass);
- addRegisterClass(MVT::v2i32, &AMDGPU::VReg_64RegClass);
- addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);
- addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
+ addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass);
+ addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass);
addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass);
addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
@@ -1042,20 +1042,6 @@ MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
switch (N->getMachineOpcode()) {
default: return N;
- case AMDGPU::REG_SEQUENCE: {
- // MVT::i128 only use SGPRs, so i128 REG_SEQUENCEs don't need to be
- // rewritten.
- if (N->getValueType(0) == MVT::i128) {
- return N;
- }
- const SDValue Ops[] = {
- DAG.getTargetConstant(AMDGPU::VReg_64RegClassID, MVT::i32),
- N->getOperand(1) , N->getOperand(2),
- N->getOperand(3), N->getOperand(4)
- };
- return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::i64, Ops);
- }
-
case AMDGPU::S_LOAD_DWORD_IMM:
NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
// Fall-through
diff --git a/test/CodeGen/R600/sgpr-copy.ll b/test/CodeGen/R600/sgpr-copy.ll
new file mode 100644
index 0000000000..b0d4549300
--- /dev/null
+++ b/test/CodeGen/R600/sgpr-copy.ll
@@ -0,0 +1,84 @@
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s
+
+; This test checks that no VGPR to SGPR copies are created by the register
+; allocator.
+; CHECK: @main
+; CHECK: S_BUFFER_LOAD_DWORD [[DST:SGPR[0-9]]], {{SGPR[0-9_]+}}, 0
+; CHECK: V_MOV_B32_e32 VGPR{{[0-9]}}, [[DST]]
+
+define void @main(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20, !tbaa !0
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
+ %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 16)
+ %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 32)
+ %25 = fptosi float %23 to i32
+ %26 = icmp ne i32 %25, 0
+ br i1 %26, label %ENDIF, label %ELSE
+
+ELSE: ; preds = %main_body
+ %27 = fsub float -0.000000e+00, %22
+ br label %ENDIF
+
+ENDIF: ; preds = %main_body, %ELSE
+ %temp.0 = phi float [ %27, %ELSE ], [ %22, %main_body ]
+ %28 = fadd float %temp.0, %24
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %28, float %28, float 0.000000e+00, float 1.000000e+00)
+ ret void
+}
+
+; We just want to make sure the program doesn't crash
+; CHECK: @loop
+
+define void @loop(<16 x i8> addrspace(2)* inreg, <16 x i8> addrspace(2)* inreg, <32 x i8> addrspace(2)* inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #0 {
+main_body:
+ %20 = getelementptr <16 x i8> addrspace(2)* %0, i32 0
+ %21 = load <16 x i8> addrspace(2)* %20, !tbaa !0
+ %22 = call float @llvm.SI.load.const(<16 x i8> %21, i32 0)
+ %23 = call float @llvm.SI.load.const(<16 x i8> %21, i32 4)
+ %24 = call float @llvm.SI.load.const(<16 x i8> %21, i32 8)
+ %25 = call float @llvm.SI.load.const(<16 x i8> %21, i32 12)
+ %26 = fptosi float %25 to i32
+ %27 = bitcast i32 %26 to float
+ %28 = bitcast float %27 to i32
+ br label %LOOP
+
+LOOP: ; preds = %ENDIF, %main_body
+ %temp4.0 = phi float [ %22, %main_body ], [ %temp5.0, %ENDIF ]
+ %temp5.0 = phi float [ %23, %main_body ], [ %temp6.0, %ENDIF ]
+ %temp6.0 = phi float [ %24, %main_body ], [ %temp4.0, %ENDIF ]
+ %temp8.0 = phi float [ 0.000000e+00, %main_body ], [ %37, %ENDIF ]
+ %29 = bitcast float %temp8.0 to i32
+ %30 = icmp sge i32 %29, %28
+ %31 = sext i1 %30 to i32
+ %32 = bitcast i32 %31 to float
+ %33 = bitcast float %32 to i32
+ %34 = icmp ne i32 %33, 0
+ br i1 %34, label %IF, label %ENDIF
+
+IF: ; preds = %LOOP
+ call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 0, float %temp4.0, float %temp5.0, float %temp6.0, float 1.000000e+00)
+ ret void
+
+ENDIF: ; preds = %LOOP
+ %35 = bitcast float %temp8.0 to i32
+ %36 = add i32 %35, 1
+ %37 = bitcast i32 %36 to float
+ br label %LOOP
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+
+; Function Attrs: readonly
+declare float @fabs(float) #2
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+attributes #0 = { "ShaderType"="0" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { readonly }
+
+!0 = metadata !{metadata !"const", null, i32 1}
+
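To run the new test by hand (assuming an llc built from this revision with
the R600 target and FileCheck on PATH; this simply expands the %s
placeholders in the test's own RUN line):

    llc < test/CodeGen/R600/sgpr-copy.ll -march=r600 -mcpu=SI \
      | FileCheck test/CodeGen/R600/sgpr-copy.ll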