author    Dan Gohman <gohman@apple.com>    2009-07-30 01:56:29 +0000
committer Dan Gohman <gohman@apple.com>    2009-07-30 01:56:29 +0000
commit    a4714e025de720d0fcbaa78ab6c12dc789599233 (patch)
tree      5e129202d37b2e5937fff8d088a69f5691a749c0 /lib/Target/X86/X86RegisterInfo.td
parent    2395f011986e6c6277c71bddcd8af88f9b904fc2 (diff)
Add a new register class to describe operands that can't be SP, due to x86 encoding restrictions. This is currently off by default because it may cause code quality regressions. This is for PR4572.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@77565 91177308-0d34-0410-b5e6-96231b3b80d8
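For context, a minimal sketch of how such a class could be used from an instruction definition: typing an operand as GR32_NOSP (or GR64_NOSP) keeps the register allocator from ever assigning ESP/RSP to that operand, which is the kind of encoding restriction the message refers to (for example, an x86 SIB byte cannot encode ESP/RSP as an index register). The def below is purely illustrative and is not part of this patch; FOO32rr is a made-up opcode, and the I<> format class is assumed to come from X86InstrFormats.td.

// Hypothetical instruction whose register operands must never be ESP.
// Constraining the operands to GR32_NOSP enforces that at register-
// allocation time rather than relying on a post-RA fixup.
def FOO32rr : I<0x00, MRMDestReg,
                (outs GR32_NOSP:$dst), (ins GR32_NOSP:$src),
                "foo{l}\t{$src, $dst|$dst, $src}", []>;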
Diffstat (limited to 'lib/Target/X86/X86RegisterInfo.td')
-rw-r--r--  lib/Target/X86/X86RegisterInfo.td  105
1 file changed, 103 insertions(+), 2 deletions(-)
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index bfffee7b72..d2197b2415 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -416,7 +416,9 @@ def GR32 : RegisterClass<"X86", [i32], 32,
}];
}
-
+// GR64 - 64-bit GPRs. This oddly includes RIP, which isn't accurate, since
+// RIP isn't really a register and it can't be used anywhere except in an
+// address, but it doesn't cause trouble.
def GR64 : RegisterClass<"X86", [i64], 64,
[RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP]> {
@@ -561,7 +563,7 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64,
GR64_NOREXClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
- // Does the function dedicate RBP / EBP to being a frame ptr?
+ // Does the function dedicate RBP to being a frame ptr?
if (RI->hasFP(MF))
// If so, don't allocate RSP or RBP.
return end() - 2;
@@ -572,6 +574,105 @@ def GR64_NOREX : RegisterClass<"X86", [i64], 64,
}];
}
+// GR32_NOSP - GR32 registers except ESP.
+def GR32_NOSP : RegisterClass<"X86", [i32], 32,
+ [EAX, ECX, EDX, ESI, EDI, EBX, EBP,
+ R8D, R9D, R10D, R11D, R14D, R15D, R12D, R13D]> {
+ let SubRegClassList = [GR8, GR8, GR16];
+ let MethodProtos = [{
+ iterator allocation_order_begin(const MachineFunction &MF) const;
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ static const unsigned X86_GR32_NOSP_AO_64[] = {
+ X86::EAX, X86::ECX, X86::EDX, X86::ESI, X86::EDI,
+ X86::R8D, X86::R9D, X86::R10D, X86::R11D,
+ X86::EBX, X86::R14D, X86::R15D, X86::R12D, X86::R13D, X86::EBP
+ };
+
+ GR32_NOSPClass::iterator
+ GR32_NOSPClass::allocation_order_begin(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ if (Subtarget.is64Bit())
+ return X86_GR32_NOSP_AO_64;
+ else
+ return begin();
+ }
+
+ GR32_NOSPClass::iterator
+ GR32_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ if (Subtarget.is64Bit()) {
+ // Does the function dedicate RBP to being a frame ptr?
+ if (RI->hasFP(MF))
+ // If so, don't allocate EBP.
+ return array_endof(X86_GR32_NOSP_AO_64) - 1;
+ else
+ // If not, any reg in this class is ok.
+ return array_endof(X86_GR32_NOSP_AO_64);
+ } else {
+ // Does the function dedicate EBP to being a frame ptr?
+ if (RI->hasFP(MF))
+ // If so, don't allocate EBP.
+ return begin() + 6;
+ else
+ // If not, any reg in this class is ok.
+ return begin() + 7;
+ }
+ }
+ }];
+}
+
+// GR64_NOSP - GR64 registers except RSP (and RIP).
+def GR64_NOSP : RegisterClass<"X86", [i64], 64,
+ [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ RBX, R14, R15, R12, R13, RBP]> {
+ let SubRegClassList = [GR8, GR8, GR16, GR32_NOSP];
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ GR64_NOSPClass::iterator
+ GR64_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
+ if (!Subtarget.is64Bit())
+ return begin(); // None of these are allocatable in 32-bit.
+ if (RI->hasFP(MF)) // Does the function dedicate RBP to being a frame ptr?
+ return end()-1; // If so, don't allocate RBP
+ else
+ return end(); // If not, any reg in this class is ok.
+ }
+ }];
+}
+
+// GR64_NOREX_NOSP - GR64_NOREX registers except RSP.
+def GR64_NOREX_NOSP : RegisterClass<"X86", [i64], 64,
+ [RAX, RCX, RDX, RSI, RDI, RBX, RBP]> {
+ let SubRegClassList = [GR8_NOREX, GR8_NOREX, GR16_NOREX, GR32_NOREX];
+ let MethodProtos = [{
+ iterator allocation_order_end(const MachineFunction &MF) const;
+ }];
+ let MethodBodies = [{
+ GR64_NOREX_NOSPClass::iterator
+ GR64_NOREX_NOSPClass::allocation_order_end(const MachineFunction &MF) const {
+ const TargetMachine &TM = MF.getTarget();
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ // Does the function dedicate RBP to being a frame ptr?
+ if (RI->hasFP(MF))
+ // If so, don't allocate RBP.
+ return end() - 1;
+ else
+ // If not, any reg in this class is ok.
+ return end();
+ }
+ }];
+}
+
// A class to support the 'A' assembler constraint: EAX then EDX.
def GRAD : RegisterClass<"X86", [i32], 32, [EAX, EDX]>;