author     Bill Wendling <isanbard@gmail.com>   2012-12-30 10:32:01 +0000
committer  Bill Wendling <isanbard@gmail.com>   2012-12-30 10:32:01 +0000
commit     831737d329a727f53a1fb0572f7b7a8127208881 (patch)
tree       38691f7c03365912f11d41ee01ceaafd5aae99a6 /lib/Target
parent     377660355ccdde389eb4f473bbb24125f160ea23 (diff)
Remove the Function::getFnAttributes method in favor of using the
AttributeSet directly.

This is in preparation for removing the use of the 'Attribute' class as a
collection of attributes. That will shift to the AttributeSet class instead.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@171253 91177308-0d34-0410-b5e6-96231b3b80d8
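Every hunk below applies the same mechanical rewrite, so the pattern is worth
stating once. A minimal sketch, not taken verbatim from the patch: the helper
name is hypothetical, and the includes assume the pre-lib/IR header layout of
this era of the tree.

    #include "llvm/Attributes.h"  // Attribute, AttributeSet (assumed location)
    #include "llvm/Function.h"
    using namespace llvm;

    static bool optimizeForSize(const Function *F) {
      // Before this commit, callers went through the Attribute-as-collection
      // interface:
      //   return F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize);
      // After it, they query the AttributeSet directly, naming the function
      // attribute slot explicitly:
      return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                             Attribute::OptimizeForSize);
    }

The same substitution covers every attribute touched below (OptimizeForSize,
MinSize, NoImplicitFloat, NoRedZone, Naked, StackAlignment, NonLazyBind).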
Diffstat (limited to 'lib/Target')
-rw-r--r--  lib/Target/ARM/ARMBaseInstrInfo.cpp       5
-rw-r--r--  lib/Target/ARM/ARMBaseRegisterInfo.cpp    3
-rw-r--r--  lib/Target/ARM/ARMFrameLowering.cpp       3
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp       12
-rw-r--r--  lib/Target/ARM/Thumb2SizeReduction.cpp    8
-rw-r--r--  lib/Target/PowerPC/PPCFrameLowering.cpp   7
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp    4
-rw-r--r--  lib/Target/PowerPC/PPCRegisterInfo.cpp    3
-rw-r--r--  lib/Target/X86/X86FrameLowering.cpp       3
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp        4
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp       25
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp           8
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp        3
13 files changed, 51 insertions, 37 deletions
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index a12e333fdc..309dfded69 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -3329,8 +3329,9 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
// instructions).
if (Latency > 0 && Subtarget.isThumb2()) {
const MachineFunction *MF = DefMI->getParent()->getParent();
- if (MF->getFunction()->getFnAttributes().
- hasAttribute(Attribute::OptimizeForSize))
+ if (MF->getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize))
--Latency;
}
return Latency;
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 42742bf3b6..7d712bec3a 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -326,7 +326,8 @@ needsStackRealignment(const MachineFunction &MF) const {
unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
bool requiresRealignment =
((MFI->getMaxAlignment() > StackAlign) ||
- F->getFnAttributes().hasAttribute(Attribute::StackAlignment));
+ F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::StackAlignment));
return requiresRealignment && canRealignStack(MF);
}
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index a6a32f2845..82598f1d0f 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1152,7 +1152,8 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
return;
// Naked functions don't spill callee-saved registers.
- if (MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+ if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::Naked))
return;
// We are planning to use NEON instructions vst1 / vld1.
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 8034ce1604..790bb385f2 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1616,8 +1616,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// FIXME: handle tail calls differently.
unsigned CallOpc;
- bool HasMinSizeAttr = MF.getFunction()->getFnAttributes().
- hasAttribute(Attribute::MinSize);
+ bool HasMinSizeAttr = MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
if (Subtarget->isThumb()) {
if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
CallOpc = ARMISD::CALL_NOLINK;
@@ -6689,8 +6689,9 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
UnitSize = 2;
} else {
// Check whether we can use NEON instructions.
- if (!MF->getFunction()->getFnAttributes().
- hasAttribute(Attribute::NoImplicitFloat) &&
+ if (!MF->getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoImplicitFloat) &&
Subtarget->hasNEON()) {
if ((Align % 16 == 0) && SizeVal >= 16) {
ldrOpc = ARM::VLD1q32wb_fixed;
@@ -9458,7 +9459,8 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
// See if we can use NEON instructions for this...
if ((!IsMemset || ZeroMemset) &&
Subtarget->hasNEON() &&
- !F->getFnAttributes().hasAttribute(Attribute::NoImplicitFloat)) {
+ !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoImplicitFloat)) {
bool Fast;
if (Size >= 16 &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp
index a4f0847450..4a7eb4010b 100644
--- a/lib/Target/ARM/Thumb2SizeReduction.cpp
+++ b/lib/Target/ARM/Thumb2SizeReduction.cpp
@@ -961,9 +961,11 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
STI = &TM.getSubtarget<ARMSubtarget>();
// Optimizing / minimizing size?
- Attribute FnAttrs = MF.getFunction()->getFnAttributes();
- OptimizeSize = FnAttrs.hasAttribute(Attribute::OptimizeForSize);
- MinimizeSize = FnAttrs.hasAttribute(Attribute::MinSize);
+ AttributeSet FnAttrs = MF.getFunction()->getAttributes();
+ OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::OptimizeForSize);
+ MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::MinSize);
bool Modified = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 1f54fcad3a..3d1917c557 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -198,8 +198,8 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
// to adjust the stack pointer (we fit in the Red Zone). For 64-bit
// SVR4, we also require a stack frame if we need to spill the CR,
// since this spill area is addressed relative to the stack pointer.
- bool DisableRedZone = MF.getFunction()->getFnAttributes().
- hasAttribute(Attribute::NoRedZone);
+ bool DisableRedZone = MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::NoRedZone);
// FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can
// still generate stackless code if all local vars are reg-allocated.
// Try: (FrameSize <= 224
@@ -261,7 +261,8 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
// Naked functions have no stack frame pushed, so we don't have a frame
// pointer.
- if (MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+ if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::Naked))
return false;
return MF.getTarget().Options.DisableFramePointerElim(MF) ||
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index b14c181249..0a7e5633c8 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6822,8 +6822,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
MFI->hasVarSizedObjects()) &&
MFI->getStackSize() &&
- !MF.getFunction()->getFnAttributes().
- hasAttribute(Attribute::Naked);
+ !MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked);
unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
(is31 ? PPC::R31 : PPC::R1);
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp
index 6c8b1edf4b..a95d04994d 100644
--- a/lib/Target/PowerPC/PPCRegisterInfo.cpp
+++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp
@@ -597,7 +597,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
// to Offset to get the correct offset.
// Naked functions have stack size 0, although getStackSize may not reflect that
// because we didn't call all the pieces that compute it for naked functions.
- if (!MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked))
+ if (!MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked))
Offset += MFI->getStackSize();
// If we can, encode the offset directly into the instruction. If this is a
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 6345487751..f70be1d2c1 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -691,7 +691,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
// pointer, calls, or dynamic alloca then we do not need to adjust the
// stack pointer (we fit in the Red Zone). We also check that we don't
// push and pop from the stack.
- if (Is64Bit && !Fn->getFnAttributes().hasAttribute(Attribute::NoRedZone) &&
+ if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoRedZone) &&
!RegInfo->needsStackRealignment(MF) &&
!MFI->hasVarSizedObjects() && // No dynamic alloca.
!MFI->adjustsStack() && // No calls.
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index acb89c11fc..c9918b900d 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -431,8 +431,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
void X86DAGToDAGISel::PreprocessISelDAG() {
// OptForSize is used in pattern predicates that isel is matching.
- OptForSize = MF->getFunction()->getFnAttributes().
- hasAttribute(Attribute::OptimizeForSize);
+ OptForSize = MF->getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index d279d2da52..0900603092 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1389,7 +1389,8 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
MachineFunction &MF) const {
const Function *F = MF.getFunction();
if ((!IsMemset || ZeroMemset) &&
- !F->getFnAttributes().hasAttribute(Attribute::NoImplicitFloat)) {
+ !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(Subtarget->isUnalignedMemAccessFast() ||
((DstAlign == 0 || DstAlign >= 16) &&
@@ -2068,8 +2069,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
TotalNumIntRegs);
- bool NoImplicitFloatOps = Fn->getFnAttributes().
- hasAttribute(Attribute::NoImplicitFloat);
+ bool NoImplicitFloatOps = Fn->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
@@ -2547,8 +2548,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
OpFlags = X86II::MO_DARWIN_STUB;
} else if (Subtarget->isPICStyleRIPRel() &&
isa<Function>(GV) &&
- cast<Function>(GV)->getFnAttributes().
- hasAttribute(Attribute::NonLazyBind)) {
+ cast<Function>(GV)->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NonLazyBind)) {
// If the function is marked as non-lazy, generate an indirect call
// which loads from the GOT directly. This avoids runtime overhead
// at the cost of eager binding (and one extra byte of encoding).
@@ -6734,8 +6736,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
bool HasFp256 = Subtarget->hasFp256();
bool HasInt256 = Subtarget->hasInt256();
MachineFunction &MF = DAG.getMachineFunction();
- bool OptForSize = MF.getFunction()->getFnAttributes().
- hasAttribute(Attribute::OptimizeForSize);
+ bool OptForSize = MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
@@ -10103,8 +10105,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!getTargetMachine().Options.UseSoftFloat &&
!(DAG.getMachineFunction()
- .getFunction()->getFnAttributes()
- .hasAttribute(Attribute::NoImplicitFloat)) &&
+ .getFunction()->getAttributes()
+ .hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoImplicitFloat)) &&
Subtarget->hasSSE1());
}
@@ -16574,8 +16577,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
const Function *F = DAG.getMachineFunction().getFunction();
- bool NoImplicitFloatOps = F->getFnAttributes().
- hasAttribute(Attribute::NoImplicitFloat);
+ bool NoImplicitFloatOps = F->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
&& Subtarget->hasSSE2();
if ((VT.isVector() ||
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index a7424096a8..16c6b3d73c 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -3864,8 +3864,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->getFnAttributes().
- hasAttribute(Attribute::OptimizeForSize) &&
+ if (!MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return 0;
@@ -3906,8 +3906,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Unless optimizing for size, don't fold to avoid partial
// register update stalls
- if (!MF.getFunction()->getFnAttributes().
- hasAttribute(Attribute::OptimizeForSize) &&
+ if (!MF.getFunction()->getAttributes().
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
hasPartialRegUpdate(MI->getOpcode()))
return 0;
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 6b36101a12..07c0d148aa 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -419,7 +419,8 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
bool requiresRealignment =
((MFI->getMaxAlignment() > StackAlign) ||
- F->getFnAttributes().hasAttribute(Attribute::StackAlignment));
+ F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::StackAlignment));
// If we've requested that we force align the stack do so now.
if (ForceStackAlign)