Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/Analysis.cpp | 16
-rw-r--r--  lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 4
-rw-r--r--  lib/CodeGen/ELFWriter.cpp | 4
-rw-r--r--  lib/CodeGen/IntrinsicLowering.cpp | 10
-rw-r--r--  lib/CodeGen/MachineFunction.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6
-rw-r--r--  lib/CodeGen/SelectionDAG/FastISel.cpp | 8
-rw-r--r--  lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 6
-rw-r--r--  lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 28
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 6
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 6
-rw-r--r--  lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 18
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 68
-rw-r--r--  lib/CodeGen/SelectionDAG/TargetLowering.cpp | 14
-rw-r--r--  lib/CodeGen/ShadowStackGC.cpp | 12
-rw-r--r--  lib/CodeGen/SjLjEHPrepare.cpp | 8
-rw-r--r--  lib/CodeGen/StackProtector.cpp | 4
19 files changed, 112 insertions(+), 112 deletions(-)
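Every hunk in this patch applies the same mechanical change: const qualifiers come off Type* and derived-type pointers. Because dyn_cast<> and cast<> propagate the constness of their argument, each de-constified signature forces matching changes at every cast in the body, which is why the edits below come in signature/body pairs. A minimal, self-contained sketch of the affected idiom (assumes 2011-era LLVM header paths; countLeafFields is a hypothetical helper, not code from this patch):

#include "llvm/DerivedTypes.h"
#include "llvm/Support/Casting.h"
using namespace llvm;

// With Type* no longer const-qualified, the dyn_cast results are plain
// StructType* / ArrayType*, matching the new signatures in this patch.
static unsigned countLeafFields(Type *Ty) {
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    unsigned N = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      N += countLeafFields(STy->getElementType(i));
    return N;
  }
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty))
    return ATy->getNumElements() * countLeafFields(ATy->getElementType());
  return 1;
}

The same propagation explains why what is conceptually a handful of signature changes touches 19 files here.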
diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp
index 125e64196f..fafc01044d 100644
--- a/lib/CodeGen/Analysis.cpp
+++ b/lib/CodeGen/Analysis.cpp
@@ -31,7 +31,7 @@ using namespace llvm;
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
-unsigned llvm::ComputeLinearIndex(const Type *Ty,
+unsigned llvm::ComputeLinearIndex(Type *Ty,
const unsigned *Indices,
const unsigned *IndicesEnd,
unsigned CurIndex) {
@@ -40,7 +40,7 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
return CurIndex;
// Given a struct type, recursively traverse the elements.
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
EE = STy->element_end();
@@ -52,8 +52,8 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
return CurIndex;
}
// Given an array type, recursively traverse the elements.
- else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- const Type *EltTy = ATy->getElementType();
+ else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Type *EltTy = ATy->getElementType();
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
if (Indices && *Indices == i)
return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
@@ -72,12 +72,12 @@ unsigned llvm::ComputeLinearIndex(const Type *Ty,
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
+void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets,
uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements.
- if (const StructType *STy = dyn_cast<StructType>(Ty)) {
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(),
EI = EB,
@@ -88,8 +88,8 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
return;
}
// Given an array type, recursively traverse the elements.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- const Type *EltTy = ATy->getElementType();
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ Type *EltTy = ATy->getElementType();
uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
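ComputeValueVTs, de-constified just above, recovers each field's byte offset from the target's StructLayout as it flattens an aggregate into legal value types. A hedged sketch of that layout query in isolation (assumes the 2011-era TargetData API; printFieldOffsets is a hypothetical helper):

#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Print the byte offset of each field -- the same quantity ComputeValueVTs
// accumulates into its Offsets vector.
static void printFieldOffsets(const TargetData &TD, StructType *STy) {
  const StructLayout *SL = TD.getStructLayout(STy);
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
    errs() << "field " << i << " at byte " << SL->getElementOffset(i) << "\n";
}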
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 7f314eed3a..fbf9867191 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1009,7 +1009,7 @@ void AsmPrinter::EmitConstantPool() {
unsigned NewOffset = (Offset + AlignMask) & ~AlignMask;
OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
- const Type *Ty = CPE.getType();
+ Type *Ty = CPE.getType();
Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(GetCPISymbol(CPI));
@@ -1447,7 +1447,7 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) {
// Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0);
- const Type *Ty = CE->getType();
+ Type *Ty = CE->getType();
const MCExpr *OpExpr = LowerConstant(Op, AP);
diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp
index d977651c32..eef3d8a65b 100644
--- a/lib/CodeGen/ELFWriter.cpp
+++ b/lib/CodeGen/ELFWriter.cpp
@@ -482,7 +482,7 @@ void ELFWriter::EmitGlobalConstant(const Constant *CV, ELFSection &GblS) {
EmitGlobalConstantLargeInt(CI, GblS);
return;
} else if (const ConstantVector *CP = dyn_cast<ConstantVector>(CV)) {
- const VectorType *PTy = CP->getType();
+ VectorType *PTy = CP->getType();
for (unsigned I = 0, E = PTy->getNumElements(); I < E; ++I)
EmitGlobalConstant(CP->getOperand(I), GblS);
return;
@@ -552,7 +552,7 @@ CstExprResTy ELFWriter::ResolveConstantExpr(const Constant *CV) {
}
case Instruction::PtrToInt: {
Constant *Op = CE->getOperand(0);
- const Type *Ty = CE->getType();
+ Type *Ty = CE->getType();
// We can emit the pointer value into this slot if the slot is an
// integer slot greater or equal to the size of the pointer.
diff --git a/lib/CodeGen/IntrinsicLowering.cpp b/lib/CodeGen/IntrinsicLowering.cpp
index 611886ff16..0f92c2d06b 100644
--- a/lib/CodeGen/IntrinsicLowering.cpp
+++ b/lib/CodeGen/IntrinsicLowering.cpp
@@ -27,7 +27,7 @@ using namespace llvm;
template <class ArgIt>
static void EnsureFunctionExists(Module &M, const char *Name,
ArgIt ArgBegin, ArgIt ArgEnd,
- const Type *RetTy) {
+ Type *RetTy) {
// Insert a correctly-typed definition now.
std::vector<Type *> ParamTys;
for (ArgIt I = ArgBegin; I != ArgEnd; ++I)
@@ -64,7 +64,7 @@ static void EnsureFPIntrinsicsExist(Module &M, Function *Fn,
template <class ArgIt>
static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
ArgIt ArgBegin, ArgIt ArgEnd,
- const Type *RetTy) {
+ Type *RetTy) {
// If we haven't already looked up this function, check to see if the
// program already contains a function with this name.
Module *M = CI->getParent()->getParent()->getParent();
@@ -462,7 +462,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic
case Intrinsic::memcpy: {
- const IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -473,7 +473,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
- const IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -484,7 +484,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memset: {
- const IntegerType *IntPtr = TD.getIntPtrType(Context);
+ IntegerType *IntPtr = TD.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
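All three lowerings above share one step: the intrinsic's length operand is cast to the target's pointer-sized integer before the libc call is formed, since memcpy/memmove/memset take a size_t. That step in isolation, as a hedged sketch (sizeToIntPtr is a hypothetical helper; in the pass itself TD, Context, and Builder are already in scope inside LowerIntrinsicCall):

#include "llvm/Target/TargetData.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

// The intrinsic's i32/i64 length operand is zero-extended or truncated to
// the target's intptr type so it matches the libc prototype.
static Value *sizeToIntPtr(const TargetData &TD, LLVMContext &Context,
                           IRBuilder<> &Builder, Value *Len) {
  IntegerType *IntPtr = TD.getIntPtrType(Context);
  return Builder.CreateIntCast(Len, IntPtr, /*isSigned=*/false);
}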
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index cd25156528..20066a067b 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -619,7 +619,7 @@ void MachineJumpTableInfo::dump() const { print(dbgs()); }
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//
-const Type *MachineConstantPoolEntry::getType() const {
+Type *MachineConstantPoolEntry::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 4f0d2caca2..5a7319f9d6 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6479,7 +6479,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
- const Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
+ Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy))
return SDValue();
@@ -6542,7 +6542,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
- const Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
+ Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
@@ -7447,7 +7447,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
const_cast<ConstantFP*>(FV->getConstantFPValue()),
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
- const Type *FPTy = Elts[0]->getType();
+ Type *FPTy = Elts[0]->getType();
const TargetData &TD = *TLI.getTargetData();
// Create a ConstantArray of the two constants.
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index 54a7d43f46..b8d7e82fe3 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -422,12 +422,12 @@ bool FastISel::SelectGetElementPtr(const User *I) {
bool NIsKill = hasTrivialKill(I->getOperand(0));
- const Type *Ty = I->getOperand(0)->getType();
+ Type *Ty = I->getOperand(0)->getType();
MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
E = I->op_end(); OI != E; ++OI) {
const Value *Idx = *OI;
- if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
+ if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
@@ -839,7 +839,7 @@ FastISel::SelectExtractValue(const User *U) {
return false;
const Value *Op0 = EVI->getOperand(0);
- const Type *AggTy = Op0->getType();
+ Type *AggTy = Op0->getType();
// Get the base result register.
unsigned ResultReg;
@@ -1074,7 +1074,7 @@ unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
if (MaterialReg == 0) {
// This is a bit ugly/slow, but failing here means falling out of
// fast-isel, which would be very slow.
- const IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
+ IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
VT.getSizeInBits());
MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
}
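SelectGetElementPtr above folds a GEP into a running byte offset: struct indices consult the StructLayout, while sequential indices scale by the element's allocation size. A hedged sketch of that arithmetic for constant indices only (computeConstGEPOffset is a hypothetical helper, not FastISel code):

#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

// Walk the indexed types, adding field offsets for structs and scaled
// element sizes for arrays/pointers -- the two cases the FastISel loop
// above distinguishes.
static uint64_t computeConstGEPOffset(const TargetData &TD, Type *Ty,
                                      const uint64_t *Idx, unsigned NumIdx) {
  uint64_t Offset = 0;
  for (unsigned i = 0; i != NumIdx; ++i) {
    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(Idx[i]);
      Ty = STy->getElementType(Idx[i]);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();
      Offset += TD.getTypeAllocSize(Ty) * Idx[i];
    }
  }
  return Offset;
}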
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index d518b5d346..d5bf12055e 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -78,7 +78,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
- const Type *Ty = AI->getAllocatedType();
+ Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
@@ -216,7 +216,7 @@ unsigned FunctionLoweringInfo::CreateReg(EVT VT) {
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
-unsigned FunctionLoweringInfo::CreateRegs(const Type *Ty) {
+unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
SmallVector<EVT, 4> ValueVTs;
ComputeValueVTs(TLI, Ty, ValueVTs);
@@ -260,7 +260,7 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
- const Type *Ty = PN->getType();
+ Type *Ty = PN->getType();
if (!Ty->isIntegerTy() || Ty->isVectorTy())
return;
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index f0f4743298..568f66c039 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -356,7 +356,7 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op)) {
int Offset = CP->getOffset();
unsigned Align = CP->getAlignment();
- const Type *Type = CP->getType();
+ Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment.
if (Align == 0) {
Align = TM->getTargetData()->getPrefTypeAlignment(Type);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index d06e2bdce0..aadfa26420 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -365,7 +365,7 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
// smaller type.
TLI.isLoadExtLegal(ISD::EXTLOAD, SVT) &&
TLI.ShouldShrinkFPConstant(OrigVT)) {
- const Type *SType = SVT.getTypeForEVT(*DAG.getContext());
+ Type *SType = SVT.getTypeForEVT(*DAG.getContext());
LLVMC = cast<ConstantFP>(ConstantExpr::getFPTrunc(LLVMC, SType));
VT = SVT;
Extend = true;
@@ -1124,7 +1124,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = TLI.getTargetData()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){
Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.getNode()),
@@ -1311,7 +1311,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned load and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
- const Type *Ty =
+ Type *Ty =
LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment =
TLI.getTargetData()->getABITypeAlignment(Ty);
@@ -1491,7 +1491,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
- const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
@@ -1596,7 +1596,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// If this is an unaligned store and the target doesn't support it,
// expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
- const Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
+ Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment)
Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.getNode()),
@@ -1999,7 +1999,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SrcSize = SrcOp.getValueType().getSizeInBits();
unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits();
- const Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
+ Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType);
// Emit a store to the stack slot. Use a truncstore if the input value is
@@ -2106,7 +2106,7 @@ SDValue SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode *Node) {
}
} else {
assert(Node->getOperand(i).getOpcode() == ISD::UNDEF);
- const Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
+ Type *OpNTy = EltVT.getTypeForEVT(*DAG.getContext());
CV.push_back(UndefValue::get(OpNTy));
}
}
@@ -2159,7 +2159,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
@@ -2169,7 +2169,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
// isTailCall may be true since the callee does not reference caller stack
// frame. Check if it's in the right position.
@@ -2210,7 +2210,7 @@ SDValue SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, EVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC), false,
@@ -2237,7 +2237,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@@ -2248,7 +2248,7 @@ SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
@@ -2360,13 +2360,13 @@ SelectionDAGLegalize::ExpandDivRemLibCall(SDNode *Node,
SDValue InChain = DAG.getEntryNode();
EVT RetVT = Node->getValueType(0);
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i); Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
Entry.isZExt = !isSigned;
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 20fb9c32be..41197d8b5d 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -2176,9 +2176,9 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N,
void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
SDValue &Lo, SDValue &Hi) {
EVT VT = N->getValueType(0);
- const Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = VT.getTypeForEVT(*DAG.getContext());
EVT PtrVT = TLI.getPointerTy();
- const Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
+ Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext());
DebugLoc dl = N->getDebugLoc();
// A divide for UMULO should be faster than a function call.
@@ -2222,7 +2222,7 @@ void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
EVT ArgVT = N->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = N->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = true;
diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
index ba658b0220..80f555bde9 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp
@@ -1046,7 +1046,7 @@ SDValue DAGTypeLegalizer::MakeLibCall(RTLIB::Libcall LC, EVT RetVT,
SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
TLI.getPointerTy());
- const Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+ Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
std::pair<SDValue,SDValue> CallInfo =
TLI.LowerCallTo(DAG.getEntryNode(), RetTy, isSigned, !isSigned, false,
false, 0, TLI.getLibcallCallingConv(LC), false,
@@ -1067,7 +1067,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TargetLowering::ArgListEntry Entry;
for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i) {
EVT ArgVT = Node->getOperand(i).getValueType();
- const Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Node = Node->getOperand(i);
Entry.Ty = ArgTy;
Entry.isSExt = isSigned;
@@ -1078,7 +1078,7 @@ DAGTypeLegalizer::ExpandChainLibCall(RTLIB::Libcall LC,
TLI.getPointerTy());
// Splice the libcall in wherever FindInputOutputChains tells us to.
- const Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
+ Type *RetTy = Node->getValueType(0).getTypeForEVT(*DAG.getContext());
std::pair<SDValue, SDValue> CallInfo =
TLI.LowerCallTo(InChain, RetTy, isSigned, !isSigned, false, false,
0, TLI.getLibcallCallingConv(LC), /*isTailCall=*/false,
diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index b5698f9c67..85a169261a 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -670,7 +670,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
// Store the new element. This may be larger than the vector element type,
// so use a truncating store.
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
- const Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
+ Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment =
TLI.getTargetData()->getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 35ea0bb940..b72a47d0f6 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -821,7 +821,7 @@ static void VerifyMachineNode(SDNode *N) {
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
- const Type *Ty = VT == MVT::iPTR ?
+ Type *Ty = VT == MVT::iPTR ?
PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext());
@@ -1432,7 +1432,7 @@ SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
unsigned ByteSize = VT.getStoreSize();
- const Type *Ty = VT.getTypeForEVT(*getContext());
+ Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign);
@@ -1445,8 +1445,8 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
VT2.getStoreSizeInBits())/8;
- const Type *Ty1 = VT1.getTypeForEVT(*getContext());
- const Type *Ty2 = VT2.getTypeForEVT(*getContext());
+ Type *Ty1 = VT1.getTypeForEVT(*getContext());
+ Type *Ty2 = VT2.getTypeForEVT(*getContext());
const TargetData *TD = TLI.getTargetData();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2));
@@ -3425,7 +3425,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();
if (DstAlignCanChange) {
- const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@@ -3514,7 +3514,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();
if (DstAlignCanChange) {
- const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@@ -3589,7 +3589,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
return SDValue();
if (DstAlignCanChange) {
- const Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
+ Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty);
if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed.
@@ -3782,7 +3782,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result;
// Emit a library call.
- const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
+ Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy;
@@ -6528,7 +6528,7 @@ unsigned GlobalAddressSDNode::getAddressSpace() const {
}
-const Type *ConstantPoolSDNode::getType() const {
+Type *ConstantPoolSDNode::getType() const {
if (isMachineConstantPoolEntry())
return Val.MachineCPVal->getType();
return Val.ConstVal->getType();
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 81b03ee76a..ba18465c0d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -578,7 +578,7 @@ namespace {
: ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs) {}
RegsForValue(LLVMContext &Context, const TargetLowering &tli,
- unsigned Reg, const Type *Ty) {
+ unsigned Reg, Type *Ty) {
ComputeValueVTs(tli, Ty, ValueVTs);
for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
@@ -1069,7 +1069,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
return DAG.getBlockAddress(BA, VT);
- const VectorType *VecTy = cast<VectorType>(V->getType());
+ VectorType *VecTy = cast<VectorType>(V->getType());
unsigned NumElements = VecTy->getNumElements();
// Now that we know the number and type of the elements, get that number of
@@ -2458,7 +2458,7 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
void SelectionDAGBuilder::visitFSub(const User &I) {
// -0.0 - X --> fneg
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
if (isa<Constant>(I.getOperand(0)) &&
I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
SDValue Op2 = getValue(I.getOperand(1));
@@ -2886,8 +2886,8 @@ void SelectionDAGBuilder::visitShuffleVector(const User &I) {
void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
const Value *Op0 = I.getOperand(0);
const Value *Op1 = I.getOperand(1);
- const Type *AggTy = I.getType();
- const Type *ValTy = Op1->getType();
+ Type *AggTy = I.getType();
+ Type *ValTy = Op1->getType();
bool IntoUndef = isa<UndefValue>(Op0);
bool FromUndef = isa<UndefValue>(Op1);
@@ -2927,8 +2927,8 @@ void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
const Value *Op0 = I.getOperand(0);
- const Type *AggTy = Op0->getType();
- const Type *ValTy = I.getType();
+ Type *AggTy = Op0->getType();
+ Type *ValTy = I.getType();
bool OutOfUndef = isa<UndefValue>(Op0);
unsigned LinearIndex = ComputeLinearIndex(AggTy, I.getIndices());
@@ -2961,12 +2961,12 @@ void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
SDValue N = getValue(I.getOperand(0));
- const Type *Ty = I.getOperand(0)->getType();
+ Type *Ty = I.getOperand(0)->getType();
for (GetElementPtrInst::const_op_iterator OI = I.op_begin()+1, E = I.op_end();
OI != E; ++OI) {
const Value *Idx = *OI;
- if (const StructType *StTy = dyn_cast<StructType>(Ty)) {
+ if (StructType *StTy = dyn_cast<StructType>(Ty)) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
@@ -3037,7 +3037,7 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
if (FuncInfo.StaticAllocaMap.count(&I))
return; // getValue will auto-populate this.
- const Type *Ty = I.getAllocatedType();
+ Type *Ty = I.getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty),
@@ -3087,7 +3087,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
const Value *SV = I.getOperand(0);
SDValue Ptr = getValue(SV);
- const Type *Ty = I.getType();
+ Type *Ty = I.getType();
bool isVolatile = I.isVolatile();
bool isNonTemporal = I.getMetadata("nontemporal") != 0;
@@ -3290,7 +3290,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
}
if (!I.getType()->isVoidTy()) {
- if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
+ if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
EVT VT = TLI.getValueType(PTy);
Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
}
@@ -4918,9 +4918,9 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
bool isTailCall,
MachineBasicBlock *LandingPad) {
- const PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
- const FunctionType *FTy = cast<FunctionType>(PT->getElementType());
- const Type *RetTy = FTy->getReturnType();
+ PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
+ FunctionType *FTy = cast<FunctionType>(PT->getElementType());
+ Type *RetTy = FTy->getReturnType();
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
MCSymbol *BeginLabel = 0;
@@ -4949,7 +4949,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
- const Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
+ Type *StackSlotPtrType = PointerType::getUnqual(FTy->getReturnType());
DemoteStackSlot = DAG.getFrameIndex(DemoteStackIdx, TLI.getPointerTy());
Entry.Node = DemoteStackSlot;
@@ -5037,7 +5037,7 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
// The instruction result is the result of loading from the
// hidden sret parameter.
SmallVector<EVT, 1> PVTs;
- const Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
+ Type *PtrRetTy = PointerType::getUnqual(FTy->getReturnType());
ComputeValueVTs(TLI, PtrRetTy, PVTs);
assert(PVTs.size() == 1 && "Pointers should fit in one register");
@@ -5130,7 +5130,7 @@ static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
}
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
- const Type *LoadTy,
+ Type *LoadTy,
SelectionDAGBuilder &Builder) {
// Check to see if this load can be trivially constant folded, e.g. if the
@@ -5193,7 +5193,7 @@ bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
if (Size && IsOnlyUsedInZeroEqualityComparison(&I)) {
bool ActuallyDoIt = true;
MVT LoadVT;
- const Type *LoadTy;
+ Type *LoadTy;
switch (Size->getZExtValue()) {
default:
LoadVT = MVT::Other;
@@ -5261,14 +5261,14 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
// See if any floating point values are being passed to this function. This is
// used to emit an undefined reference to fltused on Windows.
- const FunctionType *FT =
+ FunctionType *FT =
cast<FunctionType>(I.getCalledValue()->getType()->getContainedType(0));
MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
if (FT->isVarArg() &&
!MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
- const Type* T = I.getArgOperand(i)->getType();
- for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
+ Type* T = I.getArgOperand(i)->getType();
+ for (po_iterator<Type*> i = po_begin(T), e = po_end(T);
i != e; ++i) {
if (!i->isFloatingPointTy()) continue;
MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
@@ -5412,20 +5412,20 @@ public:
if (isa<BasicBlock>(CallOperandVal))
return TLI.getPointerTy();
- const llvm::Type *OpTy = CallOperandVal->getType();
+ llvm::Type *OpTy = CallOperandVal->getType();
// FIXME: code duplicated from TargetLowering::ParseConstraints().
// If this is an indirect operand, the operand is a pointer to the
// accessed type.
if (isIndirect) {
- const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+ llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
- if (const StructType *STy = dyn_cast<StructType>(OpTy))
+ if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
@@ -5639,7 +5639,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
- if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
+ if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpVT = TLI.getValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
@@ -5750,7 +5750,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
} else {
// Otherwise, create a stack slot and emit a store to it before the
// asm.
- const Type *Ty = OpVal->getType();
+ Type *Ty = OpVal->getType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction();
@@ -6111,7 +6111,7 @@ void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
/// FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
-TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
+TargetLowering::LowerCallTo(SDValue Chain, Type *RetTy,
bool RetSExt, bool RetZExt, bool isVarArg,
bool isInreg, unsigned NumFixedArgs,
CallingConv::ID CallConv, bool isTailCall,
@@ -6128,7 +6128,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
- const Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
+ Type *ArgTy = VT.getTypeForEVT(RetTy->getContext());
SDValue Op = SDValue(Args[i].Node.getNode(),
Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags;
@@ -6145,8 +6145,8 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
Flags.setSRet();
if (Args[i].isByVal) {
Flags.setByVal();
- const PointerType *Ty = cast<PointerType>(Args[i].Ty);
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(Args[i].Ty);
+ Type *ElementTy = Ty->getElementType();
Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy));
// For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right.
@@ -6356,7 +6356,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
for (unsigned Value = 0, NumValues = ValueVTs.size();
Value != NumValues; ++Value) {
EVT VT = ValueVTs[Value];
- const Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
+ Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment =
TD->getABITypeAlignment(ArgTy);
@@ -6371,8 +6371,8 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
Flags.setSRet();
if (F.paramHasAttr(Idx, Attribute::ByVal)) {
Flags.setByVal();
- const PointerType *Ty = cast<PointerType>(I->getType());
- const Type *ElementTy = Ty->getElementType();
+ PointerType *Ty = cast<PointerType>(I->getType());
+ Type *ElementTy = Ty->getElementType();
Flags.setByValSize(TD->getTypeAllocSize(ElementTy));
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
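The po_iterator<Type*> loop in visitCall above relies on GraphTraits<Type*> to walk a type together with all of its contained types, so nested aggregates are searched for floating-point leaves when deciding whether to reference fltused on Windows. The same traversal as a hedged, standalone sketch (hasFPComponent is a hypothetical helper):

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

// GraphTraits<Type*> exposes a type's contained types as graph successors,
// so a post-order walk visits every nested element type once.
static bool hasFPComponent(Type *T) {
  for (po_iterator<Type*> I = po_begin(T), E = po_end(T); I != E; ++I)
    if (I->isFloatingPointTy())
      return true;
  return false;
}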
diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2626ac3bbb..335eca787f 100644
--- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -996,7 +996,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
+void llvm::GetReturnInfo(Type* ReturnType, Attributes attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI,
SmallVectorImpl<uint64_t> *Offsets) {
@@ -1054,7 +1054,7 @@ void llvm::GetReturnInfo(const Type* ReturnType, Attributes attr,
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
-unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
+unsigned TargetLowering::getByValTypeAlignment(Type *Ty) const {
return TD->getCallFrameTypeAlignment(Ty);
}
@@ -2840,7 +2840,7 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
// corresponding argument.
assert(!CS.getType()->isVoidTy() &&
"Bad inline asm!");
- if (const StructType *STy = dyn_cast<StructType>(CS.getType())) {
+ if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
OpInfo.ConstraintVT = getValueType(STy->getElementType(ResNo));
} else {
assert(ResNo == 0 && "Asm only has one result!");
@@ -2857,16 +2857,16 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints(
}
if (OpInfo.CallOperandVal) {
- const llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
+ llvm::Type *OpTy = OpInfo.CallOperandVal->getType();
if (OpInfo.isIndirect) {
- const llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
+ llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
if (!PtrTy)
report_fatal_error("Indirect operand for inline asm not a pointer!");
OpTy = PtrTy->getElementType();
}
// Look for vector wrapped in a struct. e.g. { <16 x i8> }.
- if (const StructType *STy = dyn_cast<StructType>(OpTy))
+ if (StructType *STy = dyn_cast<StructType>(OpTy))
if (STy->getNumElements() == 1)
OpTy = STy->getElementType(0);
@@ -3187,7 +3187,7 @@ void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo,
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLowering::isLegalAddressingMode(const AddrMode &AM,
- const Type *Ty) const {
+ Type *Ty) const {
// The default implementation of this implements a conservative RISCy, r+r and
// r+i addr mode.
diff --git a/lib/CodeGen/ShadowStackGC.cpp b/lib/CodeGen/ShadowStackGC.cpp
index 5a253a4d97..2f1d223cf9 100644
--- a/lib/CodeGen/ShadowStackGC.cpp
+++ b/lib/CodeGen/ShadowStackGC.cpp
@@ -61,7 +61,7 @@ namespace {
private:
bool IsNullValue(Value *V);
Constant *GetFrameMap(Function &F);
- const Type* GetConcreteStackEntryType(Function &F);
+ Type* GetConcreteStackEntryType(Function &F);
void CollectRoots(Function &F);
static GetElementPtrInst *CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Value *BasePtr,
@@ -190,7 +190,7 @@ ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {
Constant *ShadowStackGC::GetFrameMap(Function &F) {
// doInitialization creates the abstract type of this value.
- const Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
+ Type *VoidPtr = Type::getInt8PtrTy(F.getContext());
// Truncate the ShadowStackDescriptor if some metadata is null.
unsigned NumMeta = 0;
@@ -203,7 +203,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
}
Metadata.resize(NumMeta);
- const Type *Int32Ty = Type::getInt32Ty(F.getContext());
+ Type *Int32Ty = Type::getInt32Ty(F.getContext());
Constant *BaseElts[] = {
ConstantInt::get(Int32Ty, Roots.size(), false),
@@ -244,7 +244,7 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
return ConstantExpr::getGetElementPtr(GV, GEPIndices, 2);
}
-const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
+Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
// doInitialization creates the generic version of this type.
std::vector<Type*> EltTys;
EltTys.push_back(StackEntryTy);
@@ -282,7 +282,7 @@ bool ShadowStackGC::initializeCustomLowering(Module &M) {
EltTys.push_back(PointerType::getUnqual(StackEntryTy));
EltTys.push_back(FrameMapPtrTy);
StackEntryTy->setBody(EltTys);
- const PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
+ PointerType *StackEntryPtrTy = PointerType::getUnqual(StackEntryTy);
// Get the root chain if it already exists.
Head = M.getGlobalVariable("llvm_gc_root_chain");
@@ -373,7 +373,7 @@ bool ShadowStackGC::performCustomLowering(Function &F) {
// Build the constant map and figure the type of the shadow stack entry.
Value *FrameMap = GetFrameMap(F);
- const Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
+ Type *ConcreteStackEntryTy = GetConcreteStackEntryType(F);
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp
index 65a33da93a..07ec857608 100644
--- a/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/lib/CodeGen/SjLjEHPrepare.cpp
@@ -40,7 +40,7 @@ namespace {
const TargetLowering *TLI;
- const Type *FunctionContextTy;
+ Type *FunctionContextTy;
Constant *RegisterFn;
Constant *UnregisterFn;
Constant *BuiltinSetjmpFn;
@@ -204,7 +204,7 @@ splitLiveRangesAcrossInvokes(SmallVector<InvokeInst*,16> &Invokes) {
++AfterAllocaInsertPt;
for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI) {
- const Type *Ty = AI->getType();
+ Type *Ty = AI->getType();
// Aggregate types can't be cast, but are legal argument types, so we have
// to handle them differently. We use an extract/insert pair as a
// lightweight method to achieve the same goal.
@@ -381,7 +381,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
"fcn_context", F.begin()->begin());
Value *Idxs[2];
- const Type *Int32Ty = Type::getInt32Ty(F.getContext());
+ Type *Int32Ty = Type::getInt32Ty(F.getContext());
Value *Zero = ConstantInt::get(Int32Ty, 0);
// We need to also keep around a reference to the call_site field
Idxs[0] = Zero;
@@ -423,7 +423,7 @@ bool SjLjEHPass::insertSjLjEHSupport(Function &F) {
// instruction hasn't already been removed.
if (!I->getParent()) continue;
Value *Val = new LoadInst(ExceptionAddr, "exception", true, I);
- const Type *Ty = Type::getInt8PtrTy(F.getContext());
+ Type *Ty = Type::getInt8PtrTy(F.getContext());
Val = CastInst::Create(Instruction::IntToPtr, Val, Ty, "", I);
I->replaceAllUsesWith(Val);
diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp
index d3cbd15b64..1f0e5a2711 100644
--- a/lib/CodeGen/StackProtector.cpp
+++ b/lib/CodeGen/StackProtector.cpp
@@ -123,7 +123,7 @@ bool StackProtector::RequiresStackProtector() const {
// protectors.
return true;
- if (const ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
+ if (ArrayType *AT = dyn_cast<ArrayType>(AI->getAllocatedType())) {
// We apparently only care about character arrays.
if (!AT->getElementType()->isIntegerTy(8))
continue;
@@ -165,7 +165,7 @@ bool StackProtector::InsertStackProtectors() {
// StackGuard = load __stack_chk_guard
// call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
//
- const PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
+ PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
unsigned AddressSpace, Offset;
if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
Constant *OffsetVal =