author    Alp Toker <alp@nuanti.com>    2014-01-24 17:20:08 +0000
committer Alp Toker <alp@nuanti.com>    2014-01-24 17:20:08 +0000
commit    ae43cab6bab0e5bcdbe2971bf718712559625e39 (patch)
tree      536b346c514acfc8d7f3c2e424e1295de63e516a /lib
parent    27ce8feb4adbb13c0efcc2d560c93dfb71785cb2 (diff)
Fix known typos
Sweep the codebase for common typos. Includes some changes to visible function names that were misspelt.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200018 91177308-0d34-0410-b5e6-96231b3b80d8
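The commit message does not say how the sweep was performed, so the following is only a hypothetical sketch of one way to do it: scan the tree for a hand-maintained list of known misspellings and report each hit for manual review rather than rewriting in place, which matters here because some fixes rename visible identifiers (e.g. CommaSeparateAndAddOccurence -> CommaSeparateAndAddOccurrence) and need accompanying call-site updates. The stem list below is drawn from misspellings actually fixed in this commit; the script name and file-extension filter are illustrative assumptions.

#!/usr/bin/env python3
# typo_sweep.py -- hypothetical helper, not the tool used for this commit.
# Reports known misspellings in a source tree so each fix can be reviewed
# by hand before committing.
import os
import re

# Stem -> correction pairs drawn from misspellings fixed in this commit.
# Using stems (no trailing word boundary) also catches suffixed forms
# such as "occurences" or "cannonicalize".
TYPOS = {
    "neccessar": "necessar",
    "seperate": "separate",
    "cannonical": "canonical",
    "occurenc": "occurrenc",
    "propogat": "propagat",
    "begining": "beginning",
}
PATTERN = re.compile(r"\b(" + "|".join(map(re.escape, TYPOS)) + r")",
                     re.IGNORECASE)

def sweep(root):
    for dirpath, _, files in os.walk(root):
        for name in files:
            if not name.endswith((".c", ".cpp", ".h", ".td")):
                continue
            path = os.path.join(dirpath, name)
            with open(path, errors="replace") as f:
                for lineno, line in enumerate(f, 1):
                    for m in PATTERN.finditer(line):
                        print("%s:%d: %s -> %s" % (path, lineno,
                              m.group(1), TYPOS[m.group(1).lower()]))

if __name__ == "__main__":
    sweep("lib")

Run from the repository root, this prints one file:line report per hit; a per-word grep -rn would do the same job, but a script keeps the misspelling list in one reviewable place.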
Diffstat (limited to 'lib')
-rw-r--r--  lib/Analysis/BasicAliasAnalysis.cpp | 2
-rw-r--r--  lib/Analysis/DependenceAnalysis.cpp | 2
-rw-r--r--  lib/Analysis/ScalarEvolution.cpp | 8
-rw-r--r--  lib/CodeGen/CriticalAntiDepBreaker.cpp | 2
-rw-r--r--  lib/CodeGen/MachineScheduler.cpp | 2
-rw-r--r--  lib/CodeGen/ScheduleDAG.cpp | 2
-rw-r--r--  lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6
-rw-r--r--  lib/CodeGen/StackMaps.cpp | 2
-rw-r--r--  lib/ExecutionEngine/IntelJITEvents/jitprofiling.h | 14
-rw-r--r--  lib/ExecutionEngine/Interpreter/Execution.cpp | 2
-rw-r--r--  lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp | 2
-rw-r--r--  lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h | 2
-rw-r--r--  lib/MC/MCParser/AsmParser.cpp | 8
-rw-r--r--  lib/Support/APFloat.cpp | 2
-rw-r--r--  lib/Support/APInt.cpp | 2
-rw-r--r--  lib/Support/CommandLine.cpp | 19
-rw-r--r--  lib/Support/Path.cpp | 6
-rw-r--r--  lib/Support/regcomp.c | 6
-rw-r--r--  lib/Target/AArch64/AArch64InstrInfo.cpp | 3
-rw-r--r--  lib/Target/AArch64/AArch64InstrNEON.td | 2
-rw-r--r--  lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp | 2
-rw-r--r--  lib/Target/ARM/ARMISelDAGToDAG.cpp | 2
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp | 4
-rw-r--r--  lib/Target/ARM/ARMRegisterInfo.td | 2
-rw-r--r--  lib/Target/ARM/ARMScheduleSwift.td | 4
-rw-r--r--  lib/Target/ARM/ARMTargetTransformInfo.cpp | 2
-rw-r--r--  lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h | 3
-rw-r--r--  lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp | 2
-rw-r--r--  lib/Target/Hexagon/HexagonCopyToCombine.cpp | 4
-rw-r--r--  lib/Target/Hexagon/HexagonHardwareLoops.cpp | 4
-rw-r--r--  lib/Target/Hexagon/HexagonInstrInfo.cpp | 4
-rw-r--r--  lib/Target/Mips/AsmParser/MipsAsmParser.cpp | 4
-rw-r--r--  lib/Target/Mips/MipsMSAInstrInfo.td | 2
-rw-r--r--  lib/Target/NVPTX/NVPTXISelLowering.cpp | 6
-rw-r--r--  lib/Target/NVPTX/NVVMReflect.cpp | 4
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp | 2
-rw-r--r--  lib/Target/R600/AMDGPU.h | 2
-rw-r--r--  lib/Target/R600/AMDILCFGStructurizer.cpp | 2
-rw-r--r--  lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp | 2
-rw-r--r--  lib/Target/R600/R600ClauseMergePass.cpp | 2
-rw-r--r--  lib/Target/R600/R600Defines.h | 2
-rw-r--r--  lib/Target/R600/R600ISelLowering.cpp | 6
-rw-r--r--  lib/Target/R600/R600ISelLowering.h | 2
-rw-r--r--  lib/Target/R600/R600InstrInfo.h | 2
-rw-r--r--  lib/Target/R600/R600Instructions.td | 2
-rw-r--r--  lib/Target/R600/R600Packetizer.cpp | 2
-rw-r--r--  lib/Target/R600/SIISelLowering.cpp | 2
-rw-r--r--  lib/Target/R600/SIRegisterInfo.cpp | 2
-rw-r--r--  lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h | 2
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp | 2
-rw-r--r--  lib/Target/SystemZ/SystemZInstrInfo.cpp | 2
-rw-r--r--  lib/Target/SystemZ/SystemZInstrInfo.td | 2
-rw-r--r--  lib/Target/X86/Disassembler/X86DisassemblerDecoder.h | 2
-rw-r--r--  lib/Target/X86/X86FastISel.cpp | 2
-rw-r--r--  lib/Target/X86/X86Schedule.td | 2
-rw-r--r--  lib/Target/XCore/XCoreLowerThreadLocal.cpp | 4
-rw-r--r--  lib/Transforms/IPO/ConstantMerge.cpp | 6
-rw-r--r--  lib/Transforms/IPO/MergeFunctions.cpp | 2
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCalls.cpp | 4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCasts.cpp | 4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineCompares.cpp | 4
-rw-r--r--  lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 2
-rw-r--r--  lib/Transforms/InstCombine/InstructionCombining.cpp | 4
-rw-r--r--  lib/Transforms/Instrumentation/AddressSanitizer.cpp | 2
-rw-r--r--  lib/Transforms/Instrumentation/MemorySanitizer.cpp | 2
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 2
-rw-r--r--  lib/Transforms/ObjCARC/ObjCARCOpts.cpp | 14
-rw-r--r--  lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 6
-rw-r--r--  lib/Transforms/Utils/FlattenCFG.cpp | 2
-rw-r--r--  lib/Transforms/Utils/SimplifyCFG.cpp | 6
-rw-r--r--  lib/Transforms/Vectorize/LoopVectorize.cpp | 6
-rw-r--r--  lib/Transforms/Vectorize/SLPVectorizer.cpp | 4
72 files changed, 131 insertions, 130 deletions
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index 6adeedb244..5f7dd98e19 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -154,7 +154,7 @@ static bool isObjectSize(const Value *V, uint64_t Size,
/// isIdentifiedFunctionLocal - Return true if V is umabigously identified
/// at the function-level. Different IdentifiedFunctionLocals can't alias.
/// Further, an IdentifiedFunctionLocal can not alias with any function
-/// arguments other than itself, which is not neccessarily true for
+/// arguments other than itself, which is not necessarily true for
/// IdentifiedObjects.
static bool isIdentifiedFunctionLocal(const Value *V)
{
diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp
index f152aeb9de..b74140db2c 100644
--- a/lib/Analysis/DependenceAnalysis.cpp
+++ b/lib/Analysis/DependenceAnalysis.cpp
@@ -3178,7 +3178,7 @@ void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level,
/// Check if we can delinearize the subscripts. If the SCEVs representing the
/// source and destination array references are recurrences on a nested loop,
-/// this function flattens the nested recurrences into seperate recurrences
+/// this function flattens the nested recurrences into separate recurrences
/// for each loop level.
bool
DependenceAnalysis::tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV,
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 064aafd01e..b65d99e4d6 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -7143,7 +7143,7 @@ SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
const SCEV *Start = this->getStart();
const SCEV *Step = this->getStepRecurrence(SE);
- // Build the SCEV representation of the cannonical induction variable in the
+ // Build the SCEV representation of the canonical induction variable in the
// loop of this SCEV.
const SCEV *Zero = SE.getConstant(this->getType(), 0);
const SCEV *One = SE.getConstant(this->getType(), 1);
@@ -7189,13 +7189,13 @@ SCEVAddRecExpr::delinearize(ScalarEvolution &SE,
else
Rem = Quotient;
- // Scale up the cannonical induction variable IV by whatever remains from the
+ // Scale up the canonical induction variable IV by whatever remains from the
// Step after division by the GCD: the GCD is the size of all the sub-array.
if (Step != GCD) {
Step = SCEVDivision::divide(SE, Step, GCD);
IV = SE.getMulExpr(IV, Step);
}
- // The access function in the current subscript is computed as the cannonical
+ // The access function in the current subscript is computed as the canonical
// induction variable IV (potentially scaled up by the step) and offset by
// Rem, the offset of delinearization in the sub-array.
const SCEV *Index = SE.getAddExpr(IV, Rem);
@@ -7652,7 +7652,7 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
typedef DenseMap<const Loop *, std::string> VerifyMap;
-/// replaceSubString - Replaces all occurences of From in Str with To.
+/// replaceSubString - Replaces all occurrences of From in Str with To.
static void replaceSubString(std::string &Str, StringRef From, StringRef To) {
size_t Pos = 0;
while ((Pos = Str.find(From, Pos)) != std::string::npos) {
diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp
index 18c8e0ae12..463eb86769 100644
--- a/lib/CodeGen/CriticalAntiDepBreaker.cpp
+++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp
@@ -595,7 +595,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
AntiDepReg = 0;
- // Look for a suitable register to use to break the anti-depenence.
+ // Look for a suitable register to use to break the anti-dependence.
//
// TODO: Instead of picking the first free register, consider which might
// be the best.
diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp
index 2de3d20e59..beb724342e 100644
--- a/lib/CodeGen/MachineScheduler.cpp
+++ b/lib/CodeGen/MachineScheduler.cpp
@@ -1976,7 +1976,7 @@ void SchedBoundary::bumpNode(SUnit *SU) {
}
else {
// After updating ZoneCritResIdx and ExpectedLatency, check if we're
- // resource limited. If a stall occured, bumpCycle does this.
+ // resource limited. If a stall occurred, bumpCycle does this.
unsigned LFactor = SchedModel->getLatencyFactor();
IsResourceLimited =
(int)(getCriticalCount() - (getScheduledLatency() * LFactor))
diff --git a/lib/CodeGen/ScheduleDAG.cpp b/lib/CodeGen/ScheduleDAG.cpp
index 75e3790735..bd4c0e209d 100644
--- a/lib/CodeGen/ScheduleDAG.cpp
+++ b/lib/CodeGen/ScheduleDAG.cpp
@@ -63,7 +63,7 @@ const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
/// not already. It also adds the current node as a successor of the
/// specified node.
bool SUnit::addPred(const SDep &D, bool Required) {
- // If this node already has this depenence, don't add a redundant one.
+ // If this node already has this dependence, don't add a redundant one.
for (SmallVectorImpl<SDep>::iterator I = Preds.begin(), E = Preds.end();
I != E; ++I) {
// Zero-latency weak edges may be added purely for heuristic ordering. Don't
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index dd67b45e72..2dafcb9fa9 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6740,7 +6740,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0);
}
- // The next optimizations are desireable only if SELECT_CC can be lowered.
+ // The next optimizations are desirable only if SELECT_CC can be lowered.
// Check against MVT::Other for SELECT_CC, which is a workaround for targets
// having to say they don't support SELECT_CC on every type the DAG knows
// about, since there is no way to mark an opcode illegal at all value types
@@ -6797,7 +6797,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0);
}
- // The next optimizations are desireable only if SELECT_CC can be lowered.
+ // The next optimizations are desirable only if SELECT_CC can be lowered.
// Check against MVT::Other for SELECT_CC, which is a workaround for targets
// having to say they don't support SELECT_CC on every type the DAG knows
// about, since there is no way to mark an opcode illegal at all value types
@@ -8265,7 +8265,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) {
// The width of the type must be a power of 2 and greater than 8-bits.
// Otherwise the load cannot be represented in LLVM IR.
// Moreover, if we shifted with a non-8-bits multiple, the slice
- // will be accross several bytes. We do not support that.
+ // will be across several bytes. We do not support that.
unsigned Width = User->getValueSizeInBits(0);
if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7))
return 0;
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp
index d70e6b3044..79c1e7dce5 100644
--- a/lib/CodeGen/StackMaps.cpp
+++ b/lib/CodeGen/StackMaps.cpp
@@ -41,7 +41,7 @@ PatchPointOpers::PatchPointOpers(const MachineInstr *MI)
++CheckStartIdx;
assert(getMetaIdx() == CheckStartIdx &&
- "Unexpected additonal definition in Patchpoint intrinsic.");
+ "Unexpected additional definition in Patchpoint intrinsic.");
#endif
}
diff --git a/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
index f08e2870dc..8d16ee85d1 100644
--- a/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
+++ b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h
@@ -164,10 +164,10 @@ typedef struct _iJIT_Method_NIDS
typedef struct _LineNumberInfo
{
- /* x86 Offset from the begining of the method*/
- unsigned int Offset;
-
- /* source line number from the begining of the source file */
+ /* x86 Offset from the beginning of the method*/
+ unsigned int Offset;
+
+ /* source line number from the beginning of the source file */
unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
@@ -191,9 +191,9 @@ typedef struct _iJIT_Method_Load
unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
- unsigned int line_number_size;
-
- /* Pointer to the begining of the line numbers info array */
+ unsigned int line_number_size;
+
+ /* Pointer to the beginning of the line numbers info array */
pLineNumberInfo line_number_table;
/* unique class ID */
diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp
index 5de065903d..8a101dfa27 100644
--- a/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1120,7 +1120,7 @@ void Interpreter::visitCallSite(CallSite CS) {
callFunction((Function*)GVTOP(SRC), ArgVals);
}
-// auxilary function for shift operations
+// auxiliary function for shift operations
static unsigned getShiftAmount(uint64_t orgShiftAmount,
llvm::APInt valueToShift) {
unsigned valueWidth = valueToShift.getBitWidth();
diff --git a/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
index cf90e77e38..f1dd5a6a51 100644
--- a/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
+++ b/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp
@@ -78,7 +78,7 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup,
sys::Memory::MF_WRITE,
ec);
if (ec) {
- // FIXME: Add error propogation to the interface.
+ // FIXME: Add error propagation to the interface.
return NULL;
}
diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
index 181964faa9..2ed2957d96 100644
--- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -116,7 +116,7 @@ class RuntimeDyldELF : public RuntimeDyldImpl {
virtual void updateGOTEntries(StringRef Name, uint64_t Addr);
- // Relocation entries for symbols whose position-independant offset is
+ // Relocation entries for symbols whose position-independent offset is
// updated in a global offset table.
typedef SmallVector<RelocationValueRef, 2> GOTRelocations;
GOTRelocations GOTEntries; // List of entries requiring finalization.
diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp
index 035ecaacc8..dba543b683 100644
--- a/lib/MC/MCParser/AsmParser.cpp
+++ b/lib/MC/MCParser/AsmParser.cpp
@@ -1341,7 +1341,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) {
if (!getTargetParser().ParseDirective(ID))
return false;
- // Next, check the extention directive map to see if any extension has
+ // Next, check the extension directive map to see if any extension has
// registered itself to parse this directive.
std::pair<MCAsmParserExtension *, DirectiveHandler> Handler =
ExtensionDirectiveMap.lookup(IDVal);
@@ -3164,13 +3164,13 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
///
/// With the support added for named parameters there may be code out there that
/// is transitioning from positional parameters. In versions of gas that did
-/// not support named parameters they would be ignored on the macro defintion.
+/// not support named parameters they would be ignored on the macro definition.
/// But to support both styles of parameters this is not possible so if a macro
-/// defintion has named parameters but does not use them and has what appears
+/// definition has named parameters but does not use them and has what appears
/// to be positional parameters, strings like $1, $2, ... and $n, then issue a
/// warning that the positional parameter found in body which have no effect.
/// Hoping the developer will either remove the named parameters from the macro
-/// definiton so the positional parameters get used if that was what was
+/// definition so the positional parameters get used if that was what was
/// intended or change the macro to use the named parameters. It is possible
/// this warning will trigger when the none of the named parameters are used
/// and the strings like $1 are infact to simply to be passed trough unchanged.
diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp
index 802233c109..deb9b05206 100644
--- a/lib/Support/APFloat.cpp
+++ b/lib/Support/APFloat.cpp
@@ -3776,7 +3776,7 @@ APFloat::opStatus APFloat::next(bool nextDown) {
// change the payload.
if (isSignaling()) {
result = opInvalidOp;
- // For consistency, propogate the sign of the sNaN to the qNaN.
+ // For consistency, propagate the sign of the sNaN to the qNaN.
makeNaN(false, isNegative(), 0);
}
break;
diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp
index 89f96bd577..0c4672582b 100644
--- a/lib/Support/APInt.cpp
+++ b/lib/Support/APInt.cpp
@@ -1096,7 +1096,7 @@ APInt APInt::ashr(unsigned shiftAmt) const {
// to include in this word.
val[breakWord] = pVal[breakWord+offset] >> wordShift;
- // Deal with sign extenstion in the break word, and possibly the word before
+ // Deal with sign extension in the break word, and possibly the word before
// it.
if (isNegative()) {
if (wordShift > bitsInWord) {
diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp
index 7ed4dead04..1b4d2c7287 100644
--- a/lib/Support/CommandLine.cpp
+++ b/lib/Support/CommandLine.cpp
@@ -246,12 +246,11 @@ static Option *LookupNearestOption(StringRef Arg,
return Best;
}
-/// CommaSeparateAndAddOccurence - A wrapper around Handler->addOccurence() that
-/// does special handling of cl::CommaSeparated options.
-static bool CommaSeparateAndAddOccurence(Option *Handler, unsigned pos,
- StringRef ArgName,
- StringRef Value, bool MultiArg = false)
-{
+/// CommaSeparateAndAddOccurrence - A wrapper around Handler->addOccurrence()
+/// that does special handling of cl::CommaSeparated options.
+static bool CommaSeparateAndAddOccurrence(Option *Handler, unsigned pos,
+ StringRef ArgName, StringRef Value,
+ bool MultiArg = false) {
// Check to see if this option accepts a comma separated list of values. If
// it does, we have to split up the value into multiple values.
if (Handler->getMiscFlags() & CommaSeparated) {
@@ -312,13 +311,13 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
// If this isn't a multi-arg option, just run the handler.
if (NumAdditionalVals == 0)
- return CommaSeparateAndAddOccurence(Handler, i, ArgName, Value);
+ return CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value);
// If it is, run the handle several times.
bool MultiArg = false;
if (Value.data()) {
- if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg))
+ if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg))
return true;
--NumAdditionalVals;
MultiArg = true;
@@ -329,7 +328,7 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName,
return Handler->error("not enough values!");
Value = argv[++i];
- if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg))
+ if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg))
return true;
MultiArg = true;
--NumAdditionalVals;
@@ -1502,7 +1501,7 @@ protected:
std::vector<OptionCategory *> SortedCategories;
std::map<OptionCategory *, std::vector<Option *> > CategorizedOptions;
- // Collect registered option categories into vector in preperation for
+ // Collect registered option categories into vector in preparation for
// sorting.
for (OptionCatSet::const_iterator I = RegisteredOptionCategories->begin(),
E = RegisteredOptionCategories->end();
diff --git a/lib/Support/Path.cpp b/lib/Support/Path.cpp
index b97b7a4baf..c2b3f1863e 100644
--- a/lib/Support/Path.cpp
+++ b/lib/Support/Path.cpp
@@ -32,10 +32,10 @@ namespace {
#ifdef LLVM_ON_WIN32
const char *separators = "\\/";
- const char prefered_separator = '\\';
+ const char preferred_separator = '\\';
#else
const char separators = '/';
- const char prefered_separator = '/';
+ const char preferred_separator = '/';
#endif
StringRef find_first_component(StringRef path) {
@@ -403,7 +403,7 @@ void append(SmallVectorImpl<char> &path, const Twine &a,
if (!component_has_sep && !(path.empty() || is_root_name)) {
// Add a separator.
- path.push_back(prefered_separator);
+ path.push_back(preferred_separator);
}
path.append(i->begin(), i->end());
diff --git a/lib/Support/regcomp.c b/lib/Support/regcomp.c
index 74d9186aaa..0b5b765f89 100644
--- a/lib/Support/regcomp.c
+++ b/lib/Support/regcomp.c
@@ -532,10 +532,10 @@ p_simp_re(struct parse *p,
sopno subno;
# define BACKSL (1<<CHAR_BIT)
- pos = HERE(); /* repetion op, if any, covers from here */
+ pos = HERE(); /* repetition op, if any, covers from here */
- assert(MORE()); /* caller should have ensured this */
- c = GETNEXT();
+ assert(MORE()); /* caller should have ensured this */
+ c = GETNEXT();
if (c == '\\') {
REQUIRE(MORE(), REG_EESCAPE);
c = BACKSL | GETNEXT();
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 9bf9d1918c..ba185a436f 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -611,7 +611,8 @@ void AArch64InstrInfo::getAddressConstraints(const MachineInstr &MI,
int &AccessScale, int &MinOffset,
int &MaxOffset) const {
switch (MI.getOpcode()) {
- default: llvm_unreachable("Unkown load/store kind");
+ default:
+ llvm_unreachable("Unknown load/store kind");
case TargetOpcode::DBG_VALUE:
AccessScale = 1;
MinOffset = INT_MIN;
diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index c673b3adc9..3c446d5860 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -6432,7 +6432,7 @@ defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
-// Table lookup extention
+// Table lookup extension
class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
string asmop, string OpS, RegisterOperand OpVPR,
RegisterOperand VecList>
diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
index ff5b23013d..a88cbb2971 100644
--- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
+++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp
@@ -1517,7 +1517,7 @@ static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn,
unsigned Q = fieldFromInstruction(Insn, 30, 1);
unsigned S = fieldFromInstruction(Insn, 10, 3);
unsigned lane = 0;
- // Calculate the number of lanes by number of vectors and transfered bytes.
+ // Calculate the number of lanes by number of vectors and transferred bytes.
// NumLanes = 16 bytes / bytes of each lane
unsigned NumLanes = 16 / (TransferBytes / NumVecs);
switch (NumLanes) {
diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp
index d561db2f07..9e827cf103 100644
--- a/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -1407,7 +1407,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N,
bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base,
SDValue &OffImm) {
- // This *must* succeed since it's used for the irreplacable ldrex and strex
+ // This *must* succeed since it's used for the irreplaceable ldrex and strex
// instructions.
Base = N;
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 1d2236f2c9..b851a8ffd8 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -5987,7 +5987,7 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
return Op;
- // Aquire/Release load/store is not legal for targets without a
+ // Acquire/Release load/store is not legal for targets without a
// dmb or equivalent available.
return SDValue();
}
@@ -10189,7 +10189,7 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const
case MVT::v2f64: {
// For any little-endian targets with neon, we can support unaligned ld/st
// of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
- // A big-endian target may also explictly support unaligned accesses
+ // A big-endian target may also explicitly support unaligned accesses
if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) {
if (Fast)
*Fast = true;
diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td
index d0457618ef..7f0fe05738 100644
--- a/lib/Target/ARM/ARMRegisterInfo.td
+++ b/lib/Target/ARM/ARMRegisterInfo.td
@@ -214,7 +214,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> {
}
// GPRs without the PC but with APSR. Some instructions allow accessing the
-// APSR, while actually encoding PC in the register field. This is usefull
+// APSR, while actually encoding PC in the register field. This is useful
// for assembly and disassembly only.
def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> {
let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)];
diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td
index 8d7dbc2460..b03d5ff44c 100644
--- a/lib/Target/ARM/ARMScheduleSwift.td
+++ b/lib/Target/ARM/ARMScheduleSwift.td
@@ -1721,7 +1721,7 @@ let SchedModel = SwiftModel in {
SchedVar<SwiftLMAddr3Pred, [SwiftWriteLM9Cy, SwiftWriteLM10Cy,
SwiftWriteLM13CyNo, SwiftWriteP01OneCycle,
SwiftVLDMPerm3]>,
- // Load of a Q register (not neccessarily true). We should not be mapping to
+ // Load of a Q register (not necessarily true). We should not be mapping to
// 4 S registers, either.
SchedVar<SwiftLMAddr4Pred, [SwiftWriteLM4Cy, SwiftWriteLM4CyNo,
SwiftWriteLM4CyNo, SwiftWriteLM4CyNo]>,
@@ -1858,7 +1858,7 @@ let SchedModel = SwiftModel in {
// Assume 5 D registers.
SchedVar<SwiftLMAddr10Pred, [SwiftWriteSTM6]>,
SchedVar<SwiftLMAddr11Pred, [SwiftWriteSTM12]>,
- // Asume three Q registers.
+ // Assume three Q registers.
SchedVar<SwiftLMAddr12Pred, [SwiftWriteSTM4]>,
SchedVar<SwiftLMAddr13Pred, [SwiftWriteSTM14]>,
// Assume 7 D registers.
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 2f2da73162..5d0b73a191 100644
--- a/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -533,7 +533,7 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK
// creates a sequence of shift, and, or instructions to construct values.
// These sequences are recognized by the ISel and have zero-cost. Not so for
// the vectorized code. Because we have support for v2i64 but not i64 those
- // sequences look particularily beneficial to vectorize.
+ // sequences look particularly beneficial to vectorize.
// To work around this we increase the cost of v2i64 operations to make them
// seem less beneficial.
if (LT.second == MVT::v2i64 &&
diff --git a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
index bb781ecece..42a1cbb8c2 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
+++ b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
@@ -183,7 +183,8 @@ namespace ARM_ISB {
inline static const char *InstSyncBOptToString(unsigned val) {
switch (val) {
- default: llvm_unreachable("Unkown memory operation");
+ default:
+ llvm_unreachable("Unknown memory operation");
case RESERVED_0: return "#0x0";
case RESERVED_1: return "#0x1";
case RESERVED_2: return "#0x2";
diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
index 8e224780d8..abacc1e312 100644
--- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
+++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp
@@ -1035,7 +1035,7 @@ void ARMELFStreamer::emitFnStart() {
}
void ARMELFStreamer::emitFnEnd() {
- assert(FnStart && ".fnstart must preceeds .fnend");
+ assert(FnStart && ".fnstart must precedes .fnend");
// Emit unwind opcodes if there is no .handlerdata directive
if (!ExTab && !CantUnwind)
diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
index 97e7b85f55..3ab796d01c 100644
--- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp
+++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp
@@ -285,7 +285,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// Update the intermediate instruction to with the kill flag.
if (KillingInstr) {
bool Added = KillingInstr->addRegisterKilled(KilledOperand, TRI, true);
- (void)Added; // supress compiler warning
+ (void)Added; // suppress compiler warning
assert(Added && "Must successfully update kill flag");
removeKillInfo(I2, KilledOperand);
}
@@ -343,7 +343,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1,
// Update I1 to set the kill flag. This flag will later be picked up by
// the new COMBINE instruction.
bool Added = I1->addRegisterKilled(KilledOperand, TRI);
- (void)Added; // supress compiler warning
+ (void)Added; // suppress compiler warning
assert(Added && "Must successfully update kill flag");
}
DoInsertAtI1 = false;
diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
index 137c4bf054..fccbcb3d70 100644
--- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp
+++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp
@@ -1522,7 +1522,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
if (PB != Latch) {
Tmp2.clear();
bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp2, false);
- (void)NotAnalyzed; // supress compiler warning
+ (void)NotAnalyzed; // suppress compiler warning
assert (!NotAnalyzed && "Should be analyzable!");
if (TB != Header && (Tmp2.empty() || FB != Header))
TII->InsertBranch(*PB, NewPH, 0, EmptyCond, DL);
@@ -1534,7 +1534,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop(
// Insert an unconditional branch to the header.
TB = FB = 0;
bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false);
- (void)LatchNotAnalyzed; // supress compiler warning
+ (void)LatchNotAnalyzed; // suppress compiler warning
assert (!LatchNotAnalyzed && "Should be analyzable!");
if (!TB && !FB)
TII->InsertBranch(*Latch, Header, 0, EmptyCond, DL);
diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp
index f9be3192f1..fff51dda67 100644
--- a/lib/Target/Hexagon/HexagonInstrInfo.cpp
+++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -1793,7 +1793,7 @@ bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
return true;
if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
- // Check addressing mode and retreive non-ext equivalent instruction.
+ // Check addressing mode and retrieve non-ext equivalent instruction.
switch (getAddrMode(MI)) {
case HexagonII::Absolute :
@@ -1827,7 +1827,7 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
return NonExtOpcode;
if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
- // Check addressing mode and retreive non-ext equivalent instruction.
+ // Check addressing mode and retrieve non-ext equivalent instruction.
switch (getAddrMode(MI)) {
case HexagonII::Absolute :
return Hexagon::getBasedWithImmOffset(MI->getOpcode());
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
index 594ff4f441..aae2dcd16e 100644
--- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
+++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp
@@ -869,7 +869,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc,
TempInst.addOperand(MCOperand::CreateReg(BaseRegNum));
Instructions.push_back(TempInst);
TempInst.clear();
- // And finaly, create original instruction with low part
+ // And finally, create original instruction with low part
// of offset and new base.
TempInst.setOpcode(Inst.getOpcode());
TempInst.addOperand(MCOperand::CreateReg(RegOpNum));
@@ -1247,7 +1247,7 @@ MipsAsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand *> &Operands,
return false;
}
// Look for the existing symbol, we should check if
- // we need to assigne the propper RegisterKind.
+ // we need to assigne the proper RegisterKind.
if (searchSymbolAlias(Operands, MipsOperand::Kind_None))
return false;
// Else drop to expression parsing.
diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td
index fbcd10fe2b..a788d60a57 100644
--- a/lib/Target/Mips/MipsMSAInstrInfo.td
+++ b/lib/Target/Mips/MipsMSAInstrInfo.td
@@ -3519,7 +3519,7 @@ class MSABitconvertPat<ValueType DstVT, ValueType SrcVT,
MSAPat<(DstVT (bitconvert SrcVT:$src)),
(COPY_TO_REGCLASS SrcVT:$src, DstRC), preds>;
-// These are endian-independant because the element size doesnt change
+// These are endian-independent because the element size doesnt change
def : MSABitconvertPat<v8i16, v8f16, MSA128H>;
def : MSABitconvertPat<v4i32, v4f32, MSA128W>;
def : MSABitconvertPat<v2i64, v2f64, MSA128D>;
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index d8151761e0..8c27c9f409 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -1258,7 +1258,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
// Since StoreV2 is a target node, we cannot rely on DAG type legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // stored type to i16 and propogate the "real" type as the memory type.
+ // stored type to i16 and propagate the "real" type as the memory type.
bool NeedExt = false;
if (EltVT.getSizeInBits() < 16)
NeedExt = true;
@@ -2074,7 +2074,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
// Since LoadV2 is a target node, we cannot rely on DAG type legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propogate the "real" type as the memory type.
+ // loaded type to i16 and propagate the "real" type as the memory type.
bool NeedTrunc = false;
if (EltVT.getSizeInBits() < 16) {
EltVT = MVT::i16;
@@ -2161,7 +2161,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
// Since LDU/LDG are target nodes, we cannot rely on DAG type
// legalization.
// Therefore, we must ensure the type is legal. For i1 and i8, we set the
- // loaded type to i16 and propogate the "real" type as the memory type.
+ // loaded type to i16 and propagate the "real" type as the memory type.
bool NeedTrunc = false;
if (EltVT.getSizeInBits() < 16) {
EltVT = MVT::i16;
diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp
index bc67cb14ff..5da8c2ed09 100644
--- a/lib/Target/NVPTX/NVVMReflect.cpp
+++ b/lib/Target/NVPTX/NVVMReflect.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This pass replaces occurences of __nvvm_reflect("string") with an
+// This pass replaces occurrences of __nvvm_reflect("string") with an
// integer based on -nvvm-reflect-list string=<int> option given to this pass.
// If an undefined string value is seen in a call to __nvvm_reflect("string"),
// a default value of 0 will be used.
@@ -84,7 +84,7 @@ NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), cl::Hidden,
char NVVMReflect::ID = 0;
INITIALIZE_PASS(NVVMReflect, "nvvm-reflect",
- "Replace occurences of __nvvm_reflect() calls with 0/1", false,
+ "Replace occurrences of __nvvm_reflect() calls with 0/1", false,
false)
static cl::list<std::string>
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index c5d9612771..1c8f928887 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -7205,7 +7205,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// you might suspect (sizeof(vector) bytes after the last requested
// load), but rather sizeof(vector) - 1 bytes after the last
// requested vector. The point of this is to avoid a page fault if the
- // base address happend to be aligned. This works because if the base
+ // base address happened to be aligned. This works because if the base
// address is aligned, then adding less than a full vector length will
// cause the last vector in the sequence to be (re)loaded. Otherwise,
// the next vector will be fetched as you might suspect was necessary.
diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h
index 8eb1b695d7..3e1848b5f8 100644
--- a/lib/Target/R600/AMDGPU.h
+++ b/lib/Target/R600/AMDGPU.h
@@ -68,7 +68,7 @@ namespace ShaderType {
/// various memory regions on the hardware. On the CPU
/// all of the address spaces point to the same memory,
/// however on the GPU, each address space points to
-/// a seperate piece of memory that is unique from other
+/// a separate piece of memory that is unique from other
/// memory locations.
namespace AMDGPUAS {
enum AddressSpaces {
diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp
index 4ad7eba36e..69ced3c8f6 100644
--- a/lib/Target/R600/AMDILCFGStructurizer.cpp
+++ b/lib/Target/R600/AMDILCFGStructurizer.cpp
@@ -224,7 +224,7 @@ protected:
/// Compute the reversed DFS post order of Blocks
void orderBlocks(MachineFunction *MF);
- // Function originaly from CFGStructTraits
+ // Function originally from CFGStructTraits
void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode,
DebugLoc DL = DebugLoc());
MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode,
diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
index 5af83209a0..fc4ed35c18 100644
--- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
+++ b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp
@@ -53,7 +53,7 @@ public:
~SIMCCodeEmitter() { }
- /// \breif Encode the instruction and write it to the OS.
+ /// \brief Encode the instruction and write it to the OS.
virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups) const;
diff --git a/lib/Target/R600/R600ClauseMergePass.cpp b/lib/Target/R600/R600ClauseMergePass.cpp
index 33d2ca3257..3d9015c9df 100644
--- a/lib/Target/R600/R600ClauseMergePass.cpp
+++ b/lib/Target/R600/R600ClauseMergePass.cpp
@@ -50,7 +50,7 @@ private:
/// IfCvt pass can generate "disabled" ALU clause marker that need to be
/// removed and their content affected to the previous alu clause.
- /// This function parse instructions after CFAlu untill it find a disabled
+ /// This function parse instructions after CFAlu until it find a disabled
/// CFAlu and merge the content, or an enabled CFAlu.
void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const;
diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h
index 1781f2aee1..f2f28fe469 100644
--- a/lib/Target/R600/R600Defines.h
+++ b/lib/Target/R600/R600Defines.h
@@ -52,7 +52,7 @@ namespace R600_InstFlag {
#define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS)
-/// \brief Defines for extracting register infomation from register encoding
+/// \brief Defines for extracting register information from register encoding
#define HW_REG_MASK 0x1ff
#define HW_CHAN_SHIFT 9
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index 03feabe23e..b9b242a6e8 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -990,7 +990,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
DAG.getCondCode(ISD::SETNE));
}
-/// LLVM generates byte-addresed pointers. For indirect addressing, we need to
+/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
/// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
/// \p StackWidth, which tells us how many of the 4 sub-registrers will be used
@@ -1389,8 +1389,8 @@ SDValue R600TargetLowering::LowerFormalArguments(
DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32),
MachinePointerInfo(UndefValue::get(PtrTy)),
MemVT, false, false, 4);
- // 4 is the prefered alignment for
- // the CONSTANT memory space.
+ // 4 is the preferred alignment for
+ // the CONSTANT memory space.
InVals.push_back(Arg);
}
return Chain;
diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h
index c10257eead..3cca93306b 100644
--- a/lib/Target/R600/R600ISelLowering.h
+++ b/lib/Target/R600/R600ISelLowering.h
@@ -43,7 +43,7 @@ private:
unsigned Gen;
/// Each OpenCL kernel has nine implicit parameters that are stored in the
/// first nine dwords of a Vertex Buffer. These implicit parameters are
- /// lowered to load instructions which retreive the values from the Vertex
+ /// lowered to load instructions which retrieve the values from the Vertex
/// Buffer.
SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
SDLoc DL, unsigned DwordOffset) const;
diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h
index 13d981094e..d5ff4de764 100644
--- a/lib/Target/R600/R600InstrInfo.h
+++ b/lib/Target/R600/R600InstrInfo.h
@@ -138,7 +138,7 @@ namespace llvm {
/// Same but using const index set instead of MI set.
bool fitsConstReadLimitations(const std::vector<unsigned>&) const;
- /// \breif Vector instructions are instructions that must fill all
+ /// \brief Vector instructions are instructions that must fill all
/// instruction slots within an instruction group.
bool isVector(const MachineInstr &MI) const;
diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td
index 4441fa6495..f7b7488d69 100644
--- a/lib/Target/R600/R600Instructions.td
+++ b/lib/Target/R600/R600Instructions.td
@@ -2263,7 +2263,7 @@ let Inst{63-32} = Word1;
//===--------------------------------------------------------------------===//
//===---------------------------------------------------------------------===//
// Custom Inserter for Branches and returns, this eventually will be a
-// seperate pass
+// separate pass
//===---------------------------------------------------------------------===//
let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in {
def BRANCH : ILFormat<(outs), (ins brtarget:$target),
diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp
index cd9b6eae6e..9dd4978fb5 100644
--- a/lib/Target/R600/R600Packetizer.cpp
+++ b/lib/Target/R600/R600Packetizer.cpp
@@ -66,7 +66,7 @@ private:
}
/// \returns register to PV chan mapping for bundle/single instructions that
- /// immediatly precedes I.
+ /// immediately precedes I.
DenseMap<unsigned, unsigned> getPreviousVector(MachineBasicBlock::iterator I)
const {
DenseMap<unsigned, unsigned> Result;
diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index a66f289e9a..36dd3cf7f0 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -1083,7 +1083,7 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
else
return;
- // Nothing todo if they fit naturaly
+ // Nothing to do if they fit naturally
if (fitsRegClass(DAG, Operand, RegClass))
return;
diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp
index ed0bbaffae..a784fa4264 100644
--- a/lib/Target/R600/SIRegisterInfo.cpp
+++ b/lib/Target/R600/SIRegisterInfo.cpp
@@ -122,7 +122,7 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
return RC;
// If this register has a sub-register, we can safely assume it is a 32-bit
- // register, becuase all of SI's sub-registers are 32-bit.
+ // register, because all of SI's sub-registers are 32-bit.
if (isSGPRClass(RC)) {
return &AMDGPU::SGPR_32RegClass;
} else {
diff --git a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
index f3caeaa0c2..2e2d4bac79 100644
--- a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
+++ b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h
@@ -35,7 +35,7 @@ enum TOF {
// Assembler: %hi(addr) or %lm(addr)
MO_HI,
- // Extract bits 43-22 of an adress. Only for sethi.
+ // Extract bits 43-22 of an address. Only for sethi.
// Assembler: %h44(addr)
MO_H44,
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 0ca145e3a6..19f57ab63e 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1076,7 +1076,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);
- // The remaing cases are 1, 2, 0/1/3 and 0/2/3. All these are
+ // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these are
// can be done by inverting the low CC bit and applying one of the
// sign-based extractions above.
if (CCMask == (CCValid & SystemZ::CCMASK_1))
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 90941d3616..55192f9d4e 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -53,7 +53,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
MachineFunction &MF = *MBB->getParent();
// Get two load or store instructions. Use the original instruction for one
- // of them (arbitarily the second here) and create a clone for the other.
+ // of them (arbitrarily the second here) and create a clone for the other.
MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
MBB->insert(MI, EarlierMI);
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index e1af0932c2..033f0d8ee6 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -759,7 +759,7 @@ let Defs = [CC], Uses = [CC] in {
// Subtraction
//===----------------------------------------------------------------------===//
-// Plain substraction. Although immediate forms exist, we use the
+// Plain subtraction. Although immediate forms exist, we use the
// add-immediate instruction instead.
let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in {
// Subtraction of a register.
diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
index c4c86ada3f..ac3b39df54 100644
--- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
+++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h
@@ -563,7 +563,7 @@ struct InternalInstruction {
uint8_t prefixPresent[0x100];
/* contains the location (for use with the reader) of the prefix byte */
uint64_t prefixLocations[0x100];
- /* The value of the vector extention prefix(EVEX/VEX/XOP), if present */
+ /* The value of the vector extension prefix(EVEX/VEX/XOP), if present */
uint8_t vectorExtensionPrefix[4];
/* The type of the vector extension prefix */
VectorExtensionType vectorExtensionType;
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 9fdc58a311..d653c871b2 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -1512,7 +1512,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
// garbage. Indeed, only the less significant bit is supposed to be accurate.
// If we read more than the lsb, we may see non-zero values whereas lsb
// is zero. Therefore, we have to truncate Op0Reg to i1 for the select.
- // This is acheived by performing TEST against 1.
+ // This is achieved by performing TEST against 1.
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
.addReg(Op0Reg).addImm(1);
unsigned ResultReg = createResultReg(RC);
diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td
index 0556437b83..ac28d1e543 100644
--- a/lib/Target/X86/X86Schedule.td
+++ b/lib/Target/X86/X86Schedule.td
@@ -577,7 +577,7 @@ def IIC_NOP : InstrItinClass;
//===----------------------------------------------------------------------===//
// Processor instruction itineraries.
-// IssueWidth is analagous to the number of decode units. Core and its
+// IssueWidth is analogous to the number of decode units. Core and its
// descendents, including Nehalem and SandyBridge have 4 decoders.
// Resources beyond the decoder operate on micro-ops and are bufferred
// so adjacent micro-ops don't directly compete.
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
index 784bd66d2e..3a93d2ac2e 100644
--- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp
+++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp
@@ -154,8 +154,8 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
return false;
}
}
- } while (CE->hasNUsesOrMore(1)); // We need to check becasue a recursive
- // sibbling may have used 'CE' when createReplacementInstr was called.
+ } while (CE->hasNUsesOrMore(1)); // We need to check because a recursive
+ // sibling may have used 'CE' when createReplacementInstr was called.
CE->destroyConstant();
return true;
}
diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp
index d94c0f4532..aefcff9565 100644
--- a/lib/Transforms/IPO/ConstantMerge.cpp
+++ b/lib/Transforms/IPO/ConstantMerge.cpp
@@ -77,8 +77,8 @@ static void FindUsedValues(GlobalVariable *LLVMUsed,
}
// True if A is better than B.
-static bool IsBetterCannonical(const GlobalVariable &A,
- const GlobalVariable &B) {
+static bool IsBetterCanonical(const GlobalVariable &A,
+ const GlobalVariable &B) {
if (!A.hasLocalLinkage() && B.hasLocalLinkage())
return true;
@@ -160,7 +160,7 @@ bool ConstantMerge::runOnModule(Module &M) {
// If this is the first constant we find or if the old one is local,
// replace with the current one. If the current is externally visible
// it cannot be replace, but can be the canonical constant we merge with.
- if (Slot == 0 || IsBetterCannonical(*GV, *Slot))
+ if (Slot == 0 || IsBetterCanonical(*GV, *Slot))
Slot = GV;
}
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 38614216c3..33f0707f29 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -723,7 +723,7 @@ void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
// Helper for writeThunk,
// Selects proper bitcast operation,
-// but a bit simplier then CastInst::getCastOpcode.
+// but a bit simpler then CastInst::getCastOpcode.
static Value* createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
Type *SrcTy = V->getType();
if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c949720b1f..7d2fc0a528 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -77,7 +77,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
// A single load+store correctly handles overlapping memory in the memmove
// case.
uint64_t Size = MemOpLength->getLimitedValue();
- assert(Size && "0-sized memory transfering should be removed already.");
+ assert(Size && "0-sized memory transferring should be removed already.");
if (Size > 8 || (Size&(Size-1)))
return 0; // If not 1/2/4/8 bytes, exit.
@@ -684,7 +684,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
}
- // Couldn't simplify - cannonicalize constant to the RHS.
+ // Couldn't simplify - canonicalize constant to the RHS.
std::swap(Arg0, Arg1);
}
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index c85ec29f2f..cccfd4d49e 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1193,10 +1193,10 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// will not occur because the result of OpI is exact (as we will for
// FMul, for example) is hopeless. However, we *can* nonetheless
// frequently know that double rounding cannot occur (or that it is
- // innoculous) by taking advantage of the specific structure of
+ // innocuous) by taking advantage of the specific structure of
// infinitely-precise results that admit double rounding.
//
- // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficent
+ // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficient
// to represent both sources, we can guarantee that the double
// rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
// "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 3bc8ad3c8c..5dd3325a0b 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2048,7 +2048,7 @@ static APInt DemandedBitsLHSMask(ICmpInst &I,
/// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
/// should be swapped.
-/// The descision is based on how many times these two operands are reused
+/// The decision is based on how many times these two operands are reused
/// as subtract operands and their positions in those instructions.
/// The rational is that several architectures use the same instruction for
/// both subtract and cmp, thus it is better if the order of those operands
@@ -2064,7 +2064,7 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0,
// Each time Op0 is the first operand, count -1: swapping is bad, the
// subtract has already the same layout as the compare.
// Each time Op0 is the second operand, count +1: swapping is good, the
- // subtract has a diffrent layout as the compare.
+ // subtract has a different layout as the compare.
// At the end, if the benefit is greater than 0, Op0 should come second to
// expose more CSE opportunities.
int GlobalSwapBenefits = 0;
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 178be61b43..7ffb01b5b2 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1013,7 +1013,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
// references from RHSOp0 to LHSOp0, so we don't need to shift the mask.
// If newRHS == newLHS, we want to remap any references from newRHS to
// newLHS so that we can properly identify splats that may occur due to
- // obfuscation accross the two vectors.
+ // obfuscation across the two vectors.
if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS)
eltMask += newLHSWidth;
}
diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp
index 6a7252fc41..b453f81de9 100644
--- a/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1629,7 +1629,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
return &BI;
}
- // Cannonicalize fcmp_one -> fcmp_oeq
+ // Canonicalize fcmp_one -> fcmp_oeq
FCmpInst::Predicate FPred; Value *Y;
if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
TrueDest, FalseDest)) &&
@@ -1645,7 +1645,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
return &BI;
}
- // Cannonicalize icmp_ne -> icmp_eq
+ // Canonicalize icmp_ne -> icmp_eq
ICmpInst::Predicate IPred;
if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
TrueDest, FalseDest)) &&
diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index fe875192f5..377d0d87d1 100644
--- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -249,7 +249,7 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) {
ShadowMapping Mapping;
// OR-ing shadow offset if more efficient (at least on x86),
- // but on ppc64 we have to use add since the shadow offset is not neccesary
+ // but on ppc64 we have to use add since the shadow offset is not necessary
// 1/8-th of the address space.
Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset;
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 8a52a4444b..93e71cc116 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1964,7 +1964,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Now, get the shadow for the RetVal.
if (!I.getType()->isSized()) return;
IRBuilder<> IRBBefore(&I);
- // Untill we have full dynamic coverage, make sure the retval shadow is 0.
+ // Until we have full dynamic coverage, make sure the retval shadow is 0.
Value *Base = getShadowPtrForRetval(&I, IRBBefore);
IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment);
Instruction *NextInsn = 0;
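
A hedged model of this handshake, with illustrative names rather than MSan's runtime symbols: the caller stores a clean shadow into the thread-local retval slot before the call, so an uninstrumented callee that never writes the slot still reads back as fully initialized.

    #include <cstdint>

    thread_local uint64_t RetvalShadowTLS; // illustrative shadow slot

    static uint64_t callee() { return 7; } // possibly uninstrumented

    static uint64_t instrumentedCall() {
      RetvalShadowTLS = 0;                  // clean retval shadow before call
      uint64_t Ret = callee();              // uninstrumented: slot untouched
      uint64_t RetShadow = RetvalShadowTLS; // reads back 0, not stale poison
      (void)RetShadow;
      return Ret;
    }
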
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 5c18817820..4570311340 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -487,7 +487,7 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
}
// Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
-// standards. For background see C++11 standard. A slightly older, publically
+// standards. For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
index f8b6f15850..8e2c362ad8 100644
--- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
+++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -382,7 +382,7 @@ namespace {
void clear();
/// Conservatively merge the two RRInfo. Returns true if a partial merge has
- /// occured, false otherwise.
+ /// occurred, false otherwise.
bool Merge(const RRInfo &Other);
};
@@ -659,7 +659,7 @@ namespace {
/// which pass through this block. This is only valid after both the
/// top-down and bottom-up traversals are complete.
///
- /// Returns true if overflow occured. Returns false if overflow did not
+ /// Returns true if overflow occurred. Returns false if overflow did not
/// occur.
bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
if (TopDownPathCount == OverflowOccurredValue ||
@@ -667,7 +667,7 @@ namespace {
return true;
unsigned long long Product =
(unsigned long long)TopDownPathCount*BottomUpPathCount;
- // Overflow occured if any of the upper bits of Product are set or if all
+ // Overflow occurred if any of the upper bits of Product are set or if
// the lower bits of Product are all set.
return (Product >> 32) ||
((PathCount = Product) == OverflowOccurredValue);
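
The check reads naturally as a standalone function. A hedged rendering, with the sentinel value chosen for illustration: widen both 32-bit path counts to 64 bits, multiply, and report overflow when the high half is non-zero or the low half collides with the sentinel.

    #include <cstdint>

    static const uint32_t OverflowOccurredValue = 0xffffffff; // illustrative

    static bool getAllPathCountWithOverflow(uint32_t TopDown, uint32_t BottomUp,
                                            uint32_t &PathCount) {
      uint64_t Product = (uint64_t)TopDown * BottomUp;
      // High bits set: the product does not fit in 32 bits. A low half equal
      // to the sentinel would be misread as "overflow already recorded".
      return (Product >> 32) != 0 ||
             (PathCount = (uint32_t)Product) == OverflowOccurredValue;
    }
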
@@ -711,7 +711,7 @@ void BBState::MergePred(const BBState &Other) {
  // In order to be consistent, we clear the top down pointers when the merge
  // makes TopDownPathCount equal OverflowOccurredValue even though "true" overflow
- // has not occured.
+ // has not occurred.
if (TopDownPathCount == OverflowOccurredValue) {
clearTopDownPointers();
return;
@@ -755,7 +755,7 @@ void BBState::MergeSucc(const BBState &Other) {
  // In order to be consistent, we clear the bottom up pointers when the merge
  // makes BottomUpPathCount equal OverflowOccurredValue even though "true" overflow
- // has not occured.
+ // has not occurred.
if (BottomUpPathCount == OverflowOccurredValue) {
clearBottomUpPointers();
return;
@@ -1808,13 +1808,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
// pointer has multiple owners implying that we must be more conservative.
//
// This comes up in the context of a pointer being ``KnownSafe''. In the
- // presense of a block being initialized, the frontend will emit the
+ // presence of a block being initialized, the frontend will emit the
// objc_retain on the original pointer and the release on the pointer loaded
  // from the alloca. The optimizer will, through the provenance analysis,
// realize that the two are related, but since we only require KnownSafe in
// one direction, will match the inner retain on the original pointer with
// the guard release on the original pointer. This is fixed by ensuring that
- // in the presense of allocas we only unconditionally remove pointers if
+ // in the presence of allocas we only unconditionally remove pointers if
// both our retain and our release are KnownSafe.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b4d59fa0ea..c89cd74c6e 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -109,8 +109,8 @@ namespace {
bool preliminaryScreen();
/// Check if the given conditional branch is based on the comparison
- /// beween a variable and zero, and if the variable is non-zero, the
- /// control yeilds to the loop entry. If the branch matches the behavior,
+ /// between a variable and zero, and if the variable is non-zero, the
+ /// control yields to the loop entry. If the branch matches the behavior,
  /// the variable involved in the comparison is returned. This function will
/// be called to see if the precondition and postcondition of the loop
/// are in desirable form.
@@ -521,7 +521,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
// TripCnt is exactly the number of iterations the loop has
TripCnt = NewCount;
- // If the popoulation counter's initial value is not zero, insert Add Inst.
+ // If the population counter's initial value is not zero, insert Add Inst.
Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
if (!InitConst || !InitConst->isZero()) {
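
The idiom being recognized, sketched in plain C++ with `__builtin_popcount` standing in for the intrinsic the pass emits: the loop clears one set bit per iteration, so the final count is popcount(Var) plus the initial value, and the Add is only needed when that initial value is non-zero.

    #include <cassert>

    static unsigned countLoop(unsigned Var, unsigned Init) { // the loop form
      unsigned Cnt = Init;
      while (Var) {
        Var &= Var - 1; // clears the lowest set bit
        ++Cnt;
      }
      return Cnt;
    }

    static unsigned countIdiom(unsigned Var, unsigned Init) { // after
      return __builtin_popcount(Var) + Init; // Add elided when Init == 0
    }

    int main() {
      assert(countLoop(45u, 7u) == countIdiom(45u, 7u)); // both yield 11
    }
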
diff --git a/lib/Transforms/Utils/FlattenCFG.cpp b/lib/Transforms/Utils/FlattenCFG.cpp
index 1da226bfcb..39c80f86b6 100644
--- a/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/lib/Transforms/Utils/FlattenCFG.cpp
@@ -240,7 +240,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder,
BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator());
CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
CmpInst::Predicate Predicate = CI->getPredicate();
- // Cannonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
+ // Canonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq
if ((Predicate == CmpInst::ICMP_NE) || (Predicate == CmpInst::FCMP_ONE)) {
CI->setPredicate(ICmpInst::getInversePredicate(Predicate));
BI->swapSuccessors();
diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp
index a30dcf2fe0..e43c9e2708 100644
--- a/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -62,9 +62,9 @@ static cl::opt<bool>
SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true),
cl::desc("Sink common instructions down to the end block"));
-static cl::opt<bool>
-HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
- cl::desc("Hoist conditional stores if an unconditional store preceeds"));
+static cl::opt<bool> HoistCondStores(
+ "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true),
+ cl::desc("Hoist conditional stores if an unconditional store precedes"));
STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps");
STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables");
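
What the flag controls, as a hedged source-level sketch (illustrative functions, not SimplifyCFG's code): a conditional store that follows an unconditional store to the same address can be folded into a select feeding a single unconditional store, removing the branch.

    static void storePair(int *P, bool C, int A, int B) { // before
      *P = A;   // unconditional store precedes...
      if (C)
        *P = B; // ...a conditional store to the same address
    }

    static void storeHoisted(int *P, bool C, int A, int B) { // after
      *P = C ? B : A; // select feeds one unconditional store; no branch
    }
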
diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 695ee03ea7..892c42755b 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2371,7 +2371,7 @@ void InnerLoopVectorizer::vectorizeLoop() {
setDebugLocFromInst(Builder, RdxDesc.StartValue);
// We need to generate a reduction vector from the incoming scalar.
- // To do so, we need to generate the 'identity' vector and overide
+ // To do so, we need to generate the 'identity' vector and override
// one of the elements with the incoming scalar reduction. We need
// to do it in the vector-loop preheader.
Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator());
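
A scalar model of the identity-vector trick (not the vectorizer's IR): for an integer add reduction the identity vector is all zeros, and the incoming scalar start value overrides a single lane so the partial sums still fold to the correct total.

    #include <cassert>

    int main() {
      const int Start = 42;
      int Lanes[4] = {0, 0, 0, 0}; // identity vector for integer add
      Lanes[0] = Start;            // override one lane with the start value
      int Elems[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      for (int i = 0; i < 8; ++i)
        Lanes[i % 4] += Elems[i];  // vector-loop partial sums
      int Rdx = Lanes[0] + Lanes[1] + Lanes[2] + Lanes[3]; // final reduce
      assert(Rdx == Start + 36);   // StartValue + sum of elements
    }
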
@@ -3713,8 +3713,8 @@ void AccessAnalysis::processMemAccesses(bool UseDeferred) {
}
bool NeedDepCheck = false;
- // Check whether there is the possiblity of dependency because of underlying
- // objects being the same.
+ // Check whether there is the possibility of dependency because of
+ // underlying objects being the same.
typedef SmallVector<Value*, 16> ValueVector;
ValueVector TempObjects;
GetUnderlyingObjects(Ptr, TempObjects, DL);
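
A hedged example of the case this guards against: when two pointers trace back to the same underlying object, only a dependence check can prove the accesses independent.

    static int A[100];

    void f(int I, int J) {
      int *P = &A[I]; // underlying object of P is A
      int *Q = &A[J]; // underlying object of Q is also A,
      *P = *Q + 1;    // so this pair needs a dependence check
    }
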
diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp
index 0e2a98e6de..80d9ffccaf 100644
--- a/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -1871,7 +1871,7 @@ private:
StoreListMap StoreRefs;
};
-/// \brief Check that the Values in the slice in VL array are still existant in
+/// \brief Check that the Values in the slice in VL array are still existent in
/// the WeakVH array.
/// Vectorization of part of the VL array may cause later values in the VL array
/// to become invalid. We track when this has happened in the WeakVH array.
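
A hedged model of that liveness test: LLVM's WeakVH nulls itself when its value is destroyed, which `std::weak_ptr::expired()` mimics here. A slice is rejected if any tracked value in it has already been erased.

    #include <memory>
    #include <vector>

    static bool sliceStillValid(const std::vector<std::weak_ptr<int>> &Tracked,
                                unsigned Begin, unsigned Size) {
      for (unsigned I = Begin, E = Begin + Size; I != E; ++I)
        if (Tracked[I].expired()) // value died in an earlier round
          return false;
      return true;
    }
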
@@ -2516,7 +2516,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
break;
}
- // Start over at the next instruction of a differnt type (or the end).
+ // Start over at the next instruction of a different type (or the end).
IncIt = SameTypeIt;
}
}