summary refs log tree commit diff
path: root/lib/Target/X86/X86FastISel.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'lib/Target/X86/X86FastISel.cpp')
-rw-r--r--  lib/Target/X86/X86FastISel.cpp  26
1 files changed, 13 insertions, 13 deletions
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index fd95c55d55..8ec9f59569 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -243,7 +243,7 @@ getX86ConditionCode(CmpInst::Predicate Predicate) {
}
static std::pair<unsigned, bool>
-getX86SSECondtionCode(CmpInst::Predicate Predicate) {
+getX86SSEConditionCode(CmpInst::Predicate Predicate) {
unsigned CC;
bool NeedSwap = false;
@@ -1260,7 +1260,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
X86::CondCode CC;
bool SwapArgs;
std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected conditon code.");
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
unsigned Opc = X86::getSETFromCond(CC);
if (SwapArgs)
@@ -1368,9 +1368,9 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
Predicate = CmpInst::getInversePredicate(Predicate);
}
- // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/conditon
+ // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
// code check. Instead two branch instructions are required to check all
- // the flags. First we change the predicate to a supported conditon code,
+ // the flags. First we change the predicate to a supported condition code,
// which will be the first branch. Later on we will emit the second
// branch.
bool NeedExtraBranch = false;
@@ -1387,7 +1387,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
bool SwapArgs;
unsigned BranchOpc;
std::tie(CC, SwapArgs) = getX86ConditionCode(Predicate);
- assert(CC <= X86::LAST_VALID_COND && "Unexpected conditon code.");
+ assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
BranchOpc = X86::GetCondBranchFromCond(CC);
if (SwapArgs)
@@ -1745,7 +1745,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
bool NeedTest = true;
X86::CondCode CC = X86::COND_NE;
- // Optimize conditons coming from a compare if both instructions are in the
+ // Optimize conditions coming from a compare if both instructions are in the
// same basic block (values defined in other basic blocks may not have
// initialized registers).
const auto *CI = dyn_cast<CmpInst>(Cond);
@@ -1852,7 +1852,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
/// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
/// SSE instructions are available.
bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
- // Optimize conditons coming from a compare if both instructions are in the
+ // Optimize conditions coming from a compare if both instructions are in the
// same basic block (values defined in other basic blocks may not have
// initialized registers).
const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
@@ -1879,7 +1879,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
unsigned CC;
bool NeedSwap;
- std::tie(CC, NeedSwap) = getX86SSECondtionCode(Predicate);
+ std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
if (CC > 7)
return false;
@@ -1948,7 +1948,7 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
const Value *Cond = I->getOperand(0);
X86::CondCode CC = X86::COND_NE;
- // Optimize conditons coming from a compare if both instructions are in the
+ // Optimize conditions coming from a compare if both instructions are in the
// same basic block (values defined in other basic blocks may not have
// initialized registers).
const auto *CI = dyn_cast<CmpInst>(Cond);
@@ -2030,7 +2030,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
if (X86FastEmitCMoveSelect(RetVT, I))
return true;
- // Try to use a sequence of SSE instructions to simulate a conditonal move.
+ // Try to use a sequence of SSE instructions to simulate a conditional move.
if (X86FastEmitSSESelect(RetVT, I))
return true;
@@ -2320,7 +2320,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
if (!isTypeLegal(RetTy, VT))
return false;
- // Unfortunatelly we can't use FastEmit_r, because the AVX version of FSQRT
+ // Unfortunately we can't use FastEmit_r, because the AVX version of FSQRT
// is not generated by FastISel yet.
// FIXME: Update this code once tablegen can handle it.
static const unsigned SqrtOpc[2][2] = {
@@ -2369,7 +2369,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
// This implements the basic lowering of the xalu with overflow intrinsics
- // into add/sub/mul folowed by either seto or setb.
+ // into add/sub/mul followed by either seto or setb.
const Function *Callee = I.getCalledFunction();
auto *Ty = cast<StructType>(Callee->getReturnType());
Type *RetTy = Ty->getTypeAtIndex(0U);
@@ -2385,7 +2385,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
const Value *LHS = I.getArgOperand(0);
const Value *RHS = I.getArgOperand(1);
- // Canonicalize immediates to the RHS.
+ // Canonicalize immediate to the RHS.
if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) &&
isCommutativeIntrinsic(I))
std::swap(LHS, RHS);