author     Nate Begeman <natebegeman@mac.com>    2009-04-24 03:42:54 +0000
committer  Nate Begeman <natebegeman@mac.com>    2009-04-24 03:42:54 +0000
commit     b706d29f9c5ed3ed9acc82f7ab46205ba56b92dc (patch)
tree       105e75ce0dc135a208ef085ba4f70fe162031ff1 /lib/Target/PowerPC
parent     98d07102d67971118c73e7db84d8a05d58dcf3df (diff)
PR2957
ISD::VECTOR_SHUFFLE now stores an array of integers representing the shuffle
mask internal to the node, rather than taking a BUILD_VECTOR of ConstantSDNodes
as the shuffle mask. A value of -1 represents UNDEF.

In addition to eliminating the creation of illegal BUILD_VECTORs just to
represent shuffle masks, we are better about canonicalizing the shuffle mask,
resulting in substantially better code for some classes of shuffles. A cleanup
of the x86 shuffle code, and some canonicalization in DAGCombiner, is next.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@69952 91177308-0d34-0410-b5e6-96231b3b80d8
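As a minimal standalone sketch of the convention described above (illustrative names and plain C++, not the SelectionDAG API; in the patch below the actual creation path is DAG.getVectorShuffle): the mask is a plain array of ints in which element i selects byte i of the concatenated <LHS,RHS> inputs, and -1 marks an UNDEF lane.

// Standalone model of the new mask convention -- illustrative only, not LLVM code.
#include <array>
#include <cstdio>

using Vec16  = std::array<unsigned char, 16>;
using Mask16 = std::array<int, 16>;            // -1 == UNDEF lane

// Element i of the result takes byte Mask[i] of the concatenated <LHS,RHS>
// pair; an UNDEF lane may hold any value (zero here, for determinism).
static Vec16 applyShuffle(const Vec16 &LHS, const Vec16 &RHS, const Mask16 &M) {
  Vec16 R{};
  for (int i = 0; i != 16; ++i) {
    if (M[i] < 0) continue;                    // UNDEF
    R[i] = M[i] < 16 ? LHS[M[i]] : RHS[M[i] - 16];
  }
  return R;
}

// Mask a vsldoi-by-Amt shuffle would use: 16 consecutive bytes starting at
// Amt (compare BuildVSLDOI in the patch, which now just fills an int[16]).
static Mask16 makeVSLDOIMask(int Amt) {
  Mask16 M;
  for (int i = 0; i != 16; ++i)
    M[i] = i + Amt;
  return M;
}

int main() {
  Vec16 A, B;
  for (int i = 0; i != 16; ++i) { A[i] = i; B[i] = 16 + i; }
  Vec16 R = applyShuffle(A, B, makeVSLDOIMask(4));
  for (unsigned char c : R) std::printf("%d ", c); // 4 5 6 ... 19
  std::printf("\n");
  return 0;
}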
Diffstat (limited to 'lib/Target/PowerPC')
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.cpp   214
-rw-r--r--  lib/Target/PowerPC/PPCISelLowering.h      12
-rw-r--r--  lib/Target/PowerPC/PPCInstrAltivec.td    179
3 files changed, 210 insertions, 195 deletions
diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 32ff8f46cf..cb36b05294 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -456,22 +456,22 @@ static bool isFloatingPointZero(SDValue Op) {
/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
/// true if Op is undef or if it matches the specified value.
-static bool isConstantOrUndef(SDValue Op, unsigned Val) {
- return Op.getOpcode() == ISD::UNDEF ||
- cast<ConstantSDNode>(Op)->getZExtValue() == Val;
+static bool isConstantOrUndef(int Op, int Val) {
+ return Op < 0 || Op == Val;
}
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
-bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
+bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
+ const int *Mask = N->getMask();
if (!isUnary) {
for (unsigned i = 0; i != 16; ++i)
- if (!isConstantOrUndef(N->getOperand(i), i*2+1))
+ if (!isConstantOrUndef(Mask[i], i*2+1))
return false;
} else {
for (unsigned i = 0; i != 8; ++i)
- if (!isConstantOrUndef(N->getOperand(i), i*2+1) ||
- !isConstantOrUndef(N->getOperand(i+8), i*2+1))
+ if (!isConstantOrUndef(Mask[i], i*2+1) ||
+ !isConstantOrUndef(Mask[i+8], i*2+1))
return false;
}
return true;
@@ -479,18 +479,19 @@ bool PPC::isVPKUHUMShuffleMask(SDNode *N, bool isUnary) {
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
-bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
+bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary) {
+ const int *Mask = N->getMask();
if (!isUnary) {
for (unsigned i = 0; i != 16; i += 2)
- if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
- !isConstantOrUndef(N->getOperand(i+1), i*2+3))
+ if (!isConstantOrUndef(Mask[i ], i*2+2) ||
+ !isConstantOrUndef(Mask[i+1], i*2+3))
return false;
} else {
for (unsigned i = 0; i != 8; i += 2)
- if (!isConstantOrUndef(N->getOperand(i ), i*2+2) ||
- !isConstantOrUndef(N->getOperand(i+1), i*2+3) ||
- !isConstantOrUndef(N->getOperand(i+8), i*2+2) ||
- !isConstantOrUndef(N->getOperand(i+9), i*2+3))
+ if (!isConstantOrUndef(Mask[i ], i*2+2) ||
+ !isConstantOrUndef(Mask[i+1], i*2+3) ||
+ !isConstantOrUndef(Mask[i+8], i*2+2) ||
+ !isConstantOrUndef(Mask[i+9], i*2+3))
return false;
}
return true;
@@ -498,27 +499,29 @@ bool PPC::isVPKUWUMShuffleMask(SDNode *N, bool isUnary) {
/// isVMerge - Common function, used to match vmrg* shuffles.
///
-static bool isVMerge(SDNode *N, unsigned UnitSize,
+static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
unsigned LHSStart, unsigned RHSStart) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR &&
- N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
+ assert(N->getValueType(0) == MVT::v16i8 &&
+ "PPC only supports shuffles by bytes!");
assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
"Unsupported merge size!");
+ const int *Mask = N->getMask();
for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
- if (!isConstantOrUndef(N->getOperand(i*UnitSize*2+j),
+ if (!isConstantOrUndef(Mask[i*UnitSize*2+j],
LHSStart+j+i*UnitSize) ||
- !isConstantOrUndef(N->getOperand(i*UnitSize*2+UnitSize+j),
+ !isConstantOrUndef(Mask[i*UnitSize*2+UnitSize+j],
RHSStart+j+i*UnitSize))
return false;
}
- return true;
+ return true;
}
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
-bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
+bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
+ bool isUnary) {
if (!isUnary)
return isVMerge(N, UnitSize, 8, 24);
return isVMerge(N, UnitSize, 8, 8);
@@ -526,7 +529,8 @@ bool PPC::isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
-bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
+bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
+ bool isUnary) {
if (!isUnary)
return isVMerge(N, UnitSize, 0, 16);
return isVMerge(N, UnitSize, 0, 0);
@@ -536,91 +540,92 @@ bool PPC::isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary) {
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR &&
- N->getNumOperands() == 16 && "PPC only supports shuffles by bytes!");
+ assert(N->getValueType(0) == MVT::v16i8 &&
+ "PPC only supports shuffles by bytes!");
+
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+
// Find the first non-undef value in the shuffle mask.
+ const int *Mask = SVOp->getMask();
unsigned i;
- for (i = 0; i != 16 && N->getOperand(i).getOpcode() == ISD::UNDEF; ++i)
+ for (i = 0; i != 16 && Mask[i] < 0; ++i)
/*search*/;
if (i == 16) return -1; // all undef.
- // Otherwise, check to see if the rest of the elements are consequtively
+ // Otherwise, check to see if the rest of the elements are consecutively
// numbered from this value.
- unsigned ShiftAmt = cast<ConstantSDNode>(N->getOperand(i))->getZExtValue();
+ unsigned ShiftAmt = Mask[i];
if (ShiftAmt < i) return -1;
ShiftAmt -= i;
if (!isUnary) {
- // Check the rest of the elements to see if they are consequtive.
+ // Check the rest of the elements to see if they are consecutive.
for (++i; i != 16; ++i)
- if (!isConstantOrUndef(N->getOperand(i), ShiftAmt+i))
+ if (!isConstantOrUndef(Mask[i], ShiftAmt+i))
return -1;
} else {
- // Check the rest of the elements to see if they are consequtive.
+ // Check the rest of the elements to see if they are consecutive.
for (++i; i != 16; ++i)
- if (!isConstantOrUndef(N->getOperand(i), (ShiftAmt+i) & 15))
+ if (!isConstantOrUndef(Mask[i], (ShiftAmt+i) & 15))
return -1;
}
-
return ShiftAmt;
}
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
-bool PPC::isSplatShuffleMask(SDNode *N, unsigned EltSize) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR &&
- N->getNumOperands() == 16 &&
+bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
+ assert(N->getValueType(0) == MVT::v16i8 &&
(EltSize == 1 || EltSize == 2 || EltSize == 4));
// This is a splat operation if each element of the permute is the same, and
// if the value doesn't reference the second vector.
- unsigned ElementBase = 0;
- SDValue Elt = N->getOperand(0);
- if (ConstantSDNode *EltV = dyn_cast<ConstantSDNode>(Elt))
- ElementBase = EltV->getZExtValue();
- else
- return false; // FIXME: Handle UNDEF elements too!
-
- if (cast<ConstantSDNode>(Elt)->getZExtValue() >= 16)
+ const int *Mask = N->getMask();
+ unsigned ElementBase = Mask[0];
+
+ // FIXME: Handle UNDEF elements too!
+ if (ElementBase >= 16)
return false;
- // Check that they are consequtive.
- for (unsigned i = 1; i != EltSize; ++i) {
- if (!isa<ConstantSDNode>(N->getOperand(i)) ||
- cast<ConstantSDNode>(N->getOperand(i))->getZExtValue() != i+ElementBase)
+ // Check that the indices are consecutive, in the case of a multi-byte element
+ // splatted with a v16i8 mask.
+ for (unsigned i = 1; i != EltSize; ++i)
+ if (Mask[i] < 0 || Mask[i] != (int)(i+ElementBase))
return false;
- }
- assert(isa<ConstantSDNode>(Elt) && "Invalid VECTOR_SHUFFLE mask!");
for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
- if (N->getOperand(i).getOpcode() == ISD::UNDEF) continue;
- assert(isa<ConstantSDNode>(N->getOperand(i)) &&
- "Invalid VECTOR_SHUFFLE mask!");
+ if (Mask[i] < 0) continue;
for (unsigned j = 0; j != EltSize; ++j)
- if (N->getOperand(i+j) != N->getOperand(j))
+ if (Mask[i+j] != Mask[j])
return false;
}
-
return true;
}
/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
bool PPC::isAllNegativeZeroVector(SDNode *N) {
- assert(N->getOpcode() == ISD::BUILD_VECTOR);
- if (PPC::isSplatShuffleMask(N, N->getNumOperands()))
- if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N))
+ BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
+
+ APInt APVal, APUndef;
+ unsigned BitSize;
+ bool HasAnyUndefs;
+
+ if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32))
+ if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
return CFP->getValueAPF().isNegZero();
+
return false;
}
/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize) {
- assert(isSplatShuffleMask(N, EltSize));
- return cast<ConstantSDNode>(N->getOperand(0))->getZExtValue() / EltSize;
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
+ assert(isSplatShuffleMask(SVOp, EltSize));
+ return SVOp->getMask()[0] / EltSize;
}
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
@@ -3149,11 +3154,10 @@ static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS);
RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS);
- SDValue Ops[16];
+ int Ops[16];
for (unsigned i = 0; i != 16; ++i)
- Ops[i] = DAG.getConstant(i+Amt, MVT::i8);
- SDValue T = DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, LHS, RHS,
- DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops,16));
+ Ops[i] = i + Amt;
+ SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
}
@@ -3354,7 +3358,7 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
- unsigned ShufIdxs[16];
+ int ShufIdxs[16];
switch (OpNum) {
default: assert(0 && "Unknown i32 permute!");
case OP_VMRGHW:
@@ -3392,13 +3396,11 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
case OP_VSLDOI12:
return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
}
- SDValue Ops[16];
- for (unsigned i = 0; i != 16; ++i)
- Ops[i] = DAG.getConstant(ShufIdxs[i], MVT::i8);
-
- return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, OpLHS.getValueType(),
- OpLHS, OpRHS,
- DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
+ MVT VT = OpLHS.getValueType();
+ OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS);
+ OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS);
+ SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
+ return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
}
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
@@ -3406,28 +3408,30 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
/// return the code it can be lowered into. Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
- SDValue PermMask = Op.getOperand(2);
+ ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+ const int *PermMask = SVOp->getMask();
+ MVT VT = Op.getValueType();
// Cases that are handled by instructions that take permute immediates
// (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
// selected by the instruction selector.
if (V2.getOpcode() == ISD::UNDEF) {
- if (PPC::isSplatShuffleMask(PermMask.getNode(), 1) ||
- PPC::isSplatShuffleMask(PermMask.getNode(), 2) ||
- PPC::isSplatShuffleMask(PermMask.getNode(), 4) ||
- PPC::isVPKUWUMShuffleMask(PermMask.getNode(), true) ||
- PPC::isVPKUHUMShuffleMask(PermMask.getNode(), true) ||
- PPC::isVSLDOIShuffleMask(PermMask.getNode(), true) != -1 ||
- PPC::isVMRGLShuffleMask(PermMask.getNode(), 1, true) ||
- PPC::isVMRGLShuffleMask(PermMask.getNode(), 2, true) ||
- PPC::isVMRGLShuffleMask(PermMask.getNode(), 4, true) ||
- PPC::isVMRGHShuffleMask(PermMask.getNode(), 1, true) ||
- PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, true) ||
- PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, true)) {
+ if (PPC::isSplatShuffleMask(SVOp, 1) ||
+ PPC::isSplatShuffleMask(SVOp, 2) ||
+ PPC::isSplatShuffleMask(SVOp, 4) ||
+ PPC::isVPKUWUMShuffleMask(SVOp, true) ||
+ PPC::isVPKUHUMShuffleMask(SVOp, true) ||
+ PPC::isVSLDOIShuffleMask(SVOp, true) != -1 ||
+ PPC::isVMRGLShuffleMask(SVOp, 1, true) ||
+ PPC::isVMRGLShuffleMask(SVOp, 2, true) ||
+ PPC::isVMRGLShuffleMask(SVOp, 4, true) ||
+ PPC::isVMRGHShuffleMask(SVOp, 1, true) ||
+ PPC::isVMRGHShuffleMask(SVOp, 2, true) ||
+ PPC::isVMRGHShuffleMask(SVOp, 4, true)) {
return Op;
}
}
@@ -3435,15 +3439,15 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// Altivec has a variety of "shuffle immediates" that take two vector inputs
// and produce a fixed permutation. If any of these match, do not lower to
// VPERM.
- if (PPC::isVPKUWUMShuffleMask(PermMask.getNode(), false) ||
- PPC::isVPKUHUMShuffleMask(PermMask.getNode(), false) ||
- PPC::isVSLDOIShuffleMask(PermMask.getNode(), false) != -1 ||
- PPC::isVMRGLShuffleMask(PermMask.getNode(), 1, false) ||
- PPC::isVMRGLShuffleMask(PermMask.getNode(), 2, false) ||
- PPC::isVMRGLShuffleMask(PermMask.getNode(), 4, false) ||
- PPC::isVMRGHShuffleMask(PermMask.getNode(), 1, false) ||
- PPC::isVMRGHShuffleMask(PermMask.getNode(), 2, false) ||
- PPC::isVMRGHShuffleMask(PermMask.getNode(), 4, false))
+ if (PPC::isVPKUWUMShuffleMask(SVOp, false) ||
+ PPC::isVPKUHUMShuffleMask(SVOp, false) ||
+ PPC::isVSLDOIShuffleMask(SVOp, false) != -1 ||
+ PPC::isVMRGLShuffleMask(SVOp, 1, false) ||
+ PPC::isVMRGLShuffleMask(SVOp, 2, false) ||
+ PPC::isVMRGLShuffleMask(SVOp, 4, false) ||
+ PPC::isVMRGHShuffleMask(SVOp, 1, false) ||
+ PPC::isVMRGHShuffleMask(SVOp, 2, false) ||
+ PPC::isVMRGHShuffleMask(SVOp, 4, false))
return Op;
// Check to see if this is a shuffle of 4-byte values. If so, we can use our
@@ -3453,11 +3457,10 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
unsigned EltNo = 8; // Start out undef.
for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
- if (PermMask.getOperand(i*4+j).getOpcode() == ISD::UNDEF)
+ if (PermMask[i*4+j] < 0)
continue; // Undef, ignore it.
- unsigned ByteSource =
- cast<ConstantSDNode>(PermMask.getOperand(i*4+j))->getZExtValue();
+ unsigned ByteSource = PermMask[i*4+j];
if ((ByteSource & 3) != j) {
isFourElementShuffle = false;
break;
@@ -3509,12 +3512,8 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
unsigned BytesPerElement = EltVT.getSizeInBits()/8;
SmallVector<SDValue, 16> ResultMask;
- for (unsigned i = 0, e = PermMask.getNumOperands(); i != e; ++i) {
- unsigned SrcElt;
- if (PermMask.getOperand(i).getOpcode() == ISD::UNDEF)
- SrcElt = 0;
- else
- SrcElt = cast<ConstantSDNode>(PermMask.getOperand(i))->getZExtValue();
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
for (unsigned j = 0; j != BytesPerElement; ++j)
ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement+j,
@@ -3704,13 +3703,12 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
// Merge the results together.
- SDValue Ops[16];
+ int Ops[16];
for (unsigned i = 0; i != 8; ++i) {
- Ops[i*2 ] = DAG.getConstant(2*i+1, MVT::i8);
- Ops[i*2+1] = DAG.getConstant(2*i+1+16, MVT::i8);
+ Ops[i*2 ] = 2*i+1;
+ Ops[i*2+1] = 2*i+1+16;
}
- return DAG.getNode(ISD::VECTOR_SHUFFLE, dl, MVT::v16i8, EvenParts, OddParts,
- DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, Ops, 16));
+ return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
} else {
assert(0 && "Unknown mul to lower!");
abort();
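Taken together, the converted predicates above share one pattern: fetch the int mask once, then compare plain integers, treating any negative entry as UNDEF. A self-contained sketch of that pattern for the binary vmrghw case (illustrative names and plain C++, not the functions in PPCISelLowering.cpp):

// Illustrative, standalone version of the mask-checking pattern used above.
#include <array>
#include <cstdio>

using Mask16 = std::array<int, 16>;            // -1 == UNDEF lane

// A lane matches if it is UNDEF or equals the expected source byte index.
static bool isConstantOrUndef(int Elt, int Val) { return Elt < 0 || Elt == Val; }

// vmrghw-style check (4-byte units, binary form): the result interleaves the
// high 4-byte units of the two inputs, so the mask must look like
//   0 1 2 3 16 17 18 19 4 5 6 7 20 21 22 23, with -1 allowed in any lane.
static bool isVMRGHWMask(const Mask16 &M) {
  const int UnitSize = 4, LHSStart = 0, RHSStart = 16;
  for (int i = 0; i != 8 / UnitSize; ++i)      // step over units
    for (int j = 0; j != UnitSize; ++j)        // step over bytes within a unit
      if (!isConstantOrUndef(M[i*UnitSize*2 + j],            LHSStart + j + i*UnitSize) ||
          !isConstantOrUndef(M[i*UnitSize*2 + UnitSize + j], RHSStart + j + i*UnitSize))
        return false;
  return true;
}

int main() {
  Mask16 M = {0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23};
  M[5] = -1;                                   // UNDEF lanes are still accepted
  std::printf("%d\n", isVMRGHWMask(M));        // prints 1
}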
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index 01111cfb87..7946474972 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -175,19 +175,21 @@ namespace llvm {
namespace PPC {
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
- bool isVPKUHUMShuffleMask(SDNode *N, bool isUnary);
+ bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
- bool isVPKUWUMShuffleMask(SDNode *N, bool isUnary);
+ bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, bool isUnary);
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
- bool isVMRGLShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary);
+ bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
+ bool isUnary);
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
- bool isVMRGHShuffleMask(SDNode *N, unsigned UnitSize, bool isUnary);
+ bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
+ bool isUnary);
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
@@ -196,7 +198,7 @@ namespace llvm {
/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
- bool isSplatShuffleMask(SDNode *N, unsigned EltSize);
+ bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);
/// isAllNegativeZeroVector - Returns true if all elements of build_vector
/// are -0.0.
diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index c90fbc9101..9a5be79e81 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -15,96 +15,118 @@
// Altivec transformation functions and pattern fragments.
//
-/// VPKUHUM_shuffle_mask/VPKUWUM_shuffle_mask - Return true if this is a valid
-/// shuffle mask for the VPKUHUM or VPKUWUM instructions.
-def VPKUHUM_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVPKUHUMShuffleMask(N, false);
+
+def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
}]>;
-def VPKUWUM_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVPKUWUMShuffleMask(N, false);
+def vpkuwum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
}]>;
-
-def VPKUHUM_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVPKUHUMShuffleMask(N, true);
+def vpkuhum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
}]>;
-def VPKUWUM_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVPKUWUMShuffleMask(N, true);
+def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
}]>;
-def VMRGLB_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGLShuffleMask(N, 1, false);
+def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
}]>;
-def VMRGLH_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGLShuffleMask(N, 2, false);
+def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
}]>;
-def VMRGLW_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGLShuffleMask(N, 4, false);
+def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
}]>;
-def VMRGHB_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGHShuffleMask(N, 1, false);
+def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
}]>;
-def VMRGHH_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGHShuffleMask(N, 2, false);
+def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
}]>;
-def VMRGHW_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGHShuffleMask(N, 4, false);
+def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
}]>;
-def VMRGLB_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGLShuffleMask(N, 1, true);
+
+def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
}]>;
-def VMRGLH_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGLShuffleMask(N, 2, true);
+def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
}]>;
-def VMRGLW_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGLShuffleMask(N, 4, true);
+def vmrglw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
}]>;
-def VMRGHB_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGHShuffleMask(N, 1, true);
+def vmrghb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
}]>;
-def VMRGHH_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGHShuffleMask(N, 2, true);
+def vmrghh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
}]>;
-def VMRGHW_unary_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isVMRGHShuffleMask(N, 4, true);
+def vmrghw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
}]>;
-def VSLDOI_get_imm : SDNodeXForm<build_vector, [{
+
+def VSLDOI_get_imm : SDNodeXForm<vector_shuffle, [{
return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
}]>;
-def VSLDOI_shuffle_mask : PatLeaf<(build_vector), [{
+def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isVSLDOIShuffleMask(N, false) != -1;
}], VSLDOI_get_imm>;
+
/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
-def VSLDOI_unary_get_imm : SDNodeXForm<build_vector, [{
+def VSLDOI_unary_get_imm : SDNodeXForm<vector_shuffle, [{
return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
}]>;
-def VSLDOI_unary_shuffle_mask : PatLeaf<(build_vector), [{
+def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
return PPC::isVSLDOIShuffleMask(N, true) != -1;
}], VSLDOI_unary_get_imm>;
// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
-def VSPLTB_get_imm : SDNodeXForm<build_vector, [{
+def VSPLTB_get_imm : SDNodeXForm<vector_shuffle, [{
return getI32Imm(PPC::getVSPLTImmediate(N, 1));
}]>;
-def VSPLTB_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isSplatShuffleMask(N, 1);
+def vspltb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
}], VSPLTB_get_imm>;
-def VSPLTH_get_imm : SDNodeXForm<build_vector, [{
+def VSPLTH_get_imm : SDNodeXForm<vector_shuffle, [{
return getI32Imm(PPC::getVSPLTImmediate(N, 2));
}]>;
-def VSPLTH_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isSplatShuffleMask(N, 2);
+def vsplth_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
}], VSPLTH_get_imm>;
-def VSPLTW_get_imm : SDNodeXForm<build_vector, [{
+def VSPLTW_get_imm : SDNodeXForm<vector_shuffle, [{
return getI32Imm(PPC::getVSPLTImmediate(N, 4));
}]>;
-def VSPLTW_shuffle_mask : PatLeaf<(build_vector), [{
- return PPC::isSplatShuffleMask(N, 4);
+def vspltw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+ (vector_shuffle node:$lhs, node:$rhs), [{
+ return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 4);
}], VSPLTW_get_imm>;
@@ -268,8 +290,7 @@ def VSEL : VA1a_Int<42, "vsel", int_ppc_altivec_vsel>;
def VSLDOI : VAForm_2<44, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB, u5imm:$SH),
"vsldoi $vD, $vA, $vB, $SH", VecFP,
[(set VRRC:$vD,
- (vector_shuffle (v16i8 VRRC:$vA), VRRC:$vB,
- VSLDOI_shuffle_mask:$SH))]>;
+ (vsldoi_shuffle:$SH (v16i8 VRRC:$vA), VRRC:$vB))]>;
// VX-Form instructions. AltiVec arithmetic ops.
def VADDFP : VXForm_1<10, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
@@ -345,28 +366,22 @@ def VMINUW : VX1_Int< 642, "vminuw", int_ppc_altivec_vminuw>;
def VMRGHB : VXForm_1< 12, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vmrghb $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VMRGHB_shuffle_mask))]>;
+ [(set VRRC:$vD, (vmrghb_shuffle VRRC:$vA, VRRC:$vB))]>;
def VMRGHH : VXForm_1< 76, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vmrghh $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VMRGHH_shuffle_mask))]>;
+ [(set VRRC:$vD, (vmrghh_shuffle VRRC:$vA, VRRC:$vB))]>;
def VMRGHW : VXForm_1<140, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vmrghw $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VMRGHW_shuffle_mask))]>;
+ [(set VRRC:$vD, (vmrghw_shuffle VRRC:$vA, VRRC:$vB))]>;
def VMRGLB : VXForm_1<268, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vmrglb $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VMRGLB_shuffle_mask))]>;
+ [(set VRRC:$vD, (vmrglb_shuffle VRRC:$vA, VRRC:$vB))]>;
def VMRGLH : VXForm_1<332, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vmrglh $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VMRGLH_shuffle_mask))]>;
+ [(set VRRC:$vD, (vmrglh_shuffle VRRC:$vA, VRRC:$vB))]>;
def VMRGLW : VXForm_1<396, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vmrglw $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VMRGLW_shuffle_mask))]>;
+ [(set VRRC:$vD, (vmrglw_shuffle VRRC:$vA, VRRC:$vB))]>;
def VMSUMMBM : VA1a_Int<37, "vmsummbm", int_ppc_altivec_vmsummbm>;
def VMSUMSHM : VA1a_Int<40, "vmsumshm", int_ppc_altivec_vmsumshm>;
@@ -440,16 +455,16 @@ def VSLW : VX1_Int< 388, "vslw", int_ppc_altivec_vslw>;
def VSPLTB : VXForm_1<524, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
"vspltb $vD, $vB, $UIMM", VecPerm,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
- VSPLTB_shuffle_mask:$UIMM))]>;
+ [(set VRRC:$vD,
+ (vspltb_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>;
def VSPLTH : VXForm_1<588, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
"vsplth $vD, $vB, $UIMM", VecPerm,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
- VSPLTH_shuffle_mask:$UIMM))]>;
+ [(set VRRC:$vD,
+ (vsplth_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>;
def VSPLTW : VXForm_1<652, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
"vspltw $vD, $vB, $UIMM", VecPerm,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
- VSPLTW_shuffle_mask:$UIMM))]>;
+ [(set VRRC:$vD,
+ (vspltw_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>;
def VSR : VX1_Int< 708, "vsr" , int_ppc_altivec_vsr>;
def VSRO : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>;
@@ -479,13 +494,13 @@ def VPKSWSS : VX1_Int<462, "vpkswss", int_ppc_altivec_vpkswss>;
def VPKSWUS : VX1_Int<334, "vpkswus", int_ppc_altivec_vpkswus>;
def VPKUHUM : VXForm_1<14, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vpkuhum $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VPKUHUM_shuffle_mask))]>;
+ [(set VRRC:$vD,
+ (vpkuhum_shuffle (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VPKUHUS : VX1_Int<142, "vpkuhus", int_ppc_altivec_vpkuhus>;
def VPKUWUM : VXForm_1<78, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
"vpkuwum $vD, $vA, $vB", VecFP,
- [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
- VRRC:$vB, VPKUWUM_shuffle_mask))]>;
+ [(set VRRC:$vD,
+ (vpkuwum_shuffle (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VPKUWUS : VX1_Int<206, "vpkuwus", int_ppc_altivec_vpkuwus>;
// Vector Unpack.
@@ -603,25 +618,25 @@ def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
// Shuffles.
// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x)
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VSLDOI_unary_shuffle_mask:$in),
- (VSLDOI VRRC:$vA, VRRC:$vA, VSLDOI_unary_shuffle_mask:$in)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef,VPKUWUM_unary_shuffle_mask:$in),
+def:Pat<(vsldoi_unary_shuffle:$in (v16i8 VRRC:$vA), undef),
+ (VSLDOI VRRC:$vA, VRRC:$vA, (VSLDOI_unary_get_imm VRRC:$in))>;
+def:Pat<(vpkuwum_unary_shuffle (v16i8 VRRC:$vA), undef),
(VPKUWUM VRRC:$vA, VRRC:$vA)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef,VPKUHUM_unary_shuffle_mask:$in),
+def:Pat<(vpkuhum_unary_shuffle (v16i8 VRRC:$vA), undef),
(VPKUHUM VRRC:$vA, VRRC:$vA)>;
// Match vmrg*(x,x)
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLB_unary_shuffle_mask:$in),
+def:Pat<(vmrglb_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGLB VRRC:$vA, VRRC:$vA)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLH_unary_shuffle_mask:$in),
+def:Pat<(vmrglh_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGLH VRRC:$vA, VRRC:$vA)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLW_unary_shuffle_mask:$in),
+def:Pat<(vmrglw_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGLW VRRC:$vA, VRRC:$vA)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHB_unary_shuffle_mask:$in),
+def:Pat<(vmrghb_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGHB VRRC:$vA, VRRC:$vA)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHH_unary_shuffle_mask:$in),
+def:Pat<(vmrghh_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGHH VRRC:$vA, VRRC:$vA)>;
-def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHW_unary_shuffle_mask:$in),
+def:Pat<(vmrghw_unary_shuffle (v16i8 VRRC:$vA), undef),
(VMRGHW VRRC:$vA, VRRC:$vA)>;
// Logical Operations