authorAndrea Di Biagio <Andrea_DiBiagio@sn.scee.net>2014-06-09 16:54:41 +0000
committerAndrea Di Biagio <Andrea_DiBiagio@sn.scee.net>2014-06-09 16:54:41 +0000
commit9b6992ddc272e4c7a3e15bfb5924c4e62c46fa65 (patch)
tree19fdfe3cc19166e1c4b0aa07aba61625c89a32a7
parent1968da36ed044f0ad87cfc3614be9e48ac9cdd98 (diff)
downloadllvm-9b6992ddc272e4c7a3e15bfb5924c4e62c46fa65.tar.gz
llvm-9b6992ddc272e4c7a3e15bfb5924c4e62c46fa65.tar.bz2
llvm-9b6992ddc272e4c7a3e15bfb5924c4e62c46fa65.tar.xz
[X86] Add target combine rules for horizontal add/sub.
This patch adds new target-specific combine rules to identify horizontal add/sub idioms from BUILD_VECTOR dag nodes.

This patch also teaches the DAGCombiner how to canonicalize sequences of insert_vector_elt dag nodes according to the following rule:

  (insert_vector_elt (insert_vector_elt A, I0), I1)
    -> (insert_vector_elt (insert_vector_elt A, I1), I0)

This new canonicalization rule triggers only if the inner insert_vector_elt dag node has exactly one use; also, both indices must be known constants, and I1 < I0.

This last rule made it possible to write a simpler algorithm to identify horizontal add/sub patterns, because we no longer have to worry about the ordering of the insert_vector_elt dag nodes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210477 91177308-0d34-0410-b5e6-96231b3b80d8
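As an illustration (not part of this commit), the following hand-written IR sketch shows the kind of pattern the two rules are meant to catch together; the function name and value names are hypothetical. The two insertelement instructions are written with index 1 before index 0, so the new DAGCombiner canonicalization has to reorder the resulting insert_vector_elt nodes before the X86 BUILD_VECTOR combine can recognize the horizontal add:

define <2 x double> @hadd_v2f64_sketch(<2 x double> %A, <2 x double> %B) {
  ; Sum the two lanes of %B and insert the result at index 1 first...
  %b0 = extractelement <2 x double> %B, i32 0
  %b1 = extractelement <2 x double> %B, i32 1
  %sumb = fadd double %b0, %b1
  %hi = insertelement <2 x double> undef, double %sumb, i32 1
  ; ...then sum the two lanes of %A and insert the result at index 0.
  %a0 = extractelement <2 x double> %A, i32 0
  %a1 = extractelement <2 x double> %A, i32 1
  %suma = fadd double %a0, %a1
  %res = insertelement <2 x double> %hi, double %suma, i32 0
  ret <2 x double> %res
}

With these combines, compiling such a function with -mattr=+sse3 is expected to select a single haddpd; the hadd/hsub tests added below cover the patterns that are actually checked.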
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp21
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp85
-rw-r--r--test/CodeGen/X86/haddsub-2.ll376
3 files changed, 482 insertions, 0 deletions
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8ec0c45eaf..b9da13af75 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -9721,6 +9721,27 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
return SDValue();
unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+ // Canonicalize insert_vector_elt dag nodes.
+ // Example:
+ // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1)
+ // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0)
+ //
+  // Do this only if the child insert_vector_elt node has one use; also
+  // do this only if indices are both constants and Idx1 < Idx0.
+ if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse()
+ && isa<ConstantSDNode>(InVec.getOperand(2))) {
+ unsigned OtherElt =
+ cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue();
+ if (Elt < OtherElt) {
+ // Swap nodes.
+ SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VT,
+ InVec.getOperand(0), InVal, EltNo);
+ AddToWorkList(NewOp.getNode());
+ return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()),
+ VT, NewOp, InVec.getOperand(1), InVec.getOperand(2));
+ }
+ }
+
// Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
// be converted to a BUILD_VECTOR). Fill in the Ops vector with the
// vector elements.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index df86064d28..c9be00cee2 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1560,6 +1560,7 @@ void X86TargetLowering::resetOperationActions() {
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
+ setTargetDAGCombine(ISD::BUILD_VECTOR);
if (Subtarget->is64Bit())
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
@@ -6047,6 +6048,89 @@ X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
return DAG.getNode(ISD::BITCAST, dl, VT, Select);
}
+static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
+ EVT VT = N->getValueType(0);
+
+ // Try to match a horizontal ADD or SUB.
+ if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) ||
+ ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget->hasAVX()) ||
+ ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) ||
+ ((VT == MVT::v8i32 || VT == MVT::v16i16) && Subtarget->hasAVX2())) {
+ unsigned NumOperands = N->getNumOperands();
+ unsigned Opcode = N->getOperand(0)->getOpcode();
+ bool isCommutable = false;
+ bool CanFold = false;
+ switch (Opcode) {
+ default : break;
+ case ISD::ADD :
+ case ISD::FADD :
+ isCommutable = true;
+ // FALL-THROUGH
+ case ISD::SUB :
+ case ISD::FSUB :
+ CanFold = true;
+ }
+
+  // Verify that the operands have the same opcode; also, the opcode can
+  // only be one of: ADD, FADD, SUB, FSUB.
+ SDValue InVec0, InVec1;
+ for (unsigned i = 0, e = NumOperands; i != e && CanFold; ++i) {
+ SDValue Op = N->getOperand(i);
+ CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
+
+ if (!CanFold)
+ break;
+
+ SDValue Op0 = Op.getOperand(0);
+ SDValue Op1 = Op.getOperand(1);
+
+ // Try to match the following pattern:
+ // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
+ CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
+ Op0.getOperand(0) == Op1.getOperand(0) &&
+ isa<ConstantSDNode>(Op0.getOperand(1)) &&
+ isa<ConstantSDNode>(Op1.getOperand(1)));
+ if (!CanFold)
+ break;
+
+ unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
+ unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
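+      // Operand i of the BUILD_VECTOR is expected to read from lanes
+      // (2*i) % NumOperands and (2*i) % NumOperands + 1 of the source
+      // vector: the first half of the operands reads from InVec0, the
+      // second half from InVec1.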
+ unsigned ExpectedIndex = (i * 2) % NumOperands;
+
+ if (i == 0)
+ InVec0 = Op0.getOperand(0);
+ else if (i * 2 == NumOperands)
+ InVec1 = Op0.getOperand(0);
+
+ SDValue Expected = (i * 2 < NumOperands) ? InVec0 : InVec1;
+ if (I0 == ExpectedIndex)
+ CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
+ else if (isCommutable && I1 == ExpectedIndex) {
+ // Try to see if we can match the following dag sequence:
+ // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
+ CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
+ }
+ }
+
+ if (CanFold) {
+ unsigned NewOpcode;
+ switch (Opcode) {
+ default : llvm_unreachable("Unexpected opcode found!");
+ case ISD::ADD : NewOpcode = X86ISD::HADD; break;
+ case ISD::FADD : NewOpcode = X86ISD::FHADD; break;
+ case ISD::SUB : NewOpcode = X86ISD::HSUB; break;
+ case ISD::FSUB : NewOpcode = X86ISD::FHSUB; break;
+ }
+
+ return DAG.getNode(NewOpcode, SDLoc(N), VT, InVec0, InVec1);
+ }
+ }
+
+ return SDValue();
+}
+
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
@@ -20738,6 +20822,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
case X86ISD::INSERTPS:
return PerformINSERTPSCombine(N, DAG, Subtarget);
+ case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
}
return SDValue();
diff --git a/test/CodeGen/X86/haddsub-2.ll b/test/CodeGen/X86/haddsub-2.ll
new file mode 100644
index 0000000000..534d56a273
--- /dev/null
+++ b/test/CodeGen/X86/haddsub-2.ll
@@ -0,0 +1,376 @@
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE3
+; RUN: llc < %s -march=x86-64 -mattr=+sse2,+sse3,+ssse3 | FileCheck %s -check-prefix=CHECK -check-prefix=SSSE3
+; RUN: llc < %s -march=x86-64 -mcpu=corei7-avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
+; RUN: llc < %s -march=x86-64 -mcpu=core-avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+
+
+
+define <4 x float> @hadd_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test1
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hadd_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %add = fadd float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %add, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %add4 = fadd float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %add4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 2
+ %vecext7 = extractelement <4 x float> %B, i32 3
+ %add8 = fadd float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %add8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 0
+ %vecext11 = extractelement <4 x float> %B, i32 1
+ %add12 = fadd float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %add12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hadd_ps_test2
+; CHECK: haddps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test1(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 0
+ %vecext1 = extractelement <4 x float> %A, i32 1
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 0
+ %vecext2 = extractelement <4 x float> %A, i32 2
+ %vecext3 = extractelement <4 x float> %A, i32 3
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 1
+ %vecext6 = extractelement <4 x float> %B, i32 0
+ %vecext7 = extractelement <4 x float> %B, i32 1
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 2
+ %vecext10 = extractelement <4 x float> %B, i32 2
+ %vecext11 = extractelement <4 x float> %B, i32 3
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 3
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test1
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x float> @hsub_ps_test2(<4 x float> %A, <4 x float> %B) {
+ %vecext = extractelement <4 x float> %A, i32 2
+ %vecext1 = extractelement <4 x float> %A, i32 3
+ %sub = fsub float %vecext, %vecext1
+ %vecinit = insertelement <4 x float> undef, float %sub, i32 1
+ %vecext2 = extractelement <4 x float> %A, i32 0
+ %vecext3 = extractelement <4 x float> %A, i32 1
+ %sub4 = fsub float %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x float> %vecinit, float %sub4, i32 0
+ %vecext6 = extractelement <4 x float> %B, i32 3
+ %vecext7 = extractelement <4 x float> %B, i32 2
+ %sub8 = fsub float %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x float> %vecinit5, float %sub8, i32 3
+ %vecext10 = extractelement <4 x float> %B, i32 1
+ %vecext11 = extractelement <4 x float> %B, i32 0
+ %sub12 = fsub float %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x float> %vecinit9, float %sub12, i32 2
+ ret <4 x float> %vecinit13
+}
+; CHECK-LABEL: hsub_ps_test2
+; CHECK: hsubps
+; CHECK-NEXT: ret
+
+
+define <4 x i32> @phadd_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test1
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phadd_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %add, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %add4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 2
+ %vecext7 = extractelement <4 x i32> %B, i32 3
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %add8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 0
+ %vecext11 = extractelement <4 x i32> %B, i32 1
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %add12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phadd_d_test2
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; AVX: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test1(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 0
+ %vecext1 = extractelement <4 x i32> %A, i32 1
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 0
+ %vecext2 = extractelement <4 x i32> %A, i32 2
+ %vecext3 = extractelement <4 x i32> %A, i32 3
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 1
+ %vecext6 = extractelement <4 x i32> %B, i32 0
+ %vecext7 = extractelement <4 x i32> %B, i32 1
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 2
+ %vecext10 = extractelement <4 x i32> %B, i32 2
+ %vecext11 = extractelement <4 x i32> %B, i32 3
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 3
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test1
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <4 x i32> @phsub_d_test2(<4 x i32> %A, <4 x i32> %B) {
+ %vecext = extractelement <4 x i32> %A, i32 2
+ %vecext1 = extractelement <4 x i32> %A, i32 3
+ %sub = sub i32 %vecext, %vecext1
+ %vecinit = insertelement <4 x i32> undef, i32 %sub, i32 1
+ %vecext2 = extractelement <4 x i32> %A, i32 0
+ %vecext3 = extractelement <4 x i32> %A, i32 1
+ %sub4 = sub i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x i32> %vecinit, i32 %sub4, i32 0
+ %vecext6 = extractelement <4 x i32> %B, i32 3
+ %vecext7 = extractelement <4 x i32> %B, i32 2
+ %sub8 = sub i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x i32> %vecinit5, i32 %sub8, i32 3
+ %vecext10 = extractelement <4 x i32> %B, i32 1
+ %vecext11 = extractelement <4 x i32> %B, i32 0
+ %sub12 = sub i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x i32> %vecinit9, i32 %sub12, i32 2
+ ret <4 x i32> %vecinit13
+}
+; CHECK-LABEL: phsub_d_test2
+; SSE3-NOT: phsubd
+; SSSE3: phsubd
+; AVX: vphsubd
+; AVX2: vphsubd
+; CHECK: ret
+
+
+define <2 x double> @hadd_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test1
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hadd_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 1
+ %vecext1 = extractelement <2 x double> %A, i32 0
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 1
+ %vecext3 = extractelement <2 x double> %B, i32 0
+ %add2 = fadd double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %add2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hadd_pd_test2
+; CHECK: haddpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test1(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 0
+ %vecext1 = extractelement <2 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 0
+ %vecext3 = extractelement <2 x double> %B, i32 1
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test1
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <2 x double> @hsub_pd_test2(<2 x double> %A, <2 x double> %B) {
+ %vecext = extractelement <2 x double> %A, i32 1
+ %vecext1 = extractelement <2 x double> %A, i32 0
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <2 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <2 x double> %B, i32 1
+ %vecext3 = extractelement <2 x double> %B, i32 0
+ %sub2 = fsub double %vecext2, %vecext3
+ %vecinit2 = insertelement <2 x double> %vecinit, double %sub2, i32 1
+ ret <2 x double> %vecinit2
+}
+; CHECK-LABEL: hsub_pd_test2
+; CHECK: hsubpd
+; CHECK-NEXT: ret
+
+
+define <4 x double> @avx_vhadd_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %add = fadd double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %add, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %add4 = fadd double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %add4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %add8 = fadd double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %add8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %add12 = fadd double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %add12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhadd_pd_test
+; SSE3: haddpd
+; SSE3-NEXT: haddpd
+; SSSE3: haddpd
+; SSSE3: haddpd
+; AVX: vhaddpd
+; AVX2: vhaddpd
+; CHECK: ret
+
+
+define <4 x double> @avx_vhsub_pd_test(<4 x double> %A, <4 x double> %B) {
+ %vecext = extractelement <4 x double> %A, i32 0
+ %vecext1 = extractelement <4 x double> %A, i32 1
+ %sub = fsub double %vecext, %vecext1
+ %vecinit = insertelement <4 x double> undef, double %sub, i32 0
+ %vecext2 = extractelement <4 x double> %A, i32 2
+ %vecext3 = extractelement <4 x double> %A, i32 3
+ %sub4 = fsub double %vecext2, %vecext3
+ %vecinit5 = insertelement <4 x double> %vecinit, double %sub4, i32 1
+ %vecext6 = extractelement <4 x double> %B, i32 0
+ %vecext7 = extractelement <4 x double> %B, i32 1
+ %sub8 = fsub double %vecext6, %vecext7
+ %vecinit9 = insertelement <4 x double> %vecinit5, double %sub8, i32 2
+ %vecext10 = extractelement <4 x double> %B, i32 2
+ %vecext11 = extractelement <4 x double> %B, i32 3
+ %sub12 = fsub double %vecext10, %vecext11
+ %vecinit13 = insertelement <4 x double> %vecinit9, double %sub12, i32 3
+ ret <4 x double> %vecinit13
+}
+; CHECK-LABEL: avx_vhsub_pd_test
+; SSE3: hsubpd
+; SSE3-NEXT: hsubpd
+; SSSE3: hsubpd
+; SSSE3-NEXT: hsubpd
+; AVX: vhsubpd
+; AVX2: vhsubpd
+; CHECK: ret
+
+
+define <8 x i32> @avx2_vphadd_d_test(<8 x i32> %A, <8 x i32> %B) {
+ %vecext = extractelement <8 x i32> %A, i32 0
+ %vecext1 = extractelement <8 x i32> %A, i32 1
+ %add = add i32 %vecext, %vecext1
+ %vecinit = insertelement <8 x i32> undef, i32 %add, i32 0
+ %vecext2 = extractelement <8 x i32> %A, i32 2
+ %vecext3 = extractelement <8 x i32> %A, i32 3
+ %add4 = add i32 %vecext2, %vecext3
+ %vecinit5 = insertelement <8 x i32> %vecinit, i32 %add4, i32 1
+ %vecext6 = extractelement <8 x i32> %A, i32 4
+ %vecext7 = extractelement <8 x i32> %A, i32 5
+ %add8 = add i32 %vecext6, %vecext7
+ %vecinit9 = insertelement <8 x i32> %vecinit5, i32 %add8, i32 2
+ %vecext10 = extractelement <8 x i32> %A, i32 6
+ %vecext11 = extractelement <8 x i32> %A, i32 7
+ %add12 = add i32 %vecext10, %vecext11
+ %vecinit13 = insertelement <8 x i32> %vecinit9, i32 %add12, i32 3
+ %vecext14 = extractelement <8 x i32> %B, i32 0
+ %vecext15 = extractelement <8 x i32> %B, i32 1
+ %add16 = add i32 %vecext14, %vecext15
+ %vecinit17 = insertelement <8 x i32> %vecinit13, i32 %add16, i32 4
+ %vecext18 = extractelement <8 x i32> %B, i32 2
+ %vecext19 = extractelement <8 x i32> %B, i32 3
+ %add20 = add i32 %vecext18, %vecext19
+ %vecinit21 = insertelement <8 x i32> %vecinit17, i32 %add20, i32 5
+ %vecext22 = extractelement <8 x i32> %B, i32 4
+ %vecext23 = extractelement <8 x i32> %B, i32 5
+ %add24 = add i32 %vecext22, %vecext23
+ %vecinit25 = insertelement <8 x i32> %vecinit21, i32 %add24, i32 6
+ %vecext26 = extractelement <8 x i32> %B, i32 6
+ %vecext27 = extractelement <8 x i32> %B, i32 7
+ %add28 = add i32 %vecext26, %vecext27
+ %vecinit29 = insertelement <8 x i32> %vecinit25, i32 %add28, i32 7
+ ret <8 x i32> %vecinit29
+}
+; CHECK-LABEL: avx2_vphadd_d_test
+; SSE3-NOT: phaddd
+; SSSE3: phaddd
+; SSSE3-NEXT: phaddd
+; AVX-NOT: vphaddd
+; AVX2: vphaddd
+; CHECK: ret
+