From b53ed3cf81b299e823b5b20e7dba59717e6cb71f Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 23 Feb 2019 19:51:32 +0000
Subject: [PATCH] Recommit r354647 and r354648 "[LegalizeTypes] When promoting
 the result of EXTRACT_SUBVECTOR, also check if the input needs to be
 promoted. Use that to determine the element type to extract"

r354648 was a follow up to fix a regression "[X86] Add a DAG combine for
(aext_vector_inreg (aext_vector_inreg X)) -> (aext_vector_inreg X) to fix a
regression from my previous commit."

These were reverted in r354713 as their context depended on other patches that
were reverted for a bug.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@354734 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../SelectionDAG/LegalizeIntegerTypes.cpp     | 10 ++++++---
 lib/Target/X86/X86ISelLowering.cpp            | 21 +++++++++++++++++++
 test/CodeGen/X86/2011-12-28-vselecti8.ll      |  6 ++----
 test/CodeGen/X86/pr35918.ll                   |  2 +-
 test/CodeGen/X86/vector-narrow-binop.ll       | 16 ++++++--------
 5 files changed, 37 insertions(+), 18 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index bc88eaad38e..cfcab55ce4e 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -3655,8 +3655,6 @@ SDValue DAGTypeLegalizer::ExpandIntOp_ATOMIC_STORE(SDNode *N) {
 
 SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
-  SDValue InOp0 = N->getOperand(0);
-  EVT InVT = InOp0.getValueType();
   EVT OutVT = N->getValueType(0);
   EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
@@ -3667,6 +3665,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
   SDLoc dl(N);
   SDValue BaseIdx = N->getOperand(1);
 
+  SDValue InOp0 = N->getOperand(0);
+  if (getTypeAction(InOp0.getValueType()) == TargetLowering::TypePromoteInteger)
+    InOp0 = GetPromotedInteger(N->getOperand(0));
+
+  EVT InVT = InOp0.getValueType();
+
   SmallVector<SDValue, 8> Ops;
   Ops.reserve(OutNumElems);
   for (unsigned i = 0; i != OutNumElems; ++i) {
@@ -3677,7 +3681,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_EXTRACT_SUBVECTOR(SDNode *N) {
     SDValue Ext = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
       InVT.getVectorElementType(), N->getOperand(0), Index);
-    SDValue Op = DAG.getNode(ISD::ANY_EXTEND, dl, NOutVTElem, Ext);
+    SDValue Op = DAG.getAnyExtOrTrunc(Ext, dl, NOutVTElem);
     // Insert the converted element to the new vector.
     Ops.push_back(Op);
   }
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index c5824b366cd..aa9fd6bc03a 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1871,6 +1871,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setTargetDAGCombine(ISD::ANY_EXTEND);
   setTargetDAGCombine(ISD::SIGN_EXTEND);
   setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
+  setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
   setTargetDAGCombine(ISD::SINT_TO_FP);
   setTargetDAGCombine(ISD::UINT_TO_FP);
   setTargetDAGCombine(ISD::SETCC);
@@ -42160,6 +42161,25 @@ static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG) {
+  // Disabling for widening legalization for now. We can enable if we find a
+  // case that needs it. Otherwise it can be deleted when we switch to
+  // widening legalization.
+  if (ExperimentalVectorWideningLegalization)
+    return SDValue();
+
+  EVT VT = N->getValueType(0);
+  SDValue In = N->getOperand(0);
+
+  // Combine (ext_invec (ext_invec X)) -> (ext_invec X)
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (In.getOpcode() == N->getOpcode() &&
+      TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getOperand(0).getValueType()))
+    return DAG.getNode(N->getOpcode(), SDLoc(N), VT, In.getOperand(0));
+
+  return SDValue();
+}
+
 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -42221,6 +42241,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
   case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
   case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
+  case ISD::ANY_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG);
   case ISD::SETCC:          return combineSetCC(N, DAG, Subtarget);
   case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
   case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
diff --git a/test/CodeGen/X86/2011-12-28-vselecti8.ll b/test/CodeGen/X86/2011-12-28-vselecti8.ll
index d564e9f6ae2..bb3b4ed472b 100644
--- a/test/CodeGen/X86/2011-12-28-vselecti8.ll
+++ b/test/CodeGen/X86/2011-12-28-vselecti8.ll
@@ -18,10 +18,8 @@ target triple = "x86_64-apple-darwin11.2.0"
 define void @foo8(float* nocapture %RET) nounwind {
 ; CHECK-LABEL: foo8:
 ; CHECK:       ## %bb.0: ## %allocas
-; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT:    pmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT:    cvtdq2ps %xmm0, %xmm0
-; CHECK-NEXT:    cvtdq2ps %xmm1, %xmm1
+; CHECK-NEXT:    movaps {{.*#+}} xmm0 = [1.0E+2,2.0E+0,1.0E+2,4.0E+0]
+; CHECK-NEXT:    movaps {{.*#+}} xmm1 = [1.0E+2,6.0E+0,1.0E+2,8.0E+0]
 ; CHECK-NEXT:    movups %xmm1, 16(%rdi)
 ; CHECK-NEXT:    movups %xmm0, (%rdi)
 ; CHECK-NEXT:    retq
diff --git a/test/CodeGen/X86/pr35918.ll b/test/CodeGen/X86/pr35918.ll
index 6ce97061a8f..5b9e7a614c7 100644
--- a/test/CodeGen/X86/pr35918.ll
+++ b/test/CodeGen/X86/pr35918.ll
@@ -69,7 +69,7 @@ define void @fetch_r16g16_snorm_unorm8(<4 x i8>*, i8*, i32, i32, { [2048 x i32],
 ; X64-SKX-NEXT:    vpmaxsd %xmm1, %xmm0, %xmm0
 ; X64-SKX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; X64-SKX-NEXT:    vpsrld $7, %xmm0, %xmm0
-; X64-SKX-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-SKX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
 ; X64-SKX-NEXT:    vpmovqw %xmm0, -{{[0-9]+}}(%rsp)
 ; X64-SKX-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X64-SKX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
diff --git a/test/CodeGen/X86/vector-narrow-binop.ll b/test/CodeGen/X86/vector-narrow-binop.ll
index 4836b490c7d..46b0b08a1a9 100644
--- a/test/CodeGen/X86/vector-narrow-binop.ll
+++ b/test/CodeGen/X86/vector-narrow-binop.ll
@@ -107,18 +107,16 @@ define <2 x i8> @PR39893(<2 x i32> %x, <8 x i8> %y) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    pxor %xmm2, %xmm2
 ; SSE-NEXT:    psubd %xmm0, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE-NEXT:    psrld $16, %xmm0
-; SSE-NEXT:    movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; SSE-NEXT:    movapd %xmm1, %xmm0
+; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
+; SSE-NEXT:    movaps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX1-LABEL: PR39893:
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX1-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
 ; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; AVX1-NEXT:    retq
 ;
@@ -126,8 +124,7 @@ define <2 x i8> @PR39893(<2 x i32> %x, <8 x i8> %y) {
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
-; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX2-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[u,u,u,u,u,u,u,u]
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX2-NEXT:    retq
 ;
@@ -135,8 +132,7 @@ define <2 x i8> @PR39893(<2 x i32> %x, <8 x i8> %y) {
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT:    vpsubd %xmm0, %xmm2, %xmm0
-; AVX512-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX512-NEXT:    vpsrld $16, %xmm0, %xmm0
+; AVX512-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[u,u,u,u,u,u,u,u]
 ; AVX512-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX512-NEXT:    retq
   %sub = sub <2 x i32> , %x
-- 
2.40.0