From: Tim Renouf
Date: Thu, 4 Jul 2019 17:38:24 +0000 (+0000)
Subject: [AMDGPU] Custom lower INSERT_SUBVECTOR v3, v4, v5, v8
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=44e42cf0f2d44debdd79ad338185ff7d40534011;p=llvm

[AMDGPU] Custom lower INSERT_SUBVECTOR v3, v4, v5, v8

Summary:
Since the changes to introduce vec3 and vec5, INSERT_SUBVECTOR for these
sizes has been marked "expand", which made LegalizeDAG lower it to loads
and stores via a stack slot. The code got optimized a bit later, but the
now-unused stack slot was never deleted.

This commit avoids that problem by custom lowering INSERT_SUBVECTOR into
an EXTRACT_VECTOR_ELT and INSERT_VECTOR_ELT for each element in the
subvector to insert.

V2: Addressed review comments re test.

Differential Revision: https://reviews.llvm.org/D63160

Change-Id: I9e3c13e36f68cfa3431bb9814851cc1f673274e1
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@365148 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index a0e4c8a587f..2360eaced01 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -335,16 +335,16 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f16, Custom);
 
   // Deal with vec3 vector operations when widened to vec4.
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Expand);
-  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Expand);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3i32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v3f32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i32, Custom);
+  setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4f32, Custom);
 
   // Deal with vec5 vector operations when widened to vec8.
- setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Expand); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Expand); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Expand); - setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Expand); + setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5i32, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v5f32, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i32, Custom); + setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8f32, Custom); // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling, // and output demarshalling @@ -3956,6 +3956,8 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG); case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG); + case ISD::INSERT_SUBVECTOR: + return lowerINSERT_SUBVECTOR(Op, DAG); case ISD::INSERT_VECTOR_ELT: return lowerINSERT_VECTOR_ELT(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: @@ -4628,6 +4630,32 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op, return DAG.getUNDEF(ASC->getValueType(0)); } +// This lowers an INSERT_SUBVECTOR by extracting the individual elements from +// the small vector and inserting them into the big vector. That is better than +// the default expansion of doing it via a stack slot. Even though the use of +// the stack slot would be optimized away afterwards, the stack slot itself +// remains. 
+SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDValue Vec = Op.getOperand(0);
+  SDValue Ins = Op.getOperand(1);
+  SDValue Idx = Op.getOperand(2);
+  EVT VecVT = Vec.getValueType();
+  EVT InsVT = Ins.getValueType();
+  EVT EltVT = VecVT.getVectorElementType();
+  unsigned InsNumElts = InsVT.getVectorNumElements();
+  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
+  SDLoc SL(Op);
+
+  for (unsigned I = 0; I != InsNumElts; ++I) {
+    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT, Ins,
+                              DAG.getConstant(I, SL, MVT::i32));
+    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, SL, VecVT, Vec, Elt,
+                      DAG.getConstant(IdxVal + I, SL, MVT::i32));
+  }
+  return Vec;
+}
+
 SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
   SDValue Vec = Op.getOperand(0);
diff --git a/lib/Target/AMDGPU/SIISelLowering.h b/lib/Target/AMDGPU/SIISelLowering.h
index 909ee8f7987..21a215e16ce 100644
--- a/lib/Target/AMDGPU/SIISelLowering.h
+++ b/lib/Target/AMDGPU/SIISelLowering.h
@@ -121,6 +121,7 @@ private:
                              SelectionDAG &DAG) const;
   SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
 
+  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
diff --git a/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll b/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll
new file mode 100644
index 00000000000..d29bcef05b9
--- /dev/null
+++ b/test/CodeGen/AMDGPU/insert-subvector-unused-scratch.ll
@@ -0,0 +1,32 @@
+; RUN: llc -mtriple amdgcn-amd-- -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; Before the fix that this test was committed with, this code would leave
+; an unused stack slot, causing ScratchSize to be non-zero.
+ +; GCN-LABEL: store_v3i32: +; GCN: ds_read_b64 +; GCN: ds_read_b32 +; GCN: ds_write_b32 +; GCN: ds_write_b64 +; GCN: ScratchSize: 0 +define amdgpu_kernel void @store_v3i32(<3 x i32> addrspace(3)* %out, <3 x i32> %a) nounwind { + %val = load <3 x i32>, <3 x i32> addrspace(3)* %out + %val.1 = add <3 x i32> %a, %val + store <3 x i32> %val.1, <3 x i32> addrspace(3)* %out, align 16 + ret void +} + +; GCN-LABEL: store_v5i32: +; GCN: ds_read2_b64 +; GCN: ds_read_b32 +; GCN: ds_write_b32 +; GCN: ds_write2_b64 +; GCN: ScratchSize: 0 +define amdgpu_kernel void @store_v5i32(<5 x i32> addrspace(3)* %out, <5 x i32> %a) nounwind { + %val = load <5 x i32>, <5 x i32> addrspace(3)* %out + %val.1 = add <5 x i32> %a, %val + store <5 x i32> %val.1, <5 x i32> addrspace(3)* %out, align 16 + ret void +} + +