From bc712eaf9089f3d20d09d935ee84624d10d15a0b Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Mon, 1 Jul 2019 13:22:06 +0000
Subject: [PATCH] AMDGPU/GlobalISel: Use and instead of BFE with inline
 immediate

Zext from s1 is the only case where this should do anything with the
current legal extensions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364760 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../AMDGPU/AMDGPUInstructionSelector.cpp      | 35 +++++++++++---
 .../AMDGPU/GlobalISel/inst-select-anyext.mir  | 36 ++++++++++++++
 .../AMDGPU/GlobalISel/inst-select-sext.mir    | 39 +++++++++++++++
 .../AMDGPU/GlobalISel/inst-select-zext.mir    | 48 +++++++++++++++++--
 4 files changed, 148 insertions(+), 10 deletions(-)

diff --git a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index 745a08f19f6..c7237e42571 100644
--- a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -626,6 +626,13 @@ bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
   return true;
 }
 
+/// \returns true if a bitmask for \p Size bits will be an inline immediate.
+static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
+  Mask = maskTrailingOnes<unsigned>(Size);
+  int SignedMask = static_cast<int>(Mask);
+  return SignedMask >= -16 && SignedMask <= 64;
+}
+
 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
   bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
   const DebugLoc &DL = I.getDebugLoc();
@@ -688,9 +695,17 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
 
   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
     // 64-bit should have been split up in RegBankSelect
-    //
-    // TODO: USE V_AND_B32 when the constant mask is an inline immediate for
-    // unsigned for smaller code size.
+
+    // Try to use an and with a mask if it will save code size.
+    unsigned Mask;
+    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
+      MachineInstr *ExtI =
+        BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
+        .addImm(Mask)
+        .addReg(SrcReg);
+      return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
+    }
+
     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
     MachineInstr *ExtI =
       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
@@ -736,9 +751,17 @@ bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, MRI);
     }
 
-    BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
-      .addReg(SrcReg)
-      .addImm(SrcSize << 16);
+    unsigned Mask;
+    if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
+      BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
+        .addReg(SrcReg)
+        .addImm(Mask);
+    } else {
+      BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
+        .addReg(SrcReg)
+        .addImm(SrcSize << 16);
+    }
+
     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, MRI);
   }
 
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
index 82b24acc18a..a2a0d5f85c9 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-anyext.mir
@@ -81,6 +81,24 @@ body: |
 
 ---
 
+name: anyext_sgpr_s8_to_sgpr_s32
+legalized: true
+regBankSelected: true
+body: |
+  bb.0:
+    liveins: $sgpr0
+
+    ; GCN-LABEL: name: anyext_sgpr_s8_to_sgpr_s32
+    ; GCN: $sgpr0 = COPY %2:sreg_32_xm0
+    %0:sgpr(s32) = COPY $sgpr0
+    %1:sgpr(s8) = G_TRUNC %0
+    %2:sgpr(s32) = G_ANYEXT %1
+    $sgpr0 = COPY %2
+
+...
+
+---
+
 name: anyext_sgpr_s16_to_sgpr_s32
 legalized: true
 regBankSelected: true
@@ -154,6 +172,24 @@ body: |
 
 ---
 
+name: anyext_vgpr_s8_to_vgpr_s32
+legalized: true
+regBankSelected: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GCN-LABEL: name: anyext_vgpr_s8_to_vgpr_s32
+    ; GCN: $vgpr0 = COPY %2:vgpr_32
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s8) = G_TRUNC %0
+    %2:vgpr(s32) = G_ANYEXT %1
+    $vgpr0 = COPY %2
+
+...
+
+---
+
 name: anyext_vgpr_s16_to_vgpr_s32
 legalized: true
 regBankSelected: true
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
index 4fa726c98cd..ac7da5f0f3c 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-sext.mir
@@ -87,6 +87,25 @@ body: |
 
 ---
 
+name: sext_sgpr_s8_to_sgpr_s32
+legalized: true
+regBankSelected: true
+body: |
+  bb.0:
+    liveins: $sgpr0
+
+    ; GCN-LABEL: name: sext_sgpr_s8_to_sgpr_s32
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[S_SEXT_I32_I8_:%[0-9]+]]:sreg_32_xm0 = S_SEXT_I32_I8 [[COPY]]
+    ; GCN: $sgpr0 = COPY [[S_SEXT_I32_I8_]]
+    %0:sgpr(s32) = COPY $sgpr0
+    %1:sgpr(s8) = G_TRUNC %0
+    %2:sgpr(s32) = G_SEXT %1
+    $sgpr0 = COPY %2
+...
+
+---
+
 name: sext_sgpr_s16_to_sgpr_s32
 legalized: true
 regBankSelected: true
@@ -168,6 +187,26 @@ body: |
 
 ---
 
+name: sext_vgpr_s8_to_vgpr_s32
+legalized: true
+regBankSelected: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GCN-LABEL: name: sext_vgpr_s8_to_vgpr_s32
+    ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GCN: [[V_BFE_I32_:%[0-9]+]]:vgpr_32 = V_BFE_I32 [[COPY]], 0, 8, implicit $exec
+    ; GCN: $vgpr0 = COPY [[V_BFE_I32_]]
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s8) = G_TRUNC %0
+    %2:vgpr(s32) = G_SEXT %1
+    $vgpr0 = COPY %2
+
+...
+
+---
+
 name: sext_vgpr_s16_to_vgpr_s32
 legalized: true
 regBankSelected: true
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
index a4c023dbd29..9caf8236f9c 100644
--- a/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-zext.mir
@@ -56,8 +56,8 @@ body: |
 
     ; GCN-LABEL: name: zext_sgpr_s1_to_sgpr_s32
     ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GCN: [[S_BFE_U32_:%[0-9]+]]:sreg_32_xm0 = S_BFE_U32 [[COPY]], 65536, implicit-def $scc
-    ; GCN: $sgpr0 = COPY [[S_BFE_U32_]]
+    ; GCN: [[S_AND_B32_:%[0-9]+]]:sreg_32_xm0 = S_AND_B32 [[COPY]], 1, implicit-def $scc
+    ; GCN: $sgpr0 = COPY [[S_AND_B32_]]
     %0:sgpr(s32) = COPY $sgpr0
     %1:sgpr(s1) = G_TRUNC %0
     %2:sgpr(s32) = G_ZEXT %1
@@ -87,6 +87,26 @@ body: |
 
 ---
 
+name: zext_sgpr_s8_to_sgpr_s32
+legalized: true
+regBankSelected: true
+body: |
+  bb.0:
+    liveins: $sgpr0
+
+    ; GCN-LABEL: name: zext_sgpr_s8_to_sgpr_s32
+    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    ; GCN: [[S_BFE_U32_:%[0-9]+]]:sreg_32_xm0 = S_BFE_U32 [[COPY]], 524288, implicit-def $scc
+    ; GCN: $sgpr0 = COPY [[S_BFE_U32_]]
+    %0:sgpr(s32) = COPY $sgpr0
+    %1:sgpr(s8) = G_TRUNC %0
+    %2:sgpr(s32) = G_ZEXT %1
+    $sgpr0 = COPY %2
+
+...
+
+---
+
 name: zext_sgpr_s16_to_sgpr_s32
 legalized: true
 regBankSelected: true
@@ -158,8 +178,8 @@ body: |
 
     ; GCN-LABEL: name: zext_vgpr_s1_to_vgpr_s32
    ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GCN: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[COPY]], 0, 1, implicit $exec
-    ; GCN: $vgpr0 = COPY [[V_BFE_U32_]]
+    ; GCN: [[V_AND_B32_e32_:%[0-9]+]]:vgpr_32 = V_AND_B32_e32 1, [[COPY]], implicit $exec
+    ; GCN: $vgpr0 = COPY [[V_AND_B32_e32_]]
     %0:vgpr(s32) = COPY $vgpr0
     %1:vgpr(s1) = G_TRUNC %0
     %2:vgpr(s32) = G_ZEXT %1
@@ -168,6 +188,26 @@ body: |
 
 ---
 
+name: zext_vgpr_s8_to_vgpr_s32
+legalized: true
+regBankSelected: true
+body: |
+  bb.0:
+    liveins: $vgpr0
+
+    ; GCN-LABEL: name: zext_vgpr_s8_to_vgpr_s32
+    ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    ; GCN: [[V_BFE_U32_:%[0-9]+]]:vgpr_32 = V_BFE_U32 [[COPY]], 0, 8, implicit $exec
+    ; GCN: $vgpr0 = COPY [[V_BFE_U32_]]
+    %0:vgpr(s32) = COPY $vgpr0
+    %1:vgpr(s8) = G_TRUNC %0
+    %2:vgpr(s32) = G_ZEXT %1
+    $vgpr0 = COPY %2
+
+...
+
+---
+
 name: zext_vgpr_s16_to_vgpr_s32
 legalized: true
 regBankSelected: true
-- 
2.40.0
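
Note (reviewer sketch, not part of the patch): the change pays off because AMDGPU SALU/VALU instructions can encode small integers in the range -16 to 64 as inline immediates, so an AND with such a mask needs no extra literal dword, whereas the BFE path always carries an offset/width operand (S_BFE_U32 takes a packed literal with the offset in the low bits and the width at bit 16, which is why the tests check 65536 = 1 << 16 for s1 and 524288 = 8 << 16 for s8; V_BFE_U32 takes separate offset and width operands). With the currently legal extension sources, only the s1 mask (1) is an inline immediate; the s8 and s16 masks (0xff, 0xffff) are not, so those keep using BFE, matching the commit message. The standalone C++ sketch below mirrors the patch's shouldUseAndMask() check under these assumptions; maskTrailingOnes here is a local stand-in for llvm::maskTrailingOnes<unsigned>, not the LLVM implementation.

// Standalone illustration only; not part of the patch.
#include <cstdio>

// Local stand-in for llvm::maskTrailingOnes<unsigned>(Size), valid for
// 1 <= Size <= 32: returns a mask with the low Size bits set.
static unsigned maskTrailingOnes(unsigned Size) {
  return Size >= 32 ? ~0u : (1u << Size) - 1u;
}

// Same test the patch performs: the mask can replace BFE only if it fits
// the AMDGPU integer inline-immediate range [-16, 64].
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes(Size);
  int SignedMask = static_cast<int>(Mask);
  return SignedMask >= -16 && SignedMask <= 64;
}

int main() {
  const unsigned Sizes[] = {1, 8, 16};
  for (unsigned Size : Sizes) {
    unsigned Mask = 0;
    // s1 -> mask 0x1 (inline, AND path); s8 -> 0xff and s16 -> 0xffff
    // exceed 64, so zext from those sizes stays on the BFE path.
    const char *Path = shouldUseAndMask(Size, Mask)
                           ? "S_AND_B32 / V_AND_B32_e32"
                           : "S_BFE_U32 / V_BFE_U32";
    std::printf("s%u: mask=0x%x -> %s\n", Size, Mask, Path);
  }
  return 0;
}

Compiled with any C++ compiler, this prints the AND path only for s1, which is exactly the behavior the updated zext MIR tests check.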