From: Simon Pilgrim
Date: Tue, 22 Jan 2019 12:17:48 +0000 (+0000)
Subject: [X86][SSE] Add selective commutation support for insertps (PR40340)
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=49551c19d2d1cf78ce312268a876757df9c16b51;p=llvm

[X86][SSE] Add selective commutation support for insertps (PR40340)

When we are inserting one "inline" element and zeroing two of the other
elements, we can safely commute the insertps source inputs to improve
memory folding.
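To make the rule concrete, here is a minimal standalone sketch (not part of
the patch itself) of how the INSERTPS immediate can be remapped once the two
register sources are swapped. The helper name commuteInsertPSImm is
hypothetical, and __builtin_popcount/__builtin_ctz stand in for LLVM's
countPopulation/findFirstSet:

  #include <cassert>
  #include <cstdio>

  // Hypothetical helper: given an INSERTPS immediate that zeroes two lanes
  // and performs an "inline" insertion (CountS == CountD, and the inserted
  // lane is not itself zeroed), return the immediate that produces the same
  // result after the two register sources are swapped, or -1 if the commute
  // is not allowed under these conditions.
  static int commuteInsertPSImm(unsigned Imm) {
    unsigned ZMask = Imm & 15;         // bits 0-3: lanes forced to zero
    unsigned DstIdx = (Imm >> 4) & 3;  // CountD: destination lane
    unsigned SrcIdx = (Imm >> 6) & 3;  // CountS: source lane
    if (DstIdx != SrcIdx || (ZMask & (1u << DstIdx)) != 0 ||
        __builtin_popcount(ZMask) != 2)
      return -1;
    // The one lane that is neither zeroed nor written by the insertion must
    // now be taken from the other (swapped) source operand.
    unsigned AltIdx = __builtin_ctz((ZMask | (1u << DstIdx)) ^ 15);
    assert(AltIdx < 4 && "Illegal insertion index");
    return (AltIdx << 6) | (AltIdx << 4) | ZMask;
  }

  int main() {
    // 0x55 = insert src2[1] into lane 1, zero lanes 0 and 2, keep lane 3.
    printf("0x%02x -> 0x%02x\n", 0x55u, (unsigned)commuteInsertPSImm(0x55));
    return 0; // prints "0x55 -> 0xf5"
  }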
Differential Revision: https://reviews.llvm.org/D56843

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@351807 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 90ccf1f5eb9..dde57a6e933 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -752,6 +752,7 @@ defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
 // vinsertps - insert f32 to XMM
 let ExeDomain = SSEPackedSingle in {
+let isCommutable = 1 in
 def VINSERTPSZrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
       (ins VR128X:$src1, VR128X:$src2, u8imm:$src3),
       "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index c07add60e24..6f374ad9ead 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1569,6 +1569,28 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                    OpIdx1, OpIdx2);
   }
+  case X86::INSERTPSrr:
+  case X86::VINSERTPSrr:
+  case X86::VINSERTPSZrr: {
+    unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
+    unsigned ZMask = Imm & 15;
+    unsigned DstIdx = (Imm >> 4) & 3;
+    unsigned SrcIdx = (Imm >> 6) & 3;
+
+    // We can commute insertps if we zero 2 of the elements, the insertion is
+    // "inline" and we don't override the insertion with a zero.
+    if (DstIdx == SrcIdx && (ZMask & (1 << DstIdx)) == 0 &&
+        countPopulation(ZMask) == 2) {
+      unsigned AltIdx = findFirstSet((ZMask | (1 << DstIdx)) ^ 15);
+      assert(0 <= AltIdx && AltIdx < 4 && "Illegal insertion index");
+      unsigned AltImm = (AltIdx << 6) | (AltIdx << 4) | ZMask;
+      auto &WorkingMI = cloneIfNew(MI);
+      WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(AltImm);
+      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+                                                     OpIdx1, OpIdx2);
+    }
+    return nullptr;
+  }
   case X86::MOVSDrr:
   case X86::MOVSSrr:
   case X86::VMOVSDrr:
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 5e528ef29bb..807af7f4808 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -5651,6 +5651,7 @@ let Constraints = "$src1 = $dst" in
 // vector. The next one matches the intrinsic and could zero arbitrary elements
 // in the target vector.
 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
+  let isCommutable = 1 in
   def rr : SS4AIi8
%a0, <4 x float> *%p1) {
 define <4 x float> @commute_load_insertps(<4 x float>, <4 x float>* nocapture readonly) {
 ; SSE-LABEL: commute_load_insertps:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movaps (%rdi), %xmm1
-; SSE-NEXT:    insertps {{.*#+}} xmm1 = zero,xmm0[1],zero,xmm1[3]
-; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[1],zero,mem[0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: commute_load_insertps:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovaps (%rdi), %xmm1
-; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[1],zero,xmm1[3]
+; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[1],zero,mem[0]
 ; AVX-NEXT:    retq
   %3 = load <4 x float>, <4 x float>* %1
   %4 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %3, <4 x float> %0, i8 85)
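For reference, the immediate in the intrinsic call above, i8 85 (0x55), decodes
to CountS = 1, CountD = 1, ZMask = 0b0101: insert lane 1, zero lanes 0 and 2,
and keep lane 3 of the first source. That satisfies the commute condition, and
the hypothetical sketch above remaps it to 0xf5, so the surviving lane 3 is
taken from the swapped second source instead. With the loaded vector now in the
second (foldable) position, the load can be folded into the instruction's m32
operand (printed as mem[0] in the new CHECK lines), which is what removes the
movaps/vmovaps copies from the old sequences.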