From: Craig Topper
Date: Sun, 23 Jun 2019 06:06:04 +0000 (+0000)
Subject: [X86][SelectionDAG] Cleanup and simplify masked_load/masked_store in tablegen. Use...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=bd8f303d4698a020064316c6a13a06deaf09e2aa;p=llvm

[X86][SelectionDAG] Cleanup and simplify masked_load/masked_store in
tablegen. Use more precise PatFrags for scalar masked load/store.

Rename masked_load/masked_store to masked_ld/masked_st to discourage
their direct use. We need to check truncating/extending and
compressing/expanding before using them. This revealed that our scalar
masked load/store patterns were misusing these.

With those out of the way, rename masked_load_unaligned and
masked_store_unaligned to remove the "_unaligned". We didn't check the
alignment anyway, so the name was somewhat misleading.

Make the aligned versions inherit from masked_load/store instead of
from a separate identical version.

Merge the 3 different alignment PatFrags into a single version that
uses the VT from the SDNode to determine the size that the alignment
needs to match.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364150 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td
index a8e17f60e9e..c499c015a82 100644
--- a/include/llvm/Target/TargetSelectionDAG.td
+++ b/include/llvm/Target/TargetSelectionDAG.td
@@ -580,9 +580,9 @@ def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
 def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
                     [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
 
-def masked_store : SDNode<"ISD::MSTORE", SDTMaskedStore,
+def masked_st : SDNode<"ISD::MSTORE", SDTMaskedStore,
                    [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
-def masked_load : SDNode<"ISD::MLOAD", SDTMaskedLoad,
+def masked_ld : SDNode<"ISD::MLOAD", SDTMaskedLoad,
                    [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
 
 // Do not use ld, st directly. Use load, extload, sextload, zextload, store,
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index b7172050b36..48da35f11a2 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -3376,15 +3376,15 @@ multiclass avx512_alignedload_vl<bits<8> opc, string OpcodeStr,
                                  string EVEX2VEXOvrd, bit NoRMPattern = 0> {
   let Predicates = [prd] in
   defm Z : avx512_load, EVEX_V512;
 
   let Predicates = [prd, HasVLX] in {
   defm Z256 : avx512_load, EVEX_V256;
   defm Z128 : avx512_load, EVEX_V128;
   }
 }
@@ -3396,15 +3396,15 @@ multiclass avx512_load_vl<bits<8> opc, string OpcodeStr,
                           SDPatternOperator SelectOprr = vselect> {
   let Predicates = [prd] in
   defm Z : avx512_load, EVEX_V512;
 
   let Predicates = [prd, HasVLX] in {
   defm Z256 : avx512_load, EVEX_V256;
   defm Z128 : avx512_load, EVEX_V128;
   }
 }
@@ -3470,14 +3470,14 @@ multiclass avx512_store_vl< bits<8> opc, string OpcodeStr,
                             string EVEX2VEXOvrd, bit NoMRPattern = 0> {
   let Predicates = [prd] in
   defm Z : avx512_store, EVEX_V512;
 
   let Predicates = [prd, HasVLX] in {
   defm Z256 : avx512_store, EVEX_V256;
   defm Z128 : avx512_store, EVEX_V128;
   }
 }
@@ -3488,15 +3488,15 @@ multiclass avx512_alignedstore_vl<bits<8> opc, string OpcodeStr,
                                   string EVEX2VEXOvrd, bit NoMRPattern = 0> {
   let Predicates = [prd] in
   defm Z : avx512_store, EVEX_V512;
 
   let Predicates = [prd, HasVLX] in {
   defm Z256 : avx512_store, EVEX_V256;
   defm Z128 : avx512_store, EVEX_V128;
   }
 }
diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td
index 766194abf04..85fae57a6ef 100644
--- a/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -1008,70 +1008,46 @@ def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                  node:$index), [{}],
                                 INSERT_get_vinsert256_imm>;
 
-def X86mload : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                       (masked_load node:$src1, node:$src2, node:$src3), [{
+def masked_load : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+                          (masked_ld node:$src1, node:$src2, node:$src3), [{
   return !cast<MaskedLoadSDNode>(N)->isExpandingLoad() &&
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
 }]>;
 
-def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                     (X86mload node:$src1, node:$src2, node:$src3), [{
-  return cast<MaskedLoadSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                     (X86mload node:$src1, node:$src2, node:$src3), [{
-  return cast<MaskedLoadSDNode>(N)->getAlignment() >= 32;
-}]>;
-
-def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                     (X86mload node:$src1, node:$src2, node:$src3), [{
-  return cast<MaskedLoadSDNode>(N)->getAlignment() >= 64;
-}]>;
-
-def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+def masked_load_aligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_load node:$src1, node:$src2, node:$src3), [{
-  return !cast<MaskedLoadSDNode>(N)->isExpandingLoad() &&
-         cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+  // Use the node type to determine the size the alignment needs to match.
+  // We can't use memory VT because type widening changes the node VT, but
+  // not the memory VT.
+  auto *Ld = cast<MaskedLoadSDNode>(N);
+  return Ld->getAlignment() >= Ld->getValueType(0).getStoreSize();
 }]>;
 
 def X86mExpandingLoad : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                (masked_load node:$src1, node:$src2, node:$src3), [{
+                                (masked_ld node:$src1, node:$src2, node:$src3), [{
   return cast<MaskedLoadSDNode>(N)->isExpandingLoad();
 }]>;
 
 // Masked store fragments.
 // X86mstore can't be implemented in core DAG files because some targets
 // do not support vector types (llvm-tblgen will fail).
-def X86mstore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                        (masked_store node:$src1, node:$src2, node:$src3), [{
+def masked_store : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+                           (masked_st node:$src1, node:$src2, node:$src3), [{
   return (!cast<MaskedStoreSDNode>(N)->isTruncatingStore()) &&
          (!cast<MaskedStoreSDNode>(N)->isCompressingStore());
 }]>;
 
-def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                      (X86mstore node:$src1, node:$src2, node:$src3), [{
-  return cast<MaskedStoreSDNode>(N)->getAlignment() >= 16;
-}]>;
-
-def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                      (X86mstore node:$src1, node:$src2, node:$src3), [{
-  return cast<MaskedStoreSDNode>(N)->getAlignment() >= 32;
-}]>;
-
-def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                      (X86mstore node:$src1, node:$src2, node:$src3), [{
-  return cast<MaskedStoreSDNode>(N)->getAlignment() >= 64;
-}]>;
-
-def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
+def masked_store_aligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                             (masked_store node:$src1, node:$src2, node:$src3), [{
-  return (!cast<MaskedStoreSDNode>(N)->isTruncatingStore()) &&
-         (!cast<MaskedStoreSDNode>(N)->isCompressingStore());
+  // Use the node type to determine the size the alignment needs to match.
+  // We can't use memory VT because type widening changes the node VT, but
+  // not the memory VT.
+  auto *St = cast<MaskedStoreSDNode>(N);
+  return St->getAlignment() >= St->getOperand(1).getValueType().getStoreSize();
 }]>;
 
 def X86mCompressingStore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                                   (masked_store node:$src1, node:$src2, node:$src3), [{
+                                   (masked_st node:$src1, node:$src2, node:$src3), [{
   return cast<MaskedStoreSDNode>(N)->isCompressingStore();
 }]>;
 
@@ -1079,7 +1055,7 @@ def X86mCompressingStore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
 // X86mtruncstore can't be implemented in core DAG files because some targets
 // doesn't support vector type ( llvm-tblgen will fail)
 def X86mtruncstore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
-                             (masked_store node:$src1, node:$src2, node:$src3), [{
+                             (masked_st node:$src1, node:$src2, node:$src3), [{
   return cast<MaskedStoreSDNode>(N)->isTruncatingStore();
 }]>;
 def masked_truncstorevi8 :
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index 810beb6f40b..1ab7af56797 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -7819,15 +7819,15 @@ defm VPMASKMOVQ : avx2_pmovmask<"vpmaskmovq",
 
 multiclass maskmov_lowering {
   // masked store
-  def: Pat<(X86mstore (VT RC:$src), addr:$ptr, (MaskVT RC:$mask)),
+  def: Pat<(masked_store (VT RC:$src), addr:$ptr, (MaskVT RC:$mask)),
            (!cast<Instruction>(InstrStr#"mr") addr:$ptr, RC:$mask, RC:$src)>;
   // masked load
-  def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask), undef)),
+  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), undef)),
            (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
-  def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask),
+  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask),
                (VT immAllZerosV))),
            (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)>;
-  def: Pat<(VT (X86mload addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
+  def: Pat<(VT (masked_load addr:$ptr, (MaskVT RC:$mask), (VT RC:$src0))),
            (!cast<Instruction>(BlendStr#"rr") RC:$src0,
            (VT (!cast<Instruction>(InstrStr#"rm") RC:$mask, addr:$ptr)),
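The practical effect of the merged fragment is that one masked_load_aligned
PatFrag now serves every vector width: its predicate derives the required
alignment from the value type being matched (64 bytes for a v16f32 ZMM load,
16 bytes for a v4f32 XMM load), so the per-width aligned128/256/512 fragments
become unnecessary. As a rough illustration only, the following hand-written
patterns sketch what the avx512_alignedload_vl multiclass above expands to
after this change; they are not part of the commit, and only serve to show the
same fragment being reused across widths.

// Hypothetical, hand-written sketches of the patterns the AVX512 aligned-load
// multiclasses generate after this change. The same masked_load_aligned
// fragment is used for both widths; its predicate requires 64-byte alignment
// when matching v16f32 and 16-byte alignment when matching v4f32.
def : Pat<(v16f32 (masked_load_aligned addr:$ptr, VK16WM:$mask, undef)),
          (VMOVAPSZrmkz VK16WM:$mask, addr:$ptr)>;
def : Pat<(v4f32 (masked_load_aligned addr:$ptr, VK4WM:$mask, undef)),
          (VMOVAPSZ128rmkz VK4WM:$mask, addr:$ptr)>;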