From: Craig Topper
Date: Sun, 8 May 2016 20:10:20 +0000 (+0000)
Subject: [X86] Remove extra patterns that check for BUILD_VECTOR of all 0s. These are always...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=ba458cf29fa616520132a3caad3b83495c868681;p=llvm

[X86] Remove extra patterns that check for BUILD_VECTOR of all 0s. These are
always canonicalized to v4i32/v8i32/v16i32, except when targeting SSE1 only,
where only v4f32 is supported.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@268880 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 09e77ac5488..a0d503a1e3d 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -472,20 +472,15 @@ let Predicates = [HasAVX512] in {
   def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))), (v16i16 VR256X:$src)>;
 }
 
-//
-// AVX-512: VPXOR instruction writes zero to its upper part, it's safe build zeros.
-//
-
+// Alias instruction that maps zero vector to pxor / xorp* for AVX-512.
+// This is expanded by ExpandPostRAPseudos to an xorps / vxorps, and then
+// swizzled by ExecutionDepsFix to pxor.
+// We set canFoldAsLoad because this can be converted to a constant-pool
+// load of an all-zeros value if folding it would be beneficial.
 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
     isPseudo = 1, Predicates = [HasAVX512] in {
 def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
-               [(set VR512:$dst, (v16f32 immAllZerosV))]>;
-}
-
-let Predicates = [HasAVX512] in {
-def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
-def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
+               [(set VR512:$dst, (v16i32 immAllZerosV))]>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index bed0b7497a0..3e98eb88048 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -477,11 +477,7 @@ def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "",
                [(set VR128:$dst, (v4f32 immAllZerosV))]>;
 }
 
-def : Pat<(v2f64 immAllZerosV), (V_SET0)>;
 def : Pat<(v4i32 immAllZerosV), (V_SET0)>;
-def : Pat<(v2i64 immAllZerosV), (V_SET0)>;
-def : Pat<(v8i16 immAllZerosV), (V_SET0)>;
-def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
 
 
 // The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI,
@@ -491,15 +487,7 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0)>;
 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
     isPseudo = 1, Predicates = [HasAVX], SchedRW = [WriteZero] in {
 def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "",
-                 [(set VR256:$dst, (v8f32 immAllZerosV))]>;
-}
-
-let Predicates = [HasAVX] in {
-  def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>;
-  def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>;
-  def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>;
-  def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>;
-  def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>;
+                 [(set VR256:$dst, (v8i32 immAllZerosV))]>;
 }
 
 // We set canFoldAsLoad because this can be converted to a constant-pool
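
For illustration only (a hedged sketch, not part of the commit): the
canonicalization referred to above builds an all-zeros vector as the integer
type for its register width (v4i32/v8i32/v16i32) and bitcasts it to the
requested type, which is why a single zero-vector pattern per width suffices.
In a made-up IR example like the one below, both zero stores are expected to
select through the same V_SET0-based zero idiom (an xorps of the register with
itself, per the comments above) even though their element types differ:

  define void @zero_stores(<2 x i64>* %p, <8 x i16>* %q) {
    ; both zeroinitializers are built as the canonical v4i32 zero node
    store <2 x i64> zeroinitializer, <2 x i64>* %p
    store <8 x i16> zeroinitializer, <8 x i16>* %q
    ret void
  }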