From: Craig Topper
Date: Sat, 5 Jan 2019 18:48:11 +0000 (+0000)
Subject: [X86] Allow LowerTRUNCATE to use PACKUS/PACKSS for v16i16->v16i8 truncate when -mpref...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=0716b2a298b4c6894e75ba2dc1f625857b47a48b;p=llvm

[X86] Allow LowerTRUNCATE to use PACKUS/PACKSS for v16i16->v16i8 truncate when
-mprefer-vector-width=256 is in effect and BWI is not available.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350473 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 25a93e94990..4056b4982b0 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -17949,9 +17949,10 @@ static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
                                       const X86Subtarget &Subtarget) {
   assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
          "Unexpected PACK opcode");
+  assert(DstVT.isVector() && "VT not a vector?");
 
   // Requires SSE2 but AVX512 has fast vector truncate.
-  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512() || !DstVT.isVector())
+  if (!Subtarget.hasSSE2())
     return SDValue();
 
   EVT SrcVT = In.getValueType();
@@ -36899,6 +36900,7 @@ static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
   }
   if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
+      !Subtarget.hasAVX512() &&
       (SVT == MVT::i8 || SVT == MVT::i16) &&
       (InSVT == MVT::i16 || InSVT == MVT::i32)) {
     if (auto USatVal = detectSSatPattern(In, VT, true)) {
diff --git a/test/CodeGen/X86/prefer-avx256-mask-extend.ll b/test/CodeGen/X86/prefer-avx256-mask-extend.ll
index b4f8e5b0b6c..b4d452f2d3e 100644
--- a/test/CodeGen/X86/prefer-avx256-mask-extend.ll
+++ b/test/CodeGen/X86/prefer-avx256-mask-extend.ll
@@ -48,11 +48,9 @@ define <16 x i8> @testv16i1_sext_v16i8(<8 x i32>* %p, <8 x i32>* %q) {
 ; AVX256-NEXT:    vpcmpeqd %ymm0, %ymm0, %ymm0
 ; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
 ; AVX256-NEXT:    vpmovdw %ymm1, %xmm1
-; AVX256-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX256-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX256-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX256-NEXT:    vpsrlw $8, %xmm0, %xmm0
-; AVX256-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX256-NEXT:    vzeroupper
 ; AVX256-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll b/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
index 92f6e271126..7f4480ceb63 100644
--- a/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
+++ b/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
@@ -34,11 +34,9 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<8 x i32>* %a, <8
 ; AVX256VL-NEXT:    kshiftrw $8, %k0, %k2
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm1, %xmm1
-; AVX256VL-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX256VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
-; AVX256VL-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX256VL-NEXT:    vzeroupper
 ; AVX256VL-NEXT:    retq
 ;
@@ -169,11 +167,9 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0
 ; AVX256VL-NEXT:    kshiftrw $8, %k0, %k2
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm1 {%k2} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm1, %xmm1
-; AVX256VL-NEXT:    vpsrlw $8, %xmm1, %xmm1
 ; AVX256VL-NEXT:    vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; AVX256VL-NEXT:    vpmovdw %ymm0, %xmm0
-; AVX256VL-NEXT:    vpsrlw $8, %xmm0, %xmm0
-; AVX256VL-NEXT:    vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256VL-NEXT:    vpacksswb %xmm1, %xmm0, %xmm0
 ; AVX256VL-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX256VL-NEXT:    retq
 ;
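
For reference, a minimal standalone reproducer for the new lowering path. This is
not part of the commit; the RUN line and attribute string are assumptions modeled
on the prefer-avx256-*.ll tests touched above (AVX512F/VL available, BWI disabled,
256-bit vectors preferred):

; Hypothetical test, not from the commit; flags modeled on prefer-avx256-*.ll.
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-avx512bw,+prefer-256-bit | FileCheck %s

define <16 x i8> @trunc_v16i16_to_v16i8(<16 x i16> %a) {
  ; With AVX512 available but BWI missing and 256-bit width preferred,
  ; LowerTRUNCATE may now use a PACKUS/PACKSS-based sequence for this
  ; truncate instead of truncateVectorWithPACK bailing out early.
  %t = trunc <16 x i16> %a to <16 x i8>
  ret <16 x i8> %t
}

The design change visible in the diff: truncateVectorWithPACK previously refused to
fire whenever AVX512 was available, since AVX512 usually has fast native truncates.
That guard now lives only in combineTruncateWithSat, so prefer-256-bit subtargets
without BWI (which lack vpmovwb) can still fall back to the pack instructions, as
the updated vpacksswb check lines show.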