From: Guy Blank
Date: Wed, 21 Jun 2017 07:38:41 +0000 (+0000)
Subject: [DAGCombiner] Add another combine from build vector to shuffle
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=415c6676800a19b6615ab378ac64609c30f31b44;p=llvm

[DAGCombiner] Add another combine from build vector to shuffle

Add support for combining a build vector into a shuffle when the build
vector consists of elements extracted from two vectors (vec1, vec2),
where vec2 is half the width of vec1.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@305883 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 255ddcacfdb..b7f9dad23ae 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14055,6 +14055,11 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
       // when we start sorting the vectors by type.
       return SDValue();
     }
+  } else if (InVT2.getSizeInBits() * 2 == VT.getSizeInBits() &&
+             InVT1.getSizeInBits() == VT.getSizeInBits()) {
+    SmallVector<SDValue, 2> ConcatOps(2, DAG.getUNDEF(InVT2));
+    ConcatOps[0] = VecIn2;
+    VecIn2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps);
   } else {
     // TODO: Support cases where the length mismatch isn't exactly by a
     // factor of 2.
diff --git a/test/CodeGen/AArch64/arm64-neon-copy.ll b/test/CodeGen/AArch64/arm64-neon-copy.ll
index a7b95e71791..16834b7e315 100644
--- a/test/CodeGen/AArch64/arm64-neon-copy.ll
+++ b/test/CodeGen/AArch64/arm64-neon-copy.ll
@@ -1378,7 +1378,7 @@ entry:
 define <2 x i64> @test_concat_v2i64_v2i64_v1i64(<2 x i64> %x, <1 x i64> %y) #0 {
 ; CHECK-LABEL: test_concat_v2i64_v2i64_v1i64:
-; CHECK: ins {{v[0-9]+}}.d[1], {{v[0-9]+}}.d[0]
+; CHECK: zip1 {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
 entry:
   %vecext = extractelement <2 x i64> %x, i32 0
   %vecinit = insertelement <2 x i64> undef, i64 %vecext, i32 0
diff --git a/test/CodeGen/X86/vector-shuffle-v48.ll b/test/CodeGen/X86/vector-shuffle-v48.ll
index 885b5c86ee8..06b7c2e6472 100644
--- a/test/CodeGen/X86/vector-shuffle-v48.ll
+++ b/test/CodeGen/X86/vector-shuffle-v48.ll
@@ -3,42 +3,18 @@ define <32 x i8> @foo(<48 x i8>* %x0, <16 x i32> %x1, <16 x i32> %x2) {
 ; CHECK-LABEL: foo:
 ; CHECK: # BB#0:
-; CHECK-NEXT: vmovdqu 32(%rdi), %xmm0
-; CHECK-NEXT: vmovdqu (%rdi), %ymm1
-; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2
-; CHECK-NEXT: vpextrb $0, %xmm2, %eax
-; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,3,4,6,7,9,10,12,13,15],zero,zero,zero,zero,zero
-; CHECK-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $2, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $3, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $5, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $6, %xmm2, %eax
-; CHECK-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
-; CHECK-NEXT: vpextrb $1, %xmm0, %eax
-; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,11,12,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $2, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $4, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $5, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $7, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $8, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $10, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $11, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $13, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; CHECK-NEXT: vpextrb $14, %xmm0, %eax
-; CHECK-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; CHECK-NEXT: vmovdqu (%rdi), %ymm0
+; CHECK-NEXT: vmovdqu 32(%rdi), %xmm1
+; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,u,u,u,u,u,1,2,4,5,7,8,10,11,13,14]
+; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2
+; CHECK-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,2,3,5,6]
+; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,3,4,6,7,9,10,12,13,15],zero,zero,zero,zero,zero
+; CHECK-NEXT: vpor %xmm3, %xmm0, %xmm0
+; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,11,12,14,15,u,u,u,u,u,u,u,u,u,u]
+; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0]
+; CHECK-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; CHECK-NEXT: retq
 %1 = load <48 x i8>, <48 x i8>* %x0, align 1
 %2 = shufflevector <48 x i8> %1, <48 x i8> undef, <32 x i32>
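
For illustration only (not taken from the commit or its test files; the
function and value names are hypothetical): a minimal IR sketch of the
build-vector shape the new else-if branch handles. %wide has the result
type (InVT1 == VT) while %narrow is half as wide (2 * InVT2 == VT), so
the combine first widens %narrow to the result type via a CONCAT_VECTORS
whose second operand is undef, and the build_vector then becomes a single
vector_shuffle of %wide and the widened %narrow.

; Sketch only, assuming a <4 x i32> result built from a full-width and a
; half-width source vector.
define <4 x i32> @build_from_two_widths(<4 x i32> %wide, <2 x i32> %narrow) {
entry:
  %w0 = extractelement <4 x i32> %wide, i32 0
  %w1 = extractelement <4 x i32> %wide, i32 1
  %n0 = extractelement <2 x i32> %narrow, i32 0
  %n1 = extractelement <2 x i32> %narrow, i32 1
  ; This insertelement chain is the build_vector node the DAG sees; with
  ; the patch it can lower to one shuffle instead of per-element inserts.
  %v0 = insertelement <4 x i32> undef, i32 %w0, i32 0
  %v1 = insertelement <4 x i32> %v0, i32 %w1, i32 1
  %v2 = insertelement <4 x i32> %v1, i32 %n0, i32 2
  %v3 = insertelement <4 x i32> %v2, i32 %n1, i32 3
  ret <4 x i32> %v3
}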