From c04ba419155cd7b471479f9c7cae5b43f326ef37 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sun, 20 Nov 2016 14:45:46 +0000
Subject: [PATCH] [X86][AVX512] Add some initial VBMI target shuffle combine tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@287494 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../vector-shuffle-combining-avx512vbmi.ll | 131 ++++++++++++++++++
 1 file changed, 131 insertions(+)
 create mode 100644 test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll

diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
new file mode 100644
index 00000000000..bc29a0b19a1
--- /dev/null
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll
@@ -0,0 +1,131 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vbmi,+avx512vl | FileCheck %s --check-prefix=X64
+
+declare <16 x i8> @llvm.x86.avx512.mask.permvar.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+declare <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+declare <16 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+declare <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8>, <16 x i8>, <16 x i8>, i16)
+
+declare <32 x i8> @llvm.x86.avx512.mask.permvar.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+declare <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+declare <32 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+declare <32 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.256(<32 x i8>, <32 x i8>, <32 x i8>, i32)
+
+declare <64 x i8> @llvm.x86.avx512.mask.permvar.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+declare <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+declare <64 x i8> @llvm.x86.avx512.mask.vpermt2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+declare <64 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+
+declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)
+declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)
+declare <64 x i8> @llvm.x86.avx512.mask.pshuf.b.512(<64 x i8>, <64 x i8>, <64 x i8>, i64)
+
+define <16 x i8> @combine_vpermt2var_16i8_identity(<16 x i8> %x0, <16 x i8> %x1) {
+; X32-LABEL: combine_vpermt2var_16i8_identity:
+; X32: # BB#0:
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermt2var_16i8_identity:
+; X64: # BB#0:
+; X64-NEXT: retq
+  %res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x0, <16 x i8> %x1, i16 -1)
+  %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 30, i8 13, i8 28, i8 11, i8 26, i8 9, i8 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 -1)
+  ret <16 x i8> %res1
+}
+define <16 x i8> @combine_vpermt2var_16i8_identity_mask(<16 x i8> %x0, <16 x i8> %x1, i16 %m) {
+; X32-LABEL: combine_vpermt2var_16i8_identity_mask:
+; X32: # BB#0:
+; X32-NEXT: kmovw {{[0-9]+}}(%esp), %k1
+; X32-NEXT: vmovdqu8 {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X32-NEXT: vpermt2b %xmm1, %xmm2, %xmm0 {%k1} {z}
+; X32-NEXT: vmovdqu8 {{.*#+}} xmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X32-NEXT: vpermt2b %xmm0, %xmm1, %xmm0 {%k1} {z}
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermt2var_16i8_identity_mask:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovdqu8 {{.*#+}} xmm2 = [15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]
+; X64-NEXT: vpermt2b %xmm1, %xmm2, %xmm0 {%k1} {z}
+; X64-NEXT: vmovdqu8 {{.*#+}} xmm1 = [15,30,13,28,11,26,9,24,7,22,5,20,3,18,1,16]
+; X64-NEXT: vpermt2b %xmm0, %xmm1, %xmm0 {%k1} {z}
+; X64-NEXT: retq
+  %res0 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x0, <16 x i8> %x1, i16 %m)
+  %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 15, i8 30, i8 13, i8 28, i8 11, i8 26, i8 9, i8 24, i8 7, i8 22, i8 5, i8 20, i8 3, i8 18, i8 1, i8 16>, <16 x i8> %res0, <16 x i8> %res0, i16 %m)
+  ret <16 x i8> %res1
+}
+
+define <16 x i8> @combine_vpermi2var_16i8_as_vpshufb(<16 x i8> %x0, <16 x i8> %x1) {
+; X32-LABEL: combine_vpermi2var_16i8_as_vpshufb:
+; X32: # BB#0:
+; X32-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermi2var_16i8_as_vpshufb:
+; X64: # BB#0:
+; X64-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[15,0,14,1,13,2,12,3,11,4,10,5,9,6,8,7]
+; X64-NEXT: retq
+  %res0 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> <i8 15, i8 14, i8 13, i8 12, i8 11, i8 10, i8 9, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>, <16 x i8> %x1, i16 -1)
+  %res1 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 15, i8 1, i8 14, i8 2, i8 13, i8 3, i8 12, i8 4, i8 11, i8 5, i8 10, i8 6, i8 9, i8 7, i8 8>, <16 x i8> %res0, i16 -1)
+  ret <16 x i8> %res1
+}
+define <32 x i8> @combine_vpermi2var_32i8_as_vpermb(<32 x i8> %x0, <32 x i8> %x1) {
+; X32-LABEL: combine_vpermi2var_32i8_as_vpermb:
+; X32: # BB#0:
+; X32-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; X32-NEXT: vmovdqu8 {{.*#+}} ymm0 = [0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22]
+; X32-NEXT: vpermi2b %ymm1, %ymm1, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermi2var_32i8_as_vpermb:
+; X64: # BB#0:
+; X64-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; X64-NEXT: vmovdqu8 {{.*#+}} ymm0 = [0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22]
+; X64-NEXT: vpermi2b %ymm1, %ymm1, %ymm0
+; X64-NEXT: retq
+  %res0 = shufflevector <32 x i8> %x0, <32 x i8> %x1, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 16, i32 48, i32 17, i32 49, i32 18, i32 50, i32 19, i32 51, i32 20, i32 52, i32 21, i32 53, i32 22, i32 54, i32 23, i32 55>
+  %res1 = call <32 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.256(<32 x i8> %res0, <32 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22>, <32 x i8> %res0, i32 -1)
+  ret <32 x i8> %res1
+}
+define <64 x i8> @combine_vpermi2var_64i8_as_vpermb(<64 x i8> %x0, <64 x i8> %x1) {
+; X32-LABEL: combine_vpermi2var_64i8_as_vpermb:
+; X32: # BB#0:
+; X32-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
+; X32-NEXT: vmovdqu8 {{.*#+}} zmm0 = [0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22]
+; X32-NEXT: vpermi2b %zmm1, %zmm1, %zmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermi2var_64i8_as_vpermb:
+; X64: # BB#0:
+; X64-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
+; X64-NEXT: vmovdqu8 {{.*#+}} zmm0 = [0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22,0,32,2,30,4,28,6,26,8,28,10,26,12,24,14,22]
+; X64-NEXT: vpermi2b %zmm1, %zmm1, %zmm0
+; X64-NEXT: retq
+  %res0 = shufflevector <64 x i8> %x0, <64 x i8> %x1, <64 x i32> <i32 0, i32 64, i32 1, i32 65, i32 2, i32 66, i32 3, i32 67, i32 4, i32 68, i32 5, i32 69, i32 6, i32 70, i32 7, i32 71, i32 16, i32 80, i32 17, i32 81, i32 18, i32 82, i32 19, i32 83, i32 20, i32 84, i32 21, i32 85, i32 22, i32 86, i32 23, i32 87, i32 32, i32 96, i32 33, i32 97, i32 34, i32 98, i32 35, i32 99, i32 36, i32 100, i32 37, i32 101, i32 38, i32 102, i32 39, i32 103, i32 48, i32 112, i32 49, i32 113, i32 50, i32 114, i32 51, i32 115, i32 52, i32 116, i32 53, i32 117, i32 54, i32 118, i32 55, i32 119>
+  %res1 = call <64 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.512(<64 x i8> %res0, <64 x i8> <i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22, i8 0, i8 32, i8 2, i8 30, i8 4, i8 28, i8 6, i8 26, i8 8, i8 28, i8 10, i8 26, i8 12, i8 24, i8 14, i8 22>, <64 x i8> %res0, i64 -1)
+  ret <64 x i8> %res1
+}
+
+define <16 x i8> @combine_vpermt2var_vpermi2var_16i8_as_vperm2(<16 x i8> %x0, <16 x i8> %x1) {
+; X32-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
+; X32: # BB#0:
+; X32-NEXT: vmovdqu8 {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
+; X32-NEXT: vpermi2b %xmm1, %xmm0, %xmm2
+; X32-NEXT: vmovdqu8 {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
+; X32-NEXT: vpermt2b %xmm2, %xmm0, %xmm2
+; X32-NEXT: vmovdqa64 %xmm2, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_vpermt2var_vpermi2var_16i8_as_vperm2:
+; X64: # BB#0:
+; X64-NEXT: vmovdqu8 {{.*#+}} xmm2 = [0,31,2,29,4,27,6,25,8,23,10,21,12,19,14,17]
+; X64-NEXT: vpermi2b %xmm1, %xmm0, %xmm2
+; X64-NEXT: vmovdqu8 {{.*#+}} xmm0 = [0,17,2,18,4,19,6,21,8,23,10,25,12,27,14,29]
+; X64-NEXT: vpermt2b %xmm2, %xmm0, %xmm2
+; X64-NEXT: vmovdqa64 %xmm2, %xmm0
+; X64-NEXT: retq
+  %res0 = call <16 x i8> @llvm.x86.avx512.mask.vpermi2var.qi.128(<16 x i8> %x0, <16 x i8> <i8 0, i8 31, i8 2, i8 29, i8 4, i8 27, i8 6, i8 25, i8 8, i8 23, i8 10, i8 21, i8 12, i8 19, i8 14, i8 17>, <16 x i8> %x1, i16 -1)
+  %res1 = call <16 x i8> @llvm.x86.avx512.maskz.vpermt2var.qi.128(<16 x i8> <i8 0, i8 17, i8 2, i8 18, i8 4, i8 19, i8 6, i8 21, i8 8, i8 23, i8 10, i8 25, i8 12, i8 27, i8 14, i8 29>, <16 x i8> %res0, <16 x i8> %res0, i16 -1)
+  ret <16 x i8> %res1
+}
-- 
2.50.1
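For reference, the NOTE and RUN lines in the new test describe how its CHECK assertions are produced. A minimal sketch of exercising and regenerating them locally, assuming an LLVM build with llc and FileCheck on PATH and commands run from the llvm source root (paths and flags mirror the test's own RUN/NOTE lines):

  # Run the X64 RUN line by hand (same llc flags as the RUN line, with %s expanded):
  llc < test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll -mtriple=x86_64-unknown -mattr=+avx512vbmi,+avx512vl \
    | FileCheck test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll --check-prefix=X64

  # Regenerate the autogenerated assertions after a codegen change (rewrites the test in place):
  python utils/update_llc_test_checks.py test/CodeGen/X86/vector-shuffle-combining-avx512vbmi.ll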