From: Simon Pilgrim
Date: Fri, 15 Sep 2017 11:17:42 +0000 (+0000)
Subject: [X86][SSE] Add vector test cases for integer multiplies
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=fc31426768d064223c2cc3843fa5877cabe9cbf4;p=llvm

[X86][SSE] Add vector test cases for integer multiplies

Mainly inspired by PR34474 / D37896

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@313353 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/vector-mul.ll b/test/CodeGen/X86/vector-mul.ll
new file mode 100644
index 00000000000..acfe06a8349
--- /dev/null
+++ b/test/CodeGen/X86/vector-mul.ll
@@ -0,0 +1,800 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.2 | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-XOP
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX --check-prefix=X64-AVX2
+
+;
+; PowOf2 (uniform)
+;
+
+define <2 x i64> @mul_v2i64_8(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_8:
+; X86: # BB#0:
+; X86-NEXT: psllq $3, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_8:
+; X64: # BB#0:
+; X64-NEXT: psllq $3, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_8:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpsllq $3, %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 8, i64 8>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_8(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_8:
+; X86: # BB#0:
+; X86-NEXT: pslld $3, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_8:
+; X64: # BB#0:
+; X64-NEXT: pslld $3, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v4i32_8:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpslld $3, %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 8, i32 8, i32 8, i32 8>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_8(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_8:
+; X86: # BB#0:
+; X86-NEXT: psllw $3, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_8:
+; X64: # BB#0:
+; X64-NEXT: psllw $3, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_8:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpsllw $3, %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_32(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_32:
+; X86: # BB#0:
+; X86-NEXT: psllw $5, %xmm0
+; X86-NEXT: pand {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_32:
+; X64: # BB#0:
+; X64-NEXT: psllw $5, %xmm0
+; X64-NEXT: pand {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_32:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_32:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpsllw $5, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32>
+  ret <16 x i8> %1
+}
+
+;
+; PowOf2 (non-uniform)
+;
+
+define <2 x i64> @mul_v2i64_32_8(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_32_8:
+; X86: # BB#0:
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: psllq $3, %xmm1
+; X86-NEXT: psllq $5, %xmm0
+; X86-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_32_8:
+; X64: # BB#0:
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: psllq $3, %xmm1
+; X64-NEXT: psllq $5, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v2i64_32_8:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v2i64_32_8:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 32, i64 8>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_1_2_4_8(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_1_2_4_8:
+; X86: # BB#0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_1_2_4_8:
+; X64: # BB#0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v4i32_1_2_4_8:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v4i32_1_2_4_8:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 1, i32 2, i32 4, i32 8>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_1_2_4_8_16_32_64_128(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
+; X86: # BB#0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
+; X64: # BB#0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v8i16_1_2_4_8_16_32_64_128:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 1, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X86: # BB#0:
+; X86-NEXT: movdqa %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: psllw $4, %xmm2
+; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
+; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: psllw $2, %xmm2
+; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
+; X86-NEXT: paddb %xmm0, %xmm0
+; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm2
+; X86-NEXT: paddb %xmm2, %xmm2
+; X86-NEXT: paddb %xmm0, %xmm0
+; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64: # BB#0:
+; X64-NEXT: movdqa %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm2
+; X64-NEXT: psllw $4, %xmm2
+; X64-NEXT: pand {{.*}}(%rip), %xmm2
+; X64-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
+; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm2
+; X64-NEXT: psllw $2, %xmm2
+; X64-NEXT: pand {{.*}}(%rip), %xmm2
+; X64-NEXT: paddb %xmm0, %xmm0
+; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm2
+; X64-NEXT: paddb %xmm2, %xmm2
+; X64-NEXT: paddb %xmm0, %xmm0
+; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,8192,24640,8192,24640,8192,24640]
+; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
+; X64-AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
+; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
+  ret <16 x i8> %1
+}
+
+;
+; PowOf2 + 1 (uniform)
+;
+
+define <2 x i64> @mul_v2i64_17(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_17:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,17,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_17:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,17]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_17:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,17]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 17, i64 17>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_17(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_17:
+; X86: # BB#0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_17:
+; X64: # BB#0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v4i32_17:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v4i32_17:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [17,17,17,17]
+; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 17, i32 17, i32 17, i32 17>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_17(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_17:
+; X86: # BB#0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_17:
+; X64: # BB#0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_17:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17, i16 17>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_17(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_17:
+; X86: # BB#0:
+; X86-NEXT: pmovsxbw %xmm0, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
+; X86-NEXT: pmullw %xmm2, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm3, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT: pmovsxbw %xmm0, %xmm0
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_17:
+; X64: # BB#0:
+; X64-NEXT: pmovsxbw %xmm0, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
+; X64-NEXT: pmullw %xmm2, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm3, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: pmovsxbw %xmm0, %xmm0
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_17:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [17,17,17,17,17,17,17,17]
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT: vpand %xmm3, %xmm1, %xmm1
+; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT: vpand %xmm3, %xmm0, %xmm0
+; X64-XOP-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_17:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17, i8 17>
+  ret <16 x i8> %1
+}
+
+;
+; PowOf2 + 1 (non-uniform)
+;
+
+define <2 x i64> @mul_v2i64_17_65(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_17_65:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [17,0,65,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_17_65:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [17,65]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_17_65:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [17,65]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 17, i64 65>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_5_17_33_65(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_5_17_33_65:
+; X86: # BB#0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_5_17_33_65:
+; X64: # BB#0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v4i32_5_17_33_65:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 5, i32 17, i32 33, i32 65>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_2_3_9_17_33_65_129_257(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
+; X86: # BB#0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
+; X64: # BB#0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_2_3_9_17_33_65_129_257:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 2, i16 3, i16 9, i16 17, i16 33, i16 65, i16 129, i16 257>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X86: # BB#0:
+; X86-NEXT: pmovsxbw %xmm0, %xmm1
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm2, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT: pmovsxbw %xmm0, %xmm0
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X64: # BB#0:
+; X64-NEXT: pmovsxbw %xmm0, %xmm1
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm2, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: pmovsxbw %xmm0, %xmm0
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: pand %xmm2, %xmm0
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
+; X64-XOP-NEXT: vpmullw {{.*}}(%rip), %xmm1, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT: vpand %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0
+; X64-XOP-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: vpand %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_2_3_9_17_33_65_129_2_3_9_17_33_65_129_2_3:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 2, i8 3, i8 9, i8 17, i8 33, i8 65, i8 129, i8 2, i8 3, i8 9, i8 17, i8 33, i8 65, i8 129, i8 2, i8 3>
+  ret <16 x i8> %1
+}
+
+;
+; PowOf2 - 1 (uniform)
+;
+
+define <2 x i64> @mul_v2i64_7(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_7:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [7,0,7,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_7:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [7,7]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_7:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [7,7]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 7, i64 7>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_7(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_7:
+; X86: # BB#0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_7:
+; X64: # BB#0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v4i32_7:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v4i32_7:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [7,7,7,7]
+; X64-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 7, i32 7, i32 7, i32 7>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_7(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_7:
+; X86: # BB#0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_7:
+; X64: # BB#0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_7:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_31(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_31:
+; X86: # BB#0:
+; X86-NEXT: pmovsxbw %xmm0, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
+; X86-NEXT: pmullw %xmm2, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm3, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT: pmovsxbw %xmm0, %xmm0
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_31:
+; X64: # BB#0:
+; X64-NEXT: pmovsxbw %xmm0, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
+; X64-NEXT: pmullw %xmm2, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm3, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: pmovsxbw %xmm0, %xmm0
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_31:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [31,31,31,31,31,31,31,31]
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT: vpand %xmm3, %xmm1, %xmm1
+; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT: vpand %xmm3, %xmm0, %xmm0
+; X64-XOP-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_31:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31, i8 31>
+  ret <16 x i8> %1
+}
+
+;
+; PowOf2 - 1 (non-uniform)
+;
+
+define <2 x i64> @mul_v2i64_15_63(<2 x i64> %a0) nounwind {
+; X86-LABEL: mul_v2i64_15_63:
+; X86: # BB#0:
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [15,0,63,0]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: psrlq $32, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v2i64_15_63:
+; X64: # BB#0:
+; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,63]
+; X64-NEXT: movdqa %xmm0, %xmm2
+; X64-NEXT: pmuludq %xmm1, %xmm2
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: pmuludq %xmm1, %xmm0
+; X64-NEXT: psllq $32, %xmm0
+; X64-NEXT: paddq %xmm2, %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v2i64_15_63:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,63]
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm2
+; X64-AVX-NEXT: vpsrlq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpsllq $32, %xmm0, %xmm0
+; X64-AVX-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <2 x i64> %a0, <i64 15, i64 63>
+  ret <2 x i64> %1
+}
+
+define <4 x i32> @mul_v4i32_0_15_31_7(<4 x i32> %a0) nounwind {
+; X86-LABEL: mul_v4i32_0_15_31_7:
+; X86: # BB#0:
+; X86-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v4i32_0_15_31_7:
+; X64: # BB#0:
+; X64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v4i32_0_15_31_7:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <4 x i32> %a0, <i32 0, i32 15, i32 31, i32 7>
+  ret <4 x i32> %1
+}
+
+define <8 x i16> @mul_v8i16_0_1_7_15_31_63_127_255(<8 x i16> %a0) nounwind {
+; X86-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
+; X86: # BB#0:
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
+; X64: # BB#0:
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm0
+; X64-NEXT: retq
+;
+; X64-AVX-LABEL: mul_v8i16_0_1_7_15_31_63_127_255:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: retq
+  %1 = mul <8 x i16> %a0, <i16 0, i16 1, i16 7, i16 15, i16 31, i16 63, i16 127, i16 255>
+  ret <8 x i16> %1
+}
+
+define <16 x i8> @mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; X86: # BB#0:
+; X86-NEXT: pmovsxbw %xmm0, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
+; X86-NEXT: pmullw %xmm2, %xmm1
+; X86-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm3, %xmm1
+; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X86-NEXT: pmovsxbw %xmm0, %xmm0
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: pand %xmm3, %xmm0
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; X64: # BB#0:
+; X64-NEXT: pmovsxbw %xmm0, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
+; X64-NEXT: pmullw %xmm2, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm3, %xmm1
+; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT: pmovsxbw %xmm0, %xmm0
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: pand %xmm3, %xmm0
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
+; X64-XOP-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; X64-XOP: # BB#0:
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,3,7,15,31,63,127]
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; X64-XOP-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; X64-XOP-NEXT: vpand %xmm3, %xmm1, %xmm1
+; X64-XOP-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-XOP-NEXT: vpmovsxbw %xmm0, %xmm0
+; X64-XOP-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; X64-XOP-NEXT: vpand %xmm3, %xmm0, %xmm0
+; X64-XOP-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
+; X64-XOP-NEXT: retq
+;
+; X64-AVX2-LABEL: mul_v16i8_0_1_3_7_15_31_63_127_0_1_3_7_15_31_63_127:
+; X64-AVX2: # BB#0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+  %1 = mul <16 x i8> %a0, <i8 0, i8 1, i8 3, i8 7, i8 15, i8 31, i8 63, i8 127, i8 0, i8 1, i8 3, i8 7, i8 15, i8 31, i8 63, i8 127>
+  ret <16 x i8> %1
+}
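
For reference, a minimal standalone sketch (not part of the patch; the file and function names are hypothetical) of the uniform power-of-two case the first test exercises. Since 8 = 2^3, llc is expected to select a vector shift rather than a multiply, matching the psllq $3 checks above:

; mul8.ll (hypothetical reproducer)
; Run with: llc < mul8.ll -mtriple=x86_64-unknown-unknown -mattr=+sse4.2
define <2 x i64> @mul_by_8(<2 x i64> %x) nounwind {
  ; Multiply by the splat constant 8 = 1 << 3; codegen should emit psllq $3.
  %r = mul <2 x i64> %x, <i64 8, i64 8>
  ret <2 x i64> %r
}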