--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVXNOVLBW,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVXNOVLBW,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+avx512vbmi | FileCheck %s --check-prefixes=AVX,AVX512,AVX512VLBW,VBMI
+
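+; Variable shuffle of <2 x i64> by runtime indices: the source vector is spilled
+; to the stack, each index is masked into range (andl $1), and the selected
+; elements are reloaded and repacked.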
+define <2 x i64> @var_shuffle_v2i64(<2 x i64> %v, <2 x i64> %indices) nounwind {
+; SSSE3-LABEL: var_shuffle_v2i64:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movq %xmm1, %rax
+; SSSE3-NEXT: andl $1, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movq %xmm1, %rcx
+; SSSE3-NEXT: andl $1, %ecx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; SSSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: var_shuffle_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovq %xmm1, %rax
+; AVX-NEXT: andl $1, %eax
+; AVX-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX-NEXT: andl $1, %ecx
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX-NEXT: retq
+ %index0 = extractelement <2 x i64> %indices, i32 0
+ %index1 = extractelement <2 x i64> %indices, i32 1
+ %v0 = extractelement <2 x i64> %v, i64 %index0
+ %v1 = extractelement <2 x i64> %v, i64 %index1
+ %ret0 = insertelement <2 x i64> undef, i64 %v0, i32 0
+ %ret1 = insertelement <2 x i64> %ret0, i64 %v1, i32 1
+ ret <2 x i64> %ret1
+}
+
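+; Same pattern for <4 x i32>: each dword index is masked with $3 and the four
+; selected elements are reassembled (unpcklps/movlhps on SSSE3, vpinsrd on AVX).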
+define <4 x i32> @var_shuffle_v4i32(<4 x i32> %v, <4 x i32> %indices) nounwind {
+; SSSE3-LABEL: var_shuffle_v4i32:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSSE3-NEXT: movq %xmm2, %rax
+; SSSE3-NEXT: movq %rax, %rcx
+; SSSE3-NEXT: sarq $32, %rcx
+; SSSE3-NEXT: movq %xmm1, %rdx
+; SSSE3-NEXT: movq %rdx, %rsi
+; SSSE3-NEXT: sarq $32, %rsi
+; SSSE3-NEXT: andl $3, %edx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: andl $3, %esi
+; SSSE3-NEXT: andl $3, %eax
+; SSSE3-NEXT: andl $3, %ecx
+; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSSE3-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: var_shuffle_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrq $1, %xmm1, %rax
+; AVX-NEXT: movq %rax, %rcx
+; AVX-NEXT: sarq $32, %rcx
+; AVX-NEXT: vmovq %xmm1, %rdx
+; AVX-NEXT: movq %rdx, %rsi
+; AVX-NEXT: sarq $32, %rsi
+; AVX-NEXT: andl $3, %edx
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $3, %esi
+; AVX-NEXT: andl $3, %eax
+; AVX-NEXT: andl $3, %ecx
+; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vpinsrd $1, -24(%rsp,%rsi,4), %xmm0, %xmm0
+; AVX-NEXT: vpinsrd $2, -24(%rsp,%rax,4), %xmm0, %xmm0
+; AVX-NEXT: vpinsrd $3, -24(%rsp,%rcx,4), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %index0 = extractelement <4 x i32> %indices, i32 0
+ %index1 = extractelement <4 x i32> %indices, i32 1
+ %index2 = extractelement <4 x i32> %indices, i32 2
+ %index3 = extractelement <4 x i32> %indices, i32 3
+ %v0 = extractelement <4 x i32> %v, i32 %index0
+ %v1 = extractelement <4 x i32> %v, i32 %index1
+ %v2 = extractelement <4 x i32> %v, i32 %index2
+ %v3 = extractelement <4 x i32> %v, i32 %index3
+ %ret0 = insertelement <4 x i32> undef, i32 %v0, i32 0
+ %ret1 = insertelement <4 x i32> %ret0, i32 %v1, i32 1
+ %ret2 = insertelement <4 x i32> %ret1, i32 %v2, i32 2
+ %ret3 = insertelement <4 x i32> %ret2, i32 %v3, i32 3
+ ret <4 x i32> %ret3
+}
+
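+; <8 x i16> variant: all eight word indices are extracted and masked with $7
+; before the result is rebuilt from scalar stack loads.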
+define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
+; SSSE3-LABEL: var_shuffle_v8i16:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movd %xmm1, %r8d
+; SSSE3-NEXT: pextrw $1, %xmm1, %r9d
+; SSSE3-NEXT: pextrw $2, %xmm1, %r10d
+; SSSE3-NEXT: pextrw $3, %xmm1, %esi
+; SSSE3-NEXT: pextrw $4, %xmm1, %edi
+; SSSE3-NEXT: pextrw $5, %xmm1, %eax
+; SSSE3-NEXT: pextrw $6, %xmm1, %ecx
+; SSSE3-NEXT: pextrw $7, %xmm1, %edx
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: andl $7, %r8d
+; SSSE3-NEXT: andl $7, %r9d
+; SSSE3-NEXT: andl $7, %r10d
+; SSSE3-NEXT: andl $7, %esi
+; SSSE3-NEXT: andl $7, %edi
+; SSSE3-NEXT: andl $7, %eax
+; SSSE3-NEXT: andl $7, %ecx
+; SSSE3-NEXT: andl $7, %edx
+; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %edx
+; SSSE3-NEXT: movd %edx, %xmm0
+; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm2
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT: movzwl -24(%rsp,%r9,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm3
+; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: var_shuffle_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovd %xmm1, %eax
+; AVX-NEXT: vpextrw $1, %xmm1, %r10d
+; AVX-NEXT: vpextrw $2, %xmm1, %ecx
+; AVX-NEXT: vpextrw $3, %xmm1, %edx
+; AVX-NEXT: vpextrw $4, %xmm1, %esi
+; AVX-NEXT: vpextrw $5, %xmm1, %edi
+; AVX-NEXT: vpextrw $6, %xmm1, %r8d
+; AVX-NEXT: vpextrw $7, %xmm1, %r9d
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $7, %eax
+; AVX-NEXT: andl $7, %r10d
+; AVX-NEXT: andl $7, %ecx
+; AVX-NEXT: andl $7, %edx
+; AVX-NEXT: andl $7, %esi
+; AVX-NEXT: andl $7, %edi
+; AVX-NEXT: andl $7, %r8d
+; AVX-NEXT: andl $7, %r9d
+; AVX-NEXT: movzwl -24(%rsp,%rax,2), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vpinsrw $1, -24(%rsp,%r10,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $2, -24(%rsp,%rcx,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $3, -24(%rsp,%rdx,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $4, -24(%rsp,%rsi,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $5, -24(%rsp,%rdi,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $6, -24(%rsp,%r8,2), %xmm0, %xmm0
+; AVX-NEXT: vpinsrw $7, -24(%rsp,%r9,2), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %index0 = extractelement <8 x i16> %indices, i32 0
+ %index1 = extractelement <8 x i16> %indices, i32 1
+ %index2 = extractelement <8 x i16> %indices, i32 2
+ %index3 = extractelement <8 x i16> %indices, i32 3
+ %index4 = extractelement <8 x i16> %indices, i32 4
+ %index5 = extractelement <8 x i16> %indices, i32 5
+ %index6 = extractelement <8 x i16> %indices, i32 6
+ %index7 = extractelement <8 x i16> %indices, i32 7
+ %v0 = extractelement <8 x i16> %v, i16 %index0
+ %v1 = extractelement <8 x i16> %v, i16 %index1
+ %v2 = extractelement <8 x i16> %v, i16 %index2
+ %v3 = extractelement <8 x i16> %v, i16 %index3
+ %v4 = extractelement <8 x i16> %v, i16 %index4
+ %v5 = extractelement <8 x i16> %v, i16 %index5
+ %v6 = extractelement <8 x i16> %v, i16 %index6
+ %v7 = extractelement <8 x i16> %v, i16 %index7
+ %ret0 = insertelement <8 x i16> undef, i16 %v0, i32 0
+ %ret1 = insertelement <8 x i16> %ret0, i16 %v1, i32 1
+ %ret2 = insertelement <8 x i16> %ret1, i16 %v2, i32 2
+ %ret3 = insertelement <8 x i16> %ret2, i16 %v3, i32 3
+ %ret4 = insertelement <8 x i16> %ret3, i16 %v4, i32 4
+ %ret5 = insertelement <8 x i16> %ret4, i16 %v5, i32 5
+ %ret6 = insertelement <8 x i16> %ret5, i16 %v6, i32 6
+ %ret7 = insertelement <8 x i16> %ret6, i16 %v7, i32 7
+ ret <8 x i16> %ret7
+}
+
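+; <16 x i8> variant: sixteen byte indices, each masked with $15, drive scalar
+; byte loads from the spilled vector.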
+define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
+; SSSE3-LABEL: var_shuffle_v16i8:
+; SSSE3: # BB#0:
+; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm8
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm15
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm9
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm3
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm10
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm7
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm11
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm6
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm12
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm5
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm13
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm4
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm14
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm1
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
+; SSSE3-NEXT: movd %ecx, %xmm2
+; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
+; SSSE3-NEXT: andl $15, %ecx
+; SSSE3-NEXT: movzbl (%rcx,%rax), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
+; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: var_shuffle_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpextrb $0, %xmm1, %eax
+; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
+; AVX-NEXT: movzbl (%rax,%rcx), %eax
+; AVX-NEXT: vmovd %eax, %xmm0
+; AVX-NEXT: vpextrb $1, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $1, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $2, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $2, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $3, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $3, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $4, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $4, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $5, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $5, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $6, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $6, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $7, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $7, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $8, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $8, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $9, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $9, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $10, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $10, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $11, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $11, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $12, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $12, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $13, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $13, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $14, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $14, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: vpextrb $15, %xmm1, %eax
+; AVX-NEXT: andl $15, %eax
+; AVX-NEXT: vpinsrb $15, (%rax,%rcx), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %index0 = extractelement <16 x i8> %indices, i32 0
+ %index1 = extractelement <16 x i8> %indices, i32 1
+ %index2 = extractelement <16 x i8> %indices, i32 2
+ %index3 = extractelement <16 x i8> %indices, i32 3
+ %index4 = extractelement <16 x i8> %indices, i32 4
+ %index5 = extractelement <16 x i8> %indices, i32 5
+ %index6 = extractelement <16 x i8> %indices, i32 6
+ %index7 = extractelement <16 x i8> %indices, i32 7
+ %index8 = extractelement <16 x i8> %indices, i32 8
+ %index9 = extractelement <16 x i8> %indices, i32 9
+ %index10 = extractelement <16 x i8> %indices, i32 10
+ %index11 = extractelement <16 x i8> %indices, i32 11
+ %index12 = extractelement <16 x i8> %indices, i32 12
+ %index13 = extractelement <16 x i8> %indices, i32 13
+ %index14 = extractelement <16 x i8> %indices, i32 14
+ %index15 = extractelement <16 x i8> %indices, i32 15
+ %v0 = extractelement <16 x i8> %v, i8 %index0
+ %v1 = extractelement <16 x i8> %v, i8 %index1
+ %v2 = extractelement <16 x i8> %v, i8 %index2
+ %v3 = extractelement <16 x i8> %v, i8 %index3
+ %v4 = extractelement <16 x i8> %v, i8 %index4
+ %v5 = extractelement <16 x i8> %v, i8 %index5
+ %v6 = extractelement <16 x i8> %v, i8 %index6
+ %v7 = extractelement <16 x i8> %v, i8 %index7
+ %v8 = extractelement <16 x i8> %v, i8 %index8
+ %v9 = extractelement <16 x i8> %v, i8 %index9
+ %v10 = extractelement <16 x i8> %v, i8 %index10
+ %v11 = extractelement <16 x i8> %v, i8 %index11
+ %v12 = extractelement <16 x i8> %v, i8 %index12
+ %v13 = extractelement <16 x i8> %v, i8 %index13
+ %v14 = extractelement <16 x i8> %v, i8 %index14
+ %v15 = extractelement <16 x i8> %v, i8 %index15
+ %ret0 = insertelement <16 x i8> undef, i8 %v0, i32 0
+ %ret1 = insertelement <16 x i8> %ret0, i8 %v1, i32 1
+ %ret2 = insertelement <16 x i8> %ret1, i8 %v2, i32 2
+ %ret3 = insertelement <16 x i8> %ret2, i8 %v3, i32 3
+ %ret4 = insertelement <16 x i8> %ret3, i8 %v4, i32 4
+ %ret5 = insertelement <16 x i8> %ret4, i8 %v5, i32 5
+ %ret6 = insertelement <16 x i8> %ret5, i8 %v6, i32 6
+ %ret7 = insertelement <16 x i8> %ret6, i8 %v7, i32 7
+ %ret8 = insertelement <16 x i8> %ret7, i8 %v8, i32 8
+ %ret9 = insertelement <16 x i8> %ret8, i8 %v9, i32 9
+ %ret10 = insertelement <16 x i8> %ret9, i8 %v10, i32 10
+ %ret11 = insertelement <16 x i8> %ret10, i8 %v11, i32 11
+ %ret12 = insertelement <16 x i8> %ret11, i8 %v12, i32 12
+ %ret13 = insertelement <16 x i8> %ret12, i8 %v13, i32 13
+ %ret14 = insertelement <16 x i8> %ret13, i8 %v14, i32 14
+ %ret15 = insertelement <16 x i8> %ret14, i8 %v15, i32 15
+ ret <16 x i8> %ret15
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,INT256,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,INT256,AVX512,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX,AVXNOVLBW,INT256,AVX512,AVX512VL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=AVX,INT256,AVX512,AVX512VLBW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+avx512vbmi | FileCheck %s --check-prefixes=AVX,INT256,AVX512,AVX512VLBW,VBMI
+
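+; The 256-bit variants align the stack to 32 bytes before spilling the ymm source.
+; <4 x i64>: quadword indices are masked with $3 and the result is rebuilt from
+; two 128-bit halves with vinsertf128.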
+define <4 x i64> @var_shuffle_v4i64(<4 x i64> %v, <4 x i64> %indices) nounwind {
+; AVX1-LABEL: var_shuffle_v4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: andl $3, %eax
+; AVX1-NEXT: vpextrq $1, %xmm1, %rcx
+; AVX1-NEXT: andl $3, %ecx
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vmovq %xmm1, %rdx
+; AVX1-NEXT: andl $3, %edx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: andl $3, %esi
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX1-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; INT256-LABEL: var_shuffle_v4i64:
+; INT256: # BB#0:
+; INT256-NEXT: pushq %rbp
+; INT256-NEXT: movq %rsp, %rbp
+; INT256-NEXT: andq $-32, %rsp
+; INT256-NEXT: subq $64, %rsp
+; INT256-NEXT: vmovq %xmm1, %rax
+; INT256-NEXT: andl $3, %eax
+; INT256-NEXT: vpextrq $1, %xmm1, %rcx
+; INT256-NEXT: andl $3, %ecx
+; INT256-NEXT: vextracti128 $1, %ymm1, %xmm1
+; INT256-NEXT: vmovq %xmm1, %rdx
+; INT256-NEXT: andl $3, %edx
+; INT256-NEXT: vpextrq $1, %xmm1, %rsi
+; INT256-NEXT: andl $3, %esi
+; INT256-NEXT: vmovaps %ymm0, (%rsp)
+; INT256-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; INT256-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; INT256-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; INT256-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; INT256-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; INT256-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; INT256-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; INT256-NEXT: movq %rbp, %rsp
+; INT256-NEXT: popq %rbp
+; INT256-NEXT: retq
+ %index0 = extractelement <4 x i64> %indices, i32 0
+ %index1 = extractelement <4 x i64> %indices, i32 1
+ %index2 = extractelement <4 x i64> %indices, i32 2
+ %index3 = extractelement <4 x i64> %indices, i32 3
+ %v0 = extractelement <4 x i64> %v, i64 %index0
+ %v1 = extractelement <4 x i64> %v, i64 %index1
+ %v2 = extractelement <4 x i64> %v, i64 %index2
+ %v3 = extractelement <4 x i64> %v, i64 %index3
+ %ret0 = insertelement <4 x i64> undef, i64 %v0, i32 0
+ %ret1 = insertelement <4 x i64> %ret0, i64 %v1, i32 1
+ %ret2 = insertelement <4 x i64> %ret1, i64 %v2, i32 2
+ %ret3 = insertelement <4 x i64> %ret2, i64 %v3, i32 3
+ ret <4 x i64> %ret3
+}
+
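+; <8 x i32>: each dword index is masked into range and the eight selected
+; elements are gathered with vpinsrd into two xmm halves.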
+define <8 x i32> @var_shuffle_v8i32(<8 x i32> %v, <8 x i32> %indices) nounwind {
+; AVX1-LABEL: var_shuffle_v8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: vpextrq $1, %xmm1, %r8
+; AVX1-NEXT: movq %r8, %rcx
+; AVX1-NEXT: shrq $30, %rcx
+; AVX1-NEXT: vmovq %xmm1, %r9
+; AVX1-NEXT: movq %r9, %rsi
+; AVX1-NEXT: shrq $30, %rsi
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpextrq $1, %xmm1, %r10
+; AVX1-NEXT: movq %r10, %rdi
+; AVX1-NEXT: shrq $30, %rdi
+; AVX1-NEXT: vmovq %xmm1, %rax
+; AVX1-NEXT: movq %rax, %rdx
+; AVX1-NEXT: shrq $30, %rdx
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $7, %r9d
+; AVX1-NEXT: andl $28, %esi
+; AVX1-NEXT: andl $7, %r8d
+; AVX1-NEXT: andl $28, %ecx
+; AVX1-NEXT: andl $7, %eax
+; AVX1-NEXT: andl $28, %edx
+; AVX1-NEXT: andl $7, %r10d
+; AVX1-NEXT: andl $28, %edi
+; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: movq %rsp, %rax
+; AVX1-NEXT: vpinsrd $1, (%rdx,%rax), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrd $2, (%rsp,%r10,4), %xmm0, %xmm0
+; AVX1-NEXT: vpinsrd $3, (%rdi,%rax), %xmm0, %xmm0
+; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX1-NEXT: vpinsrd $1, (%rsi,%rax), %xmm1, %xmm1
+; AVX1-NEXT: vpinsrd $2, (%rsp,%r8,4), %xmm1, %xmm1
+; AVX1-NEXT: vpinsrd $3, (%rcx,%rax), %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; INT256-LABEL: var_shuffle_v8i32:
+; INT256: # BB#0:
+; INT256-NEXT: pushq %rbp
+; INT256-NEXT: movq %rsp, %rbp
+; INT256-NEXT: andq $-32, %rsp
+; INT256-NEXT: subq $64, %rsp
+; INT256-NEXT: vpextrq $1, %xmm1, %r8
+; INT256-NEXT: movq %r8, %rcx
+; INT256-NEXT: shrq $30, %rcx
+; INT256-NEXT: vmovq %xmm1, %r9
+; INT256-NEXT: movq %r9, %rsi
+; INT256-NEXT: shrq $30, %rsi
+; INT256-NEXT: vextracti128 $1, %ymm1, %xmm1
+; INT256-NEXT: vpextrq $1, %xmm1, %r10
+; INT256-NEXT: movq %r10, %rdi
+; INT256-NEXT: shrq $30, %rdi
+; INT256-NEXT: vmovq %xmm1, %rax
+; INT256-NEXT: movq %rax, %rdx
+; INT256-NEXT: shrq $30, %rdx
+; INT256-NEXT: vmovaps %ymm0, (%rsp)
+; INT256-NEXT: andl $7, %r9d
+; INT256-NEXT: andl $28, %esi
+; INT256-NEXT: andl $7, %r8d
+; INT256-NEXT: andl $28, %ecx
+; INT256-NEXT: andl $7, %eax
+; INT256-NEXT: andl $28, %edx
+; INT256-NEXT: andl $7, %r10d
+; INT256-NEXT: andl $28, %edi
+; INT256-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; INT256-NEXT: movq %rsp, %rax
+; INT256-NEXT: vpinsrd $1, (%rdx,%rax), %xmm0, %xmm0
+; INT256-NEXT: vpinsrd $2, (%rsp,%r10,4), %xmm0, %xmm0
+; INT256-NEXT: vpinsrd $3, (%rdi,%rax), %xmm0, %xmm0
+; INT256-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; INT256-NEXT: vpinsrd $1, (%rsi,%rax), %xmm1, %xmm1
+; INT256-NEXT: vpinsrd $2, (%rsp,%r8,4), %xmm1, %xmm1
+; INT256-NEXT: vpinsrd $3, (%rcx,%rax), %xmm1, %xmm1
+; INT256-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; INT256-NEXT: movq %rbp, %rsp
+; INT256-NEXT: popq %rbp
+; INT256-NEXT: retq
+ %index0 = extractelement <8 x i32> %indices, i32 0
+ %index1 = extractelement <8 x i32> %indices, i32 1
+ %index2 = extractelement <8 x i32> %indices, i32 2
+ %index3 = extractelement <8 x i32> %indices, i32 3
+ %index4 = extractelement <8 x i32> %indices, i32 4
+ %index5 = extractelement <8 x i32> %indices, i32 5
+ %index6 = extractelement <8 x i32> %indices, i32 6
+ %index7 = extractelement <8 x i32> %indices, i32 7
+ %v0 = extractelement <8 x i32> %v, i32 %index0
+ %v1 = extractelement <8 x i32> %v, i32 %index1
+ %v2 = extractelement <8 x i32> %v, i32 %index2
+ %v3 = extractelement <8 x i32> %v, i32 %index3
+ %v4 = extractelement <8 x i32> %v, i32 %index4
+ %v5 = extractelement <8 x i32> %v, i32 %index5
+ %v6 = extractelement <8 x i32> %v, i32 %index6
+ %v7 = extractelement <8 x i32> %v, i32 %index7
+ %ret0 = insertelement <8 x i32> undef, i32 %v0, i32 0
+ %ret1 = insertelement <8 x i32> %ret0, i32 %v1, i32 1
+ %ret2 = insertelement <8 x i32> %ret1, i32 %v2, i32 2
+ %ret3 = insertelement <8 x i32> %ret2, i32 %v3, i32 3
+ %ret4 = insertelement <8 x i32> %ret3, i32 %v4, i32 4
+ %ret5 = insertelement <8 x i32> %ret4, i32 %v5, i32 5
+ %ret6 = insertelement <8 x i32> %ret5, i32 %v6, i32 6
+ %ret7 = insertelement <8 x i32> %ret6, i32 %v7, i32 7
+ ret <8 x i32> %ret7
+}
+
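+; <16 x i16>: word indices are masked with $15 and gathered with vpinsrw.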
+define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwind {
+; AVX1-LABEL: var_shuffle_v16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovd %xmm2, %eax
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpextrw $1, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $2, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $3, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $4, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $5, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $6, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vpextrw $7, %xmm2, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
+; AVX1-NEXT: vmovd %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpextrw $1, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $2, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $3, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $4, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $5, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $6, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm2, %xmm2
+; AVX1-NEXT: vpextrw $7, %xmm1, %eax
+; AVX1-NEXT: andl $15, %eax
+; AVX1-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; INT256-LABEL: var_shuffle_v16i16:
+; INT256: # BB#0:
+; INT256-NEXT: pushq %rbp
+; INT256-NEXT: movq %rsp, %rbp
+; INT256-NEXT: andq $-32, %rsp
+; INT256-NEXT: subq $64, %rsp
+; INT256-NEXT: vextracti128 $1, %ymm1, %xmm2
+; INT256-NEXT: vmovd %xmm2, %eax
+; INT256-NEXT: vmovaps %ymm0, (%rsp)
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: movzwl (%rsp,%rax,2), %eax
+; INT256-NEXT: vmovd %eax, %xmm0
+; INT256-NEXT: vpextrw $1, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vpextrw $2, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vpextrw $3, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vpextrw $4, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vpextrw $5, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vpextrw $6, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vpextrw $7, %xmm2, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm0, %xmm0
+; INT256-NEXT: vmovd %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: movzwl (%rsp,%rax,2), %eax
+; INT256-NEXT: vmovd %eax, %xmm2
+; INT256-NEXT: vpextrw $1, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm2, %xmm2
+; INT256-NEXT: vpextrw $2, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm2, %xmm2
+; INT256-NEXT: vpextrw $3, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm2, %xmm2
+; INT256-NEXT: vpextrw $4, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm2, %xmm2
+; INT256-NEXT: vpextrw $5, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm2, %xmm2
+; INT256-NEXT: vpextrw $6, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm2, %xmm2
+; INT256-NEXT: vpextrw $7, %xmm1, %eax
+; INT256-NEXT: andl $15, %eax
+; INT256-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm2, %xmm1
+; INT256-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; INT256-NEXT: movq %rbp, %rsp
+; INT256-NEXT: popq %rbp
+; INT256-NEXT: retq
+ %index0 = extractelement <16 x i16> %indices, i32 0
+ %index1 = extractelement <16 x i16> %indices, i32 1
+ %index2 = extractelement <16 x i16> %indices, i32 2
+ %index3 = extractelement <16 x i16> %indices, i32 3
+ %index4 = extractelement <16 x i16> %indices, i32 4
+ %index5 = extractelement <16 x i16> %indices, i32 5
+ %index6 = extractelement <16 x i16> %indices, i32 6
+ %index7 = extractelement <16 x i16> %indices, i32 7
+ %index8 = extractelement <16 x i16> %indices, i32 8
+ %index9 = extractelement <16 x i16> %indices, i32 9
+ %index10 = extractelement <16 x i16> %indices, i32 10
+ %index11 = extractelement <16 x i16> %indices, i32 11
+ %index12 = extractelement <16 x i16> %indices, i32 12
+ %index13 = extractelement <16 x i16> %indices, i32 13
+ %index14 = extractelement <16 x i16> %indices, i32 14
+ %index15 = extractelement <16 x i16> %indices, i32 15
+ %v0 = extractelement <16 x i16> %v, i16 %index0
+ %v1 = extractelement <16 x i16> %v, i16 %index1
+ %v2 = extractelement <16 x i16> %v, i16 %index2
+ %v3 = extractelement <16 x i16> %v, i16 %index3
+ %v4 = extractelement <16 x i16> %v, i16 %index4
+ %v5 = extractelement <16 x i16> %v, i16 %index5
+ %v6 = extractelement <16 x i16> %v, i16 %index6
+ %v7 = extractelement <16 x i16> %v, i16 %index7
+ %v8 = extractelement <16 x i16> %v, i16 %index8
+ %v9 = extractelement <16 x i16> %v, i16 %index9
+ %v10 = extractelement <16 x i16> %v, i16 %index10
+ %v11 = extractelement <16 x i16> %v, i16 %index11
+ %v12 = extractelement <16 x i16> %v, i16 %index12
+ %v13 = extractelement <16 x i16> %v, i16 %index13
+ %v14 = extractelement <16 x i16> %v, i16 %index14
+ %v15 = extractelement <16 x i16> %v, i16 %index15
+ %ret0 = insertelement <16 x i16> undef, i16 %v0, i32 0
+ %ret1 = insertelement <16 x i16> %ret0, i16 %v1, i32 1
+ %ret2 = insertelement <16 x i16> %ret1, i16 %v2, i32 2
+ %ret3 = insertelement <16 x i16> %ret2, i16 %v3, i32 3
+ %ret4 = insertelement <16 x i16> %ret3, i16 %v4, i32 4
+ %ret5 = insertelement <16 x i16> %ret4, i16 %v5, i32 5
+ %ret6 = insertelement <16 x i16> %ret5, i16 %v6, i32 6
+ %ret7 = insertelement <16 x i16> %ret6, i16 %v7, i32 7
+ %ret8 = insertelement <16 x i16> %ret7, i16 %v8, i32 8
+ %ret9 = insertelement <16 x i16> %ret8, i16 %v9, i32 9
+ %ret10 = insertelement <16 x i16> %ret9, i16 %v10, i32 10
+ %ret11 = insertelement <16 x i16> %ret10, i16 %v11, i32 11
+ %ret12 = insertelement <16 x i16> %ret11, i16 %v12, i32 12
+ %ret13 = insertelement <16 x i16> %ret12, i16 %v13, i32 13
+ %ret14 = insertelement <16 x i16> %ret13, i16 %v14, i32 14
+ %ret15 = insertelement <16 x i16> %ret14, i16 %v15, i32 15
+ ret <16 x i16> %ret15
+}
+
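+; <32 x i8>: byte indices are masked with $31 and gathered with vpinsrb.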
+define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
+; AVX1-LABEL: var_shuffle_v32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: pushq %rbp
+; AVX1-NEXT: movq %rsp, %rbp
+; AVX1-NEXT: andq $-32, %rsp
+; AVX1-NEXT: subq $64, %rsp
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpextrb $0, %xmm2, %eax
+; AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movq %rsp, %rcx
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpextrb $1, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $2, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $3, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $4, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $5, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $6, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $7, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $8, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $9, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $10, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $11, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $12, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $13, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $14, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $15, %xmm2, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vpextrb $0, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vmovd %eax, %xmm2
+; AVX1-NEXT: vpextrb $1, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $1, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $2, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $2, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $3, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $3, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $4, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $4, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $5, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $5, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $6, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $6, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $7, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $7, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $8, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $8, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $9, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $9, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $10, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $10, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $11, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $11, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $12, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $12, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $13, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $13, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $14, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: vpinsrb $14, (%rax,%rcx), %xmm2, %xmm2
+; AVX1-NEXT: vpextrb $15, %xmm1, %eax
+; AVX1-NEXT: andl $31, %eax
+; AVX1-NEXT: movzbl (%rax,%rcx), %eax
+; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: movq %rbp, %rsp
+; AVX1-NEXT: popq %rbp
+; AVX1-NEXT: retq
+;
+; INT256-LABEL: var_shuffle_v32i8:
+; INT256: # BB#0:
+; INT256-NEXT: pushq %rbp
+; INT256-NEXT: movq %rsp, %rbp
+; INT256-NEXT: andq $-32, %rsp
+; INT256-NEXT: subq $64, %rsp
+; INT256-NEXT: vextracti128 $1, %ymm1, %xmm2
+; INT256-NEXT: vpextrb $0, %xmm2, %eax
+; INT256-NEXT: vmovaps %ymm0, (%rsp)
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movq %rsp, %rcx
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vmovd %eax, %xmm0
+; INT256-NEXT: vpextrb $1, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $2, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $2, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $3, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $4, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $4, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $5, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $6, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $6, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $7, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $8, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $9, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $10, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $11, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $12, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $13, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $14, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $15, %xmm2, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; INT256-NEXT: vpextrb $0, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vmovd %eax, %xmm2
+; INT256-NEXT: vpextrb $1, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $1, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $2, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $2, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $3, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $3, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $4, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $4, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $5, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $5, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $6, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $6, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $7, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $7, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $8, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $8, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $9, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $9, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $10, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $10, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $11, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $11, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $12, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $12, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $13, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $13, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $14, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: vpinsrb $14, (%rax,%rcx), %xmm2, %xmm2
+; INT256-NEXT: vpextrb $15, %xmm1, %eax
+; INT256-NEXT: andl $31, %eax
+; INT256-NEXT: movzbl (%rax,%rcx), %eax
+; INT256-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1
+; INT256-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; INT256-NEXT: movq %rbp, %rsp
+; INT256-NEXT: popq %rbp
+; INT256-NEXT: retq
+ %index0 = extractelement <32 x i8> %indices, i32 0
+ %index1 = extractelement <32 x i8> %indices, i32 1
+ %index2 = extractelement <32 x i8> %indices, i32 2
+ %index3 = extractelement <32 x i8> %indices, i32 3
+ %index4 = extractelement <32 x i8> %indices, i32 4
+ %index5 = extractelement <32 x i8> %indices, i32 5
+ %index6 = extractelement <32 x i8> %indices, i32 6
+ %index7 = extractelement <32 x i8> %indices, i32 7
+ %index8 = extractelement <32 x i8> %indices, i32 8
+ %index9 = extractelement <32 x i8> %indices, i32 9
+ %index10 = extractelement <32 x i8> %indices, i32 10
+ %index11 = extractelement <32 x i8> %indices, i32 11
+ %index12 = extractelement <32 x i8> %indices, i32 12
+ %index13 = extractelement <32 x i8> %indices, i32 13
+ %index14 = extractelement <32 x i8> %indices, i32 14
+ %index15 = extractelement <32 x i8> %indices, i32 15
+ %index16 = extractelement <32 x i8> %indices, i32 16
+ %index17 = extractelement <32 x i8> %indices, i32 17
+ %index18 = extractelement <32 x i8> %indices, i32 18
+ %index19 = extractelement <32 x i8> %indices, i32 19
+ %index20 = extractelement <32 x i8> %indices, i32 20
+ %index21 = extractelement <32 x i8> %indices, i32 21
+ %index22 = extractelement <32 x i8> %indices, i32 22
+ %index23 = extractelement <32 x i8> %indices, i32 23
+ %index24 = extractelement <32 x i8> %indices, i32 24
+ %index25 = extractelement <32 x i8> %indices, i32 25
+ %index26 = extractelement <32 x i8> %indices, i32 26
+ %index27 = extractelement <32 x i8> %indices, i32 27
+ %index28 = extractelement <32 x i8> %indices, i32 28
+ %index29 = extractelement <32 x i8> %indices, i32 29
+ %index30 = extractelement <32 x i8> %indices, i32 30
+ %index31 = extractelement <32 x i8> %indices, i32 31
+ %v0 = extractelement <32 x i8> %v, i8 %index0
+ %v1 = extractelement <32 x i8> %v, i8 %index1
+ %v2 = extractelement <32 x i8> %v, i8 %index2
+ %v3 = extractelement <32 x i8> %v, i8 %index3
+ %v4 = extractelement <32 x i8> %v, i8 %index4
+ %v5 = extractelement <32 x i8> %v, i8 %index5
+ %v6 = extractelement <32 x i8> %v, i8 %index6
+ %v7 = extractelement <32 x i8> %v, i8 %index7
+ %v8 = extractelement <32 x i8> %v, i8 %index8
+ %v9 = extractelement <32 x i8> %v, i8 %index9
+ %v10 = extractelement <32 x i8> %v, i8 %index10
+ %v11 = extractelement <32 x i8> %v, i8 %index11
+ %v12 = extractelement <32 x i8> %v, i8 %index12
+ %v13 = extractelement <32 x i8> %v, i8 %index13
+ %v14 = extractelement <32 x i8> %v, i8 %index14
+ %v15 = extractelement <32 x i8> %v, i8 %index15
+ %v16 = extractelement <32 x i8> %v, i8 %index16
+ %v17 = extractelement <32 x i8> %v, i8 %index17
+ %v18 = extractelement <32 x i8> %v, i8 %index18
+ %v19 = extractelement <32 x i8> %v, i8 %index19
+ %v20 = extractelement <32 x i8> %v, i8 %index20
+ %v21 = extractelement <32 x i8> %v, i8 %index21
+ %v22 = extractelement <32 x i8> %v, i8 %index22
+ %v23 = extractelement <32 x i8> %v, i8 %index23
+ %v24 = extractelement <32 x i8> %v, i8 %index24
+ %v25 = extractelement <32 x i8> %v, i8 %index25
+ %v26 = extractelement <32 x i8> %v, i8 %index26
+ %v27 = extractelement <32 x i8> %v, i8 %index27
+ %v28 = extractelement <32 x i8> %v, i8 %index28
+ %v29 = extractelement <32 x i8> %v, i8 %index29
+ %v30 = extractelement <32 x i8> %v, i8 %index30
+ %v31 = extractelement <32 x i8> %v, i8 %index31
+ %ret0 = insertelement <32 x i8> undef, i8 %v0, i32 0
+ %ret1 = insertelement <32 x i8> %ret0, i8 %v1, i32 1
+ %ret2 = insertelement <32 x i8> %ret1, i8 %v2, i32 2
+ %ret3 = insertelement <32 x i8> %ret2, i8 %v3, i32 3
+ %ret4 = insertelement <32 x i8> %ret3, i8 %v4, i32 4
+ %ret5 = insertelement <32 x i8> %ret4, i8 %v5, i32 5
+ %ret6 = insertelement <32 x i8> %ret5, i8 %v6, i32 6
+ %ret7 = insertelement <32 x i8> %ret6, i8 %v7, i32 7
+ %ret8 = insertelement <32 x i8> %ret7, i8 %v8, i32 8
+ %ret9 = insertelement <32 x i8> %ret8, i8 %v9, i32 9
+ %ret10 = insertelement <32 x i8> %ret9, i8 %v10, i32 10
+ %ret11 = insertelement <32 x i8> %ret10, i8 %v11, i32 11
+ %ret12 = insertelement <32 x i8> %ret11, i8 %v12, i32 12
+ %ret13 = insertelement <32 x i8> %ret12, i8 %v13, i32 13
+ %ret14 = insertelement <32 x i8> %ret13, i8 %v14, i32 14
+ %ret15 = insertelement <32 x i8> %ret14, i8 %v15, i32 15
+ %ret16 = insertelement <32 x i8> %ret15, i8 %v16, i32 16
+ %ret17 = insertelement <32 x i8> %ret16, i8 %v17, i32 17
+ %ret18 = insertelement <32 x i8> %ret17, i8 %v18, i32 18
+ %ret19 = insertelement <32 x i8> %ret18, i8 %v19, i32 19
+ %ret20 = insertelement <32 x i8> %ret19, i8 %v20, i32 20
+ %ret21 = insertelement <32 x i8> %ret20, i8 %v21, i32 21
+ %ret22 = insertelement <32 x i8> %ret21, i8 %v22, i32 22
+ %ret23 = insertelement <32 x i8> %ret22, i8 %v23, i32 23
+ %ret24 = insertelement <32 x i8> %ret23, i8 %v24, i32 24
+ %ret25 = insertelement <32 x i8> %ret24, i8 %v25, i32 25
+ %ret26 = insertelement <32 x i8> %ret25, i8 %v26, i32 26
+ %ret27 = insertelement <32 x i8> %ret26, i8 %v27, i32 27
+ %ret28 = insertelement <32 x i8> %ret27, i8 %v28, i32 28
+ %ret29 = insertelement <32 x i8> %ret28, i8 %v29, i32 29
+ %ret30 = insertelement <32 x i8> %ret29, i8 %v30, i32 30
+ %ret31 = insertelement <32 x i8> %ret30, i8 %v31, i32 31
+ ret <32 x i8> %ret31
+}
+
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,NOBW,NOVBMI,AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX512,NOVBMI,AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vbmi | FileCheck %s --check-prefixes=AVX512,AVX512BW,VBMI
+
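+; The 512-bit variants align the stack to 64 bytes before spilling the zmm source.
+; <8 x i64>: quadword indices are masked with $7 and the result is rebuilt from
+; four 128-bit pieces.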
+define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
+; AVX512-LABEL: var_shuffle_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: movq %rsp, %rbp
+; AVX512-NEXT: andq $-64, %rsp
+; AVX512-NEXT: subq $128, %rsp
+; AVX512-NEXT: vmovq %xmm1, %r8
+; AVX512-NEXT: andl $7, %r8d
+; AVX512-NEXT: vpextrq $1, %xmm1, %r9
+; AVX512-NEXT: andl $7, %r9d
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %r10
+; AVX512-NEXT: andl $7, %r10d
+; AVX512-NEXT: vpextrq $1, %xmm2, %rsi
+; AVX512-NEXT: andl $7, %esi
+; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2
+; AVX512-NEXT: vmovq %xmm2, %rdi
+; AVX512-NEXT: andl $7, %edi
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: andl $7, %eax
+; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
+; AVX512-NEXT: vmovq %xmm1, %rcx
+; AVX512-NEXT: andl $7, %ecx
+; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX512-NEXT: andl $7, %edx
+; AVX512-NEXT: vmovaps %zmm0, (%rsp)
+; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
+; AVX512-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
+; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: movq %rbp, %rsp
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %index0 = extractelement <8 x i64> %indices, i32 0
+ %index1 = extractelement <8 x i64> %indices, i32 1
+ %index2 = extractelement <8 x i64> %indices, i32 2
+ %index3 = extractelement <8 x i64> %indices, i32 3
+ %index4 = extractelement <8 x i64> %indices, i32 4
+ %index5 = extractelement <8 x i64> %indices, i32 5
+ %index6 = extractelement <8 x i64> %indices, i32 6
+ %index7 = extractelement <8 x i64> %indices, i32 7
+ %v0 = extractelement <8 x i64> %v, i64 %index0
+ %v1 = extractelement <8 x i64> %v, i64 %index1
+ %v2 = extractelement <8 x i64> %v, i64 %index2
+ %v3 = extractelement <8 x i64> %v, i64 %index3
+ %v4 = extractelement <8 x i64> %v, i64 %index4
+ %v5 = extractelement <8 x i64> %v, i64 %index5
+ %v6 = extractelement <8 x i64> %v, i64 %index6
+ %v7 = extractelement <8 x i64> %v, i64 %index7
+ %ret0 = insertelement <8 x i64> undef, i64 %v0, i32 0
+ %ret1 = insertelement <8 x i64> %ret0, i64 %v1, i32 1
+ %ret2 = insertelement <8 x i64> %ret1, i64 %v2, i32 2
+ %ret3 = insertelement <8 x i64> %ret2, i64 %v3, i32 3
+ %ret4 = insertelement <8 x i64> %ret3, i64 %v4, i32 4
+ %ret5 = insertelement <8 x i64> %ret4, i64 %v5, i32 5
+ %ret6 = insertelement <8 x i64> %ret5, i64 %v6, i32 6
+ %ret7 = insertelement <8 x i64> %ret6, i64 %v7, i32 7
+ ret <8 x i64> %ret7
+}
+
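+; <16 x i32>: each dword index is masked into range and the selected elements
+; are gathered with vpinsrd.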
+define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind {
+; AVX512-LABEL: var_shuffle_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: movq %rsp, %rbp
+; AVX512-NEXT: andq $-64, %rsp
+; AVX512-NEXT: subq $128, %rsp
+; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3
+; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512-NEXT: vpextrq $1, %xmm4, %rax
+; AVX512-NEXT: vmovq %xmm4, %rdx
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: vmovaps %zmm0, (%rsp)
+; AVX512-NEXT: andl $15, %edx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX512-NEXT: movq %rsp, %rdx
+; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm0, %xmm0
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm0, %xmm0
+; AVX512-NEXT: vmovq %xmm3, %rax
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm0, %xmm0
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrq $1, %xmm3, %rax
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm4, %xmm3
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm3, %xmm3
+; AVX512-NEXT: vmovq %xmm2, %rax
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm3, %xmm3
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrq $1, %xmm2, %rax
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm4, %xmm2
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm2, %xmm2
+; AVX512-NEXT: vmovq %xmm1, %rax
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm2, %xmm2
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
+; AVX512-NEXT: vpextrq $1, %xmm1, %rax
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm4, %xmm1
+; AVX512-NEXT: movq %rax, %rcx
+; AVX512-NEXT: andl $15, %eax
+; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm1, %xmm1
+; AVX512-NEXT: shrq $30, %rcx
+; AVX512-NEXT: andl $60, %ecx
+; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm1, %xmm1
+; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512-NEXT: movq %rbp, %rsp
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %index0 = extractelement <16 x i32> %indices, i32 0
+ %index1 = extractelement <16 x i32> %indices, i32 1
+ %index2 = extractelement <16 x i32> %indices, i32 2
+ %index3 = extractelement <16 x i32> %indices, i32 3
+ %index4 = extractelement <16 x i32> %indices, i32 4
+ %index5 = extractelement <16 x i32> %indices, i32 5
+ %index6 = extractelement <16 x i32> %indices, i32 6
+ %index7 = extractelement <16 x i32> %indices, i32 7
+ %index8 = extractelement <16 x i32> %indices, i32 8
+ %index9 = extractelement <16 x i32> %indices, i32 9
+ %index10 = extractelement <16 x i32> %indices, i32 10
+ %index11 = extractelement <16 x i32> %indices, i32 11
+ %index12 = extractelement <16 x i32> %indices, i32 12
+ %index13 = extractelement <16 x i32> %indices, i32 13
+ %index14 = extractelement <16 x i32> %indices, i32 14
+ %index15 = extractelement <16 x i32> %indices, i32 15
+ %v0 = extractelement <16 x i32> %v, i32 %index0
+ %v1 = extractelement <16 x i32> %v, i32 %index1
+ %v2 = extractelement <16 x i32> %v, i32 %index2
+ %v3 = extractelement <16 x i32> %v, i32 %index3
+ %v4 = extractelement <16 x i32> %v, i32 %index4
+ %v5 = extractelement <16 x i32> %v, i32 %index5
+ %v6 = extractelement <16 x i32> %v, i32 %index6
+ %v7 = extractelement <16 x i32> %v, i32 %index7
+ %v8 = extractelement <16 x i32> %v, i32 %index8
+ %v9 = extractelement <16 x i32> %v, i32 %index9
+ %v10 = extractelement <16 x i32> %v, i32 %index10
+ %v11 = extractelement <16 x i32> %v, i32 %index11
+ %v12 = extractelement <16 x i32> %v, i32 %index12
+ %v13 = extractelement <16 x i32> %v, i32 %index13
+ %v14 = extractelement <16 x i32> %v, i32 %index14
+ %v15 = extractelement <16 x i32> %v, i32 %index15
+ %ret0 = insertelement <16 x i32> undef, i32 %v0, i32 0
+ %ret1 = insertelement <16 x i32> %ret0, i32 %v1, i32 1
+ %ret2 = insertelement <16 x i32> %ret1, i32 %v2, i32 2
+ %ret3 = insertelement <16 x i32> %ret2, i32 %v3, i32 3
+ %ret4 = insertelement <16 x i32> %ret3, i32 %v4, i32 4
+ %ret5 = insertelement <16 x i32> %ret4, i32 %v5, i32 5
+ %ret6 = insertelement <16 x i32> %ret5, i32 %v6, i32 6
+ %ret7 = insertelement <16 x i32> %ret6, i32 %v7, i32 7
+ %ret8 = insertelement <16 x i32> %ret7, i32 %v8, i32 8
+ %ret9 = insertelement <16 x i32> %ret8, i32 %v9, i32 9
+ %ret10 = insertelement <16 x i32> %ret9, i32 %v10, i32 10
+ %ret11 = insertelement <16 x i32> %ret10, i32 %v11, i32 11
+ %ret12 = insertelement <16 x i32> %ret11, i32 %v12, i32 12
+ %ret13 = insertelement <16 x i32> %ret12, i32 %v13, i32 13
+ %ret14 = insertelement <16 x i32> %ret13, i32 %v14, i32 14
+ %ret15 = insertelement <16 x i32> %ret14, i32 %v15, i32 15
+ ret <16 x i32> %ret15
+}
+
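+; Fully variable <32 x i16> shuffle: each of the 32 indices is extracted from
+; %indices and used to select an element of %v. The checked lowering below
+; (without AVX512BW) spills the source vector to the stack and re-loads each
+; element, masking every index with "andl $31" to keep it in range.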
+define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwind {
+; NOBW-LABEL: var_shuffle_v32i16:
+; NOBW: # BB#0:
+; NOBW-NEXT: pushq %rbp
+; NOBW-NEXT: movq %rsp, %rbp
+; NOBW-NEXT: andq $-64, %rsp
+; NOBW-NEXT: subq $2112, %rsp # imm = 0x840
+; NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4
+; NOBW-NEXT: vmovd %xmm4, %eax
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, (%rsp)
+; NOBW-NEXT: movzwl 1472(%rsp,%rax,2), %eax
+; NOBW-NEXT: vmovd %eax, %xmm0
+; NOBW-NEXT: vpextrw $1, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $1, 1408(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vpextrw $2, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $2, 1344(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vpextrw $3, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $3, 1280(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vpextrw $4, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $4, 1216(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vpextrw $5, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $5, 1152(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vpextrw $6, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $6, 1088(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vpextrw $7, %xmm4, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $7, 1024(%rsp,%rax,2), %xmm0, %xmm0
+; NOBW-NEXT: vmovd %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: movzwl 1984(%rsp,%rax,2), %eax
+; NOBW-NEXT: vmovd %eax, %xmm1
+; NOBW-NEXT: vpextrw $1, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $1, 1920(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vpextrw $2, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $2, 1856(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vpextrw $3, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $3, 1792(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vpextrw $4, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $4, 1728(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vpextrw $5, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $5, 1664(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vpextrw $6, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $6, 1600(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vpextrw $7, %xmm2, %eax
+; NOBW-NEXT: vextracti128 $1, %ymm3, %xmm2
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $7, 1536(%rsp,%rax,2), %xmm1, %xmm1
+; NOBW-NEXT: vmovd %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: movzwl 448(%rsp,%rax,2), %eax
+; NOBW-NEXT: vmovd %eax, %xmm4
+; NOBW-NEXT: vpextrw $1, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $1, 384(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $2, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $2, 320(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $3, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $3, 256(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $4, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $4, 192(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $5, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $5, 128(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $6, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $6, 64(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $7, %xmm2, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm4, %xmm2
+; NOBW-NEXT: vmovd %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: movzwl 960(%rsp,%rax,2), %eax
+; NOBW-NEXT: vmovd %eax, %xmm4
+; NOBW-NEXT: vpextrw $1, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $1, 896(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $2, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $2, 832(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $3, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $3, 768(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $4, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $4, 704(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $5, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $5, 640(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $6, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $6, 576(%rsp,%rax,2), %xmm4, %xmm4
+; NOBW-NEXT: vpextrw $7, %xmm3, %eax
+; NOBW-NEXT: andl $31, %eax
+; NOBW-NEXT: vpinsrw $7, 512(%rsp,%rax,2), %xmm4, %xmm3
+; NOBW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; NOBW-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm1
+; NOBW-NEXT: movq %rbp, %rsp
+; NOBW-NEXT: popq %rbp
+; NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: var_shuffle_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: pushq %rbp
+; AVX512BW-NEXT: movq %rsp, %rbp
+; AVX512BW-NEXT: andq $-64, %rsp
+; AVX512BW-NEXT: subq $128, %rsp
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm3
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512BW-NEXT: vmovd %xmm4, %eax
+; AVX512BW-NEXT: vmovaps %zmm0, (%rsp)
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm0
+; AVX512BW-NEXT: vpextrw $1, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrw $2, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrw $3, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrw $4, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrw $5, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrw $6, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrw $7, %xmm4, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
+; AVX512BW-NEXT: vmovd %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $2, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $3, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $4, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $5, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $6, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $7, %xmm3, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm4, %xmm3
+; AVX512BW-NEXT: vmovd %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $2, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $3, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $5, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $6, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $7, %xmm2, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm4, %xmm2
+; AVX512BW-NEXT: vmovd %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
+; AVX512BW-NEXT: vmovd %eax, %xmm4
+; AVX512BW-NEXT: vpextrw $1, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $2, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $3, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $4, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $5, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $6, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrw $7, %xmm1, %eax
+; AVX512BW-NEXT: andl $31, %eax
+; AVX512BW-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm4, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT: movq %rbp, %rsp
+; AVX512BW-NEXT: popq %rbp
+; AVX512BW-NEXT: retq
+ %index0 = extractelement <32 x i16> %indices, i32 0
+ %index1 = extractelement <32 x i16> %indices, i32 1
+ %index2 = extractelement <32 x i16> %indices, i32 2
+ %index3 = extractelement <32 x i16> %indices, i32 3
+ %index4 = extractelement <32 x i16> %indices, i32 4
+ %index5 = extractelement <32 x i16> %indices, i32 5
+ %index6 = extractelement <32 x i16> %indices, i32 6
+ %index7 = extractelement <32 x i16> %indices, i32 7
+ %index8 = extractelement <32 x i16> %indices, i32 8
+ %index9 = extractelement <32 x i16> %indices, i32 9
+ %index10 = extractelement <32 x i16> %indices, i32 10
+ %index11 = extractelement <32 x i16> %indices, i32 11
+ %index12 = extractelement <32 x i16> %indices, i32 12
+ %index13 = extractelement <32 x i16> %indices, i32 13
+ %index14 = extractelement <32 x i16> %indices, i32 14
+ %index15 = extractelement <32 x i16> %indices, i32 15
+ %index16 = extractelement <32 x i16> %indices, i32 16
+ %index17 = extractelement <32 x i16> %indices, i32 17
+ %index18 = extractelement <32 x i16> %indices, i32 18
+ %index19 = extractelement <32 x i16> %indices, i32 19
+ %index20 = extractelement <32 x i16> %indices, i32 20
+ %index21 = extractelement <32 x i16> %indices, i32 21
+ %index22 = extractelement <32 x i16> %indices, i32 22
+ %index23 = extractelement <32 x i16> %indices, i32 23
+ %index24 = extractelement <32 x i16> %indices, i32 24
+ %index25 = extractelement <32 x i16> %indices, i32 25
+ %index26 = extractelement <32 x i16> %indices, i32 26
+ %index27 = extractelement <32 x i16> %indices, i32 27
+ %index28 = extractelement <32 x i16> %indices, i32 28
+ %index29 = extractelement <32 x i16> %indices, i32 29
+ %index30 = extractelement <32 x i16> %indices, i32 30
+ %index31 = extractelement <32 x i16> %indices, i32 31
+ %v0 = extractelement <32 x i16> %v, i16 %index0
+ %v1 = extractelement <32 x i16> %v, i16 %index1
+ %v2 = extractelement <32 x i16> %v, i16 %index2
+ %v3 = extractelement <32 x i16> %v, i16 %index3
+ %v4 = extractelement <32 x i16> %v, i16 %index4
+ %v5 = extractelement <32 x i16> %v, i16 %index5
+ %v6 = extractelement <32 x i16> %v, i16 %index6
+ %v7 = extractelement <32 x i16> %v, i16 %index7
+ %v8 = extractelement <32 x i16> %v, i16 %index8
+ %v9 = extractelement <32 x i16> %v, i16 %index9
+ %v10 = extractelement <32 x i16> %v, i16 %index10
+ %v11 = extractelement <32 x i16> %v, i16 %index11
+ %v12 = extractelement <32 x i16> %v, i16 %index12
+ %v13 = extractelement <32 x i16> %v, i16 %index13
+ %v14 = extractelement <32 x i16> %v, i16 %index14
+ %v15 = extractelement <32 x i16> %v, i16 %index15
+ %v16 = extractelement <32 x i16> %v, i16 %index16
+ %v17 = extractelement <32 x i16> %v, i16 %index17
+ %v18 = extractelement <32 x i16> %v, i16 %index18
+ %v19 = extractelement <32 x i16> %v, i16 %index19
+ %v20 = extractelement <32 x i16> %v, i16 %index20
+ %v21 = extractelement <32 x i16> %v, i16 %index21
+ %v22 = extractelement <32 x i16> %v, i16 %index22
+ %v23 = extractelement <32 x i16> %v, i16 %index23
+ %v24 = extractelement <32 x i16> %v, i16 %index24
+ %v25 = extractelement <32 x i16> %v, i16 %index25
+ %v26 = extractelement <32 x i16> %v, i16 %index26
+ %v27 = extractelement <32 x i16> %v, i16 %index27
+ %v28 = extractelement <32 x i16> %v, i16 %index28
+ %v29 = extractelement <32 x i16> %v, i16 %index29
+ %v30 = extractelement <32 x i16> %v, i16 %index30
+ %v31 = extractelement <32 x i16> %v, i16 %index31
+ %ret0 = insertelement <32 x i16> undef, i16 %v0, i32 0
+ %ret1 = insertelement <32 x i16> %ret0, i16 %v1, i32 1
+ %ret2 = insertelement <32 x i16> %ret1, i16 %v2, i32 2
+ %ret3 = insertelement <32 x i16> %ret2, i16 %v3, i32 3
+ %ret4 = insertelement <32 x i16> %ret3, i16 %v4, i32 4
+ %ret5 = insertelement <32 x i16> %ret4, i16 %v5, i32 5
+ %ret6 = insertelement <32 x i16> %ret5, i16 %v6, i32 6
+ %ret7 = insertelement <32 x i16> %ret6, i16 %v7, i32 7
+ %ret8 = insertelement <32 x i16> %ret7, i16 %v8, i32 8
+ %ret9 = insertelement <32 x i16> %ret8, i16 %v9, i32 9
+ %ret10 = insertelement <32 x i16> %ret9, i16 %v10, i32 10
+ %ret11 = insertelement <32 x i16> %ret10, i16 %v11, i32 11
+ %ret12 = insertelement <32 x i16> %ret11, i16 %v12, i32 12
+ %ret13 = insertelement <32 x i16> %ret12, i16 %v13, i32 13
+ %ret14 = insertelement <32 x i16> %ret13, i16 %v14, i32 14
+ %ret15 = insertelement <32 x i16> %ret14, i16 %v15, i32 15
+ %ret16 = insertelement <32 x i16> %ret15, i16 %v16, i32 16
+ %ret17 = insertelement <32 x i16> %ret16, i16 %v17, i32 17
+ %ret18 = insertelement <32 x i16> %ret17, i16 %v18, i32 18
+ %ret19 = insertelement <32 x i16> %ret18, i16 %v19, i32 19
+ %ret20 = insertelement <32 x i16> %ret19, i16 %v20, i32 20
+ %ret21 = insertelement <32 x i16> %ret20, i16 %v21, i32 21
+ %ret22 = insertelement <32 x i16> %ret21, i16 %v22, i32 22
+ %ret23 = insertelement <32 x i16> %ret22, i16 %v23, i32 23
+ %ret24 = insertelement <32 x i16> %ret23, i16 %v24, i32 24
+ %ret25 = insertelement <32 x i16> %ret24, i16 %v25, i32 25
+ %ret26 = insertelement <32 x i16> %ret25, i16 %v26, i32 26
+ %ret27 = insertelement <32 x i16> %ret26, i16 %v27, i32 27
+ %ret28 = insertelement <32 x i16> %ret27, i16 %v28, i32 28
+ %ret29 = insertelement <32 x i16> %ret28, i16 %v29, i32 29
+ %ret30 = insertelement <32 x i16> %ret29, i16 %v30, i32 30
+ %ret31 = insertelement <32 x i16> %ret30, i16 %v31, i32 31
+ ret <32 x i16> %ret31
+}
+
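+; Same pattern for a fully variable <64 x i8> shuffle: the source vector is
+; spilled to the stack and each byte is re-loaded with its index masked via
+; "andl $63".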
+define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
+; NOBW-LABEL: var_shuffle_v64i8:
+; NOBW: # BB#0:
+; NOBW-NEXT: pushq %rbp
+; NOBW-NEXT: movq %rsp, %rbp
+; NOBW-NEXT: andq $-64, %rsp
+; NOBW-NEXT: subq $4160, %rsp # imm = 0x1040
+; NOBW-NEXT: vextracti128 $1, %ymm2, %xmm4
+; NOBW-NEXT: vpextrb $0, %xmm4, %eax
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
+; NOBW-NEXT: vmovaps %ymm0, (%rsp)
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: movzbl (%rax,%rcx), %eax
+; NOBW-NEXT: vpextrb $1, %xmm4, %ecx
+; NOBW-NEXT: andl $63, %ecx
+; NOBW-NEXT: vmovd %eax, %xmm0
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; NOBW-NEXT: vpinsrb $1, (%rcx,%rax), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $2, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $2, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $3, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $3, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $4, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $4, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $5, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $5, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $6, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $6, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $7, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $7, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $8, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $8, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $9, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $9, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $10, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $10, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $11, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $11, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $12, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $12, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $13, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $13, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $14, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $14, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $15, %xmm4, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $15, (%rax,%rcx), %xmm0, %xmm0
+; NOBW-NEXT: vpextrb $0, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: movzbl (%rax,%rcx), %eax
+; NOBW-NEXT: vpextrb $1, %xmm2, %ecx
+; NOBW-NEXT: andl $63, %ecx
+; NOBW-NEXT: vmovd %eax, %xmm1
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; NOBW-NEXT: vpinsrb $1, (%rcx,%rax), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $2, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $2, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $3, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $3, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $4, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $4, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $5, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $5, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $6, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $6, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $7, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $7, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $8, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $8, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $9, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $9, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $10, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $10, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $11, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $11, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $12, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $12, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $13, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $13, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $14, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $14, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $15, %xmm2, %eax
+; NOBW-NEXT: vextracti128 $1, %ymm3, %xmm2
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $15, (%rax,%rcx), %xmm1, %xmm1
+; NOBW-NEXT: vpextrb $0, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: movzbl (%rax,%rcx), %eax
+; NOBW-NEXT: vpextrb $1, %xmm2, %ecx
+; NOBW-NEXT: andl $63, %ecx
+; NOBW-NEXT: vmovd %eax, %xmm4
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; NOBW-NEXT: vpinsrb $1, (%rcx,%rax), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $2, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $2, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $3, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $3, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $4, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $4, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $5, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $5, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $6, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $6, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $7, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $7, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $8, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $8, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $9, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $9, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $10, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $10, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $11, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $11, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $12, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $12, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $13, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $13, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $14, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $14, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $15, %xmm2, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: movq %rsp, %rcx
+; NOBW-NEXT: vpinsrb $15, (%rax,%rcx), %xmm4, %xmm2
+; NOBW-NEXT: vpextrb $0, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: movzbl (%rax,%rcx), %eax
+; NOBW-NEXT: vpextrb $1, %xmm3, %ecx
+; NOBW-NEXT: andl $63, %ecx
+; NOBW-NEXT: vmovd %eax, %xmm4
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rax
+; NOBW-NEXT: vpinsrb $1, (%rcx,%rax), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $2, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $2, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $3, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $3, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $4, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $4, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $5, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $5, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $6, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $6, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $7, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $7, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $8, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $8, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $9, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $9, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $10, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $10, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $11, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $11, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $12, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $12, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $13, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $13, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $14, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $14, (%rax,%rcx), %xmm4, %xmm4
+; NOBW-NEXT: vpextrb $15, %xmm3, %eax
+; NOBW-NEXT: andl $63, %eax
+; NOBW-NEXT: leaq {{[0-9]+}}(%rsp), %rcx
+; NOBW-NEXT: vpinsrb $15, (%rax,%rcx), %xmm4, %xmm3
+; NOBW-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; NOBW-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm1
+; NOBW-NEXT: movq %rbp, %rsp
+; NOBW-NEXT: popq %rbp
+; NOBW-NEXT: retq
+;
+; AVX512BW-LABEL: var_shuffle_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: pushq %rbp
+; AVX512BW-NEXT: movq %rsp, %rbp
+; AVX512BW-NEXT: andq $-64, %rsp
+; AVX512BW-NEXT: subq $128, %rsp
+; AVX512BW-NEXT: vpextrb $0, %xmm1, %ecx
+; AVX512BW-NEXT: vpextrb $1, %xmm1, %eax
+; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm3
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
+; AVX512BW-NEXT: vpextrb $0, %xmm4, %edx
+; AVX512BW-NEXT: vmovaps %zmm0, (%rsp)
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movq %rsp, %rsi
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vmovd %edx, %xmm0
+; AVX512BW-NEXT: vpextrb $1, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $2, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $3, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $4, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $4, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $5, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $5, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $6, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $7, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $7, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $8, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $8, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $9, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $9, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $10, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $10, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $11, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $11, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $12, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $13, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $13, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $14, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $15, %xmm4, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $15, %edx, %xmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $0, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vmovd %edx, %xmm4
+; AVX512BW-NEXT: vpextrb $1, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $2, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $2, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $3, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $3, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $4, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $4, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $5, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $5, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $6, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $6, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $7, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $7, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $8, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $8, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $9, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $9, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $10, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $10, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $11, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $11, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $12, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $12, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $13, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $13, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $14, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $15, %xmm3, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $15, %edx, %xmm4, %xmm3
+; AVX512BW-NEXT: vpextrb $0, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vmovd %edx, %xmm4
+; AVX512BW-NEXT: vpextrb $1, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $2, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $2, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $3, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $3, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $4, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $4, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $5, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $5, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $6, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $6, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $7, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $7, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $8, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $8, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $9, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $9, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $10, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $10, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $11, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $11, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $12, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $12, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $13, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $13, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $14, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $15, %xmm2, %edx
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: vpinsrb $15, %edx, %xmm4, %xmm2
+; AVX512BW-NEXT: vpextrb $2, %xmm1, %edx
+; AVX512BW-NEXT: andl $63, %ecx
+; AVX512BW-NEXT: movzbl (%rcx,%rsi), %ecx
+; AVX512BW-NEXT: vmovd %ecx, %xmm4
+; AVX512BW-NEXT: vpextrb $3, %xmm1, %ecx
+; AVX512BW-NEXT: andl $63, %eax
+; AVX512BW-NEXT: vpinsrb $1, (%rax,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $4, %xmm1, %eax
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: vpinsrb $2, (%rdx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $5, %xmm1, %edx
+; AVX512BW-NEXT: andl $63, %ecx
+; AVX512BW-NEXT: vpinsrb $3, (%rcx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $6, %xmm1, %ecx
+; AVX512BW-NEXT: andl $63, %eax
+; AVX512BW-NEXT: vpinsrb $4, (%rax,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $7, %xmm1, %eax
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: vpinsrb $5, (%rdx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $8, %xmm1, %edx
+; AVX512BW-NEXT: andl $63, %ecx
+; AVX512BW-NEXT: vpinsrb $6, (%rcx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $9, %xmm1, %ecx
+; AVX512BW-NEXT: andl $63, %eax
+; AVX512BW-NEXT: vpinsrb $7, (%rax,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $10, %xmm1, %eax
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: vpinsrb $8, (%rdx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $11, %xmm1, %edx
+; AVX512BW-NEXT: andl $63, %ecx
+; AVX512BW-NEXT: vpinsrb $9, (%rcx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $12, %xmm1, %ecx
+; AVX512BW-NEXT: andl $63, %eax
+; AVX512BW-NEXT: vpinsrb $10, (%rax,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $13, %xmm1, %eax
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: vpinsrb $11, (%rdx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $14, %xmm1, %edx
+; AVX512BW-NEXT: andl $63, %ecx
+; AVX512BW-NEXT: vpinsrb $12, (%rcx,%rsi), %xmm4, %xmm4
+; AVX512BW-NEXT: vpextrb $15, %xmm1, %ecx
+; AVX512BW-NEXT: andl $63, %eax
+; AVX512BW-NEXT: andl $63, %edx
+; AVX512BW-NEXT: andl $63, %ecx
+; AVX512BW-NEXT: movzbl (%rcx,%rsi), %ecx
+; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
+; AVX512BW-NEXT: movzbl (%rax,%rsi), %eax
+; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm4, %xmm1
+; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm1, %xmm1
+; AVX512BW-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm1
+; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
+; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
+; AVX512BW-NEXT: movq %rbp, %rsp
+; AVX512BW-NEXT: popq %rbp
+; AVX512BW-NEXT: retq
+ %index0 = extractelement <64 x i8> %indices, i32 0
+ %index1 = extractelement <64 x i8> %indices, i32 1
+ %index2 = extractelement <64 x i8> %indices, i32 2
+ %index3 = extractelement <64 x i8> %indices, i32 3
+ %index4 = extractelement <64 x i8> %indices, i32 4
+ %index5 = extractelement <64 x i8> %indices, i32 5
+ %index6 = extractelement <64 x i8> %indices, i32 6
+ %index7 = extractelement <64 x i8> %indices, i32 7
+ %index8 = extractelement <64 x i8> %indices, i32 8
+ %index9 = extractelement <64 x i8> %indices, i32 9
+ %index10 = extractelement <64 x i8> %indices, i32 10
+ %index11 = extractelement <64 x i8> %indices, i32 11
+ %index12 = extractelement <64 x i8> %indices, i32 12
+ %index13 = extractelement <64 x i8> %indices, i32 13
+ %index14 = extractelement <64 x i8> %indices, i32 14
+ %index15 = extractelement <64 x i8> %indices, i32 15
+ %index16 = extractelement <64 x i8> %indices, i32 16
+ %index17 = extractelement <64 x i8> %indices, i32 17
+ %index18 = extractelement <64 x i8> %indices, i32 18
+ %index19 = extractelement <64 x i8> %indices, i32 19
+ %index20 = extractelement <64 x i8> %indices, i32 20
+ %index21 = extractelement <64 x i8> %indices, i32 21
+ %index22 = extractelement <64 x i8> %indices, i32 22
+ %index23 = extractelement <64 x i8> %indices, i32 23
+ %index24 = extractelement <64 x i8> %indices, i32 24
+ %index25 = extractelement <64 x i8> %indices, i32 25
+ %index26 = extractelement <64 x i8> %indices, i32 26
+ %index27 = extractelement <64 x i8> %indices, i32 27
+ %index28 = extractelement <64 x i8> %indices, i32 28
+ %index29 = extractelement <64 x i8> %indices, i32 29
+ %index30 = extractelement <64 x i8> %indices, i32 30
+ %index31 = extractelement <64 x i8> %indices, i32 31
+ %index32 = extractelement <64 x i8> %indices, i32 32
+ %index33 = extractelement <64 x i8> %indices, i32 33
+ %index34 = extractelement <64 x i8> %indices, i32 34
+ %index35 = extractelement <64 x i8> %indices, i32 35
+ %index36 = extractelement <64 x i8> %indices, i32 36
+ %index37 = extractelement <64 x i8> %indices, i32 37
+ %index38 = extractelement <64 x i8> %indices, i32 38
+ %index39 = extractelement <64 x i8> %indices, i32 39
+ %index40 = extractelement <64 x i8> %indices, i32 40
+ %index41 = extractelement <64 x i8> %indices, i32 41
+ %index42 = extractelement <64 x i8> %indices, i32 42
+ %index43 = extractelement <64 x i8> %indices, i32 43
+ %index44 = extractelement <64 x i8> %indices, i32 44
+ %index45 = extractelement <64 x i8> %indices, i32 45
+ %index46 = extractelement <64 x i8> %indices, i32 46
+ %index47 = extractelement <64 x i8> %indices, i32 47
+ %index48 = extractelement <64 x i8> %indices, i32 48
+ %index49 = extractelement <64 x i8> %indices, i32 49
+ %index50 = extractelement <64 x i8> %indices, i32 50
+ %index51 = extractelement <64 x i8> %indices, i32 51
+ %index52 = extractelement <64 x i8> %indices, i32 52
+ %index53 = extractelement <64 x i8> %indices, i32 53
+ %index54 = extractelement <64 x i8> %indices, i32 54
+ %index55 = extractelement <64 x i8> %indices, i32 55
+ %index56 = extractelement <64 x i8> %indices, i32 56
+ %index57 = extractelement <64 x i8> %indices, i32 57
+ %index58 = extractelement <64 x i8> %indices, i32 58
+ %index59 = extractelement <64 x i8> %indices, i32 59
+ %index60 = extractelement <64 x i8> %indices, i32 60
+ %index61 = extractelement <64 x i8> %indices, i32 61
+ %index62 = extractelement <64 x i8> %indices, i32 62
+ %index63 = extractelement <64 x i8> %indices, i32 63
+ %v0 = extractelement <64 x i8> %v, i8 %index0
+ %v1 = extractelement <64 x i8> %v, i8 %index1
+ %v2 = extractelement <64 x i8> %v, i8 %index2
+ %v3 = extractelement <64 x i8> %v, i8 %index3
+ %v4 = extractelement <64 x i8> %v, i8 %index4
+ %v5 = extractelement <64 x i8> %v, i8 %index5
+ %v6 = extractelement <64 x i8> %v, i8 %index6
+ %v7 = extractelement <64 x i8> %v, i8 %index7
+ %v8 = extractelement <64 x i8> %v, i8 %index8
+ %v9 = extractelement <64 x i8> %v, i8 %index9
+ %v10 = extractelement <64 x i8> %v, i8 %index10
+ %v11 = extractelement <64 x i8> %v, i8 %index11
+ %v12 = extractelement <64 x i8> %v, i8 %index12
+ %v13 = extractelement <64 x i8> %v, i8 %index13
+ %v14 = extractelement <64 x i8> %v, i8 %index14
+ %v15 = extractelement <64 x i8> %v, i8 %index15
+ %v16 = extractelement <64 x i8> %v, i8 %index16
+ %v17 = extractelement <64 x i8> %v, i8 %index17
+ %v18 = extractelement <64 x i8> %v, i8 %index18
+ %v19 = extractelement <64 x i8> %v, i8 %index19
+ %v20 = extractelement <64 x i8> %v, i8 %index20
+ %v21 = extractelement <64 x i8> %v, i8 %index21
+ %v22 = extractelement <64 x i8> %v, i8 %index22
+ %v23 = extractelement <64 x i8> %v, i8 %index23
+ %v24 = extractelement <64 x i8> %v, i8 %index24
+ %v25 = extractelement <64 x i8> %v, i8 %index25
+ %v26 = extractelement <64 x i8> %v, i8 %index26
+ %v27 = extractelement <64 x i8> %v, i8 %index27
+ %v28 = extractelement <64 x i8> %v, i8 %index28
+ %v29 = extractelement <64 x i8> %v, i8 %index29
+ %v30 = extractelement <64 x i8> %v, i8 %index30
+ %v31 = extractelement <64 x i8> %v, i8 %index31
+ %v32 = extractelement <64 x i8> %v, i8 %index32
+ %v33 = extractelement <64 x i8> %v, i8 %index33
+ %v34 = extractelement <64 x i8> %v, i8 %index34
+ %v35 = extractelement <64 x i8> %v, i8 %index35
+ %v36 = extractelement <64 x i8> %v, i8 %index36
+ %v37 = extractelement <64 x i8> %v, i8 %index37
+ %v38 = extractelement <64 x i8> %v, i8 %index38
+ %v39 = extractelement <64 x i8> %v, i8 %index39
+ %v40 = extractelement <64 x i8> %v, i8 %index40
+ %v41 = extractelement <64 x i8> %v, i8 %index41
+ %v42 = extractelement <64 x i8> %v, i8 %index42
+ %v43 = extractelement <64 x i8> %v, i8 %index43
+ %v44 = extractelement <64 x i8> %v, i8 %index44
+ %v45 = extractelement <64 x i8> %v, i8 %index45
+ %v46 = extractelement <64 x i8> %v, i8 %index46
+ %v47 = extractelement <64 x i8> %v, i8 %index47
+ %v48 = extractelement <64 x i8> %v, i8 %index48
+ %v49 = extractelement <64 x i8> %v, i8 %index49
+ %v50 = extractelement <64 x i8> %v, i8 %index50
+ %v51 = extractelement <64 x i8> %v, i8 %index51
+ %v52 = extractelement <64 x i8> %v, i8 %index52
+ %v53 = extractelement <64 x i8> %v, i8 %index53
+ %v54 = extractelement <64 x i8> %v, i8 %index54
+ %v55 = extractelement <64 x i8> %v, i8 %index55
+ %v56 = extractelement <64 x i8> %v, i8 %index56
+ %v57 = extractelement <64 x i8> %v, i8 %index57
+ %v58 = extractelement <64 x i8> %v, i8 %index58
+ %v59 = extractelement <64 x i8> %v, i8 %index59
+ %v60 = extractelement <64 x i8> %v, i8 %index60
+ %v61 = extractelement <64 x i8> %v, i8 %index61
+ %v62 = extractelement <64 x i8> %v, i8 %index62
+ %v63 = extractelement <64 x i8> %v, i8 %index63
+ %ret0 = insertelement <64 x i8> undef, i8 %v0, i32 0
+ %ret1 = insertelement <64 x i8> %ret0, i8 %v1, i32 1
+ %ret2 = insertelement <64 x i8> %ret1, i8 %v2, i32 2
+ %ret3 = insertelement <64 x i8> %ret2, i8 %v3, i32 3
+ %ret4 = insertelement <64 x i8> %ret3, i8 %v4, i32 4
+ %ret5 = insertelement <64 x i8> %ret4, i8 %v5, i32 5
+ %ret6 = insertelement <64 x i8> %ret5, i8 %v6, i32 6
+ %ret7 = insertelement <64 x i8> %ret6, i8 %v7, i32 7
+ %ret8 = insertelement <64 x i8> %ret7, i8 %v8, i32 8
+ %ret9 = insertelement <64 x i8> %ret8, i8 %v9, i32 9
+ %ret10 = insertelement <64 x i8> %ret9, i8 %v10, i32 10
+ %ret11 = insertelement <64 x i8> %ret10, i8 %v11, i32 11
+ %ret12 = insertelement <64 x i8> %ret11, i8 %v12, i32 12
+ %ret13 = insertelement <64 x i8> %ret12, i8 %v13, i32 13
+ %ret14 = insertelement <64 x i8> %ret13, i8 %v14, i32 14
+ %ret15 = insertelement <64 x i8> %ret14, i8 %v15, i32 15
+ %ret16 = insertelement <64 x i8> %ret15, i8 %v16, i32 16
+ %ret17 = insertelement <64 x i8> %ret16, i8 %v17, i32 17
+ %ret18 = insertelement <64 x i8> %ret17, i8 %v18, i32 18
+ %ret19 = insertelement <64 x i8> %ret18, i8 %v19, i32 19
+ %ret20 = insertelement <64 x i8> %ret19, i8 %v20, i32 20
+ %ret21 = insertelement <64 x i8> %ret20, i8 %v21, i32 21
+ %ret22 = insertelement <64 x i8> %ret21, i8 %v22, i32 22
+ %ret23 = insertelement <64 x i8> %ret22, i8 %v23, i32 23
+ %ret24 = insertelement <64 x i8> %ret23, i8 %v24, i32 24
+ %ret25 = insertelement <64 x i8> %ret24, i8 %v25, i32 25
+ %ret26 = insertelement <64 x i8> %ret25, i8 %v26, i32 26
+ %ret27 = insertelement <64 x i8> %ret26, i8 %v27, i32 27
+ %ret28 = insertelement <64 x i8> %ret27, i8 %v28, i32 28
+ %ret29 = insertelement <64 x i8> %ret28, i8 %v29, i32 29
+ %ret30 = insertelement <64 x i8> %ret29, i8 %v30, i32 30
+ %ret31 = insertelement <64 x i8> %ret30, i8 %v31, i32 31
+ %ret32 = insertelement <64 x i8> %ret31, i8 %v32, i32 32
+ %ret33 = insertelement <64 x i8> %ret32, i8 %v33, i32 33
+ %ret34 = insertelement <64 x i8> %ret33, i8 %v34, i32 34
+ %ret35 = insertelement <64 x i8> %ret34, i8 %v35, i32 35
+ %ret36 = insertelement <64 x i8> %ret35, i8 %v36, i32 36
+ %ret37 = insertelement <64 x i8> %ret36, i8 %v37, i32 37
+ %ret38 = insertelement <64 x i8> %ret37, i8 %v38, i32 38
+ %ret39 = insertelement <64 x i8> %ret38, i8 %v39, i32 39
+ %ret40 = insertelement <64 x i8> %ret39, i8 %v40, i32 40
+ %ret41 = insertelement <64 x i8> %ret40, i8 %v41, i32 41
+ %ret42 = insertelement <64 x i8> %ret41, i8 %v42, i32 42
+ %ret43 = insertelement <64 x i8> %ret42, i8 %v43, i32 43
+ %ret44 = insertelement <64 x i8> %ret43, i8 %v44, i32 44
+ %ret45 = insertelement <64 x i8> %ret44, i8 %v45, i32 45
+ %ret46 = insertelement <64 x i8> %ret45, i8 %v46, i32 46
+ %ret47 = insertelement <64 x i8> %ret46, i8 %v47, i32 47
+ %ret48 = insertelement <64 x i8> %ret47, i8 %v48, i32 48
+ %ret49 = insertelement <64 x i8> %ret48, i8 %v49, i32 49
+ %ret50 = insertelement <64 x i8> %ret49, i8 %v50, i32 50
+ %ret51 = insertelement <64 x i8> %ret50, i8 %v51, i32 51
+ %ret52 = insertelement <64 x i8> %ret51, i8 %v52, i32 52
+ %ret53 = insertelement <64 x i8> %ret52, i8 %v53, i32 53
+ %ret54 = insertelement <64 x i8> %ret53, i8 %v54, i32 54
+ %ret55 = insertelement <64 x i8> %ret54, i8 %v55, i32 55
+ %ret56 = insertelement <64 x i8> %ret55, i8 %v56, i32 56
+ %ret57 = insertelement <64 x i8> %ret56, i8 %v57, i32 57
+ %ret58 = insertelement <64 x i8> %ret57, i8 %v58, i32 58
+ %ret59 = insertelement <64 x i8> %ret58, i8 %v59, i32 59
+ %ret60 = insertelement <64 x i8> %ret59, i8 %v60, i32 60
+ %ret61 = insertelement <64 x i8> %ret60, i8 %v61, i32 61
+ %ret62 = insertelement <64 x i8> %ret61, i8 %v62, i32 62
+ %ret63 = insertelement <64 x i8> %ret62, i8 %v63, i32 63
+ ret <64 x i8> %ret63
+}
+