VT, SDLoc(N),
InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
+ // If we're extracting from a broadcast then we're better off just
+ // broadcasting to the smaller type directly, assuming this is the only use.
+ // As it's a broadcast, we don't care about the extraction index.
+ if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
+ InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
+ return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
+
// If we're extracting the lowest subvector and we're the only user,
// we may be able to perform this with a smaller vector width.
if (IdxVal == 0 && InVec.hasOneUse()) {
; KNL-LABEL: fsub_noundef_ee:
; KNL: # %bb.0:
; KNL-NEXT: vextractf32x4 $2, %zmm1, %xmm0
-; KNL-NEXT: vbroadcastsd %xmm0, %zmm1
-; KNL-NEXT: vextractf32x4 $2, %zmm1, %xmm1
+; KNL-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; KNL-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; KNL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; KNL-NEXT: retq
; SKX-LABEL: fsub_noundef_ee:
; SKX: # %bb.0:
; SKX-NEXT: vextractf32x4 $2, %zmm1, %xmm0
-; SKX-NEXT: vbroadcastsd %xmm0, %zmm1
-; SKX-NEXT: vextractf32x4 $2, %zmm1, %xmm1
+; SKX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; SKX-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; SKX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; SKX-NEXT: vzeroupper
; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2
; CHECK-NEXT: vpermps %ymm2, %ymm1, %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
-; CHECK-NEXT: vbroadcastss %xmm0, %ymm0
+; CHECK-NEXT: vbroadcastss %xmm0, %xmm0
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3]
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [63,63]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpsllq %xmm3, %ymm0, %ymm3
;
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
;
; AVX2-LABEL: splatvar_funnnel_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %ymm0, %ymm2
;
; AVX512-LABEL: splatvar_funnnel_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsllw %xmm2, %ymm0, %ymm2
;
; AVX2-LABEL: splatvar_funnnel_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %ymm0, %ymm3
;
; AVX512F-LABEL: splatvar_funnnel_v32i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm3
;
; AVX512VL-LABEL: splatvar_funnnel_v32i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm3
define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v32i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm4
;
; AVX512VL-LABEL: splatvar_funnnel_v32i16:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm4
;
; AVX512BW-LABEL: splatvar_funnnel_v32i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastw %xmm1, %zmm1
+; AVX512BW-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
;
; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vpbroadcastw %xmm1, %zmm1
+; AVX512VLBW-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
; AVX512VLBW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm4
;
; AVX512VL-LABEL: splatvar_funnnel_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm4
;
; AVX512BW-LABEL: splatvar_funnnel_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
+; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512BW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
;
; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm1
+; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512VLBW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
;
; AVX2-LABEL: splatvar_funnnel_v4i64:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastq %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastq %xmm1, %xmm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [63,63]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX2-NEXT: vpsrlq %xmm3, %ymm0, %ymm3
;
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpsubd %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
;
; AVX2-LABEL: splatvar_funnnel_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
;
; AVX512-LABEL: splatvar_funnnel_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpsubw %xmm1, %xmm2, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
;
; AVX2-LABEL: splatvar_funnnel_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
;
; AVX512F-LABEL: splatvar_funnnel_v32i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
;
; AVX512VL-LABEL: splatvar_funnnel_v32i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpsubb %xmm1, %xmm2, %xmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v32i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
;
; AVX512VL-LABEL: splatvar_funnnel_v32i16:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpsubw %xmm2, %xmm3, %xmm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
;
; AVX512BW-LABEL: splatvar_funnnel_v32i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastw %xmm1, %zmm1
+; AVX512BW-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
; AVX512BW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512BW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
;
; AVX512VLBW-LABEL: splatvar_funnnel_v32i16:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vpbroadcastw %xmm1, %zmm1
+; AVX512VLBW-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15]
; AVX512VLBW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512VLBW-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512F-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
;
; AVX512VL-LABEL: splatvar_funnnel_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX512VL-NEXT: vpsubb %xmm2, %xmm3, %xmm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
;
; AVX512BW-LABEL: splatvar_funnnel_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vpbroadcastb %xmm1, %zmm1
+; AVX512BW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512BW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
;
; AVX512VLBW-LABEL: splatvar_funnnel_v64i8:
; AVX512VLBW: # %bb.0:
-; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %zmm1
+; AVX512VLBW-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} xmm2 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512VLBW-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,zero,zero,zero,zero,xmm3[1],zero,zero,zero,zero,zero,zero,zero
;
; AVX2-LABEL: splatvar_rotate_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
;
; AVX2-LABEL: splatvar_rotate_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %ymm0, %ymm2
;
; AVX512-LABEL: splatvar_rotate_v16i16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpbroadcastw %xmm1, %ymm1
+; AVX512-NEXT: vpbroadcastw %xmm1, %xmm1
; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX512-NEXT: vpsllw %xmm2, %ymm0, %ymm2
;
; AVX2-LABEL: splatvar_rotate_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX2-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %ymm0, %ymm3
;
; AVX512F-LABEL: splatvar_rotate_v32i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512F-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm0, %ymm3
;
; AVX512VL-LABEL: splatvar_rotate_v32i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastb %xmm1, %ymm1
+; AVX512VL-NEXT: vpbroadcastb %xmm1, %xmm1
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm0, %ymm3
define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v32i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX512F-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm4
;
; AVX512VL-LABEL: splatvar_rotate_v32i16:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastw %xmm2, %ymm2
+; AVX512VL-NEXT: vpbroadcastw %xmm2, %xmm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm4
define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512F-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm3, %ymm0, %ymm4
;
; AVX512VL-LABEL: splatvar_rotate_v64i8:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX512VL-NEXT: vpbroadcastb %xmm2, %xmm2
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm3 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm3, %ymm0, %ymm4