return getI8Imm(Imm ^ 0xff, SDLoc(N));
}]>;
+// Turn a 4-bit blendi immediate to 8-bit for use with pblendw.
+def BlendScaleImm4 : SDNodeXForm<imm, [{
+ uint8_t Imm = N->getZExtValue();
+ uint8_t NewImm = 0;
+ for (unsigned i = 0; i != 4; ++i) {
+ if (Imm & (1 << i))
+ NewImm |= 0x3 << (i * 2);
+ }
+ return getI8Imm(NewImm, SDLoc(N));
+}]>;
+
+// Turn a 2-bit blendi immediate to 8-bit for use with pblendw.
+def BlendScaleImm2 : SDNodeXForm<imm, [{
+ uint8_t Imm = N->getZExtValue();
+ uint8_t NewImm = 0;
+ for (unsigned i = 0; i != 2; ++i) {
+ if (Imm & (1 << i))
+ NewImm |= 0xf << (i * 4);
+ }
+ return getI8Imm(NewImm, SDLoc(N));
+}]>;
+
+// Turn a 4-bit blendi immediate to 8-bit for use with pblendw and invert it.
+def BlendScaleCommuteImm4 : SDNodeXForm<imm, [{
+ uint8_t Imm = N->getZExtValue();
+ uint8_t NewImm = 0;
+ for (unsigned i = 0; i != 4; ++i) {
+ if (Imm & (1 << i))
+ NewImm |= 0x3 << (i * 2);
+ }
+ return getI8Imm(NewImm ^ 0xff, SDLoc(N));
+}]>;
+
+// Turn a 2-bit blendi immediate to 8-bit for use with pblendw and invert it.
+def BlendScaleCommuteImm2 : SDNodeXForm<imm, [{
+ uint8_t Imm = N->getZExtValue();
+ uint8_t NewImm = 0;
+ for (unsigned i = 0; i != 2; ++i) {
+ if (Imm & (1 << i))
+ NewImm |= 0xf << (i * 4);
+ }
+ return getI8Imm(NewImm ^ 0xff, SDLoc(N));
+}]>;
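
The four transforms above are the same expansion loop at two element widths, optionally followed by an inversion for the commuted operand order. A minimal standalone C++ sketch of that scaling logic with a couple of worked immediates (scaleBlendImm is a hypothetical helper for illustration, not part of the patch; the getI8Imm/SDLoc plumbing is omitted):

#include <cassert>
#include <cstdint>

// Expand each bit of an element-granularity blend immediate into
// WordsPerElt consecutive bits of a 16-bit-lane pblendw immediate.
// v2i64: NumElts = 2, WordsPerElt = 4 (matches BlendScaleImm2).
// v4i32: NumElts = 4, WordsPerElt = 2 (matches BlendScaleImm4).
static uint8_t scaleBlendImm(uint8_t Imm, unsigned NumElts,
                             unsigned WordsPerElt) {
  uint8_t NewImm = 0;
  for (unsigned i = 0; i != NumElts; ++i)
    if (Imm & (1 << i))
      NewImm |= ((1 << WordsPerElt) - 1) << (i * WordsPerElt);
  return NewImm;
}

int main() {
  // A v2i64 blend taking element 1 from the second source (imm = 0b10)
  // becomes a pblendw taking words 4-7: 0xF0.
  assert(scaleBlendImm(0b10, /*NumElts=*/2, /*WordsPerElt=*/4) == 0xF0);
  // A v4i32 blend with imm = 0b0101 takes words 0-1 and 4-5: 0x33.
  assert(scaleBlendImm(0b0101, /*NumElts=*/4, /*WordsPerElt=*/2) == 0x33);
  // The Commute variants additionally invert the scaled mask.
  assert((uint8_t)(scaleBlendImm(0b10, 2, 4) ^ 0xff) == 0x0F);
  return 0;
}
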
+
let Predicates = [HasAVX] in {
let isCommutable = 0 in {
defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
VEX_4V, VEX_L, VEX_WIG;
}
-// Emulate vXi32/vXi64 blends with vXf32/vXf64.
+// Emulate vXi32/vXi64 blends with vXf32/vXf64 or pblendw.
// ExecutionDomainFixPass will cleanup domains later on.
let Predicates = [HasAVX] in {
def : Pat<(X86Blendi (v4i64 VR256:$src1), (v4i64 VR256:$src2), imm:$src3),
def : Pat<(X86Blendi (loadv4i64 addr:$src2), VR256:$src1, imm:$src3),
(VBLENDPDYrmi VR256:$src1, addr:$src2, (BlendCommuteImm4 imm:$src3))>;
+// Use pblendw for 128-bit integer blends to keep them in the integer domain
+// and prevent them from becoming movsd via commuting under optsize.
def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), imm:$src3),
- (VBLENDPDrri VR128:$src1, VR128:$src2, imm:$src3)>;
+ (VPBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm2 imm:$src3))>;
def : Pat<(X86Blendi VR128:$src1, (loadv2i64 addr:$src2), imm:$src3),
- (VBLENDPDrmi VR128:$src1, addr:$src2, imm:$src3)>;
+ (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm2 imm:$src3))>;
def : Pat<(X86Blendi (loadv2i64 addr:$src2), VR128:$src1, imm:$src3),
- (VBLENDPDrmi VR128:$src1, addr:$src2, (BlendCommuteImm2 imm:$src3))>;
+ (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2 imm:$src3))>;
}
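
The commuted patterns (load on the left of X86Blendi) swap the instruction operands, which is only correct because swapping a blend's two sources is equivalent to inverting its selection mask; BlendScaleCommuteImm2/4 fold the scaling and that inversion into one transform. A small scalar model checking the identity (blend2 is a hypothetical stand-in for illustration, not the ISel node):

#include <array>
#include <cassert>
#include <cstdint>

// Scalar model of a 2-element blend: bit i of Mask set selects B[i],
// otherwise A[i].
static std::array<uint64_t, 2> blend2(std::array<uint64_t, 2> A,
                                      std::array<uint64_t, 2> B,
                                      unsigned Mask) {
  return {{(Mask & 1) ? B[0] : A[0], (Mask & 2) ? B[1] : A[1]}};
}

int main() {
  std::array<uint64_t, 2> A = {1, 2}, B = {3, 4};
  for (unsigned Mask = 0; Mask != 4; ++Mask)
    // Swapping the sources while inverting the 2-bit mask gives the same
    // result, which is why the load-on-the-left patterns can fold the load
    // into the second operand using the BlendScaleCommuteImm* forms.
    assert(blend2(A, B, Mask) == blend2(B, A, ~Mask & 0x3));
  return 0;
}
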
let Predicates = [HasAVX1Only] in {
def : Pat<(X86Blendi (loadv8i32 addr:$src2), VR256:$src1, imm:$src3),
(VBLENDPSYrmi VR256:$src1, addr:$src2, (BlendCommuteImm8 imm:$src3))>;
+// Use pblendw for 128-bit integer blends to keep them in the integer domain
+// and prevent them from becoming movss via commuting under optsize.
def : Pat<(X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2), imm:$src3),
- (VBLENDPSrri VR128:$src1, VR128:$src2, imm:$src3)>;
+ (VPBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm4 imm:$src3))>;
def : Pat<(X86Blendi VR128:$src1, (loadv4i32 addr:$src2), imm:$src3),
- (VBLENDPSrmi VR128:$src1, addr:$src2, imm:$src3)>;
+ (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm4 imm:$src3))>;
def : Pat<(X86Blendi (loadv4i32 addr:$src2), VR128:$src1, imm:$src3),
- (VBLENDPSrmi VR128:$src1, addr:$src2, (BlendCommuteImm4 imm:$src3))>;
+ (VPBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm4 imm:$src3))>;
}
defm BLENDPS : SS41I_blend_rmi<0x0C, "blendps", X86Blendi, v4f32,
SchedWriteBlend.XMM, BlendCommuteImm8>;
let Predicates = [UseSSE41] in {
+// Use pblendw for 128-bit integer blends to keep them in the integer domain
+// and prevent them from becoming movss/movsd via commuting under optsize.
def : Pat<(X86Blendi (v2i64 VR128:$src1), (v2i64 VR128:$src2), imm:$src3),
- (BLENDPDrri VR128:$src1, VR128:$src2, imm:$src3)>;
+ (PBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm2 imm:$src3))>;
def : Pat<(X86Blendi VR128:$src1, (memopv2i64 addr:$src2), imm:$src3),
- (BLENDPDrmi VR128:$src1, addr:$src2, imm:$src3)>;
+ (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm2 imm:$src3))>;
def : Pat<(X86Blendi (memopv2i64 addr:$src2), VR128:$src1, imm:$src3),
- (BLENDPDrmi VR128:$src1, addr:$src2, (BlendCommuteImm2 imm:$src3))>;
+ (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm2 imm:$src3))>;
def : Pat<(X86Blendi (v4i32 VR128:$src1), (v4i32 VR128:$src2), imm:$src3),
- (BLENDPSrri VR128:$src1, VR128:$src2, imm:$src3)>;
+ (PBLENDWrri VR128:$src1, VR128:$src2, (BlendScaleImm4 imm:$src3))>;
def : Pat<(X86Blendi VR128:$src1, (memopv4i32 addr:$src2), imm:$src3),
- (BLENDPSrmi VR128:$src1, addr:$src2, imm:$src3)>;
+ (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleImm4 imm:$src3))>;
def : Pat<(X86Blendi (memopv4i32 addr:$src2), VR128:$src1, imm:$src3),
- (BLENDPSrmi VR128:$src1, addr:$src2, (BlendCommuteImm4 imm:$src3))>;
+ (PBLENDWrmi VR128:$src1, addr:$src2, (BlendScaleCommuteImm4 imm:$src3))>;
}
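
The test updates that follow are all the same mechanical change: a dword blend taking elements [0,1] from one source and [2,3] from the other becomes a word blend taking [0,1,2,3] and [4,5,6,7], so exactly the same bytes are selected. A quick byte-level check of that equivalence (a plain C++ model of the two immediates, not the real instructions):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t A[16], B[16], Dword[16], Word[16];
  for (int i = 0; i != 16; ++i) { A[i] = i; B[i] = 0x80 + i; }

  // vpblendd imm = 0b1100: dwords 0-1 from A, dwords 2-3 from B.
  for (int i = 0; i != 4; ++i)
    std::memcpy(Dword + i * 4, ((0b1100 >> i) & 1) ? B + i * 4 : A + i * 4, 4);

  // vpblendw imm = 0xF0: words 0-3 from A, words 4-7 from B. This is the
  // immediate BlendScaleImm2 produces for the same qword-granularity blend.
  for (int i = 0; i != 8; ++i)
    std::memcpy(Word + i * 2, ((0xF0 >> i) & 1) ? B + i * 2 : A + i * 2, 2);

  // Both forms move the same 16 bytes.
  assert(std::memcmp(Dword, Word, 16) == 0);
  return 0;
}
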
// For insertion into the zero index (low half) of a 256-bit vector, it is
; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mem_mask1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa 16(%rdi), %xmm2
-; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; CHECK-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],mem[4,5,6,7]
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vmovdqa64 %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mem_mask1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa 16(%rdi), %xmm1
-; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; CHECK-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vmovdqa64 %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa 32(%rdi), %xmm2
-; CHECK-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],mem[2,3]
+; CHECK-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],mem[4,5,6,7]
; CHECK-NEXT: vptestnmq %xmm1, %xmm1, %k1
; CHECK-NEXT: vmovdqa64 %xmm2, %xmm0 {%k1}
; CHECK-NEXT: retq
; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovdqa 32(%rdi), %xmm1
-; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0,1],mem[2,3]
+; CHECK-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],mem[4,5,6,7]
; CHECK-NEXT: vptestnmq %xmm0, %xmm0, %k1
; CHECK-NEXT: vmovdqa64 %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,2305843009213693952]
; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpsubq %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX512F-NEXT: vpsrlvq {{.*}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpaddq %xmm2, %xmm0, %xmm2
; AVX512F-NEXT: vpsravq %zmm1, %zmm2, %zmm1
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512BW-NEXT: vmovq %rax, %xmm2
; AVX512BW-NEXT: vpslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsravq %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512BW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX512BW-NEXT: retq
;
; XOP-LABEL: combine_vec_sdiv_by_pow2b_v2i64:
; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpsraw $1, %xmm0, %xmm1
; AVX2-NEXT: vpsraw $2, %xmm0, %xmm2
-; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
; AVX512F-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0
; AVX512F-NEXT: vpsraw $1, %xmm0, %xmm1
; AVX512F-NEXT: vpsraw $2, %xmm0, %xmm2
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpsraw $8, %xmm0, %xmm1
; AVX2-NEXT: vpsraw $4, %xmm0, %xmm2
-; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsraw $8, %xmm0, %xmm1
; AVX512F-NEXT: vpsraw $4, %xmm0, %xmm2
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
; AVX2-NEXT: vpsubw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpsraw $8, %xmm0, %xmm1
; AVX2-NEXT: vpsraw $4, %xmm0, %xmm2
-; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
; AVX512F-NEXT: vpsubw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: vpsraw $8, %xmm0, %xmm1
; AVX512F-NEXT: vpsraw $4, %xmm0, %xmm2
-; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3]
+; AVX512F-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0
; AVX512F-NEXT: retq
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: combine_vec_sdiv_nonuniform7:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2ORLATER-LABEL: combine_vec_sdiv_nonuniform7:
-; AVX2ORLATER: # %bb.0:
-; AVX2ORLATER-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2ORLATER-NEXT: vpsubw %xmm0, %xmm1, %xmm1
-; AVX2ORLATER-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2ORLATER-NEXT: retq
-;
-; XOP-LABEL: combine_vec_sdiv_nonuniform7:
-; XOP: # %bb.0:
-; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; XOP-NEXT: vpsubw %xmm0, %xmm1, %xmm1
-; XOP-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; XOP-NEXT: retq
+; AVX-LABEL: combine_vec_sdiv_nonuniform7:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX-NEXT: retq
%1 = sdiv <8 x i16> %x, <i16 -1, i16 -1, i16 -1, i16 -1, i16 1, i16 1, i16 1, i16 1>
ret <8 x i16> %1
}
define void @baz(<2 x i64>* %arg, %struct.spam* %arg1) optsize {
; CHECK-LABEL: baz:
; CHECK: # %bb.0: # %bb
-; CHECK-NEXT: movapd (%rdi), %xmm0
-; CHECK-NEXT: movapd {{.*#+}} xmm1 = [3,3]
-; CHECK-NEXT: andpd %xmm0, %xmm1
-; CHECK-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
-; CHECK-NEXT: movupd %xmm1, (%rsi)
+; CHECK-NEXT: movaps (%rdi), %xmm0
+; CHECK-NEXT: movaps {{.*#+}} xmm1 = [3,3]
+; CHECK-NEXT: andps %xmm0, %xmm1
+; CHECK-NEXT: blendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
+; CHECK-NEXT: movups %xmm1, (%rsi)
; CHECK-NEXT: retq
bb:
%tmp = load <2 x i64>, <2 x i64>* %arg, align 16
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: insert_v2i64_x1:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: insert_v2i64_x1:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: insert_v2i64_x1:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX512-NEXT: retq
+; AVX-LABEL: insert_v2i64_x1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX-NEXT: retq
%1 = insertelement <2 x i64> %a, i64 -1, i32 0
ret <2 x i64> %1
}
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: test2:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test2:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm1
-; AVX2-NEXT: vpsrlw $3, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: test2:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsrlw $2, %xmm0, %xmm1
+; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: retq
%lshr = lshr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
ret <8 x i16> %lshr
}
; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: retq
;
-; AVX1-LABEL: test6:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX1-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: test6:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpsraw $2, %xmm0, %xmm1
-; AVX2-NEXT: vpsraw $3, %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT: retq
+; AVX-LABEL: test6:
+; AVX: # %bb.0:
+; AVX-NEXT: vpsraw $2, %xmm0, %xmm1
+; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: retq
%lshr = ashr <8 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 2, i16 2, i16 2, i16 2>
ret <8 x i16> %lshr
}
; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X64-SSE-NEXT: retq
;
-; X64-AVX1-LABEL: PR19721:
-; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovq %xmm0, %rax
-; X64-AVX1-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
-; X64-AVX1-NEXT: andq %rax, %rcx
-; X64-AVX1-NEXT: vmovq %rcx, %xmm1
-; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; X64-AVX1-NEXT: retq
-;
-; X64-AVX512-LABEL: PR19721:
-; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vmovq %xmm0, %rax
-; X64-AVX512-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
-; X64-AVX512-NEXT: andq %rax, %rcx
-; X64-AVX512-NEXT: vmovq %rcx, %xmm1
-; X64-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; X64-AVX512-NEXT: retq
+; X64-AVX-LABEL: PR19721:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovq %xmm0, %rax
+; X64-AVX-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000
+; X64-AVX-NEXT: andq %rax, %rcx
+; X64-AVX-NEXT: vmovq %rcx, %xmm1
+; X64-AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX-NEXT: retq
%bc = bitcast <4 x i32> %i to i128
%insert = and i128 %bc, -4294967296
%bc2 = bitcast i128 %insert to <4 x i32>
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
-; AVX1-LABEL: PR39893:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: PR39893:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-NEXT: vpsubd %xmm0, %xmm2, %xmm0
-; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT: retq
-;
-; AVX512-LABEL: PR39893:
-; AVX512: # %bb.0:
-; AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT: vpsubd %xmm0, %xmm2, %xmm0
-; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
-; AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX512-NEXT: retq
+; AVX-LABEL: PR39893:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vpsubd %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2],zero,xmm0[3],zero,xmm0[2],zero,xmm0[3],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: retq
%sub = sub <2 x i32> <i32 0, i32 undef>, %x
%bc = bitcast <2 x i32> %sub to <8 x i8>
%shuffle = shufflevector <8 x i8> %y, <8 x i8> %bc, <2 x i32> <i32 10, i32 4>
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: insert_reg_lo_v4i32:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vmovq %rdi, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2OR512VL-LABEL: insert_reg_lo_v4i32:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vmovq %rdi, %xmm1
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
-; AVX2OR512VL-NEXT: retq
+; AVX-LABEL: insert_reg_lo_v4i32:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovq %rdi, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; AVX-NEXT: retq
%a.cast = bitcast i64 %a to <2 x i32>
%v = shufflevector <2 x i32> %a.cast, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
%shuffle = shufflevector <4 x i32> %v, <4 x i32> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,7]
; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
-; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: shuffle_v8i16_0213cedf:
; AVX512VL-SLOW: # %bb.0:
; AVX512VL-SLOW-NEXT: vpshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,5,7]
; AVX512VL-SLOW-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,2,1,3,4,5,6,7]
-; AVX512VL-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VL-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX512VL-SLOW-NEXT: retq
;
; AVX512VL-FAST-LABEL: shuffle_v8i16_0213cedf:
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: shuffle_v8i16_032dXXXX:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
-; AVX1-NEXT: retq
-;
-; AVX2OR512VL-LABEL: shuffle_v8i16_032dXXXX:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
-; AVX2OR512VL-NEXT: retq
+; AVX-LABEL: shuffle_v8i16_032dXXXX:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,6,7,4,5,10,11,0,1,10,11,0,1,2,3]
+; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 3, i32 2, i32 13, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x i16> %shuffle
}
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: shuffle_v8i16_012dcde3:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2OR512VL-LABEL: shuffle_v8i16_012dcde3:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
-; AVX2OR512VL-NEXT: retq
+; AVX-LABEL: shuffle_v8i16_012dcde3:
+; AVX: # %bb.0:
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,10,11,8,9,10,11,12,13,6,7]
+; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 13, i32 12, i32 13, i32 14, i32 3>
ret <8 x i16> %shuffle
}
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
-; AVX1-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: retq
-;
-; AVX2OR512VL-LABEL: shuffle_v8i16_XX4X8acX:
-; AVX2OR512VL: # %bb.0:
-; AVX2OR512VL-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
-; AVX2OR512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
-; AVX2OR512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2OR512VL-NEXT: retq
+; AVX-LABEL: shuffle_v8i16_XX4X8acX:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,4,5]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; AVX-NEXT: retq
%shuffle = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> <i32 undef, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 undef>
ret <8 x i16> %shuffle
}