ret <8 x i16> %5
}
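+; A compare mask whose upper half is known zero is copied with a
+; zero-extending MOVQ rather than packed.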
-define <16 x i8> @packsswb_icmp_128_zero(<8 x i16> %a0) {
-; SSE-LABEL: packsswb_icmp_128_zero:
+define <16 x i8> @packsswb_icmp_zero_128(<8 x i16> %a0) {
+; SSE-LABEL: packsswb_icmp_zero_128:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pcmpeqw %xmm0, %xmm1
; SSE-NEXT: movq {{.*#+}} xmm0 = xmm1[0],zero
; SSE-NEXT: ret{{[l|q]}}
;
-; AVX-LABEL: packsswb_icmp_128_zero:
+; AVX-LABEL: packsswb_icmp_zero_128:
; AVX: # %bb.0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
ret <16 x i8> %3
}
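+
+; Truncating a compare mask concatenated with zero packs against a zeroed
+; register: PACKSSWB is lossless on the 0/-1 words of a sign-extended mask.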
+define <16 x i8> @packsswb_icmp_zero_trunc_128(<8 x i16> %a0) {
+; SSE-LABEL: packsswb_icmp_zero_trunc_128:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm1, %xmm1
+; SSE-NEXT: pcmpeqw %xmm1, %xmm0
+; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: packsswb_icmp_zero_trunc_128:
+; AVX: # %bb.0:
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+ %1 = icmp eq <8 x i16> %a0, zeroinitializer
+ %2 = sext <8 x i1> %1 to <8 x i16>
+ %3 = shufflevector <8 x i16> %2, <8 x i16> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %4 = trunc <16 x i16> %3 to <16 x i8>
+ ret <16 x i8> %4
+}
+
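+; Each 128-bit half of the result holds zeros in its low 8 bytes and the
+; truncated compare mask in its high 8 bytes.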
define <32 x i8> @packsswb_icmp_zero_256(<16 x i16> %a0) {
; SSE-LABEL: packsswb_icmp_zero_256:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pcmpeqw %xmm2, %xmm1
; SSE-NEXT: pcmpeqw %xmm2, %xmm0
-; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3]
-; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
-; SSE-NEXT: movaps %xmm2, %xmm1
+; SSE-NEXT: packsswb %xmm0, %xmm0
+; SSE-NEXT: movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; SSE-NEXT: packsswb %xmm1, %xmm1
+; SSE-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: packsswb_icmp_zero_256:
; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpeqw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vpcmpeqw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpcmpeqw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,0,2,4,6,8,10,12,14]
+; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX2-LABEL: packsswb_icmp_zero_256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,ymm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,18,20,22,24,26,28,30]
+; AVX2-NEXT: ret{{[l|q]}}
+ %1 = icmp eq <16 x i16> %a0, zeroinitializer
+ %2 = sext <16 x i1> %1 to <16 x i16>
+ %3 = bitcast <16 x i16> %2 to <32 x i8>
+ %4 = shufflevector <32 x i8> zeroinitializer, <32 x i8> %3, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 34, i32 36, i32 38, i32 40, i32 42, i32 44, i32 46, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 48, i32 50, i32 52, i32 54, i32 56, i32 58, i32 60, i32 62>
+ ret <32 x i8> %4
+}
+
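+; 256-bit version of the trunc pattern: each <8 x i16> half of the mask is
+; concatenated with zero and truncated, so both halves pack against a zero
+; register.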
+define <32 x i8> @packsswb_icmp_zero_trunc_256(<16 x i16> %a0) {
+; SSE-LABEL: packsswb_icmp_zero_trunc_256:
+; SSE: # %bb.0:
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pcmpeqw %xmm2, %xmm1
+; SSE-NEXT: pcmpeqw %xmm2, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm3
+; SSE-NEXT: packsswb %xmm0, %xmm3
+; SSE-NEXT: packsswb %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX1-LABEL: packsswb_icmp_zero_trunc_256:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpeqw %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = zero,zero,ymm0[0,1]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm1
+; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: ret{{[l|q]}}
+;
+; AVX2-LABEL: packsswb_icmp_zero_trunc_256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
+; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: ret{{[l|q]}}
%1 = icmp eq <16 x i16> %a0, zeroinitializer
- %2 = sext <16 x i1> %1 to <16 x i8>
- %3 = shufflevector <16 x i8> zeroinitializer, <16 x i8> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
- ret <32 x i8> %3
+ %2 = sext <16 x i1> %1 to <16 x i16>
+ %3 = shufflevector <16 x i16> zeroinitializer, <16 x i16> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+ %4 = trunc <32 x i16> %3 to <32 x i8>
+ ret <32 x i8> %4
}