;
; CHECK-AVX512VL-LABEL: test_srem_odd_even:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
define <4 x i32> @test_srem_odd_allones_eq(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_allones_eq:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2
-; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919]
-; CHECK-SSE2-NEXT: pand %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,4294967295,u>
-; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3
-; CHECK-SSE2-NEXT: psrad $1, %xmm3
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; CHECK-SSE2-NEXT: psrld $31, %xmm2
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT: psrld $31, %xmm0
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_eq:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-SSE41-NEXT: movd %eax, %xmm1
-; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,0]
-; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT: psrad $1, %xmm2
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: psrld $31, %xmm1
-; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
+; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: psrld $31, %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_eq:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX1-NEXT: vmovd %eax, %xmm1
-; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_allones_eq:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX2-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_eq:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5>
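The sequences removed above expand the signed division itself (pmuldq/psrad/psubd) and then compare the remainder against zero; the sequences added instead test divisibility directly with a multiply by the modular inverse, a bias add, and an unsigned range check (pmulld, paddd, pminud, pcmpeqd). Below is a minimal scalar sketch in C of that check for the odd divisor 5, reusing the constants visible in the new CHECK lines: 3435973837 (0xCCCCCCCD) is 5^-1 mod 2^32, 429496729 is the bias floor((2^31 - 1) / 5), and 858993458 is twice the bias. The 4294967295 entry in the pminud vectors covers the divisor -1 lane, whose remainder is always zero, by making that lane's compare unconditionally true. This is an illustration only, not code from the patch.

/* Scalar sketch of the srem-by-5 divisibility check encoded by the new
   CHECK lines: x % 5 == 0 iff (x * 5^-1 + bias) wraps into [0, 2*bias]. */
#include <assert.h>
#include <stdint.h>

static int is_multiple_of_5(int32_t x) {
  uint32_t t = (uint32_t)x * 3435973837u + 429496729u; /* 5^-1 mod 2^32, bias */
  return t <= 858993458u;                              /* 2 * bias */
}

int main(void) {
  for (int64_t x = -100000; x <= 100000; ++x)
    assert(is_multiple_of_5((int32_t)x) == (x % 5 == 0));
  return 0;
}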
define <4 x i32> @test_srem_odd_allones_ne(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_allones_ne:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2
-; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919]
-; CHECK-SSE2-NEXT: pand %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,4294967295,u>
-; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3
-; CHECK-SSE2-NEXT: psrad $1, %xmm3
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; CHECK-SSE2-NEXT: psrld $31, %xmm2
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: psrld $31, %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_ne:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-SSE41-NEXT: movd %eax, %xmm1
-; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,4294967295,0]
-; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT: psrad $1, %xmm2
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: psrld $31, %xmm1
-; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
+; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: pandn {{.*}}(%rip), %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_ne:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX1-NEXT: vmovd %eax, %xmm1
-; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpandn {{.*}}(%rip), %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_allones_ne:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX2-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
; CHECK-AVX2-NEXT: vpandn %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_ne:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 4294967295, i32 5>
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_eq:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: movl $-1840700269, %eax # imm = 0x92492493
-; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 14, i32 4294967295, i32 14>
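For even divisors the multiplicative inverse only exists for the odd factor, so the new sequence multiplies by the inverse of the odd part, adds the bias, and rotates right by the number of trailing zero bits before the range check; that is the vprord $1 in the all-14 hunks above. A hedged scalar sketch for divisor 14 = 2 * 7 follows. The concrete constants are derived here for illustration and are not visible in the test, which only shows them as {{.*}}(%rip) operands: 3067833783 is 7^-1 mod 2^32 and 306783378 is floor((2^31 - 1) / 7).

/* Scalar sketch of the even-divisor form: multiply by the odd part's
   inverse, add the bias, rotate the trailing-zero bit away, range-check. */
#include <assert.h>
#include <stdint.h>

static int is_multiple_of_14(int32_t x) {
  uint32_t t = (uint32_t)x * 3067833783u + 306783378u; /* 7^-1 mod 2^32, bias */
  t = (t >> 1) | (t << 31);                            /* rotate right by ctz(14) = 1 */
  return t <= 306783378u;
}

int main(void) {
  for (int64_t x = -100000; x <= 100000; ++x)
    assert(is_multiple_of_14((int32_t)x) == (x % 14 == 0));
  return 0;
}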
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_ne:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: movl $-1840700269, %eax # imm = 0x92492493
-; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 14, i32 4294967295, i32 14>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_eq:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 14, i32 4294967295, i32 100>
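When a vector mixes odd and even divisors, the rotate amount becomes a per-lane vector (vprorvd) instead of an immediate (vprord $1): each lane rotates by the number of trailing zero bits of its divisor, here 0 for 5 and -1, 1 for 14, and 2 for 100. The per-lane sketch below is for <5, 14, -1, 100>; every multiplier, bias, rotate, and bound is derived here for illustration, since the test hides the actual vectors behind {{.*}}(%rip) operands, and the -1 lane's multiplier and bias are don't-cares because its bound is UINT32_MAX.

/* Per-lane sketch of the fold for divisors <5, 14, -1, 100>; constants
   derived for illustration, not taken from the test file. */
#include <assert.h>
#include <stdint.h>

static uint32_t ror32(uint32_t v, unsigned k) {
  return k ? (v >> k) | (v << (32 - k)) : v;
}

int main(void) {
  const int32_t  div[4]   = {5, 14, -1, 100};
  const uint32_t mul[4]   = {3435973837u, 3067833783u, 1u, 3264175145u}; /* odd-part inverses; -1 lane is a don't-care */
  const uint32_t bias[4]  = {429496729u, 306783378u, 0u, 85899344u};
  const unsigned rot[4]   = {0, 1, 0, 2};                               /* trailing zeros of |divisor| */
  const uint32_t bound[4] = {858993458u, 306783378u, 0xFFFFFFFFu, 42949672u};
  for (int64_t x = -100000; x <= 100000; ++x)
    for (int lane = 0; lane < 4; ++lane) {
      uint32_t t = ror32((uint32_t)x * mul[lane] + bias[lane], rot[lane]);
      assert((t <= bound[lane]) == ((int32_t)x % div[lane] == 0));
    }
  return 0;
}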
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_ne:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpandnd {{.*}}(%rip){1to4}, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 14, i32 4294967295, i32 100>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,2147483649,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
define <4 x i32> @test_srem_odd_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_one:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT: pxor %xmm2, %xmm2
-; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [1717986919,1717986919,0,1717986919]
-; CHECK-SSE2-NEXT: pand %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm4
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm3
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = <0,u,1,u>
-; CHECK-SSE2-NEXT: pmuludq %xmm0, %xmm2
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm3
-; CHECK-SSE2-NEXT: psrad $1, %xmm3
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm4
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm3[3,0]
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0,2]
-; CHECK-SSE2-NEXT: psrld $31, %xmm2
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq {{.*}}(%rip), %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT: psrld $31, %xmm0
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-SSE41-NEXT: movd %eax, %xmm1
-; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuldq {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [0,0,1,0]
-; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm1
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT: psrad $1, %xmm2
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: psrld $31, %xmm1
-; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,858993458,4294967295,858993458]
+; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: psrld $31, %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX1-NEXT: vmovd %eax, %xmm1
-; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuldq {{.*}}(%rip), %xmm2, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX2-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1717986919,1717986919,1717986919,1717986919]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: movl $1717986919, %eax # imm = 0x66666667
-; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 5, i32 1, i32 5>
;
; CHECK-AVX512VL-LABEL: test_srem_even_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2454267027,2454267027,2454267027,2454267027]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: movl $-1840700269, %eax # imm = 0x92492493
-; CHECK-AVX512VL-NEXT: vmovd %eax, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 14, i32 1, i32 14>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2454267027,0,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 14, i32 1, i32 100>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,2147483649,1717986919]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 5>
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,0,2147483649,2454267027]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 16, i32 14>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_poweroftwo:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,2147483649,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 100>
define <4 x i32> @test_srem_odd_allones_and_one(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_allones_and_one:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,1,0]
-; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm1
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1717986919,0,0,1717986919]
-; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
-; CHECK-SSE2-NEXT: pmuludq %xmm4, %xmm1
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,2,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm1
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,3,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; CHECK-SSE2-NEXT: pxor %xmm1, %xmm1
-; CHECK-SSE2-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; CHECK-SSE2-NEXT: pand %xmm4, %xmm3
-; CHECK-SSE2-NEXT: psubd %xmm3, %xmm5
-; CHECK-SSE2-NEXT: paddd %xmm2, %xmm5
-; CHECK-SSE2-NEXT: movdqa %xmm5, %xmm2
-; CHECK-SSE2-NEXT: psrad $1, %xmm2
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm5[1,2]
-; CHECK-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2,3,1]
-; CHECK-SSE2-NEXT: psrld $31, %xmm5
-; CHECK-SSE2-NEXT: pand {{.*}}(%rip), %xmm5
-; CHECK-SSE2-NEXT: paddd %xmm2, %xmm5
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm2 = [5,4294967295,1,5]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm2, %xmm5
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; CHECK-SSE2-NEXT: psubd %xmm4, %xmm0
-; CHECK-SSE2-NEXT: pcmpeqd %xmm1, %xmm0
-; CHECK-SSE2-NEXT: psrld $31, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_allones_and_one:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuldq %xmm2, %xmm3
-; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,4294967295,1,0]
-; CHECK-SSE41-NEXT: pmulld %xmm0, %xmm2
-; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: movdqa %xmm2, %xmm1
-; CHECK-SSE41-NEXT: psrad $1, %xmm1
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT: psrld $31, %xmm2
-; CHECK-SSE41-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3,4,5],xmm2[6,7]
-; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqd %xmm3, %xmm0
+; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [858993458,4294967295,4294967295,858993458]
+; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
+; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: psrld $31, %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_allones_and_one:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsrad $1, %xmm1, %xmm2
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3,4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3,4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_allones_and_one:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3435973837,3435973837,3435973837,3435973837]
+; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [429496729,429496729,429496729,429496729]
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1717986919]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 5>
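This test shows the splat-constant form of the trick: because the -1 and 1 lanes always have a zero remainder, their pminud bound is 4294967295 and the compare is unconditionally true, so the 5-divisor multiplier (3435973837) and bias (429496729) can simply be broadcast to every lane, as the AVX2 block above does. A short sketch mirroring the visible pminud vector [858993458,4294967295,4294967295,858993458]; illustration only, not code from the patch.

/* Lanes <5, -1, 1, 5>: the +-1 lanes reuse 5's splat multiplier and bias;
   only the per-lane bound differs, and 0xFFFFFFFF makes those lanes always
   compare equal. */
#include <assert.h>
#include <stdint.h>

int main(void) {
  const int32_t  div[4]   = {5, -1, 1, 5};
  const uint32_t bound[4] = {858993458u, 0xFFFFFFFFu, 0xFFFFFFFFu, 858993458u};
  for (int64_t x = -100000; x <= 100000; ++x)
    for (int lane = 0; lane < 4; ++lane) {
      uint32_t t = (uint32_t)x * 3435973837u + 429496729u; /* splat, as in the AVX2 block */
      assert((t <= bound[lane]) == ((int32_t)x % div[lane] == 0));
    }
  return 0;
}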
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,0,0,2454267027]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprord $1, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 1, i32 14>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_allones_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,0,0,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1,2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 1, i32 100>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2147483649,0,1717986919]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 16, i32 1, i32 5>
;
; CHECK-AVX512VL-LABEL: test_srem_even_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [2454267027,2147483649,0,2454267027]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpaddd %xmm0, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 16, i32 1, i32 14>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_even_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1717986919,2147483649,0,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 16, i32 1, i32 100>
;
; CHECK-AVX512VL-LABEL: test_srem_odd_allones_and_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrlq $32, %xmm2, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 5, i32 4294967295, i32 16, i32 1>
;
; CHECK-AVX512VL-LABEL: test_srem_even_allones_and_poweroftwo_and_one:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpmuldq {{.*}}(%rip), %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrlq $32, %xmm2, %xmm2
-; CHECK-AVX512VL-NEXT: vpaddd %xmm1, %xmm2, %xmm1
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2],xmm3[3]
-; CHECK-AVX512VL-NEXT: vpsravd {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprorvd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
+; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
%srem = srem <4 x i32> %X, <i32 14, i32 4294967295, i32 16, i32 1>
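;
; Editor's note (not part of the original test file): the rewritten AVX512VL
; checks above all share one shape: a vpmulld by a constant, a vpaddd bias, an
; optional rotate (vprord/vprorvd), and an unsigned below-threshold test spelled
; as vpminud against threshold-1 followed by vpcmpeqd, since x86 has no unsigned
; vector compare. A minimal IR sketch of that check for a uniform odd divisor of
; 25, using the constants visible in the checks below (the function name is
; illustrative only):
define <4 x i32> @srem25_eq0_sketch(<4 x i32> %X) {
  %m = mul <4 x i32> %X, <i32 3264175145, i32 3264175145, i32 3264175145, i32 3264175145>  ; inverse of 25 mod 2^32
  %a = add <4 x i32> %m, <i32 85899345, i32 85899345, i32 85899345, i32 85899345>          ; bias
  %c = icmp ult <4 x i32> %a, <i32 171798691, i32 171798691, i32 171798691, i32 171798691> ; lowered as pminud(x, 171798690) == x
  %r = zext <4 x i1> %c to <4 x i32>
  ret <4 x i32> %r
}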
define <4 x i32> @test_srem_odd_25(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_25:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm3
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,3,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE2-NEXT: pxor %xmm4, %xmm4
-; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; CHECK-SSE2-NEXT: pand %xmm1, %xmm4
-; CHECK-SSE2-NEXT: psubd %xmm4, %xmm2
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1
-; CHECK-SSE2-NEXT: psrld $31, %xmm1
-; CHECK-SSE2-NEXT: psrad $3, %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [25,25,25,25]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm4
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT: pcmpeqd %xmm3, %xmm0
-; CHECK-SSE2-NEXT: psrld $31, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_25:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-SSE41-NEXT: pmuldq %xmm2, %xmm1
-; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm2
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; CHECK-SSE41-NEXT: movdqa %xmm2, %xmm1
-; CHECK-SSE41-NEXT: psrld $31, %xmm1
-; CHECK-SSE41-NEXT: psrad $3, %xmm2
-; CHECK-SSE41-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm2
-; CHECK-SSE41-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
+; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: psrld $31, %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_25:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
-; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX1-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_25:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [25,25,25,25]
-; CHECK-AVX2-NEXT: vpmulld %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [85899345,85899345,85899345,85899345]
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_25:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_even_100:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrad $5, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprord $2, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
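; Editor's note: for an even divisor the only structural change relative to the
; odd case is the extra rotate (here vprord $2, matching the two trailing zero
; bits of 100), which moves the low bits that must be zero into the high bits so
; that a non-multiple of 4 lands above the threshold; the multiply/add/minud/
; cmpeq skeleton is unchanged. (Interpretation added by the editor; the concrete
; constants are visible in the scalar test_srem_even_100 further below.)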
define <4 x i32> @test_srem_odd_neg25(<4 x i32> %X) nounwind {
; CHECK-SSE2-LABEL: test_srem_odd_neg25:
; CHECK-SSE2: # %bb.0:
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm2
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm3, %xmm4
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,3,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; CHECK-SSE2-NEXT: pxor %xmm3, %xmm3
-; CHECK-SSE2-NEXT: pxor %xmm4, %xmm4
-; CHECK-SSE2-NEXT: pcmpgtd %xmm0, %xmm4
-; CHECK-SSE2-NEXT: pand %xmm1, %xmm4
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,4294967295,0]
-; CHECK-SSE2-NEXT: pand %xmm0, %xmm1
-; CHECK-SSE2-NEXT: paddd %xmm4, %xmm1
-; CHECK-SSE2-NEXT: psubd %xmm1, %xmm2
-; CHECK-SSE2-NEXT: movdqa %xmm2, %xmm1
-; CHECK-SSE2-NEXT: psrld $31, %xmm1
-; CHECK-SSE2-NEXT: psrad $3, %xmm2
-; CHECK-SSE2-NEXT: paddd %xmm1, %xmm2
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [25,4294967271,4294967271,25]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm1, %xmm2
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
-; CHECK-SSE2-NEXT: pmuludq %xmm4, %xmm1
-; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-SSE2-NEXT: psubd %xmm2, %xmm0
-; CHECK-SSE2-NEXT: pcmpeqd %xmm3, %xmm0
-; CHECK-SSE2-NEXT: psrld $31, %xmm0
+; CHECK-SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; CHECK-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; CHECK-SSE2-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pxor {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pcmpgtd {{.*}}(%rip), %xmm0
+; CHECK-SSE2-NEXT: pandn {{.*}}(%rip), %xmm0
; CHECK-SSE2-NEXT: retq
;
; CHECK-SSE41-LABEL: test_srem_odd_neg25:
; CHECK-SSE41: # %bb.0:
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-SSE41-NEXT: pmuldq %xmm2, %xmm3
-; CHECK-SSE41-NEXT: pmuldq %xmm0, %xmm1
-; CHECK-SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
-; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm2
-; CHECK-SSE41-NEXT: psrld $31, %xmm2
-; CHECK-SSE41-NEXT: psrad $3, %xmm1
-; CHECK-SSE41-NEXT: paddd %xmm2, %xmm1
-; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: psubd %xmm1, %xmm0
-; CHECK-SSE41-NEXT: pxor %xmm1, %xmm1
+; CHECK-SSE41-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: paddd {{.*}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-SSE41-NEXT: pminud %xmm0, %xmm1
; CHECK-SSE41-NEXT: pcmpeqd %xmm1, %xmm0
; CHECK-SSE41-NEXT: psrld $31, %xmm0
; CHECK-SSE41-NEXT: retq
;
; CHECK-AVX1-LABEL: test_srem_odd_neg25:
; CHECK-AVX1: # %bb.0:
-; CHECK-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; CHECK-AVX1-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX1-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX1-NEXT: vpmulld {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-AVX1-NEXT: vpminud {{.*}}(%rip), %xmm0, %xmm1
; CHECK-AVX1-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX1-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX1-NEXT: retq
;
; CHECK-AVX2-LABEL: test_srem_odd_neg25:
; CHECK-AVX2: # %bb.0:
-; CHECK-AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX2-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX2-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX2-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
+; CHECK-AVX2-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [85899345,85899345,85899345,85899345]
+; CHECK-AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [171798690,171798690,171798690,171798690]
+; CHECK-AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1
; CHECK-AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX2-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX2-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_odd_neg25:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1374389535,2920577761,2920577761,1374389535]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrad $3, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
;
; CHECK-AVX512VL-LABEL: test_srem_even_neg100:
; CHECK-AVX512VL: # %bb.0:
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1374389535,1374389535,1374389535,1374389535]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2920577761,2920577761,2920577761,2920577761]
-; CHECK-AVX512VL-NEXT: vpmuldq %xmm2, %xmm0, %xmm2
-; CHECK-AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
-; CHECK-AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2],xmm1[3]
-; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm1, %xmm2
-; CHECK-AVX512VL-NEXT: vpsrad $5, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip), %xmm1, %xmm1
-; CHECK-AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
-; CHECK-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-AVX512VL-NEXT: vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vprord $2, %xmm0, %xmm0
+; CHECK-AVX512VL-NEXT: vpminud {{.*}}(%rip){1to4}, %xmm0, %xmm1
; CHECK-AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: vpsrld $31, %xmm0, %xmm0
; CHECK-AVX512VL-NEXT: retq
define i32 @test_srem_odd(i32 %X) nounwind {
; X86-LABEL: test_srem_odd:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1717986919, %edx # imm = 0x66666667
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: leal (%edx,%edx,4), %edx
+; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD
+; X86-NEXT: addl $429496729, %ecx # imm = 0x19999999
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $858993459, %ecx # imm = 0x33333333
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_odd:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $1717986919, %rcx, %rax # imm = 0x66666667
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $33, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: leal (%rax,%rax,4), %edx
+; X64-NEXT: imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD
+; X64-NEXT: addl $429496729, %ecx # imm = 0x19999999
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $858993459, %ecx # imm = 0x33333333
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 5
%cmp = icmp eq i32 %srem, 0
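;
; Editor's sketch (not part of the original test): in IR terms the rewritten
; X86/X64 code above checks divisibility by 5 as a multiply by the inverse of 5
; modulo 2^32 (0xCCCCCCCD), a bias of 0x19999999, and an unsigned compare
; against 0x33333333; the function name is illustrative only.
define i1 @srem5_eq0_sketch(i32 %X) {
  %m = mul i32 %X, -858993459      ; X * 0xCCCCCCCD
  %a = add i32 %m, 429496729       ; + 0x19999999
  %c = icmp ult i32 %a, 858993459  ; u< 0x33333333
  ret i1 %c
}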
define i32 @test_srem_odd_25(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_25:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1374389535, %edx # imm = 0x51EB851F
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $3, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: leal (%edx,%edx,4), %eax
-; X86-NEXT: leal (%eax,%eax,4), %edx
+; X86-NEXT: imull $-1030792151, {{[0-9]+}}(%esp), %ecx # imm = 0xC28F5C29
+; X86-NEXT: addl $85899345, %ecx # imm = 0x51EB851
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $171798691, %ecx # imm = 0xA3D70A3
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_odd_25:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $1374389535, %rcx, %rax # imm = 0x51EB851F
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $35, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: leal (%rax,%rax,4), %eax
-; X64-NEXT: leal (%rax,%rax,4), %edx
+; X64-NEXT: imull $-1030792151, %edi, %ecx # imm = 0xC28F5C29
+; X64-NEXT: addl $85899345, %ecx # imm = 0x51EB851
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $171798691, %ecx # imm = 0xA3D70A3
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 25
%cmp = icmp eq i32 %srem, 0
define i32 @test_srem_odd_bit30(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_bit30:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $536870911, %edx # imm = 0x1FFFFFFF
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $27, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: imull $1073741827, %edx, %edx # imm = 0x40000003
+; X86-NEXT: imull $1789569707, {{[0-9]+}}(%esp), %ecx # imm = 0x6AAAAAAB
+; X86-NEXT: incl %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $3, %ecx
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_odd_bit30:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: shlq $29, %rax
-; X64-NEXT: subq %rcx, %rax
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $59, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: imull $1073741827, %eax, %edx # imm = 0x40000003
+; X64-NEXT: imull $1789569707, %edi, %ecx # imm = 0x6AAAAAAB
+; X64-NEXT: incl %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $3, %ecx
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 1073741827
%cmp = icmp eq i32 %srem, 0
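;
; Editor's note: 1073741827 is just above 2^30, so only three signed multiples
; (-1073741827, 0 and 1073741827) fit in an i32, which is why the rewritten code
; only has to compare against 3. A sketch with the constants from the checks
; above (1789569707 == 0x6AAAAAAB is the inverse of 1073741827 mod 2^32; the
; function name is illustrative only):
define i1 @srem_bit30_eq0_sketch(i32 %X) {
  %m = mul i32 %X, 1789569707  ; 0x6AAAAAAB
  %a = add i32 %m, 1           ; the incl in the checks
  %c = icmp ult i32 %a, 3
  ret i1 %c
}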
define i32 @test_srem_odd_bit31(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_bit31:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $-536870913, %edx # imm = 0xDFFFFFFF
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $28, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: imull $-2147483645, %edx, %edx # imm = 0x80000003
+; X86-NEXT: imull $-715827883, {{[0-9]+}}(%esp), %ecx # imm = 0xD5555555
+; X86-NEXT: incl %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $3, %ecx
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_odd_bit31:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: movq %rcx, %rax
-; X64-NEXT: shlq $29, %rax
-; X64-NEXT: addq %rcx, %rax
-; X64-NEXT: negq %rax
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $60, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: imull $-2147483645, %eax, %edx # imm = 0x80000003
+; X64-NEXT: imull $-715827883, %edi, %ecx # imm = 0xD5555555
+; X64-NEXT: incl %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $3, %ecx
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 2147483651
%cmp = icmp eq i32 %srem, 0
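;
; Editor's note: the divisor literal 2147483651 wraps to the i32 value
; -2147483645 (0x80000003); as in the bit30 case only the multiples
; -2147483645, 0 and 2147483645 are representable, so the compare constant is
; again 3.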
define i16 @test_srem_even(i16 %X) nounwind {
; X86-LABEL: test_srem_even:
; X86: # %bb.0:
-; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: imull $18725, %ecx, %eax # imm = 0x4925
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: shrl $31, %edx
-; X86-NEXT: sarl $18, %eax
-; X86-NEXT: addl %edx, %eax
-; X86-NEXT: movl %eax, %edx
-; X86-NEXT: shll $4, %edx
-; X86-NEXT: subl %eax, %edx
-; X86-NEXT: subl %eax, %edx
+; X86-NEXT: imull $28087, {{[0-9]+}}(%esp), %eax # imm = 0x6DB7
+; X86-NEXT: addl $4680, %eax # imm = 0x1248
+; X86-NEXT: rorw %ax
+; X86-NEXT: movzwl %ax, %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpw %dx, %cx
-; X86-NEXT: setne %al
+; X86-NEXT: cmpl $4680, %ecx # imm = 0x1248
+; X86-NEXT: seta %al
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
;
; X64-LABEL: test_srem_even:
; X64: # %bb.0:
-; X64-NEXT: movswl %di, %ecx
-; X64-NEXT: imull $18725, %ecx, %eax # imm = 0x4925
-; X64-NEXT: movl %eax, %edx
-; X64-NEXT: shrl $31, %edx
-; X64-NEXT: sarl $18, %eax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: movl %eax, %edx
-; X64-NEXT: shll $4, %edx
-; X64-NEXT: subl %eax, %edx
-; X64-NEXT: subl %eax, %edx
+; X64-NEXT: imull $28087, %edi, %eax # imm = 0x6DB7
+; X64-NEXT: addl $4680, %eax # imm = 0x1248
+; X64-NEXT: rorw %ax
+; X64-NEXT: movzwl %ax, %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpw %dx, %cx
-; X64-NEXT: setne %al
+; X64-NEXT: cmpl $4680, %ecx # imm = 0x1248
+; X64-NEXT: seta %al
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
%srem = srem i16 %X, 14
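;
; Editor's sketch (assumption: the checks above implement the "remainder is
; nonzero" direction, as the seta suggests): divisibility of an i16 by
; 14 = 2 * 7 is tested by multiplying by the inverse of 7 mod 2^16 (0x6DB7),
; adding the 0x1248 bias, rotating right by one bit (the rorw), and comparing
; against 4680. Names below are illustrative only.
declare i16 @llvm.fshr.i16(i16, i16, i16)

define i1 @srem14_ne0_sketch(i16 %X) {
  %m = mul i16 %X, 28087                                ; 0x6DB7
  %a = add i16 %m, 4680                                 ; 0x1248
  %r = call i16 @llvm.fshr.i16(i16 %a, i16 %a, i16 1)   ; rotate right by 1
  %c = icmp ugt i16 %r, 4680                            ; nonzero remainder
  ret i1 %c
}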
define i32 @test_srem_even_100(i32 %X) nounwind {
; X86-LABEL: test_srem_even_100:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1374389535, %edx # imm = 0x51EB851F
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $5, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: imull $100, %edx, %edx
+; X86-NEXT: imull $-1030792151, {{[0-9]+}}(%esp), %ecx # imm = 0xC28F5C29
+; X86-NEXT: addl $85899344, %ecx # imm = 0x51EB850
+; X86-NEXT: rorl $2, %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $42949673, %ecx # imm = 0x28F5C29
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_even_100:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $1374389535, %rcx, %rax # imm = 0x51EB851F
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $37, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: imull $100, %eax, %edx
+; X64-NEXT: imull $-1030792151, %edi, %ecx # imm = 0xC28F5C29
+; X64-NEXT: addl $85899344, %ecx # imm = 0x51EB850
+; X64-NEXT: rorl $2, %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $42949673, %ecx # imm = 0x28F5C29
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 100
%cmp = icmp eq i32 %srem, 0
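;
; Editor's sketch (not part of the original test): 100 = 4 * 25, so the code
; above multiplies by the inverse of 25 mod 2^32 (0xC28F5C29), adds the
; 0x51EB850 bias, rotates right by the two trailing zero bits (rorl $2), and
; compares unsigned against 0x28F5C29. In IR, with illustrative names:
declare i32 @llvm.fshr.i32(i32, i32, i32)

define i1 @srem100_eq0_sketch(i32 %X) {
  %m = mul i32 %X, -1030792151                          ; 0xC28F5C29
  %a = add i32 %m, 85899344                             ; 0x51EB850
  %r = call i32 @llvm.fshr.i32(i32 %a, i32 %a, i32 2)   ; rotate right by 2
  %c = icmp ult i32 %r, 42949673                        ; 0x28F5C29
  ret i1 %c
}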
define i32 @test_srem_even_bit30(i32 %X) nounwind {
; X86-LABEL: test_srem_even_bit30:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1073741721, %edx # imm = 0x3FFFFF99
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $28, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: imull $1073741928, %edx, %edx # imm = 0x40000068
+; X86-NEXT: imull $-51622203, {{[0-9]+}}(%esp), %ecx # imm = 0xFCEC4EC5
+; X86-NEXT: addl $8, %ecx
+; X86-NEXT: rorl $3, %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $3, %ecx
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_even_bit30:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $1073741721, %rcx, %rax # imm = 0x3FFFFF99
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $60, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: imull $1073741928, %eax, %edx # imm = 0x40000068
+; X64-NEXT: imull $-51622203, %edi, %ecx # imm = 0xFCEC4EC5
+; X64-NEXT: addl $8, %ecx
+; X64-NEXT: rorl $3, %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $3, %ecx
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 1073741928
%cmp = icmp eq i32 %srem, 0
define i32 @test_srem_even_bit31(i32 %X) nounwind {
; X86-LABEL: test_srem_even_bit31:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $2147483545, %edx # imm = 0x7FFFFF99
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: subl %ecx, %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $30, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: imull $-2147483546, %edx, %edx # imm = 0x80000066
+; X86-NEXT: imull $-989526779, {{[0-9]+}}(%esp), %ecx # imm = 0xC5050505
+; X86-NEXT: addl $2, %ecx
+; X86-NEXT: rorl %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: sete %al
+; X86-NEXT: cmpl $3, %ecx
+; X86-NEXT: setb %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_even_bit31:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $2147483545, %rcx, %rax # imm = 0x7FFFFF99
-; X64-NEXT: shrq $32, %rax
-; X64-NEXT: subl %ecx, %eax
-; X64-NEXT: movl %eax, %edx
-; X64-NEXT: shrl $31, %edx
-; X64-NEXT: sarl $30, %eax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: imull $-2147483546, %eax, %edx # imm = 0x80000066
+; X64-NEXT: imull $-989526779, %edi, %ecx # imm = 0xC5050505
+; X64-NEXT: addl $2, %ecx
+; X64-NEXT: rorl %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: sete %al
+; X64-NEXT: cmpl $3, %ecx
+; X64-NEXT: setb %al
; X64-NEXT: retq
%srem = srem i32 %X, 2147483750
%cmp = icmp eq i32 %srem, 0
define i32 @test_srem_odd_setne(i32 %X) nounwind {
; X86-LABEL: test_srem_odd_setne:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1717986919, %edx # imm = 0x66666667
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: leal (%edx,%edx,4), %edx
+; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD
+; X86-NEXT: addl $429496729, %ecx # imm = 0x19999999
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: setne %al
+; X86-NEXT: cmpl $858993458, %ecx # imm = 0x33333332
+; X86-NEXT: seta %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_odd_setne:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $1717986919, %rcx, %rax # imm = 0x66666667
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $33, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: leal (%rax,%rax,4), %edx
+; X64-NEXT: imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD
+; X64-NEXT: addl $429496729, %ecx # imm = 0x19999999
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: setne %al
+; X64-NEXT: cmpl $858993458, %ecx # imm = 0x33333332
+; X64-NEXT: seta %al
; X64-NEXT: retq
%srem = srem i32 %X, 5
%cmp = icmp ne i32 %srem, 0
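;
; Editor's note: the setne variant produces the same multiply-add value as
; test_srem_odd; only the final inequality is flipped, comparing above
; 858993458 (0x33333332) with seta instead of below 858993459 with setb.
; A sketch of the flipped check (illustrative name):
define i1 @srem5_ne0_sketch(i32 %X) {
  %m = mul i32 %X, -858993459
  %a = add i32 %m, 429496729
  %c = icmp ugt i32 %a, 858993458
  ret i1 %c
}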
define i32 @test_srem_negative_odd(i32 %X) nounwind {
; X86-LABEL: test_srem_negative_odd:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $-1717986919, %edx # imm = 0x99999999
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: leal (%edx,%edx,4), %edx
+; X86-NEXT: imull $-858993459, {{[0-9]+}}(%esp), %ecx # imm = 0xCCCCCCCD
+; X86-NEXT: addl $429496729, %ecx # imm = 0x19999999
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: addl %ecx, %edx
-; X86-NEXT: setne %al
+; X86-NEXT: cmpl $858993458, %ecx # imm = 0x33333332
+; X86-NEXT: seta %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_negative_odd:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $-1717986919, %rcx, %rax # imm = 0x99999999
-; X64-NEXT: movq %rax, %rdx
-; X64-NEXT: shrq $63, %rdx
-; X64-NEXT: sarq $33, %rax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: leal (%rax,%rax,4), %edx
+; X64-NEXT: imull $-858993459, %edi, %ecx # imm = 0xCCCCCCCD
+; X64-NEXT: addl $429496729, %ecx # imm = 0x19999999
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: addl %edx, %ecx
-; X64-NEXT: setne %al
+; X64-NEXT: cmpl $858993458, %ecx # imm = 0x33333332
+; X64-NEXT: seta %al
; X64-NEXT: retq
%srem = srem i32 %X, -5
%cmp = icmp ne i32 %srem, 0
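;
; Editor's note: srem %X, -5 is zero exactly when srem %X, 5 is zero, so the
; rewritten sequence here is identical to test_srem_odd_setne above (same
; inverse, bias and threshold).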
define i32 @test_srem_negative_even(i32 %X) nounwind {
; X86-LABEL: test_srem_negative_even:
; X86: # %bb.0:
-; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT: movl $1840700269, %edx # imm = 0x6DB6DB6D
-; X86-NEXT: movl %ecx, %eax
-; X86-NEXT: imull %edx
-; X86-NEXT: subl %ecx, %edx
-; X86-NEXT: movl %edx, %eax
-; X86-NEXT: shrl $31, %eax
-; X86-NEXT: sarl $3, %edx
-; X86-NEXT: addl %eax, %edx
-; X86-NEXT: imull $-14, %edx, %edx
+; X86-NEXT: imull $-1227133513, {{[0-9]+}}(%esp), %ecx # imm = 0xB6DB6DB7
+; X86-NEXT: addl $306783378, %ecx # imm = 0x12492492
+; X86-NEXT: rorl %ecx
; X86-NEXT: xorl %eax, %eax
-; X86-NEXT: cmpl %edx, %ecx
-; X86-NEXT: setne %al
+; X86-NEXT: cmpl $306783378, %ecx # imm = 0x12492492
+; X86-NEXT: seta %al
; X86-NEXT: retl
;
; X64-LABEL: test_srem_negative_even:
; X64: # %bb.0:
-; X64-NEXT: movslq %edi, %rcx
-; X64-NEXT: imulq $1840700269, %rcx, %rax # imm = 0x6DB6DB6D
-; X64-NEXT: shrq $32, %rax
-; X64-NEXT: subl %ecx, %eax
-; X64-NEXT: movl %eax, %edx
-; X64-NEXT: shrl $31, %edx
-; X64-NEXT: sarl $3, %eax
-; X64-NEXT: addl %edx, %eax
-; X64-NEXT: imull $-14, %eax, %edx
+; X64-NEXT: imull $-1227133513, %edi, %ecx # imm = 0xB6DB6DB7
+; X64-NEXT: addl $306783378, %ecx # imm = 0x12492492
+; X64-NEXT: rorl %ecx
; X64-NEXT: xorl %eax, %eax
-; X64-NEXT: cmpl %edx, %ecx
-; X64-NEXT: setne %al
+; X64-NEXT: cmpl $306783378, %ecx # imm = 0x12492492
+; X64-NEXT: seta %al
; X64-NEXT: retq
%srem = srem i32 %X, -14
%cmp = icmp ne i32 %srem, 0
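;
; Editor's note: the -14 case follows the same pattern as test_srem_even: a
; multiply by the inverse of 7 mod 2^32 (0xB6DB6DB7), the 0x12492492 bias, a
; rotate right by one bit for the single trailing zero of |-14|, and the
; compare flipped to seta for the icmp ne.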