return SDValue();
}
+static SDValue combineVectorShift(SDNode *N, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
+ assert((X86ISD::VSHLI == N->getOpcode() || X86ISD::VSRLI == N->getOpcode()) &&
+ "Unexpected opcode");
+ EVT VT = N->getValueType(0);
+ unsigned NumBitsPerElt = VT.getScalarSizeInBits();
+
+ // Bail out for non-byte-sized elements, e.g. mask register (vXi1) shifts.
+ if ((NumBitsPerElt % 8) != 0)
+ return SDValue();
+
+ // Out of range logical bit shifts are guaranteed to be zero.
+ APInt ShiftVal = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
+ if (ShiftVal.zextOrTrunc(8).uge(NumBitsPerElt))
+ return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(N));
+
+ // We can decode 'whole byte' logical bit shifts as shuffles.
+ if ((ShiftVal.getZExtValue() % 8) == 0) {
+ SDValue Op(N, 0);
+ SmallVector<int, 1> NonceMask; // Just a placeholder.
+ NonceMask.push_back(0);
+ if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask,
+ /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
+ DCI, Subtarget))
+ return SDValue(); // This routine will use CombineTo to replace N.
+ }
+
+ return SDValue();
+}
+
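As a quick illustration of the out-of-range early-out, here is a minimal IR sketch (a hypothetical test, not part of this patch, assuming an SSE2 target): an immediate logical shift whose count is not smaller than the element width can be replaced by a zero vector outright.

define <4 x i32> @srl_out_of_range(<4 x i32> %a) {
  ; shifting 32-bit elements right by 32 leaves no bits behind,
  ; so the result is simply an all-zero vector
  %r = call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %a, i32 32)
  ret <4 x i32> %r
}
declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32)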
/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
case X86ISD::SETCC: return combineX86SetCC(N, DAG, DCI, Subtarget);
case X86ISD::BRCOND: return combineBrCond(N, DAG, DCI, Subtarget);
+ case X86ISD::VSHLI:
+ case X86ISD::VSRLI: return combineVectorShift(N, DAG, DCI, Subtarget);
case X86ISD::VSEXT:
case X86ISD::VZEXT: return combineVSZext(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
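The 'whole byte' path is what the regenerated checks below exercise: once a bit shift by a multiple of 8 is treated as a byte shuffle, combineX86ShufflesRecursively can merge it into an adjacent shuffle. A minimal IR sketch of that kind of pattern (hypothetical, assuming an SSSE3 target; the test7 checks below show the actual effect), where the 16-bit shift folds into the pshufb produced for the byte swap:

define <4 x i32> @srl_into_pshufb(<4 x i32> %a) {
  ; the byte swap lowers to a single pshufb on SSSE3
  %swap = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> %a)
  ; a shift by 16 bits moves whole bytes, so it can become part of that
  ; pshufb mask (with the vacated bytes zeroed) instead of a separate psrld
  %res = lshr <4 x i32> %swap, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %res
}
declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)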
;
; CHECK-SSSE3-LABEL: test7:
; CHECK-SSSE3: # BB#0: # %entry
-; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-SSSE3-NEXT: psrld $16, %xmm0
+; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,xmm0[9,8],zero,zero,xmm0[13,12],zero,zero
; CHECK-SSSE3-NEXT: retq
;
; CHECK-AVX-LABEL: test7:
; CHECK-AVX: # BB#0: # %entry
-; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,xmm0[9,8],zero,zero,xmm0[13,12],zero,zero
; CHECK-AVX-NEXT: retq
;
; CHECK-WIDE-AVX-LABEL: test7:
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,4294967295,0,4294967295]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm1
; SSE2-NEXT: psrlq $32, %xmm1
; SSE2-NEXT: pmuludq %xmm0, %xmm1
; SSE2-NEXT: psllq $32, %xmm1
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: pmuludq %xmm0, %xmm2
-; SSE41-NEXT: psllq $32, %xmm2
; SSE41-NEXT: psrlq $32, %xmm1
; SSE41-NEXT: pmuludq %xmm4, %xmm1
; SSE41-NEXT: psllq $32, %xmm1
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: pmuludq %xmm0, %xmm2
+; SSE41-NEXT: psllq $32, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX2-LABEL: mul_v4i64_zero_lower:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX512-LABEL: mul_v4i64_zero_lower:
; AVX512: # BB#0: # %entry
; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX512-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_div7_16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
+; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm1, %xmm3
+; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm2, %xmm3
+; SSE2-NEXT: packuswb %xmm1, %xmm3
; SSE2-NEXT: psubb %xmm3, %xmm0
; SSE2-NEXT: psrlw $1, %xmm0
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: test_rem7_16i8:
; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
+; SSE2-NEXT: pmullw %xmm2, %xmm1
; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm1, %xmm3
+; SSE2-NEXT: pmullw %xmm2, %xmm3
; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm2, %xmm3
+; SSE2-NEXT: packuswb %xmm1, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psubb %xmm3, %xmm1
; SSE2-NEXT: psrlw $1, %xmm1
; X32-SSE-NEXT: pand %xmm0, %xmm1
; X32-SSE-NEXT: psrld $16, %xmm0
; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,0,0]
-; X32-SSE-NEXT: psrlq $32, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
+; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm1, %xmm0
+; X32-SSE-NEXT: paddq %xmm2, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
ret <2 x i64> %out
; X32-SSE-NEXT: pand %xmm0, %xmm1
; X32-SSE-NEXT: psrld $16, %xmm0
; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,0,0]
-; X32-SSE-NEXT: psrlq $32, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
+; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm1, %xmm0
+; X32-SSE-NEXT: paddq %xmm2, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
ret <2 x i64> %out
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4294967295,0,0,4294967295,4294967295,0,4294967295]
-; X32-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
+; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X32-AVX-NEXT: retl
; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4294967295,0,0,4294967295,4294967295,0,4294967295]
-; X32-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
+; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
; X32-AVX-NEXT: retl