if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
// fold (sra x, c >= size(x)) -> undef
- if (N1C && N1C->getAPIntValue().uge(OpSizeInBits))
+ // NOTE: ALL vector elements must be too big to avoid partial UNDEFs.
+ auto MatchShiftTooBig = [OpSizeInBits](ConstantSDNode *Val) {
+ return Val->getAPIntValue().uge(OpSizeInBits);
+ };
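+ // e.g. for a <4 x i32> shift, an amount of <33, 34, 35, 36> has every
+ // element >= 32 and the whole node folds to UNDEF (see
+ // combine_vec_ashr_outofrange1 below); if any element were in range the
+ // fold must not fire, as that would turn defined lanes into undef.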
+ if (ISD::matchUnaryPredicate(N1, MatchShiftTooBig))
return DAG.getUNDEF(VT);
// fold (sra x, 0) -> x
if (N1C && N1C->isNullValue())
}
// fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
- if (N1C && N0.getOpcode() == ISD::SRA) {
- if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
- SDLoc DL(N);
- APInt c1 = N0C1->getAPIntValue();
- APInt c2 = N1C->getAPIntValue();
- zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
+ if (N0.getOpcode() == ISD::SRA) {
+ SDLoc DL(N);
+ EVT ShiftVT = N1.getValueType();
- APInt Sum = c1 + c2;
- if (Sum.uge(OpSizeInBits))
- Sum = APInt(OpSizeInBits, OpSizeInBits - 1);
+ auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
+ ConstantSDNode *RHS) {
+ APInt c1 = LHS->getAPIntValue();
+ APInt c2 = RHS->getAPIntValue();
+ zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
+ return (c1 + c2).uge(OpSizeInBits);
+ };
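+ // The extra bit requested from zeroExtendToMatch stops c1 + c2 from
+ // wrapping when the shift-amount elements are narrow (e.g. i8 amounts);
+ // the sum is compared against OpSizeInBits at full precision. Once the
+ // total shift reaches the bit width, every bit is already a copy of the
+ // sign bit, so the combined shift is clamped to OpSizeInBits - 1 (see
+ // combine_vec_ashr_ashr2 below).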
+ if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
+ return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0),
+ DAG.getConstant(OpSizeInBits - 1, DL, ShiftVT));
- return DAG.getNode(
- ISD::SRA, DL, VT, N0.getOperand(0),
- DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType()));
+ auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
+ ConstantSDNode *RHS) {
+ APInt c1 = LHS->getAPIntValue();
+ APInt c2 = RHS->getAPIntValue();
+ zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
+ return (c1 + c2).ult(OpSizeInBits);
+ };
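+ // e.g. (sra (sra x, <0,1,2,3>), <4,5,6,7>) -> (sra x, <4,6,8,10>)
+ // (see combine_vec_ashr_ashr1 below).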
+ if (ISD::matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
+ SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
+ return DAG.getNode(ISD::SRA, DL, VT, N0.getOperand(0), Sum);
}
}
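+; All shift amounts (33..36) are >= the 32-bit element width, so the ashr
+; folds to undef and only the return remains.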
define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_outofrange1:
; SSE: # BB#0:
-; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_outofrange1:
; AVX: # BB#0:
-; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
ret <4 x i32> %1
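+; The two per-element constant shifts are merged into a single variable
+; ashr by <4, 6, 8, 10>, leaving one vpsravd on AVX.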
define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr1:
; SSE: # BB#0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: psrad $10, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrad $6, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $2, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: psrad $3, %xmm0
-; SSE-NEXT: psrad $1, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrad $7, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrad $5, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: psrad $6, %xmm0
-; SSE-NEXT: psrad $4, %xmm1
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: psrad $8, %xmm1
+; SSE-NEXT: psrad $4, %xmm0
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr1:
; AVX: # BB#0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
%2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
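+; Each pair of shift amounts sums to >= 32 (17+25 at minimum), so the merged
+; shift is clamped to 31 and the result is a sign splat (psrad $31).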
define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr2:
; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $20, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $18, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $19, %xmm1
-; SSE-NEXT: psrad $17, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $28, %xmm1
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrad $26, %xmm2
-; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: psrad $27, %xmm1
-; SSE-NEXT: psrad $25, %xmm0
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr2:
; AVX: # BB#0:
-; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
%2 = ashr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>