/// Return true if a select of constants (select Cond, C1, C2) should be
/// transformed into simple math ops with the condition value. For example:
/// select Cond, C1, C1-1 --> add (zext Cond), C1-1
- virtual bool convertSelectOfConstantsToMath() const {
+ virtual bool convertSelectOfConstantsToMath(EVT VT) const {
return false;
}
SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
SDValue foldSelectOfConstants(SDNode *N);
+ SDValue foldVSelectOfConstants(SDNode *N);
SDValue foldBinOpIntoSelect(SDNode *BO);
bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
// For any constants that differ by 1, we can transform the select into an
// extend and add. Use a target hook because some targets may prefer to
// transform in the other direction.
- if (TLI.convertSelectOfConstantsToMath()) {
+ if (TLI.convertSelectOfConstantsToMath(VT)) {
if (C1->getAPIntValue() - 1 == C2->getAPIntValue()) {
// select Cond, C1, C1-1 --> add (zext Cond), C1-1
if (VT != MVT::i1)
return SDValue();
}
+/// A vector select of 2 constant vectors can be simplified to math/logic to
+/// avoid a variable select instruction and possibly avoid constant loads.
+SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
+ SDValue Cond = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue N2 = N->getOperand(2);
+ EVT VT = N->getValueType(0);
+ if (!Cond.hasOneUse() || Cond.getScalarValueSizeInBits() != 1 ||
+ !TLI.convertSelectOfConstantsToMath(VT) ||
+ !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()) ||
+ !ISD::isBuildVectorOfConstantSDNodes(N2.getNode()))
+ return SDValue();
+
+ // Check if we can use the condition value to increment/decrement a single
+ // constant value. This simplifies a select to an add and removes a constant
+ // load/materialization from the general case.
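+  // For example, with the constants used in the tests for this fold:
+  //   vselect Cond, <43, 1, -1, 0>, <42, 0, -2, -1>
+  //     --> add (zext Cond), <42, 0, -2, -1>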
+ bool AllAddOne = true;
+ bool AllSubOne = true;
+ unsigned Elts = VT.getVectorNumElements();
+ for (unsigned i = 0; i != Elts; ++i) {
+ SDValue N1Elt = N1.getOperand(i);
+ SDValue N2Elt = N2.getOperand(i);
+ if (N1Elt.isUndef() || N2Elt.isUndef())
+ continue;
+
+ const APInt &C1 = cast<ConstantSDNode>(N1Elt)->getAPIntValue();
+ const APInt &C2 = cast<ConstantSDNode>(N2Elt)->getAPIntValue();
+ if (C1 != C2 + 1)
+ AllAddOne = false;
+ if (C1 != C2 - 1)
+ AllSubOne = false;
+ }
+
+ // Further simplifications for the extra-special cases where the constants are
+ // all 0 or all -1 should be implemented as folds of these patterns.
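+  // (For example, vselect Cond, -1, 0 becomes add (sext Cond), 0 here, and
+  //  the add of zero then folds away to leave just the sign-extended Cond.)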
+ SDLoc DL(N);
+ if (AllAddOne || AllSubOne) {
+ // vselect <N x i1> Cond, C+1, C --> add (zext Cond), C
+ // vselect <N x i1> Cond, C-1, C --> add (sext Cond), C
+ auto ExtendOpcode = AllAddOne ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+ SDValue ExtendedCond = DAG.getNode(ExtendOpcode, DL, VT, Cond);
+ return DAG.getNode(ISD::ADD, DL, VT, ExtendedCond, N2);
+ }
+
+ // The general case for select-of-constants:
+ // vselect <N x i1> Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
+ // ...but that only makes sense if a vselect is slower than 2 logic ops, so
+ // leave that to a machine-specific pass.
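+  // (When a lane of Cond is true, the sext lane is all-ones, so that lane is
+  //  (C1 ^ C2) ^ C2 = C1; when false, it is 0 ^ C2 = C2.)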
+ return SDValue();
+}
+
SDValue DAGCombiner::visitVSELECT(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
return CV;
}
+ if (SDValue V = foldVSelectOfConstants(N))
+ return V;
+
return SDValue();
}
SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true))
return SCC;
- if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath()) {
+ if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath(VT)) {
EVT SetCCVT = getSetCCResultType(N00VT);
// Don't do this transform for i1 because there's a select transform
// that would reverse it.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
- bool convertSelectOfConstantsToMath() const override {
+ bool convertSelectOfConstantsToMath(EVT VT) const override {
return true;
}
return true;
}
+bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
+ // TODO: It might be a win to ease or lift this restriction, but the generic
+ // folds in DAGCombiner conflict with vector folds for an AVX512 target.
+ if (VT.isVector() && Subtarget.hasAVX512())
+ return false;
+
+ return true;
+}
+
bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const {
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
- bool convertSelectOfConstantsToMath() const override {
- return true;
- }
+ bool convertSelectOfConstantsToMath(EVT VT) const override;
/// Return true if EXTRACT_SUBVECTOR is cheap for this result type
/// with this index.
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_Cplus1_or_C_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
+; CHECK-NEXT: vspltisw 3, 1
; CHECK-NEXT: addis 3, 2, .LCPI2_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI2_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI2_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI2_1@toc@l
-; CHECK-NEXT: lvx 18, 0, 3
-; CHECK-NEXT: lvx 19, 0, 4
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 50, 34
+; CHECK-NEXT: lvx 19, 0, 3
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vadduwm 2, 2, 19
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
ret <4 x i32> %add
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI3_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI3_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI3_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI3_1@toc@l
; CHECK-NEXT: lvx 19, 0, 3
-; CHECK-NEXT: lvx 4, 0, 4
-; CHECK-NEXT: xxsel 34, 36, 51, 34
+; CHECK-NEXT: vsubuwm 2, 19, 2
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
; CHECK-NEXT: addis 3, 2, .LCPI4_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI4_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI4_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI4_1@toc@l
-; CHECK-NEXT: lvx 18, 0, 3
-; CHECK-NEXT: lvx 19, 0, 4
+; CHECK-NEXT: lvx 19, 0, 3
; CHECK-NEXT: vsubuwm 3, 4, 3
; CHECK-NEXT: vslw 2, 2, 3
; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 50, 34
+; CHECK-NEXT: vadduwm 2, 2, 19
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
ret <4 x i32> %add
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI5_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI5_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI5_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI5_1@toc@l
; CHECK-NEXT: lvx 19, 0, 3
-; CHECK-NEXT: lvx 4, 0, 4
-; CHECK-NEXT: xxsel 34, 36, 51, 34
+; CHECK-NEXT: vadduwm 2, 2, 19
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
; CHECK: # BB#0:
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
; CHECK-NEXT: vsubuwm 3, 4, 3
; CHECK-NEXT: vslw 2, 2, 3
; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 0, 51, 34
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x i32> %add
; CHECK-LABEL: cmp_sel_minus1_or_0_vec:
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 0, 51, 34
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_0_or_minus1_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: vspltisw 3, 1
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vadduwm 2, 2, 4
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %add
; CHECK-LABEL: cmp_sel_0_or_minus1_vec:
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: xxlnor 34, 34, 34
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_1_or_0_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 0, 51, 34
+; CHECK-NEXT: vspltisw 3, 1
+; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x i32> %add
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 0, 51, 34
+; CHECK-NEXT: xxland 34, 34, 51
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_0_or_1_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: vspltisw 3, 1
+; CHECK-NEXT: xxlandc 34, 35, 34
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %add
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: xxlnor 0, 34, 34
+; CHECK-NEXT: xxland 34, 0, 51
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm1
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22706:
; AVX2: ## BB#0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; AVX2-NEXT: vpblendvb %ymm0, {{.*}}(%rip), %ymm1, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
%tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
ret <32 x i8> %tmp
; Each minimal select test is repeated with a more typical pattern that includes a compare to
; generate the condition value.
+; TODO: If we don't have blendv, this can definitely be improved. There's also a selection of
+; chips where it makes sense to transform the general case blendv to 2 bit-ops. That should be
+; a uarch-specific transform. At some point (Ryzen?), the implementation should catch up to the
+; architecture, so blendv is as fast as a single bit-op.
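+; The 2-bit-op form of the general case referenced above would be:
+;   xor (and (sext Cond), (C1 ^ C2)), C2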
+
define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_C1_or_C2_vec:
; SSE: # BB#0:
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_Cplus1_or_C_vec:
; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_Cplus1_or_C_vec:
; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
ret <4 x i32> %add
; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
; SSE: # BB#0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; SSE-NEXT: psubd %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
; AVX: # BB#0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
; SSE: # BB#0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_Cminus1_or_C_vec:
; AVX: # BB#0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
ret <4 x i32> %add
; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
; SSE: # BB#0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
; AVX: # BB#0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_minus1_vec:
; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_0_or_minus1_vec:
; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %add
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_1_vec:
; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm0
+; SSE-NEXT: andnps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_0_or_1_vec:
; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1,1,1,1]
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %add
define <2 x i16> @compare_v2i64_to_v2i16(<2 x i16>* %src) nounwind {
; X86-LABEL: compare_v2i64_to_v2i16:
; X86: # BB#0:
-; X86-NEXT: movaps {{.*#+}} xmm0 = [65535,0,65535,0]
+; X86-NEXT: pcmpeqd %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: compare_v2i64_to_v2i16:
; X64: # BB#0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = [65535,65535]
+; X64-NEXT: pcmpeqd %xmm0, %xmm0
; X64-NEXT: retq
%val = load <2 x i16>, <2 x i16>* %src, align 4
%cmp = icmp uge <2 x i16> %val, %val