From c8f9cf9e2661978f2c3cd751ac07ca3b551f30cf Mon Sep 17 00:00:00 2001
From: Sanjay Patel <spatel@rotateright.com>
Date: Thu, 24 Aug 2017 23:24:43 +0000
Subject: [PATCH] [DAG] convert vector select-of-constants to logic/math

This goes back to a discussion about IR canonicalization. We'd like to
preserve and convert more IR to 'select' than we currently do because
that's likely the best choice in IR:
http://lists.llvm.org/pipermail/llvm-dev/2016-September/105335.html
...but that's often not true for codegen, so we need to account for this
pattern coming into the backend and transform it to better DAG ops.

Steps in this patch:

1. Add an EVT param to the existing convertSelectOfConstantsToMath() TLI
   hook to more finely enable this transform. Other targets will probably
   want that anyway to distinguish scalars from vectors. We're using that
   here to exclude AVX512 targets, but it may not be necessary.

2. Convert a vselect to ext+add. This eliminates a constant
   load/materialization, and the vector ext is often free.

Implementing a more general fold using xor+and can be a follow-up for
targets that don't have a legal vselect. It's also possible that we can
remove the TLI hook for the special case fold implemented here because
we're eliminating a constant, but it needs to be tested on other targets.

Differential Revision: https://reviews.llvm.org/D36840

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@311731 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Target/TargetLowering.h      |  2 +-
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp  | 59 ++++++++++++++++-
 lib/Target/PowerPC/PPCISelLowering.h      |  2 +-
 lib/Target/X86/X86ISelLowering.cpp        |  9 +++
 lib/Target/X86/X86ISelLowering.h          |  4 +-
 test/CodeGen/PowerPC/vselect-constants.ll | 80 ++++++-----------------
 test/CodeGen/X86/vselect-avx.ll           | 11 ++--
 test/CodeGen/X86/vselect-constants.ll     | 65 +++++++-----------
 test/CodeGen/X86/widen_compare-1.ll       |  4 +-
 9 files changed, 122 insertions(+), 114 deletions(-)

diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 4c8a220eac2..df9e8eee20b 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -1591,7 +1591,7 @@ public:
   /// Return true if a select of constants (select Cond, C1, C2) should be
   /// transformed into simple math ops with the condition value. For example:
   /// select Cond, C1, C1-1 --> add (zext Cond), C1-1
-  virtual bool convertSelectOfConstantsToMath() const {
+  virtual bool convertSelectOfConstantsToMath(EVT VT) const {
     return false;
   }
 
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index e6f2eb5480e..6e106bb869a 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -348,6 +348,7 @@ namespace {
     SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt);
 
     SDValue foldSelectOfConstants(SDNode *N);
+    SDValue foldVSelectOfConstants(SDNode *N);
     SDValue foldBinOpIntoSelect(SDNode *BO);
     bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS);
     SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N);
@@ -6191,7 +6192,7 @@ SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {
   // For any constants that differ by 1, we can transform the select into an
   // extend and add. Use a target hook because some targets may prefer to
   // transform in the other direction.
-  if (TLI.convertSelectOfConstantsToMath()) {
+  if (TLI.convertSelectOfConstantsToMath(VT)) {
     if (C1->getAPIntValue() - 1 == C2->getAPIntValue()) {
       // select Cond, C1, C1-1 --> add (zext Cond), C1-1
       if (VT != MVT::i1)
@@ -6760,6 +6761,57 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) {
   return SDValue();
 }
 
+/// A vector select of 2 constant vectors can be simplified to math/logic to
+/// avoid a variable select instruction and possibly avoid constant loads.
+SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
+  SDValue Cond = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  SDValue N2 = N->getOperand(2);
+  EVT VT = N->getValueType(0);
+  if (!Cond.hasOneUse() || Cond.getScalarValueSizeInBits() != 1 ||
+      !TLI.convertSelectOfConstantsToMath(VT) ||
+      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()) ||
+      !ISD::isBuildVectorOfConstantSDNodes(N2.getNode()))
+    return SDValue();
+
+  // Check if we can use the condition value to increment/decrement a single
+  // constant value. This simplifies a select to an add and removes a constant
+  // load/materialization from the general case.
+  bool AllAddOne = true;
+  bool AllSubOne = true;
+  unsigned Elts = VT.getVectorNumElements();
+  for (unsigned i = 0; i != Elts; ++i) {
+    SDValue N1Elt = N1.getOperand(i);
+    SDValue N2Elt = N2.getOperand(i);
+    if (N1Elt.isUndef() || N2Elt.isUndef())
+      continue;
+
+    const APInt &C1 = cast<ConstantSDNode>(N1Elt)->getAPIntValue();
+    const APInt &C2 = cast<ConstantSDNode>(N2Elt)->getAPIntValue();
+    if (C1 != C2 + 1)
+      AllAddOne = false;
+    if (C1 != C2 - 1)
+      AllSubOne = false;
+  }
+
+  // Further simplifications for the extra-special cases where the constants are
+  // all 0 or all -1 should be implemented as folds of these patterns.
+  SDLoc DL(N);
+  if (AllAddOne || AllSubOne) {
+    // vselect <N x i1> Cond, C+1, C --> add (zext Cond), C
+    // vselect <N x i1> Cond, C-1, C --> add (sext Cond), C
+    auto ExtendOpcode = AllAddOne ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
+    SDValue ExtendedCond = DAG.getNode(ExtendOpcode, DL, VT, Cond);
+    return DAG.getNode(ISD::ADD, DL, VT, ExtendedCond, N2);
+  }
+
+  // The general case for select-of-constants:
+  // vselect Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
+  // ...but that only makes sense if a vselect is slower than 2 logic ops, so
+  // leave that to a machine-specific pass.
+  return SDValue();
+}
+
 SDValue DAGCombiner::visitVSELECT(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
@@ -6824,6 +6876,9 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
       return CV;
   }
 
+  if (SDValue V = foldVSelectOfConstants(N))
+    return V;
+
   return SDValue();
 }
 
@@ -7409,7 +7464,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
         SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true))
       return SCC;
 
-    if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath()) {
+    if (!VT.isVector() && !TLI.convertSelectOfConstantsToMath(VT)) {
       EVT SetCCVT = getSetCCResultType(N00VT);
       // Don't do this transform for i1 because there's a select transform
       // that would reverse it.
diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h
index e2f3812e2ff..b5577a40c4a 100644
--- a/lib/Target/PowerPC/PPCISelLowering.h
+++ b/lib/Target/PowerPC/PPCISelLowering.h
@@ -765,7 +765,7 @@ namespace llvm {
     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                            Type *Ty) const override;
 
-    bool convertSelectOfConstantsToMath() const override {
+    bool convertSelectOfConstantsToMath(EVT VT) const override {
       return true;
     }
 
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index fdfdde3767c..f570f1991b3 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -4574,6 +4574,15 @@ bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
   return true;
 }
 
+bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
+  // TODO: It might be a win to ease or lift this restriction, but the generic
+  // folds in DAGCombiner conflict with vector folds for an AVX512 target.
+  if (VT.isVector() && Subtarget.hasAVX512())
+    return false;
+
+  return true;
+}
+
 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                 unsigned Index) const {
   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 7307af100a6..53cd8ca5361 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -1030,9 +1030,7 @@ namespace llvm {
     bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                            Type *Ty) const override;
 
-    bool convertSelectOfConstantsToMath() const override {
-      return true;
-    }
+    bool convertSelectOfConstantsToMath(EVT VT) const override;
 
     /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
     /// with this index.
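
To make the ext+add fold concrete before the test diffs, here is a minimal IR
sketch (the function name and constants are invented for illustration, not
taken from the patch). It relies on zext of an i1 true lane producing 1 and
sext producing -1, which is why an extend plus add reproduces the select:

  ; vselect Cond, C+1, C --> add (zext Cond), C
  ; vselect Cond, C-1, C --> add (sext Cond), C
  define <4 x i32> @ext_add_sketch(<4 x i1> %cond) {
    %r = select <4 x i1> %cond, <4 x i32> <i32 6, i32 6, i32 6, i32 6>, <4 x i32> <i32 5, i32 5, i32 5, i32 5>
    ; expected combine (shown as IR): %z = zext <4 x i1> %cond to <4 x i32>
    ;                                 %r = add <4 x i32> %z, <i32 5, i32 5, i32 5, i32 5>
    ret <4 x i32> %r
  }
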
diff --git a/test/CodeGen/PowerPC/vselect-constants.ll b/test/CodeGen/PowerPC/vselect-constants.ll
index 2dbe12e882d..077eb2defc0 100644
--- a/test/CodeGen/PowerPC/vselect-constants.ll
+++ b/test/CodeGen/PowerPC/vselect-constants.ll
@@ -47,18 +47,12 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_Cplus1_or_C_vec:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 15
+; CHECK-NEXT:    vspltisw 3, 1
 ; CHECK-NEXT:    addis 3, 2, .LCPI2_0@toc@ha
-; CHECK-NEXT:    addis 4, 2, .LCPI2_1@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI2_0@toc@l
-; CHECK-NEXT:    addi 4, 4, .LCPI2_1@toc@l
-; CHECK-NEXT:    lvx 18, 0, 3
-; CHECK-NEXT:    lvx 19, 0, 4
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    vsraw 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 51, 50, 34
+; CHECK-NEXT:    lvx 19, 0, 3
+; CHECK-NEXT:    xxland 34, 34, 35
+; CHECK-NEXT:    vadduwm 2, 2, 19
 ; CHECK-NEXT:    blr
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
   ret <4 x i32> %add
@@ -69,12 +63,9 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI3_0@toc@ha
-; CHECK-NEXT:    addis 4, 2, .LCPI3_1@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI3_0@toc@l
-; CHECK-NEXT:    addi 4, 4, .LCPI3_1@toc@l
 ; CHECK-NEXT:    lvx 19, 0, 3
-; CHECK-NEXT:    lvx 4, 0, 4
-; CHECK-NEXT:    xxsel 34, 36, 51, 34
+; CHECK-NEXT:    vsubuwm 2, 19, 2
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
@@ -87,15 +78,12 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 15
 ; CHECK-NEXT:    addis 3, 2, .LCPI4_0@toc@ha
-; CHECK-NEXT:    addis 4, 2, .LCPI4_1@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI4_0@toc@l
-; CHECK-NEXT:    addi 4, 4, .LCPI4_1@toc@l
-; CHECK-NEXT:    lvx 18, 0, 3
-; CHECK-NEXT:    lvx 19, 0, 4
+; CHECK-NEXT:    lvx 19, 0, 3
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
 ; CHECK-NEXT:    vslw 2, 2, 3
 ; CHECK-NEXT:    vsraw 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 51, 50, 34
+; CHECK-NEXT:    vadduwm 2, 2, 19
 ; CHECK-NEXT:    blr
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
   ret <4 x i32> %add
@@ -106,12 +94,9 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    addis 3, 2, .LCPI5_0@toc@ha
-; CHECK-NEXT:    addis 4, 2, .LCPI5_1@toc@ha
 ; CHECK-NEXT:    addi 3, 3, .LCPI5_0@toc@l
-; CHECK-NEXT:    addi 4, 4, .LCPI5_1@toc@l
 ; CHECK-NEXT:    lvx 19, 0, 3
-; CHECK-NEXT:    lvx 4, 0, 4
-; CHECK-NEXT:    xxsel 34, 36, 51, 34
+; CHECK-NEXT:    vadduwm 2, 2, 19
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
@@ -123,12 +108,9 @@ define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vspltisw 3, -16
 ; CHECK-NEXT:    vspltisw 4, 15
-; CHECK-NEXT:    vspltisb 19, -1
-; CHECK-NEXT:    xxlxor 0, 0, 0
 ; CHECK-NEXT:    vsubuwm 3, 4, 3
 ; CHECK-NEXT:    vslw 2, 2, 3
 ; CHECK-NEXT:    vsraw 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 0, 51, 34
 ; CHECK-NEXT:    blr
   %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
   ret <4 x i32> %add
@@ -138,9 +120,6 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_minus1_or_0_vec:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
-; CHECK-NEXT:    vspltisb 19, -1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    xxsel 34, 0, 51, 34
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -150,14 +129,10 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_0_or_minus1_vec:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 15
-; CHECK-NEXT:    vspltisb 19, -1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    vsraw 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 51, 0, 34
+; CHECK-NEXT:    vspltisw 3, 1
+; CHECK-NEXT:    vspltisb 4, -1
+; CHECK-NEXT:    xxland 34, 34, 35
+; CHECK-NEXT:    vadduwm 2, 2, 4
 ; CHECK-NEXT:    blr
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
   ret <4 x i32> %add
@@ -167,9 +142,7 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: cmp_sel_0_or_minus1_vec:
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
-; CHECK-NEXT:    vspltisb 19, -1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    xxsel 34, 51, 0, 34
+; CHECK-NEXT:    xxlnor 34, 34, 34
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -179,14 +152,8 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_1_or_0_vec:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 15
-; CHECK-NEXT:    vspltisw 19, 1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    vsraw 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 0, 51, 34
+; CHECK-NEXT:    vspltisw 3, 1
+; CHECK-NEXT:    xxland 34, 34, 35
 ; CHECK-NEXT:    blr
   %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
   ret <4 x i32> %add
@@ -197,8 +164,7 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    vspltisw 19, 1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    xxsel 34, 0, 51, 34
+; CHECK-NEXT:    xxland 34, 34, 51
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -208,14 +174,8 @@
 define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
 ; CHECK-LABEL: sel_0_or_1_vec:
 ; CHECK:       # BB#0:
-; CHECK-NEXT:    vspltisw 3, -16
-; CHECK-NEXT:    vspltisw 4, 15
-; CHECK-NEXT:    vspltisw 19, 1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    vsubuwm 3, 4, 3
-; CHECK-NEXT:    vslw 2, 2, 3
-; CHECK-NEXT:    vsraw 2, 2, 3
-; CHECK-NEXT:    xxsel 34, 51, 0, 34
+; CHECK-NEXT:    vspltisw 3, 1
+; CHECK-NEXT:    xxlandc 34, 35, 34
 ; CHECK-NEXT:    blr
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %add
@@ -226,8 +186,8 @@ define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK:       # BB#0:
 ; CHECK-NEXT:    vcmpequw 2, 2, 3
 ; CHECK-NEXT:    vspltisw 19, 1
-; CHECK-NEXT:    xxlxor 0, 0, 0
-; CHECK-NEXT:    xxsel 34, 51, 0, 34
+; CHECK-NEXT:    xxlnor 0, 34, 34
+; CHECK-NEXT:    xxland 34, 0, 51
 ; CHECK-NEXT:    blr
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
diff --git a/test/CodeGen/X86/vselect-avx.ll b/test/CodeGen/X86/vselect-avx.ll
index 5825a56b6f9..603437e35ea 100644
--- a/test/CodeGen/X86/vselect-avx.ll
+++ b/test/CodeGen/X86/vselect-avx.ll
@@ -151,21 +151,22 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
 ; AVX1-NEXT:    vpand %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT:    vpcmpgtb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; AVX1-NEXT:    vpaddb %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT:    vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT:    vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpgtb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT:    vpaddb %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    vandnps {{.*}}(%rip), %ymm0, %ymm1
-; AVX1-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT:    vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: PR22706:
 ; AVX2:       ## BB#0:
 ; AVX2-NEXT:    vpsllw $7, %ymm0, %ymm0
 ; AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; AVX2-NEXT:    vpblendvb %ymm0, {{.*}}(%rip), %ymm1, %ymm0
+; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpaddb {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
   ret <32 x i8> %tmp
diff --git a/test/CodeGen/X86/vselect-constants.ll b/test/CodeGen/X86/vselect-constants.ll
index 838c03500c6..4ce2ecfa739 100644
--- a/test/CodeGen/X86/vselect-constants.ll
+++ b/test/CodeGen/X86/vselect-constants.ll
@@ -8,6 +8,11 @@
 ; Each minimal select test is repeated with a more typical pattern that includes a compare to
 ; generate the condition value.
 
+; TODO: If we don't have blendv, this can definitely be improved. There's also a selection of
+; chips where it makes sense to transform the general case blendv to 2 bit-ops. That should be
+; a uarch-specific transform. At some point (Ryzen?), the implementation should catch up to the
+; architecture, so blendv is as fast as a single bit-op.
+
 define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_C1_or_C2_vec:
 ; SSE:       # BB#0:
@@ -53,19 +58,14 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_Cplus1_or_C_vec:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pslld $31, %xmm0
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
 ; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_Cplus1_or_C_vec:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
   ret <4 x i32> %add
@@ -75,17 +75,16 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; SSE-NEXT:    psubd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
@@ -97,17 +96,14 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
 ; SSE:       # BB#0:
 ; SSE-NEXT:    pslld $31, %xmm0
 ; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_Cminus1_or_C_vec:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
   ret <4 x i32> %add
@@ -117,17 +113,13 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
 ; SSE:       # BB#0:
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm0
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
-; SSE-NEXT:    por %xmm1, %xmm0
+; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT:    vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %cond = icmp eq <4 x i32> %x, %y
   %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
@@ -168,18 +160,16 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_0_or_minus1_vec:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pslld $31, %xmm0
-; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
-; SSE-NEXT:    pxor %xmm1, %xmm0
+; SSE-NEXT:    paddd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_0_or_minus1_vec:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
   ret <4 x i32> %add
@@ -238,17 +228,12 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_0_or_1_vec:
 ; SSE:       # BB#0:
-; SSE-NEXT:    pslld $31, %xmm0
-; SSE-NEXT:    psrad $31, %xmm0
-; SSE-NEXT:    pandn {{.*}}(%rip), %xmm0
+; SSE-NEXT:    andnps {{.*}}(%rip), %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sel_0_or_1_vec:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpslld $31, %xmm0, %xmm0
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vmovaps {{.*#+}} xmm2 = [1,1,1,1]
-; AVX-NEXT:    vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT:    vandnps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
   ret <4 x i32> %add
diff --git a/test/CodeGen/X86/widen_compare-1.ll b/test/CodeGen/X86/widen_compare-1.ll
index 8ea0db53a39..e8d993d2280 100644
--- a/test/CodeGen/X86/widen_compare-1.ll
+++ b/test/CodeGen/X86/widen_compare-1.ll
@@ -7,12 +7,12 @@
 define <2 x i16> @compare_v2i64_to_v2i16(<2 x i16>* %src) nounwind {
 ; X86-LABEL: compare_v2i64_to_v2i16:
 ; X86:       # BB#0:
-; X86-NEXT:    movaps {{.*#+}} xmm0 = [65535,0,65535,0]
+; X86-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: compare_v2i64_to_v2i16:
 ; X64:       # BB#0:
-; X64-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535]
+; X64-NEXT:    pcmpeqd %xmm0, %xmm0
 ; X64-NEXT:    retq
   %val = load <2 x i16>, <2 x i16>* %src, align 4
   %cmp = icmp uge <2 x i16> %val, %val
-- 
2.50.1
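
As a lane-wise sanity check on the general-case identity that foldVSelectOfConstants
leaves to a machine-specific pass: when a condition lane is true, sext yields
all-ones, so the 'and' keeps C1^C2 and the 'xor' against C2 recovers C1; when it
is false, the 'and' yields 0 and the 'xor' leaves C2. A hypothetical IR rendering
with invented constants C1 = <1,2,3,4> and C2 = <5,6,7,8>, so C1^C2 = <4,4,4,12>
(illustration only, not part of the patch):

  ; vselect Cond, C1, C2 --> xor (and (sext Cond), (C1^C2)), C2
  define <4 x i32> @xor_and_sketch(<4 x i1> %cond) {
    %s = sext <4 x i1> %cond to <4 x i32>
    %a = and <4 x i32> %s, <i32 4, i32 4, i32 4, i32 12>
    %r = xor <4 x i32> %a, <i32 5, i32 6, i32 7, i32 8>
    ; lane-wise equal to: select %cond, <1,2,3,4>, <5,6,7,8>
    ret <4 x i32> %r
  }
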