return DAG.getNode(SelectOpcode, DL, VT, IsOp0Nan, Op1, MinOrMax);
}
+/// Do target-specific dag combines on X86ISD::ANDNP nodes.
+/// X86ISD::ANDNP(a, b) computes (~a & b), so an all-zeros build_vector in
+/// either operand makes the result trivially known and the node foldable.
+static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
+ const X86Subtarget &Subtarget) {
+ // ANDNP(0, x) -> x
+ // ~0 & x == x: the node is a no-op, forward the second operand unchanged.
+ if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
+ return N->getOperand(1);
+
+ // ANDNP(x, 0) -> 0
+ // ~x & 0 == 0: replace with a canonical zero vector of the result type.
+ if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
+ return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
+
+ // No fold matched; return an empty SDValue so the combiner leaves N alone.
+ return SDValue();
+}
+
static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
// BT ignores high bits in the bit index operand.
case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
+ case X86ISD::ANDNP: return combineAndnp(N, DAG, Subtarget);
case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
case X86ISD::FXOR:
; SSE-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pxor %xmm2, %xmm2
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: pandn %xmm2, %xmm1
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: _clearupper8xi16b:
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: pandn %xmm2, %xmm1
; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm2, %xmm2
-; SSE2-NEXT: pandn %xmm2, %xmm1
-; SSE2-NEXT: por %xmm1, %xmm0
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE3-NEXT: movd %eax, %xmm2
; SSE3-NEXT: pandn %xmm2, %xmm1
; SSE3-NEXT: por %xmm1, %xmm0
-; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE3-NEXT: pand %xmm1, %xmm0
-; SSE3-NEXT: pxor %xmm2, %xmm2
-; SSE3-NEXT: pandn %xmm2, %xmm1
-; SSE3-NEXT: por %xmm1, %xmm0
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v16i8_z123456789ABCDEz:
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: pandn %xmm3, %xmm2
; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255]
-; SSE2-NEXT: pand %xmm4, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm4
-; SSE2-NEXT: por %xmm4, %xmm1
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: pandn %xmm3, %xmm2
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm1
+; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT: andps {{.*}}(%rip), %xmm1
; SSE2-NEXT: retq
;
; SSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; SSE3-NEXT: movd %eax, %xmm3
; SSE3-NEXT: pandn %xmm3, %xmm2
; SSE3-NEXT: por %xmm2, %xmm0
-; SSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0]
-; SSE3-NEXT: pand %xmm2, %xmm0
-; SSE3-NEXT: pxor %xmm3, %xmm3
-; SSE3-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255]
-; SSE3-NEXT: pand %xmm4, %xmm1
-; SSE3-NEXT: pandn %xmm3, %xmm4
-; SSE3-NEXT: por %xmm4, %xmm1
-; SSE3-NEXT: pand %xmm2, %xmm1
-; SSE3-NEXT: pandn %xmm3, %xmm2
-; SSE3-NEXT: por %xmm2, %xmm0
-; SSE3-NEXT: por %xmm2, %xmm1
+; SSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE3-NEXT: andps {{.*}}(%rip), %xmm1
; SSE3-NEXT: retq
;
; SSSE3-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz: