  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
  setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
  setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
+  setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::VSELECT);
  setTargetDAGCombine(ISD::SELECT);
  return SDValue();
}
+static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
+                                       TargetLowering::DAGCombinerInfo &DCI,
+                                       const X86Subtarget &Subtarget) {
+  // Don't simplify until after operation legalization.
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  MVT OpVT = N->getSimpleValueType(0);
+
+  // Extracting any subvector of an all-zeros vector yields all zeros.
+  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
+    return getZeroVector(OpVT, Subtarget, DAG, SDLoc(N));
+
+  // Likewise, extracting from an all-ones vector yields all ones. For vXi1
+  // mask types build the splat constant directly; wider types go through
+  // getOnesVector.
+  if (ISD::isBuildVectorAllOnes(N->getOperand(0).getNode())) {
+    if (OpVT.getScalarType() == MVT::i1)
+      return DAG.getConstant(1, SDLoc(N), OpVT);
+    return getOnesVector(OpVT, Subtarget, DAG, SDLoc(N));
+  }
+
+  return SDValue();
+}
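A minimal sketch of the pattern the new combine folds (a hypothetical test, not part of this patch; the function name is invented), assuming an AVX-512 target so the wide type is legal. The shufflevector selecting the contiguous low half lowers to an EXTRACT_SUBVECTOR of the all-zeros BUILD_VECTOR, which now folds to a narrow zero vector directly instead of materializing the wide zero and extracting from it:

define <8 x i32> @extract_low_half_of_zeros() {
  %lo = shufflevector <16 x i32> zeroinitializer, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %lo
}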
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
    return combineExtractVectorElt_SSE(N, DAG, DCI, Subtarget);
  case ISD::INSERT_SUBVECTOR:
    return combineInsertSubvector(N, DAG, DCI, Subtarget);
+  case ISD::EXTRACT_SUBVECTOR:
+    return combineExtractSubvector(N, DAG, DCI, Subtarget);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovaps %xmm0, (%eax)
; CHECK-NEXT: movl $0, (%eax)
-; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retl
%vext.i = shufflevector <2 x i64> undef, <2 x i64> undef, <3 x i32> <i32 0, i32 1, i32 undef>
%vecinit8.i = shufflevector <3 x i64> zeroinitializer, <3 x i64> %vext.i, <3 x i32> <i32 0, i32 3, i32 4>
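The gather test updates below exercise the vXi1 path of the combine. A hypothetical reduced form (names invented, not from this patch):

define <8 x i1> @extract_high_half_of_ones_mask() {
  %m = shufflevector <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <8 x i1> %m
}

Once the upper half of the all-ones mask folds to an all-ones v8i1 constant, both half-gathers use the same mask and address, so CSE keeps a single vgatherqps and the kshiftrw that used to split the mask drops out, as the updated CHECK lines show.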
; KNL_64-NEXT: vpsllq $2, %zmm1, %zmm1
; KNL_64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
-; KNL_64-NEXT: kshiftrw $8, %k1, %k2
-; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm1 {%k2}
-; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm2 {%k1}
-; KNL_64-NEXT: vinsertf64x4 $1, %ymm1, %zmm2, %zmm0
+; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm1 {%k1}
+; KNL_64-NEXT: vinsertf64x4 $1, %ymm1, %zmm1, %zmm0
; KNL_64-NEXT: retq
;
; KNL_32-LABEL: test14:
; SKX-NEXT: vpsllq $2, %zmm1, %zmm1
; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0
; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $8, %k1, %k2
-; SKX-NEXT: vgatherqps (,%zmm0), %ymm1 {%k2}
-; SKX-NEXT: vgatherqps (,%zmm0), %ymm2 {%k1}
-; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm2, %zmm0
+; SKX-NEXT: vgatherqps (,%zmm0), %ymm1 {%k1}
+; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm1, %zmm0
; SKX-NEXT: retq
;
; SKX_32-LABEL: test14:
; KNL_64-NEXT: kxnorw %k0, %k0, %k1
; KNL_64-NEXT: kxnorw %k0, %k0, %k2
; KNL_64-NEXT: vpgatherqq (,%zmm0), %zmm2 {%k2}
-; KNL_64-NEXT: kshiftrw $8, %k1, %k1
; KNL_64-NEXT: vpgatherqq (,%zmm1), %zmm3 {%k1}
; KNL_64-NEXT: vmovdqa64 %zmm2, %zmm0
; KNL_64-NEXT: vmovdqa64 %zmm3, %zmm1
; SKX-NEXT: kxnorw %k0, %k0, %k1
; SKX-NEXT: kxnorw %k0, %k0, %k2
; SKX-NEXT: vpgatherqq (,%zmm0), %zmm2 {%k2}
-; SKX-NEXT: kshiftrw $8, %k1, %k1
; SKX-NEXT: vpgatherqq (,%zmm1), %zmm3 {%k1}
; SKX-NEXT: vmovdqa64 %zmm2, %zmm0
; SKX-NEXT: vmovdqa64 %zmm3, %zmm1
; CHECK: # BB#0:
; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, (%rax)
-; CHECK-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; CHECK-NEXT: vmovapd (%rdi), %zmm1
-; CHECK-NEXT: vmovapd 64(%rdi), %zmm2
-; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k1
-; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovapd %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vmovapd %zmm2, 64(%rdi)
-; CHECK-NEXT: vmovapd %zmm1, (%rdi)
store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8>* undef
%load_mask8.i.i.i = load <16 x i8>, <16 x i8>* undef
%v.i.i.i.i = load <16 x double>, <16 x double>* %ptr