}
// Attempt to match against broadcast-from-vector.
- // TODO: Add (partial) AVX1 support.
- if (Subtarget.hasAVX2() && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
+ // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
+ if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
+ && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
- if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
- return SDValue(); // Nothing to do!
- Res = DAG.getBitcast(MaskVT, V1);
- Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
- return DAG.getBitcast(RootVT, Res);
+ if (V1.getValueType() == MaskVT &&
+ V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
+ MayFoldLoad(V1.getOperand(0))) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
+ return SDValue(); // Nothing to do!
+ Res = V1.getOperand(0);
+ Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
+ return DAG.getBitcast(RootVT, Res);
+ }
+ if (Subtarget.hasAVX2()) {
+ if (Depth == 1 && Root.getOpcode() == X86ISD::VBROADCAST)
+ return SDValue(); // Nothing to do!
+ Res = DAG.getBitcast(MaskVT, V1);
+ Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
+ return DAG.getBitcast(RootVT, Res);
+ }
}
}
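For context, a minimal IR sketch (not a test taken from this patch; function and value names are made up) of the pattern the new AVX1 guard is aimed at: a 64-bit scalar that is loaded and then splatted. Per the updated AVX1 checks below, such a splat is expected to select a single vmovddup from memory rather than a vmovsd load followed by a vpermilps shuffle. It can be tried with e.g. llc -mtriple=x86_64-- -mattr=+avx.

define <2 x i64> @splat_load_i64(i64* %p) {
  %s = load i64, i64* %p, align 8                               ; 64-bit scalar load (single use, so it is foldable)
  %v = insertelement <2 x i64> undef, i64 %s, i32 0             ; scalar_to_vector of the loaded value
  %splat = shufflevector <2 x i64> %v, <2 x i64> undef, <2 x i32> zeroinitializer  ; splat lane 0
  ret <2 x i64> %splat
}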
;
; X64-LABEL: G:
; X64: ## %bb.0: ## %entry
-; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; X64-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; X64-NEXT: retq
entry:
%q = load i64, i64* %ptr, align 8
;
; X64-AVX-LABEL: t4:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: movq (%rdi), %rax
+; X64-AVX-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
+; X64-AVX-NEXT: vpextrq $1, %xmm0, %rax
; X64-AVX-NEXT: retq
%b = load <2 x double>, <2 x double>* %a, align 16
%c = shufflevector <2 x double> %b, <2 x double> %b, <2 x i32> <i32 1, i32 0>
;
; X32AVX1-LABEL: elt7_v8i32:
; X32AVX1: # %bb.0:
-; X32AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0]
+; X32AVX1-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
; X32AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; X32AVX1-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4,5,6],ymm0[7]
; X32AVX1-NEXT: retl
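The elt7_v8i32 diff above shows the 32-bit form on AVX1: the scalar being inserted is now broadcast straight from its stack slot with vbroadcastss. The memory-operand form of vbroadcastss already exists on AVX1 (the register-source form, like the integer vpbroadcast* family, needs AVX2), which is why the new path insists on a foldable load. A hypothetical plain 32-bit splat exercising the same idea (again, not a test from this patch):

define <8 x float> @splat_load_f32(float* %p) {
  %s = load float, float* %p, align 4                           ; 32-bit scalar load
  %v = insertelement <8 x float> undef, float %s, i32 0         ; scalar_to_vector of the loaded value
  %splat = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> zeroinitializer  ; splat lane 0
  ret <8 x float> %splat
}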
;
; AVX1-LABEL: load64_ins_eltc_v2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load64_ins_eltc_v2i64:
;
; AVX1-LABEL: load_i64_v2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_i64_v2i64:
;
; AVX1-LABEL: insert_dup_mem_v2i64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_dup_mem_v2i64:
;
; AVX1-LABEL: load_splat_4i32_2i32_0101:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_splat_4i32_2i32_0101:
;
; AVX1-LABEL: load_splat_8i32_2i32_0101:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
;
; AVX1-LABEL: load_splat_16i32_2i32_0101:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,0,1]
+; AVX1-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vmovaps %ymm0, %ymm1
; AVX1-NEXT: retq