Ops.push_back(IsAndN ? N1 : N0);
return true;
}
+ case X86ISD::PINSRW: {
+ // Attempt to recognise a PINSRW(ASSERTZEXT(PEXTRW)) shuffle pattern.
+ // The insertion of an extracted+zero-extended lane is equivalent to a
+ // two-input shuffle that takes lane ExIdx of ExVec into lane InIdx.
+ // TODO: Expand this to support PINSRB/INSERT_VECTOR_ELT/etc.
+ SDValue InVec = N.getOperand(0);
+ SDValue InScl = N.getOperand(1);
+ uint64_t InIdx = N.getConstantOperandVal(2);
+ // InIdx is unsigned, so only the upper bound needs checking.
+ assert(InIdx < NumElts && "Illegal insertion index");
+ if (InScl.getOpcode() != ISD::AssertZext ||
+ InScl.getOperand(0).getOpcode() != X86ISD::PEXTRW)
+ return false;
+
+ SDValue ExVec = InScl.getOperand(0).getOperand(0);
+ uint64_t ExIdx = InScl.getOperand(0).getConstantOperandVal(1);
+ // ExIdx is unsigned, so only the upper bound needs checking.
+ assert(ExIdx < NumElts && "Illegal extraction index");
+ Ops.push_back(InVec);
+ Ops.push_back(ExVec);
+ // Identity mask except for the inserted lane, which reads from Ops[1].
+ for (unsigned i = 0; i != NumElts; ++i)
+ Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
+ return true;
+ }
case X86ISD::VSHLI:
case X86ISD::VSRLI: {
uint64_t ShiftVal = N.getConstantOperandVal(1);
;
; AVX-LABEL: _clearupper8xi16a:
; AVX: # BB#0:
-; AVX-NEXT: vpextrw $1, %xmm0, %eax
-; AVX-NEXT: vpextrw $2, %xmm0, %ecx
-; AVX-NEXT: vpextrw $3, %xmm0, %edx
-; AVX-NEXT: vpextrw $4, %xmm0, %esi
-; AVX-NEXT: vpextrw $5, %xmm0, %edi
-; AVX-NEXT: vpextrw $6, %xmm0, %r8d
-; AVX-NEXT: vpextrw $7, %xmm0, %r9d
-; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $3, %edx, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $4, %esi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $5, %edi, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $6, %r8d, %xmm0, %xmm0
-; AVX-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %0, i32 0
%x1 = extractelement <8 x i16> %0, i32 1