Ops.push_back(IsAndN ? N1 : N0);
return true;
}
+ case X86ISD::PINSRB:
case X86ISD::PINSRW: {
SDValue InVec = N.getOperand(0);
SDValue InScl = N.getOperand(1);
uint64_t InIdx = N.getConstantOperandVal(2);
assert(InIdx < NumElts && "Illegal insertion index");
- // Attempt to recognise a PINSRW(VEC, 0, Idx) shuffle pattern.
+ // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
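+ // e.g. for v8i16, PINSRW(V, 0, 2) becomes a one-input shuffle of V with the
+ // mask <0, 1, SM_SentinelZero, 3, 4, 5, 6, 7> (lane 2 forced to zero).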
if (X86::isZeroNode(InScl)) {
Ops.push_back(InVec);
for (unsigned i = 0; i != NumElts; ++i)
  Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
return true;
}
- // Attempt to recognise a PINSRW(ASSERTZEXT(PEXTRW)) shuffle pattern.
- // TODO: Expand this to support PINSRB/INSERT_VECTOR_ELT/etc.
+ // Attempt to recognise a PINSR*(ASSERTZEXT(PEXTR*)) shuffle pattern.
+ // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
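+ // e.g. PINSRW(X, ASSERTZEXT(PEXTRW(Y, 3)), 1) is a two-input shuffle that
+ // keeps every lane of X except lane 1, which is sourced from lane 3 of Y.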
+ unsigned ExOp =
+ (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
if (InScl.getOpcode() != ISD::AssertZext ||
- InScl.getOperand(0).getOpcode() != X86ISD::PEXTRW)
+ InScl.getOperand(0).getOpcode() != ExOp)
return false;
SDValue ExVec = InScl.getOperand(0).getOperand(0);
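// Sketch of how the remainder of this case finishes (assumed shape, by
// analogy with the zero-insert case above): both vectors are recorded as
// shuffle inputs and lane InIdx of the mask is redirected to the extracted
// lane of ExVec, e.g.:
//   uint64_t ExIdx = InScl.getOperand(0).getConstantOperandVal(1);
//   Ops.push_back(InVec);
//   Ops.push_back(ExVec);
//   for (unsigned i = 0; i != NumElts; ++i)
//     Mask.push_back(i == InIdx ? (int)(NumElts + ExIdx) : (int)i);
//   return true;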
; AVX-LABEL: _clearupper16xi8a:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $0, %xmm0, %eax
+; AVX-NEXT: vpextrb $1, %xmm0, %ecx
; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpextrb $1, %xmm0, %eax
-; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $2, %xmm0, %eax
-; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $3, %xmm0, %eax
-; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $4, %xmm0, %eax
-; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $5, %xmm0, %eax
-; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $6, %xmm0, %eax
-; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $7, %xmm0, %eax
-; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $8, %xmm0, %eax
-; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $9, %xmm0, %eax
-; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $10, %xmm0, %eax
-; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $11, %xmm0, %eax
-; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $12, %xmm0, %eax
-; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $13, %xmm0, %eax
-; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $14, %xmm0, %eax
-; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrb $15, %xmm0, %eax
-; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT: vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %0, i32 0
;
; AVX-LABEL: _clearupper8xi16b:
; AVX: # BB#0:
-; AVX-NEXT: xorl %eax, %eax
-; AVX-NEXT: vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%x8 = bitcast <8 x i16> %0 to <16 x i8>
%r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
; SSE41-NEXT: xorl %eax, %eax
; SSE41-NEXT: pinsrb $0, %eax, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
-; SSE41-NEXT: pinsrb $14, %eax, %xmm1
-; SSE41-NEXT: pinsrb $15, %eax, %xmm1
+; SSE41-NEXT: pxor %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
; AVX1-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
%1 = insertelement <32 x i8> %a, i8 0, i32 0