TARGET_BUILTIN(__builtin_ia32_pminuq512_mask, "V8LLiV8LLiV8LLiV8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmuldq512_mask, "V8LLiV16iV16iV8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pmuludq512_mask, "V8LLiV16iV16iV8LLiUc", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_blendmd_512_mask, "V16iV16iV16iUs", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_blendmq_512_mask, "V8LLiV8LLiV8LLiUc", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_blendmps_512_mask, "V16fV16fV16fUs", "", "avx512f")
-TARGET_BUILTIN(__builtin_ia32_blendmpd_512_mask, "V8dV8dV8dUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_ptestmd512, "UsV16iV16iUs", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_ptestmq512, "UcV8LLiV8LLiUc", "", "avx512f")
TARGET_BUILTIN(__builtin_ia32_pbroadcastd512, "V16iV4iV16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_orps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl,avx512dq")
TARGET_BUILTIN(__builtin_ia32_orps128_mask, "V4fV4fV4fV4fUc", "", "avx512vl,avx512dq")
-TARGET_BUILTIN(__builtin_ia32_blendmb_512_mask, "V64cV64cV64cULLi", "", "avx512bw")
-TARGET_BUILTIN(__builtin_ia32_blendmw_512_mask, "V32sV32sV32sUi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pabsb512_mask, "V64cV64cV64cULLi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pabsw512_mask, "V32sV32sV32sUi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_packssdw512_mask, "V32sV16iV16iV32sUi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_vplzcntd_512_mask, "V16iV16iV16iUs", "", "avx512cd")
TARGET_BUILTIN(__builtin_ia32_vplzcntq_512_mask, "V8LLiV8LLiV8LLiUc", "", "avx512cd")
-TARGET_BUILTIN(__builtin_ia32_blendmb_128_mask, "V16cV16cV16cUs", "", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_blendmb_256_mask, "V32cV32cV32cUi", "", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_blendmw_128_mask, "V8sV8sV8sUc", "", "avx512vl,avx512bw")
-TARGET_BUILTIN(__builtin_ia32_blendmw_256_mask, "V16sV16sV16sUs", "", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pabsb128_mask, "V16cV16cV16cUs", "", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pabsb256_mask, "V32cV32cV32cUi", "", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_pabsw128_mask, "V8sV8sV8sUc", "", "avx512vl,avx512bw")
TARGET_BUILTIN(__builtin_ia32_addpd256_mask, "V4dV4dV4dV4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_addps128_mask, "V4fV4fV4fV4fUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_addps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmd_128_mask, "V4iV4iV4iUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmd_256_mask, "V8iV8iV8iUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmpd_128_mask, "V2dV2dV2dUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmpd_256_mask, "V4dV4dV4dUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmps_128_mask, "V4fV4fV4fUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmps_256_mask, "V8fV8fV8fUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmq_128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl")
-TARGET_BUILTIN(__builtin_ia32_blendmq_256_mask, "V4LLiV4LLiV4LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressdf128_mask, "V2dV2dV2dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressdf256_mask, "V4dV4dV4dUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressdi128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_psrlw256_mask, "V16sV16sV8sV16sUs","","avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_psrlwi128_mask, "V8sV8sIiV8sUc","","avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_psrlwi256_mask, "V16sV16sIiV16sUs","","avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdqa32_128_mask, "V4iV4iV4iUc","","avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdqa32_256_mask, "V8iV8iV8iUc","","avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdqa32_512_mask, "V16iV16iV16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa32load128_mask, "V4iV4i*V4iUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa32load256_mask, "V8iV8i*V8iUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa32load512_mask, "V16iV16iC*V16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa32store512_mask, "vV16i*V16iUs","","avx512f")
-TARGET_BUILTIN(__builtin_ia32_movdqa64_512_mask, "V8LLiV8LLiV8LLiUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa64load512_mask, "V8LLiV8LLiC*V8LLiUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa64store512_mask, "vV8LLi*V8LLiUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa32store128_mask, "vV4i*V4iUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa32store256_mask, "vV8i*V8iUc","","avx512f")
-TARGET_BUILTIN(__builtin_ia32_movdqa64_128_mask, "V2LLiV2LLiV2LLiUc","","avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdqa64_256_mask, "V4LLiV4LLiV4LLiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64load128_mask, "V2LLiV2LLiC*V2LLiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64load256_mask, "V4LLiV4LLiC*V4LLiUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_movdqa64store128_mask, "vV2LLi*V2LLiUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movdqa64store256_mask, "vV4LLi*V4LLiUc","","avx512f")
-TARGET_BUILTIN(__builtin_ia32_movdquhi512_mask, "V32sV32sV32sUi","","avx512bw")
-TARGET_BUILTIN(__builtin_ia32_movdquqi512_mask, "V64cV64cV64cULLi","","avx512bw")
-TARGET_BUILTIN(__builtin_ia32_movdquhi128_mask, "V8sV8sV8sUc","","avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdquhi256_mask, "V16sV16sV16sUs","","avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdquqi128_mask, "V16cV16cV16cUs","","avx512bw,avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movdquqi256_mask, "V32cV32cV32cUi","","avx512bw,avx512vl")
TARGET_BUILTIN(__builtin_ia32_movddup512_mask, "V8dV8dV8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_movddup128_mask, "V2dV2dV2dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_movddup256_mask, "V4dV4dV4dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_expandsf512_mask, "V16fV16fV16fUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_expandsi512_mask, "V16iV16iV16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_cvtps2pd512_mask, "V8dV8fV8dUcIi","","avx512f")
-TARGET_BUILTIN(__builtin_ia32_movapd512_mask, "V8dV8dV8dUc","","avx512f")
-TARGET_BUILTIN(__builtin_ia32_movaps512_mask, "V16fV16fV16fUs","","avx512f")
-TARGET_BUILTIN(__builtin_ia32_movapd128_mask, "V2dV2dV2dUc","","avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movapd256_mask, "V4dV4dV4dUc","","avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movaps128_mask, "V4fV4fV4fUc","","avx512vl")
-TARGET_BUILTIN(__builtin_ia32_movaps256_mask, "V8fV8fV8fUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_compressstoredf512_mask, "vV8d*V8dUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compressstoredi512_mask, "vV8LLi*V8LLiUc","","avx512f")
TARGET_BUILTIN(__builtin_ia32_compressstoresf512_mask, "vV16f*V16fUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_vpmultishiftqb128_mask, "V16cV16cV16cV16cUs","","avx512vbmi,avx512vl")
TARGET_BUILTIN(__builtin_ia32_vpmultishiftqb256_mask, "V32cV32cV32cV32cUi","","avx512vbmi,avx512vl")
+// generic select intrinsics
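+// The prototype string lists the return type first and then the parameters,
+// e.g. "V16cUsV16cV16c" is: return a vector of 16 chars, taking an unsigned
+// short mask followed by the two vectors to select between.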
+TARGET_BUILTIN(__builtin_ia32_selectb_128, "V16cUsV16cV16c", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectb_256, "V32cUiV32cV32c", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectb_512, "V64cULLiV64cV64c", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectw_128, "V8sUcV8sV8s", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectw_256, "V16sUsV16sV16s", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectw_512, "V32sUiV32sV32s", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectd_128, "V4iUcV4iV4i", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectd_256, "V8iUcV8iV8i", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectd_512, "V16iUsV16iV16i", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectq_128, "V2LLiUcV2LLiV2LLi", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectq_256, "V4LLiUcV4LLiV4LLi", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectq_512, "V8LLiUcV8LLiV8LLi", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectps_128, "V4fUcV4fV4f", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectps_256, "V8fUcV8fV8f", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectps_512, "V16fUsV16fV16f", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectpd_128, "V2dUcV2dV2d", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectpd_256, "V4dUcV4dV4d", "", "")
+TARGET_BUILTIN(__builtin_ia32_selectpd_512, "V8dUcV8dV8d", "", "")
+
// MONITORX/MWAITX
TARGET_BUILTIN(__builtin_ia32_monitorx, "vv*UiUi", "", "mwaitx")
TARGET_BUILTIN(__builtin_ia32_mwaitx, "vUiUiUi", "", "mwaitx")
return Result;
}
-static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
- SmallVectorImpl<Value *> &Ops,
- unsigned Align) {
- // Cast the pointer to right type.
- Ops[0] = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
-
- // If the mask is all ones just emit a regular store.
- if (const auto *C = dyn_cast<Constant>(Ops[2]))
- if (C->isAllOnesValue())
- return CGF.Builder.CreateAlignedStore(Ops[1], Ops[0], Align);
+// Convert the mask from an integer type to a vector of i1.
+static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
+ unsigned NumElts) {
- // Convert the mask from an integer type to a vector of i1.
- unsigned NumElts = Ops[1]->getType()->getVectorNumElements();
llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- cast<IntegerType>(Ops[2]->getType())->getBitWidth());
- Ops[2] = CGF.Builder.CreateBitCast(Ops[2], MaskTy);
+ cast<IntegerType>(Mask->getType())->getBitWidth());
+ Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    int Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
- Ops[2] = CGF.Builder.CreateShuffleVector(Ops[2], Ops[2],
+ MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
makeArrayRef(Indices, NumElts),
"extract");
}
+ return MaskVec;
+}
+
+static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
+ SmallVectorImpl<Value *> &Ops,
+ unsigned Align) {
+  // Cast the pointer to the right type.
+ Ops[0] = CGF.Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()));
+
+ // If the mask is all ones just emit a regular store.
+ if (const auto *C = dyn_cast<Constant>(Ops[2]))
+ if (C->isAllOnesValue())
+ return CGF.Builder.CreateAlignedStore(Ops[1], Ops[0], Align);
+
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+ Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedStore(Ops[1], Ops[0], Align, Ops[2]);
+ return CGF.Builder.CreateMaskedStore(Ops[1], Ops[0], Align, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
if (C->isAllOnesValue())
return CGF.Builder.CreateAlignedLoad(Ops[0], Align);
- // Convert the mask from an integer type to a vector of i1.
- unsigned NumElts = Ops[1]->getType()->getVectorNumElements();
- llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
- cast<IntegerType>(Ops[2]->getType())->getBitWidth());
- Ops[2] = CGF.Builder.CreateBitCast(Ops[2], MaskTy);
+ Value *MaskVec = getMaskVecValue(CGF, Ops[2],
+ Ops[1]->getType()->getVectorNumElements());
- // If we have less than 8 elements, then the starting mask was an i8 and
- // we need to extract down to the right number of elements.
- if (NumElts < 8) {
- int Indices[4];
- for (unsigned i = 0; i != NumElts; ++i)
- Indices[i] = i;
- Ops[2] = CGF.Builder.CreateShuffleVector(Ops[2], Ops[2],
- makeArrayRef(Indices, NumElts),
- "extract");
- }
+ return CGF.Builder.CreateMaskedLoad(Ops[0], Align, MaskVec, Ops[1]);
+}
+
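+// Emit a vector select for the generic select builtins: lanes whose mask bit
+// is set take the value from Ops[1], the rest take the value from Ops[2].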
+static Value *EmitX86Select(CodeGenFunction &CGF,
+ SmallVectorImpl<Value *> &Ops) {
+
+  // If the mask is all ones just return the first argument.
+ if (const auto *C = dyn_cast<Constant>(Ops[0]))
+ if (C->isAllOnesValue())
+ return Ops[1];
+
+ Value *MaskVec = getMaskVecValue(CGF, Ops[0],
+ Ops[1]->getType()->getVectorNumElements());
- return CGF.Builder.CreateMaskedLoad(Ops[0], Align, Ops[2], Ops[1]);
+ return CGF.Builder.CreateSelect(MaskVec, Ops[1], Ops[2]);
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
SI->setAlignment(Align);
return SI;
}
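+  // The generic select builtins lower to a plain IR select on an i1 mask
+  // vector; see EmitX86Select above.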
+ case X86::BI__builtin_ia32_selectb_128:
+ case X86::BI__builtin_ia32_selectb_256:
+ case X86::BI__builtin_ia32_selectb_512:
+ case X86::BI__builtin_ia32_selectw_128:
+ case X86::BI__builtin_ia32_selectw_256:
+ case X86::BI__builtin_ia32_selectw_512:
+ case X86::BI__builtin_ia32_selectd_128:
+ case X86::BI__builtin_ia32_selectd_256:
+ case X86::BI__builtin_ia32_selectd_512:
+ case X86::BI__builtin_ia32_selectq_128:
+ case X86::BI__builtin_ia32_selectq_256:
+ case X86::BI__builtin_ia32_selectq_512:
+ case X86::BI__builtin_ia32_selectps_128:
+ case X86::BI__builtin_ia32_selectps_256:
+ case X86::BI__builtin_ia32_selectps_512:
+ case X86::BI__builtin_ia32_selectpd_128:
+ case X86::BI__builtin_ia32_selectpd_256:
+ case X86::BI__builtin_ia32_selectpd_512:
+ return EmitX86Select(*this, Ops);
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_blend_epi8 (__mmask64 __U, __m512i __A, __m512i __W)
{
- return (__m512i) __builtin_ia32_blendmb_512_mask ((__v64qi) __A,
+ return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
(__v64qi) __W,
- (__mmask64) __U);
+ (__v64qi) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_blend_epi16 (__mmask32 __U, __m512i __A, __m512i __W)
{
- return (__m512i) __builtin_ia32_blendmw_512_mask ((__v32hi) __A,
+ return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
(__v32hi) __W,
- (__mmask32) __U);
+ (__v32hi) __A);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_mov_epi16 (__m512i __W, __mmask32 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdquhi512_mask ((__v32hi) __A,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+ (__v32hi) __A,
+ (__v32hi) __W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mov_epi16 (__mmask32 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdquhi512_mask ((__v32hi) __A,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i) __builtin_ia32_selectw_512 ((__mmask32) __U,
+ (__v32hi) __A,
+ (__v32hi) _mm512_setzero_hi ());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_mov_epi8 (__m512i __W, __mmask64 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdquqi512_mask ((__v64qi) __A,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+ (__v64qi) __A,
+ (__v64qi) __W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mov_epi8 (__mmask64 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdquqi512_mask ((__v64qi) __A,
- (__v64qi)
- _mm512_setzero_hi (),
- (__mmask64) __U);
+ return (__m512i) __builtin_ia32_selectb_512 ((__mmask64) __U,
+ (__v64qi) __A,
+ (__v64qi) _mm512_setzero_hi ());
}
-
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_set1_epi8 (__m512i __O, __mmask64 __M, char __A)
{
static __inline __m512d __DEFAULT_FN_ATTRS
_mm512_mask_blend_pd(__mmask8 __U, __m512d __A, __m512d __W)
{
- return (__m512d) __builtin_ia32_blendmpd_512_mask ((__v8df) __A,
+ return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
(__v8df) __W,
- (__mmask8) __U);
+ (__v8df) __A);
}
static __inline __m512 __DEFAULT_FN_ATTRS
_mm512_mask_blend_ps(__mmask16 __U, __m512 __A, __m512 __W)
{
- return (__m512) __builtin_ia32_blendmps_512_mask ((__v16sf) __A,
+ return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
(__v16sf) __W,
- (__mmask16) __U);
+ (__v16sf) __A);
}
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_mask_blend_epi64(__mmask8 __U, __m512i __A, __m512i __W)
{
- return (__m512i) __builtin_ia32_blendmq_512_mask ((__v8di) __A,
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
(__v8di) __W,
- (__mmask8) __U);
+ (__v8di) __A);
}
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
{
- return (__m512i) __builtin_ia32_blendmd_512_mask ((__v16si) __A,
+ return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
(__v16si) __W,
- (__mmask16) __U);
+ (__v16si) __A);
}
/* Compare */
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdqa32_512_mask ((__v16si) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+ (__v16si) __A,
+ (__v16si) __W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdqa32_512_mask ((__v16si) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i) __builtin_ia32_selectd_512 ((__mmask16) __U,
+ (__v16si) __A,
+ (__v16si) _mm512_setzero_si512 ());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdqa64_512_mask ((__v8di) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+ (__v8di) __A,
+ (__v8di) __W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_mov_epi64 (__mmask8 __U, __m512i __A)
{
- return (__m512i) __builtin_ia32_movdqa64_512_mask ((__v8di) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i) __builtin_ia32_selectq_512 ((__mmask8) __U,
+ (__v8di) __A,
+ (__v8di) _mm512_setzero_si512 ());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
- return (__m512d) __builtin_ia32_movapd512_mask ((__v8df) __A,
- (__v8df) __W,
- (__mmask8) __U);
+ return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+ (__v8df) __A,
+ (__v8df) __W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_mov_pd (__mmask8 __U, __m512d __A)
{
- return (__m512d) __builtin_ia32_movapd512_mask ((__v8df) __A,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U);
+ return (__m512d) __builtin_ia32_selectpd_512 ((__mmask8) __U,
+ (__v8df) __A,
+ (__v8df) _mm512_setzero_pd ());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_mov_ps (__m512 __W, __mmask16 __U, __m512 __A)
{
- return (__m512) __builtin_ia32_movaps512_mask ((__v16sf) __A,
- (__v16sf) __W,
- (__mmask16) __U);
+ return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+ (__v16sf) __A,
+ (__v16sf) __W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_mov_ps (__mmask16 __U, __m512 __A)
{
- return (__m512) __builtin_ia32_movaps512_mask ((__v16sf) __A,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U);
+ return (__m512) __builtin_ia32_selectps_512 ((__mmask16) __U,
+ (__v16sf) __A,
+ (__v16sf) _mm512_setzero_ps ());
}
static __inline__ void __DEFAULT_FN_ATTRS
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_blend_epi8 (__mmask16 __U, __m128i __A, __m128i __W)
{
- return (__m128i) __builtin_ia32_blendmb_128_mask ((__v16qi) __A,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+ (__v16qi) __W,
+ (__v16qi) __A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_blend_epi8 (__mmask32 __U, __m256i __A, __m256i __W)
{
- return (__m256i) __builtin_ia32_blendmb_256_mask ((__v32qi) __A,
+ return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
(__v32qi) __W,
- (__mmask32) __U);
+ (__v32qi) __A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_blend_epi16 (__mmask8 __U, __m128i __A, __m128i __W)
{
- return (__m128i) __builtin_ia32_blendmw_128_mask ((__v8hi) __A,
+ return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
(__v8hi) __W,
- (__mmask8) __U);
+ (__v8hi) __A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
{
- return (__m256i) __builtin_ia32_blendmw_256_mask ((__v16hi) __A,
+ return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
(__v16hi) __W,
- (__mmask16) __U);
+ (__v16hi) __A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdquhi128_mask ((__v8hi) __A,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+ (__v8hi) __A,
+ (__v8hi) __W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maskz_mov_epi16 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdquhi128_mask ((__v8hi) __A,
- (__v8hi)
- _mm_setzero_hi (),
- (__mmask8) __U);
+ return (__m128i) __builtin_ia32_selectw_128 ((__mmask8) __U,
+ (__v8hi) __A,
+ (__v8hi) _mm_setzero_hi ());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_mov_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdquhi256_mask ((__v16hi) __A,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+ (__v16hi) __A,
+ (__v16hi) __W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_mov_epi16 (__mmask16 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdquhi256_mask ((__v16hi) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i) __builtin_ia32_selectw_256 ((__mmask16) __U,
+ (__v16hi) __A,
+ (__v16hi) _mm256_setzero_si256 ());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_mov_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdquqi128_mask ((__v16qi) __A,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+ (__v16qi) __A,
+ (__v16qi) __W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maskz_mov_epi8 (__mmask16 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdquqi128_mask ((__v16qi) __A,
- (__v16qi)
- _mm_setzero_hi (),
- (__mmask16) __U);
+ return (__m128i) __builtin_ia32_selectb_128 ((__mmask16) __U,
+ (__v16qi) __A,
+ (__v16qi) _mm_setzero_hi ());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_mov_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdquqi256_mask ((__v32qi) __A,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+ (__v32qi) __A,
+ (__v32qi) __W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_mov_epi8 (__mmask32 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdquqi256_mask ((__v32qi) __A,
- (__v32qi)
- _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i) __builtin_ia32_selectb_256 ((__mmask32) __U,
+ (__v32qi) __A,
+ (__v32qi) _mm256_setzero_si256 ());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_blend_epi32 (__mmask8 __U, __m128i __A, __m128i __W) {
- return (__m128i) __builtin_ia32_blendmd_128_mask ((__v4si) __A,
+ return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
(__v4si) __W,
- (__mmask8) __U);
+ (__v4si) __A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_blend_epi32 (__mmask8 __U, __m256i __A, __m256i __W) {
- return (__m256i) __builtin_ia32_blendmd_256_mask ((__v8si) __A,
+ return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
(__v8si) __W,
- (__mmask8) __U);
+ (__v8si) __A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_blend_pd (__mmask8 __U, __m128d __A, __m128d __W) {
- return (__m128d) __builtin_ia32_blendmpd_128_mask ((__v2df) __A,
+ return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
(__v2df) __W,
- (__mmask8) __U);
+ (__v2df) __A);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_blend_pd (__mmask8 __U, __m256d __A, __m256d __W) {
- return (__m256d) __builtin_ia32_blendmpd_256_mask ((__v4df) __A,
+ return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
(__v4df) __W,
- (__mmask8) __U);
+ (__v4df) __A);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_blend_ps (__mmask8 __U, __m128 __A, __m128 __W) {
- return (__m128) __builtin_ia32_blendmps_128_mask ((__v4sf) __A,
+ return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
(__v4sf) __W,
- (__mmask8) __U);
+ (__v4sf) __A);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_mask_blend_ps (__mmask8 __U, __m256 __A, __m256 __W) {
- return (__m256) __builtin_ia32_blendmps_256_mask ((__v8sf) __A,
+ return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
(__v8sf) __W,
- (__mmask8) __U);
+ (__v8sf) __A);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_blend_epi64 (__mmask8 __U, __m128i __A, __m128i __W) {
- return (__m128i) __builtin_ia32_blendmq_128_mask ((__v2di) __A,
+ return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
(__v2di) __W,
- (__mmask8) __U);
+ (__v2di) __A);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_blend_epi64 (__mmask8 __U, __m256i __A, __m256i __W) {
- return (__m256i) __builtin_ia32_blendmq_256_mask ((__v4di) __A,
+ return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
(__v4di) __W,
- (__mmask8) __U);
+ (__v4di) __A);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+ (__v4si) __A,
+ (__v4si) __W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i) __builtin_ia32_selectd_128 ((__mmask8) __U,
+ (__v4si) __A,
+ (__v4si) _mm_setzero_si128 ());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+ (__v8si) __A,
+ (__v8si) __W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i) __builtin_ia32_selectd_256 ((__mmask8) __U,
+ (__v8si) __A,
+ (__v8si) _mm256_setzero_si256 ());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_mov_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+ (__v2di) __A,
+ (__v2di) __W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maskz_mov_epi64 (__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_movdqa64_128_mask ((__v2di) __A,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i) __builtin_ia32_selectq_128 ((__mmask8) __U,
+ (__v2di) __A,
+ (__v2di) _mm_setzero_di ());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_mov_epi64 (__m256i __W, __mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+ (__v4di) __A,
+ (__v4di) __W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_mov_epi64 (__mmask8 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_movdqa64_256_mask ((__v4di) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i) __builtin_ia32_selectq_256 ((__mmask8) __U,
+ (__v4di) __A,
+ (__v4di) _mm256_setzero_si256 ());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_mov_pd (__m128d __W, __mmask8 __U, __m128d __A)
{
- return (__m128d) __builtin_ia32_movapd128_mask ((__v2df) __A,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+ (__v2df) __A,
+ (__v2df) __W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_mov_pd (__mmask8 __U, __m128d __A)
{
- return (__m128d) __builtin_ia32_movapd128_mask ((__v2df) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+ return (__m128d) __builtin_ia32_selectpd_128 ((__mmask8) __U,
+ (__v2df) __A,
+ (__v2df) _mm_setzero_pd ());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_mov_pd (__m256d __W, __mmask8 __U, __m256d __A)
{
- return (__m256d) __builtin_ia32_movapd256_mask ((__v4df) __A,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+ (__v4df) __A,
+ (__v4df) __W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_maskz_mov_pd (__mmask8 __U, __m256d __A)
{
- return (__m256d) __builtin_ia32_movapd256_mask ((__v4df) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+ return (__m256d) __builtin_ia32_selectpd_256 ((__mmask8) __U,
+ (__v4df) __A,
+ (__v4df) _mm256_setzero_pd ());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_mov_ps (__m128 __W, __mmask8 __U, __m128 __A)
{
- return (__m128) __builtin_ia32_movaps128_mask ((__v4sf) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+ (__v4sf) __A,
+ (__v4sf) __W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_mov_ps (__mmask8 __U, __m128 __A)
{
- return (__m128) __builtin_ia32_movaps128_mask ((__v4sf) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+ return (__m128) __builtin_ia32_selectps_128 ((__mmask8) __U,
+ (__v4sf) __A,
+ (__v4sf) _mm_setzero_ps ());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_mask_mov_ps (__m256 __W, __mmask8 __U, __m256 __A)
{
- return (__m256) __builtin_ia32_movaps256_mask ((__v8sf) __A,
- (__v8sf) __W,
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+ (__v8sf) __A,
+ (__v8sf) __W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_maskz_mov_ps (__mmask8 __U, __m256 __A)
{
- return (__m256) __builtin_ia32_movaps256_mask ((__v8sf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+ return (__m256) __builtin_ia32_selectps_256 ((__mmask8) __U,
+ (__v8sf) __A,
+ (__v8sf) _mm256_setzero_ps ());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
__m512i test_mm512_mask_blend_epi8(__mmask64 __U, __m512i __A, __m512i __W) {
// CHECK-LABEL: @test_mm512_mask_blend_epi8
- // CHECK: @llvm.x86.avx512.mask.blend.b.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_blend_epi8(__U,__A,__W);
}
__m512i test_mm512_mask_blend_epi16(__mmask32 __U, __m512i __A, __m512i __W) {
// CHECK-LABEL: @test_mm512_mask_blend_epi16
- // CHECK: @llvm.x86.avx512.mask.blend.w.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_blend_epi16(__U,__A,__W);
}
__m512i test_mm512_abs_epi8(__m512i __A) {
__m512i test_mm512_mask_mov_epi16(__m512i __W, __mmask32 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_mov_epi16
- // CHECK: @llvm.x86.avx512.mask.movu.w.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_mask_mov_epi16(__W, __U, __A);
}
__m512i test_mm512_maskz_mov_epi16(__mmask32 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_mov_epi16
- // CHECK: @llvm.x86.avx512.mask.movu.w.512
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i16> %{{.*}}, <32 x i16> %{{.*}}
return _mm512_maskz_mov_epi16(__U, __A);
}
__m512i test_mm512_mask_mov_epi8(__m512i __W, __mmask64 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_mov_epi8
- // CHECK: @llvm.x86.avx512.mask.movu.b.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_mask_mov_epi8(__W, __U, __A);
}
__m512i test_mm512_maskz_mov_epi8(__mmask64 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_mov_epi8
- // CHECK: @llvm.x86.avx512.mask.movu.b.512
+ // CHECK: select <64 x i1> %{{.*}}, <64 x i8> %{{.*}}, <64 x i8> %{{.*}}
return _mm512_maskz_mov_epi8(__U, __A);
}
__m512i test_mm512_mask_mov_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_mov_epi32
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_mov_epi32(__W, __U, __A);
}
__m512i test_mm512_maskz_mov_epi32(__mmask16 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_mov_epi32
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_mov_epi32(__U, __A);
}
__m512i test_mm512_mask_mov_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_mask_mov_epi64
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_mask_mov_epi64(__W, __U, __A);
}
__m512i test_mm512_maskz_mov_epi64(__mmask8 __U, __m512i __A) {
// CHECK-LABEL: @test_mm512_maskz_mov_epi64
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i64> %{{.*}}, <8 x i64> %{{.*}}
return _mm512_maskz_mov_epi64(__U, __A);
}
}
__m512d test_mm512_mask_mov_pd(__m512d __W, __mmask8 __U, __m512d __A) {
// CHECK-LABEL: @test_mm512_mask_mov_pd
- // CHECK: @llvm.x86.avx512.mask.mova.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_mov_pd(__W, __U, __A);
}
__m512d test_mm512_maskz_mov_pd(__mmask8 __U, __m512d __A) {
// CHECK-LABEL: @test_mm512_maskz_mov_pd
- // CHECK: @llvm.x86.avx512.mask.mova.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_mov_pd(__U, __A);
}
__m512 test_mm512_mask_mov_ps(__m512 __W, __mmask16 __U, __m512 __A) {
// CHECK-LABEL: @test_mm512_mask_mov_ps
- // CHECK: @llvm.x86.avx512.mask.mova.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_mov_ps(__W, __U, __A);
}
__m512 test_mm512_maskz_mov_ps(__mmask16 __U, __m512 __A) {
// CHECK-LABEL: @test_mm512_maskz_mov_ps
- // CHECK: @llvm.x86.avx512.mask.mova.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_mov_ps(__U, __A);
}
}
__m128i test_mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W) {
// CHECK-LABEL: @test_mm_mask_blend_epi32
- // CHECK: @llvm.x86.avx512.mask.blend.d.128
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_blend_epi32(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi32(__mmask8 __U, __m256i __A, __m256i __W) {
// CHECK-LABEL: @test_mm256_mask_blend_epi32
- // CHECK: @llvm.x86.avx512.mask.blend.d.256
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_blend_epi32(__U,__A,__W);
}
__m128d test_mm_mask_blend_pd(__mmask8 __U, __m128d __A, __m128d __W) {
// CHECK-LABEL: @test_mm_mask_blend_pd
- // CHECK: @llvm.x86.avx512.mask.blend.pd.128
+ // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_blend_pd(__U,__A,__W);
}
__m256d test_mm256_mask_blend_pd(__mmask8 __U, __m256d __A, __m256d __W) {
// CHECK-LABEL: @test_mm256_mask_blend_pd
- // CHECK: @llvm.x86.avx512.mask.blend.pd.256
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_blend_pd(__U,__A,__W);
}
__m128 test_mm_mask_blend_ps(__mmask8 __U, __m128 __A, __m128 __W) {
// CHECK-LABEL: @test_mm_mask_blend_ps
- // CHECK: @llvm.x86.avx512.mask.blend.ps.128
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_blend_ps(__U,__A,__W);
}
__m256 test_mm256_mask_blend_ps(__mmask8 __U, __m256 __A, __m256 __W) {
// CHECK-LABEL: @test_mm256_mask_blend_ps
- // CHECK: @llvm.x86.avx512.mask.blend.ps.256
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_blend_ps(__U,__A,__W);
}
__m128i test_mm_mask_blend_epi64(__mmask8 __U, __m128i __A, __m128i __W) {
// CHECK-LABEL: @test_mm_mask_blend_epi64
- // CHECK: @llvm.x86.avx512.mask.blend.q.128
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_blend_epi64(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi64(__mmask8 __U, __m256i __A, __m256i __W) {
// CHECK-LABEL: @test_mm256_mask_blend_epi64
- // CHECK: @llvm.x86.avx512.mask.blend.q.256
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_blend_epi64(__U,__A,__W);
}
__m128d test_mm_mask_compress_pd(__m128d __W, __mmask8 __U, __m128d __A) {
__m128i test_mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_mov_epi32
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_mask_mov_epi32(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi32(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_mov_epi32
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i32> %{{.*}}, <4 x i32> %{{.*}}
return _mm_maskz_mov_epi32(__U, __A);
}
__m256i test_mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_mov_epi32
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_mov_epi32(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_mov_epi32
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_mov_epi32(__U, __A);
}
__m128i test_mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_mov_epi64
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_mask_mov_epi64(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi64(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_mov_epi64
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <2 x i1> %{{.*}}, <2 x i64> %{{.*}}, <2 x i64> %{{.*}}
return _mm_maskz_mov_epi64(__U, __A);
}
__m256i test_mm256_mask_mov_epi64(__m256i __W, __mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_mov_epi64
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_mask_mov_epi64(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_mov_epi64
- // CHECK: @llvm.x86.avx512.mask.mov
+ // CHECK: select <4 x i1> %{{.*}}, <4 x i64> %{{.*}}, <4 x i64> %{{.*}}
return _mm256_maskz_mov_epi64(__U, __A);
}
__m128d test_mm_mask_mov_pd(__m128d __W, __mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_mask_mov_pd
- // CHECK: @llvm.x86.avx512.mask.mova.pd.128
+ // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_mask_mov_pd(__W, __U, __A);
}
__m128d test_mm_maskz_mov_pd(__mmask8 __U, __m128d __A) {
// CHECK-LABEL: @test_mm_maskz_mov_pd
- // CHECK: @llvm.x86.avx512.mask.mova.pd.128
+ // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
return _mm_maskz_mov_pd(__U, __A);
}
__m256d test_mm256_mask_mov_pd(__m256d __W, __mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_mask_mov_pd
- // CHECK: @llvm.x86.avx512.mask.mova.pd.256
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_mask_mov_pd(__W, __U, __A);
}
__m256d test_mm256_maskz_mov_pd(__mmask8 __U, __m256d __A) {
// CHECK-LABEL: @test_mm256_maskz_mov_pd
- // CHECK: @llvm.x86.avx512.mask.mova.pd.256
+ // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
return _mm256_maskz_mov_pd(__U, __A);
}
__m128 test_mm_mask_mov_ps(__m128 __W, __mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_mask_mov_ps
- // CHECK: @llvm.x86.avx512.mask.mova.ps.128
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_mask_mov_ps(__W, __U, __A);
}
__m128 test_mm_maskz_mov_ps(__mmask8 __U, __m128 __A) {
// CHECK-LABEL: @test_mm_maskz_mov_ps
- // CHECK: @llvm.x86.avx512.mask.mova.ps.128
+ // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
return _mm_maskz_mov_ps(__U, __A);
}
__m256 test_mm256_mask_mov_ps(__m256 __W, __mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_mask_mov_ps
- // CHECK: @llvm.x86.avx512.mask.mova.ps.256
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_mask_mov_ps(__W, __U, __A);
}
__m256 test_mm256_maskz_mov_ps(__mmask8 __U, __m256 __A) {
// CHECK-LABEL: @test_mm256_maskz_mov_ps
- // CHECK: @llvm.x86.avx512.mask.mova.ps.256
+ // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
return _mm256_maskz_mov_ps(__U, __A);
}
__m128i test_mm_mask_blend_epi8(__mmask16 __U, __m128i __A, __m128i __W) {
// CHECK-LABEL: @test_mm_mask_blend_epi8
- // CHECK: @llvm.x86.avx512.mask.blend.b.128
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_blend_epi8(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi8(__mmask32 __U, __m256i __A, __m256i __W) {
// CHECK-LABEL: @test_mm256_mask_blend_epi8
- // CHECK: @llvm.x86.avx512.mask.blend.b.256
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_blend_epi8(__U,__A,__W);
}
__m128i test_mm_mask_blend_epi16(__mmask8 __U, __m128i __A, __m128i __W) {
// CHECK-LABEL: @test_mm_mask_blend_epi16
- // CHECK: @llvm.x86.avx512.mask.blend.w.128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_blend_epi16(__U,__A,__W);
}
__m256i test_mm256_mask_blend_epi16(__mmask16 __U, __m256i __A, __m256i __W) {
// CHECK-LABEL: @test_mm256_mask_blend_epi16
- // CHECK: @llvm.x86.avx512.mask.blend.w.256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_blend_epi16(__U,__A,__W);
}
__m128i test_mm_mask_mov_epi16(__m128i __W, __mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_mov_epi16
- // CHECK: @llvm.x86.avx512.mask.movu.w.128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_mask_mov_epi16(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi16(__mmask8 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_mov_epi16
- // CHECK: @llvm.x86.avx512.mask.movu.w.128
+ // CHECK: select <8 x i1> %{{.*}}, <8 x i16> %{{.*}}, <8 x i16> %{{.*}}
return _mm_maskz_mov_epi16(__U, __A);
}
__m256i test_mm256_mask_mov_epi16(__m256i __W, __mmask16 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_mov_epi16
- // CHECK: @llvm.x86.avx512.mask.movu.w.256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_mask_mov_epi16(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi16(__mmask16 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_mov_epi16
- // CHECK: @llvm.x86.avx512.mask.movu.w.256
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i16> %{{.*}}, <16 x i16> %{{.*}}
return _mm256_maskz_mov_epi16(__U, __A);
}
__m128i test_mm_mask_mov_epi8(__m128i __W, __mmask16 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_mask_mov_epi8
- // CHECK: @llvm.x86.avx512.mask.movu.b.128
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_mask_mov_epi8(__W, __U, __A);
}
__m128i test_mm_maskz_mov_epi8(__mmask16 __U, __m128i __A) {
// CHECK-LABEL: @test_mm_maskz_mov_epi8
- // CHECK: @llvm.x86.avx512.mask.movu.b.128
+ // CHECK: select <16 x i1> %{{.*}}, <16 x i8> %{{.*}}, <16 x i8> %{{.*}}
return _mm_maskz_mov_epi8(__U, __A);
}
__m256i test_mm256_mask_mov_epi8(__m256i __W, __mmask32 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_mask_mov_epi8
- // CHECK: @llvm.x86.avx512.mask.movu.b.256
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_mask_mov_epi8(__W, __U, __A);
}
__m256i test_mm256_maskz_mov_epi8(__mmask32 __U, __m256i __A) {
// CHECK-LABEL: @test_mm256_maskz_mov_epi8
- // CHECK: @llvm.x86.avx512.mask.movu.b.256
+ // CHECK: select <32 x i1> %{{.*}}, <32 x i8> %{{.*}}, <32 x i8> %{{.*}}
return _mm256_maskz_mov_epi8(__U, __A);
}