TARGET_BUILTIN(__builtin_ia32_extracti64x2_256_mask, "V2LLiV4LLiIiV2LLiUc","","avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_extractf32x4_256_mask, "V4fV8fIiV4fUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_extracti32x4_256_mask, "V4iV8iIiV4iUc","","avx512vl")
+// AVX512DQ: insert a 256-bit float vector into a 512-bit vector at an
+// immediate position, under a 16-element write mask (used as __mmask16
+// by the avx512 header macros below).
+TARGET_BUILTIN(__builtin_ia32_insertf32x8_mask, "V16fV16fV8fIiV16fUs","","avx512dq")
TARGET_BUILTIN(__builtin_ia32_insertf64x2_512_mask, "V8dV8dV2dIiV8dUc","","avx512dq")
TARGET_BUILTIN(__builtin_ia32_inserti32x8_mask, "V16iV16iV8iIiV16iUs","","avx512dq")
TARGET_BUILTIN(__builtin_ia32_inserti64x2_512_mask, "V8LLiV8LLiV2LLiIiV8LLiUc","","avx512dq")
TARGET_BUILTIN(__builtin_ia32_inserti64x2_256_mask, "V4LLiV4LLiV2LLiIiV4LLiUc","","avx512dq,avx512vl")
TARGET_BUILTIN(__builtin_ia32_insertf32x4_256_mask, "V8fV8fV4fIiV8fUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_inserti32x4_256_mask, "V8iV8iV4iIiV8iUc","","avx512vl")
+// AVX512F: insert a 128-bit float/int vector into a 512-bit vector at an
+// immediate position, under a 16-element write mask.
+TARGET_BUILTIN(__builtin_ia32_insertf32x4_mask, "V16fV16fV4fIiV16fUs","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_inserti32x4_mask, "V16iV16iV4iIiV16iUs","","avx512f")
TARGET_BUILTIN(__builtin_ia32_getmantpd128_mask, "V2dV2diV2dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_getmantpd256_mask, "V4dV4diV4dUc","","avx512vl")
TARGET_BUILTIN(__builtin_ia32_getmantps128_mask, "V4fV4fiV4fUc","","avx512vl")
(__mmask8) ( __U));\
})
+/* Insert the 8-float vector __B into __A at the 256-bit lane selected by
+   __imm.  Unmasked form: the mask is all-ones, so the setzero pass-through
+   operand is never selected. */
+#define _mm512_insertf32x8( __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_insertf32x8_mask ((__v16sf)( __A),\
+             (__v8sf)( __B),\
+             ( __imm),\
+             (__v16sf) _mm512_setzero_ps (),\
+             (__mmask16) -1);\
+})
+
+/* Merge-masked insert of __B into __A at 256-bit lane __imm: result
+   elements whose __U bit is clear are taken from __W. */
+#define _mm512_mask_insertf32x8( __W, __U, __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_insertf32x8_mask ((__v16sf)( __A),\
+             (__v8sf)( __B),\
+             ( __imm),\
+             (__v16sf)( __W),\
+             (__mmask16)( __U));\
+})
+
+/* Zero-masked insert of __B into __A at 256-bit lane __imm: result
+   elements whose __U bit is clear are zeroed (setzero pass-through). */
+#define _mm512_maskz_insertf32x8( __U, __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_insertf32x8_mask ((__v16sf)( __A),\
+             (__v8sf)( __B),\
+             ( __imm),\
+             (__v16sf) _mm512_setzero_ps (),\
+             (__mmask16)( __U));\
+})
+
#define _mm512_insertf64x2( __A, __B, __imm) __extension__ ({ \
__builtin_ia32_insertf64x2_512_mask ((__v8df)( __A),\
(__v2df)( __B),\
(__mmask8)( __U));\
})
+/* Insert the 4-float vector __B into __A at the 128-bit lane selected by
+   __imm.  Unmasked form: mask is all-ones, so the undefined pass-through
+   operand is never selected.  NOTE(review): uses _mm512_undefined_ps()
+   here while the insertf32x8 unmasked form above uses setzero — both are
+   dead operands under a -1 mask, but the inconsistency is worth noting. */
+#define _mm512_insertf32x4( __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_insertf32x4_mask ((__v16sf)( __A),\
+             (__v4sf)( __B),\
+             ( __imm),\
+             (__v16sf) _mm512_undefined_ps (),\
+             (__mmask16) -1);\
+})
+
+/* Merge-masked insert of __B into __A at 128-bit lane __imm: result
+   elements whose __U bit is clear are taken from __W. */
+#define _mm512_mask_insertf32x4( __W, __U, __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_insertf32x4_mask ((__v16sf)( __A),\
+             (__v4sf)( __B),\
+             ( __imm),\
+             (__v16sf)( __W),\
+             (__mmask16)( __U));\
+})
+
+/* Zero-masked insert of __B into __A at 128-bit lane __imm: result
+   elements whose __U bit is clear are zeroed. */
+#define _mm512_maskz_insertf32x4( __U, __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_insertf32x4_mask ((__v16sf)( __A),\
+             (__v4sf)( __B),\
+             ( __imm),\
+             (__v16sf) _mm512_setzero_ps (),\
+             (__mmask16)( __U));\
+})
+
+/* Integer counterpart: insert the 4 x i32 vector __B into __A at the
+   128-bit lane selected by __imm.  Unmasked form: mask is all-ones, so
+   the setzero pass-through operand is never selected. */
+#define _mm512_inserti32x4( __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_inserti32x4_mask ((__v16si)( __A),\
+             (__v4si)( __B),\
+             ( __imm),\
+             (__v16si) _mm512_setzero_si512 (),\
+             (__mmask16) -1);\
+})
+
+/* Merge-masked integer insert at 128-bit lane __imm: result elements
+   whose __U bit is clear are taken from __W. */
+#define _mm512_mask_inserti32x4( __W, __U, __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_inserti32x4_mask ((__v16si)( __A),\
+             (__v4si)( __B),\
+             ( __imm),\
+             (__v16si)( __W),\
+             (__mmask16)( __U));\
+})
+
+/* Zero-masked integer insert at 128-bit lane __imm: result elements
+   whose __U bit is clear are zeroed. */
+#define _mm512_maskz_inserti32x4( __U, __A, __B, __imm) __extension__ ({ \
+__builtin_ia32_inserti32x4_mask ((__v16si)( __A),\
+             (__v4si)( __B),\
+             ( __imm),\
+             (__v16si) _mm512_setzero_si512 (),\
+             (__mmask16)( __U));\
+})
+
#define _mm512_getmant_round_pd( __A, __B, __C, __R) __extension__ ({ \
__builtin_ia32_getmantpd512_mask ((__v8df)( __A),\
(__C << 2) |( __B),\
return _mm512_maskz_extracti64x2_epi64(__U, __A, 3);
}
+// Lit test: the unmasked macro must lower to llvm.x86.avx512.mask.insertf32x8.
+__m512 test_mm512_insertf32x8(__m512 __A, __m256 __B) {
+  // CHECK-LABEL: @test_mm512_insertf32x8
+  // CHECK: @llvm.x86.avx512.mask.insertf32x8
+  return _mm512_insertf32x8(__A, __B, 1);
+}
+
+// Lit test: the merge-masked macro must lower to llvm.x86.avx512.mask.insertf32x8.
+__m512 test_mm512_mask_insertf32x8(__m512 __W, __mmask16 __U, __m512 __A, __m256 __B) {
+  // CHECK-LABEL: @test_mm512_mask_insertf32x8
+  // CHECK: @llvm.x86.avx512.mask.insertf32x8
+  return _mm512_mask_insertf32x8(__W, __U, __A, __B, 1);
+}
+
+// Lit test: the zero-masked macro must lower to llvm.x86.avx512.mask.insertf32x8.
+__m512 test_mm512_maskz_insertf32x8(__mmask16 __U, __m512 __A, __m256 __B) {
+  // CHECK-LABEL: @test_mm512_maskz_insertf32x8
+  // CHECK: @llvm.x86.avx512.mask.insertf32x8
+  return _mm512_maskz_insertf32x8(__U, __A, __B, 1);
+}
+
__m512d test_mm512_insertf64x2(__m512d __A, __m128d __B) {
// CHECK-LABEL: @test_mm512_insertf64x2
// CHECK: @llvm.x86.avx512.mask.insertf64x2
return _mm512_maskz_inserti64x4(__U, __A, __B, 1);
}
+// Lit test: the unmasked macro must lower to llvm.x86.avx512.mask.insertf32x4.
+__m512 test_mm512_insertf32x4(__m512 __A, __m128 __B) {
+  // CHECK-LABEL: @test_mm512_insertf32x4
+  // CHECK: @llvm.x86.avx512.mask.insertf32x4
+  return _mm512_insertf32x4(__A, __B, 1);
+}
+
+// Lit test: the merge-masked macro must lower to llvm.x86.avx512.mask.insertf32x4.
+__m512 test_mm512_mask_insertf32x4(__m512 __W, __mmask16 __U, __m512 __A, __m128 __B) {
+  // CHECK-LABEL: @test_mm512_mask_insertf32x4
+  // CHECK: @llvm.x86.avx512.mask.insertf32x4
+  return _mm512_mask_insertf32x4(__W, __U, __A, __B, 1);
+}
+
+// Lit test: the zero-masked macro must lower to llvm.x86.avx512.mask.insertf32x4.
+__m512 test_mm512_maskz_insertf32x4(__mmask16 __U, __m512 __A, __m128 __B) {
+  // CHECK-LABEL: @test_mm512_maskz_insertf32x4
+  // CHECK: @llvm.x86.avx512.mask.insertf32x4
+  return _mm512_maskz_insertf32x4(__U, __A, __B, 1);
+}
+
+// Lit test: the unmasked integer macro must lower to llvm.x86.avx512.mask.inserti32x4.
+__m512i test_mm512_inserti32x4(__m512i __A, __m128i __B) {
+  // CHECK-LABEL: @test_mm512_inserti32x4
+  // CHECK: @llvm.x86.avx512.mask.inserti32x4
+  return _mm512_inserti32x4(__A, __B, 1);
+}
+
+// Lit test: the merge-masked integer macro must lower to llvm.x86.avx512.mask.inserti32x4.
+__m512i test_mm512_mask_inserti32x4(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B) {
+  // CHECK-LABEL: @test_mm512_mask_inserti32x4
+  // CHECK: @llvm.x86.avx512.mask.inserti32x4
+  return _mm512_mask_inserti32x4(__W, __U, __A, __B, 1);
+}
+
+// Lit test: the zero-masked integer macro must lower to llvm.x86.avx512.mask.inserti32x4.
+__m512i test_mm512_maskz_inserti32x4(__mmask16 __U, __m512i __A, __m128i __B) {
+  // CHECK-LABEL: @test_mm512_maskz_inserti32x4
+  // CHECK: @llvm.x86.avx512.mask.inserti32x4
+  return _mm512_maskz_inserti32x4(__U, __A, __B, 1);
+}
+
__m512d test_mm512_getmant_round_pd(__m512d __A) {
// CHECK-LABEL: @test_mm512_getmant_round_pd
// CHECK: @llvm.x86.avx512.mask.getmant.pd.512