}
#define _mm512_add_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ (__m512d)__builtin_ia32_addpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
#define _mm512_mask_add_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_add_round_pd((A), (B), (R)), \
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_add_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_addpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_add_round_pd((A), (B), (R)), \
+ (__v8df)_mm512_setzero_pd())
#define _mm512_add_round_ps(A, B, R) \
- (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ (__m512)__builtin_ia32_addps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
#define _mm512_mask_add_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R))
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
+ (__v16sf)(__m512)(W))
#define _mm512_maskz_add_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_addps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_add_round_ps((A), (B), (R)), \
+ (__v16sf)_mm512_setzero_ps())
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm512_sub_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ (__m512d)__builtin_ia32_subpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
#define _mm512_mask_sub_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_sub_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_subpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_sub_round_pd((A), (B), (R)), \
+ (__v8df)_mm512_setzero_pd())
#define _mm512_sub_round_ps(A, B, R) \
- (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ (__m512)__builtin_ia32_subps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
-#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R))
+#define _mm512_mask_sub_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_sub_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_subps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+#define _mm512_maskz_sub_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_sub_round_ps((A), (B), (R)), \
+ (__v16sf)_mm512_setzero_ps())
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm512_mul_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ (__m512d)__builtin_ia32_mulpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
#define _mm512_mask_mul_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_mul_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_mulpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_mul_round_pd((A), (B), (R)), \
+ (__v8df)_mm512_setzero_pd())
#define _mm512_mul_round_ps(A, B, R) \
- (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ (__m512)__builtin_ia32_mulps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
-#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R))
+#define _mm512_mask_mul_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_mul_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_mulps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+#define _mm512_maskz_mul_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_mul_round_ps((A), (B), (R)), \
+ (__v16sf)_mm512_setzero_ps())
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
}
#define _mm512_div_round_pd(A, B, R) \
- (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1, (int)(R))
+ (__m512d)__builtin_ia32_divpd512((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(R))
#define _mm512_mask_div_round_pd(W, U, A, B, R) \
- (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)(__m512d)(W), (__mmask8)(U), \
- (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_div_round_pd((A), (B), (R)), \
+ (__v8df)(__m512d)(W))
#define _mm512_maskz_div_round_pd(U, A, B, R) \
- (__m512d)__builtin_ia32_divpd512_mask((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U), (int)(R))
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_div_round_pd((A), (B), (R)), \
+ (__v8df)_mm512_setzero_pd())
#define _mm512_div_round_ps(A, B, R) \
- (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1, (int)(R))
+ (__m512)__builtin_ia32_divps512((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(R))
-#define _mm512_mask_div_round_ps(W, U, A, B, R) \
- (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)(__m512)(W), (__mmask16)(U), \
- (int)(R))
+#define _mm512_mask_div_round_ps(W, U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
+ (__v16sf)(__m512)(W))
-#define _mm512_maskz_div_round_ps(U, A, B, R) \
- (__m512)__builtin_ia32_divps512_mask((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U), (int)(R))
+#define _mm512_maskz_div_round_ps(U, A, B, R) \
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_div_round_ps((A), (B), (R)), \
+ (__v16sf)_mm512_setzero_ps())
#define _mm512_roundscale_ps(A, B) \
(__m512)__builtin_ia32_rndscaleps_mask((__v16sf)(__m512)(A), (int)(B), \
__m512d test_mm512_add_round_pd(__m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_add_round_pd
- // CHECK: @llvm.x86.avx512.mask.add.pd.512
+ // CHECK: @llvm.x86.avx512.add.pd.512
return _mm512_add_round_pd(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_add_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mask_add_round_pd
- // CHECK: @llvm.x86.avx512.mask.add.pd.512
+ // CHECK: @llvm.x86.avx512.add.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_add_round_pd(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_add_round_pd(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_maskz_add_round_pd
- // CHECK: @llvm.x86.avx512.mask.add.pd.512
+ // CHECK: @llvm.x86.avx512.add.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_add_round_pd(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_add_round_ps(__m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_add_round_ps
- // CHECK: @llvm.x86.avx512.mask.add.ps.512
+ // CHECK: @llvm.x86.avx512.add.ps.512
return _mm512_add_round_ps(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_add_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mask_add_round_ps
- // CHECK: @llvm.x86.avx512.mask.add.ps.512
+ // CHECK: @llvm.x86.avx512.add.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_add_round_ps(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_maskz_add_round_ps(__mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_maskz_add_round_ps
- // CHECK: @llvm.x86.avx512.mask.add.ps.512
+ // CHECK: @llvm.x86.avx512.add.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_add_round_ps(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_sub_round_pd(__m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_sub_round_pd
- // CHECK: @llvm.x86.avx512.mask.sub.pd.512
+ // CHECK: @llvm.x86.avx512.sub.pd.512
return _mm512_sub_round_pd(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_sub_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mask_sub_round_pd
- // CHECK: @llvm.x86.avx512.mask.sub.pd.512
+ // CHECK: @llvm.x86.avx512.sub.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_sub_round_pd(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_sub_round_pd(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_maskz_sub_round_pd
- // CHECK: @llvm.x86.avx512.mask.sub.pd.512
+ // CHECK: @llvm.x86.avx512.sub.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_sub_round_pd(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_sub_round_ps(__m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_sub_round_ps
- // CHECK: @llvm.x86.avx512.mask.sub.ps.512
+ // CHECK: @llvm.x86.avx512.sub.ps.512
return _mm512_sub_round_ps(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_sub_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mask_sub_round_ps
- // CHECK: @llvm.x86.avx512.mask.sub.ps.512
+ // CHECK: @llvm.x86.avx512.sub.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_sub_round_ps(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_maskz_sub_round_ps(__mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_maskz_sub_round_ps
- // CHECK: @llvm.x86.avx512.mask.sub.ps.512
+ // CHECK: @llvm.x86.avx512.sub.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_sub_round_ps(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_mul_round_pd(__m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mul_round_pd
- // CHECK: @llvm.x86.avx512.mask.mul.pd.512
+ // CHECK: @llvm.x86.avx512.mul.pd.512
return _mm512_mul_round_pd(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_mul_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mask_mul_round_pd
- // CHECK: @llvm.x86.avx512.mask.mul.pd.512
+ // CHECK: @llvm.x86.avx512.mul.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_mul_round_pd(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_mul_round_pd(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_maskz_mul_round_pd
- // CHECK: @llvm.x86.avx512.mask.mul.pd.512
+ // CHECK: @llvm.x86.avx512.mul.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_mul_round_pd(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
}
__m512 test_mm512_mul_round_ps(__m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mul_round_ps
- // CHECK: @llvm.x86.avx512.mask.mul.ps.512
+ // CHECK: @llvm.x86.avx512.mul.ps.512
return _mm512_mul_round_ps(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_mul_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mask_mul_round_ps
- // CHECK: @llvm.x86.avx512.mask.mul.ps.512
+ // CHECK: @llvm.x86.avx512.mul.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_mul_round_ps(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_maskz_mul_round_ps(__mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_maskz_mul_round_ps
- // CHECK: @llvm.x86.avx512.mask.mul.ps.512
+ // CHECK: @llvm.x86.avx512.mul.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_mul_round_ps(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
}
__m512d test_mm512_div_round_pd(__m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_div_round_pd
- // CHECK: @llvm.x86.avx512.mask.div.pd.512
+ // CHECK: @llvm.x86.avx512.div.pd.512
return _mm512_div_round_pd(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_mask_div_round_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_mask_div_round_pd
- // CHECK: @llvm.x86.avx512.mask.div.pd.512
+ // CHECK: @llvm.x86.avx512.div.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_mask_div_round_pd(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_maskz_div_round_pd(__mmask8 __U, __m512d __A, __m512d __B) {
// CHECK-LABEL: @test_mm512_maskz_div_round_pd
- // CHECK: @llvm.x86.avx512.mask.div.pd.512
+ // CHECK: @llvm.x86.avx512.div.pd.512
+ // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
return _mm512_maskz_div_round_pd(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512d test_mm512_div_pd(__m512d __a, __m512d __b) {
}
__m512 test_mm512_div_round_ps(__m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_div_round_ps
- // CHECK: @llvm.x86.avx512.mask.div.ps.512
+ // CHECK: @llvm.x86.avx512.div.ps.512
return _mm512_div_round_ps(__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_mask_div_round_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_mask_div_round_ps
- // CHECK: @llvm.x86.avx512.mask.div.ps.512
+ // CHECK: @llvm.x86.avx512.div.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_mask_div_round_ps(__W,__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_maskz_div_round_ps(__mmask16 __U, __m512 __A, __m512 __B) {
// CHECK-LABEL: @test_mm512_maskz_div_round_ps
- // CHECK: @llvm.x86.avx512.mask.div.ps.512
+ // CHECK: @llvm.x86.avx512.div.ps.512
+ // CHECK: select <16 x i1> %{{.*}}, <16 x float> %{{.*}}, <16 x float> %{{.*}}
return _mm512_maskz_div_round_ps(__U,__A,__B,_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
}
__m512 test_mm512_div_ps(__m512 __A, __m512 __B) {