/* Merge-masked scalar single-precision max; the element not selected by
   __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_max_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __W,
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar single-precision max; the unselected element is
   zeroed via _mm_setzero_ps().  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_max_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_maxss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) _mm_setzero_ps (),
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar max: __R supplies the rounding/SAE
   control instead of _MM_FROUND_CUR_DIRECTION. */
#define _mm_max_round_ss(__A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })

#define _mm_mask_max_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) __W, (__mmask8) __U, __R); })

#define _mm_maskz_max_round_ss(__U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_maxss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) __U, __R); })
/* Merge-masked scalar double-precision max; the element not selected by
   __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_max_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) __W,
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar double-precision max; the unselected element is
   zeroed via _mm_setzero_pd().  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_max_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) _mm_setzero_pd (),
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar double-precision max: __R supplies
   the rounding/SAE control. */
#define _mm_max_round_sd(__A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })

#define _mm_mask_max_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) __W, (__mmask8) __U, __R); })

#define _mm_maskz_max_round_sd(__U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_maxsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) __U, __R); })
static __inline __m512i
/* Merge-masked scalar single-precision min; the element not selected by
   __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_min_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __W,
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar single-precision min; the unselected element is
   zeroed via _mm_setzero_ps().  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_min_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_minss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) _mm_setzero_ps (),
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar min: __R supplies the rounding/SAE
   control. */
#define _mm_min_round_ss(__A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })

#define _mm_mask_min_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) __W, (__mmask8) __U, __R); })

#define _mm_maskz_min_round_ss(__U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_minss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) __U, __R); })
/* Merge-masked scalar double-precision min; the element not selected by
   __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_min_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) __W,
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar double-precision min; the unselected element is
   zeroed via _mm_setzero_pd().  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_min_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_minsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) _mm_setzero_pd (),
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar double-precision min: __R supplies
   the rounding/SAE control. */
#define _mm_min_round_sd(__A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })

#define _mm_mask_min_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) __W, (__mmask8) __U, __R); })

#define _mm_maskz_min_round_sd(__U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_minsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) __U, __R); })
static __inline __m512i
/* Merge-masked scalar single-precision add; the element not selected by
   __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_add_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __W,
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar single-precision add; the unselected element is
   zeroed via _mm_setzero_ps().  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_add_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_addss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) _mm_setzero_ps (),
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar add: __R supplies the rounding
   control. */
#define _mm_add_round_ss(__A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })

#define _mm_mask_add_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) __W, (__mmask8) __U, __R); })

#define _mm_maskz_add_round_ss(__U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_addss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) __U, __R); })
/* Merge-masked scalar double-precision add; the element not selected by
   __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_add_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) __W,
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar double-precision add; the unselected element is
   zeroed via _mm_setzero_pd().  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_addsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) _mm_setzero_pd (),
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar double-precision add: __R supplies
   the rounding control. */
#define _mm_add_round_sd(__A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })

#define _mm_mask_add_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) __W, (__mmask8) __U, __R); })

#define _mm_maskz_add_round_sd(__U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_addsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) __U, __R); })
static __inline__ __m512d __DEFAULT_FN_ATTRS
/* Merge-masked scalar single-precision subtract; the element not selected
   by __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_sub_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __W,
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar single-precision subtract; the unselected element is
   zeroed via _mm_setzero_ps().  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_sub_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_subss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) _mm_setzero_ps (),
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar subtract: __R supplies the
   rounding control. */
#define _mm_sub_round_ss(__A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })

#define _mm_mask_sub_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) __W, (__mmask8) __U, __R); })

#define _mm_maskz_sub_round_ss(__U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_subss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) __U, __R); })
/* Merge-masked scalar double-precision subtract; the element not selected
   by __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_sub_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) __W,
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar double-precision subtract; the unselected element is
   zeroed via _mm_setzero_pd().  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_subsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) _mm_setzero_pd (),
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar double-precision subtract: __R
   supplies the rounding control. */
#define _mm_sub_round_sd(__A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })

#define _mm_mask_sub_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) __W, (__mmask8) __U, __R); })

#define _mm_maskz_sub_round_sd(__U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_subsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) __U, __R); })
static __inline__ __m512d __DEFAULT_FN_ATTRS
/* Merge-masked scalar single-precision multiply; the element not selected
   by __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_mul_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __W,
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar single-precision multiply; the unselected element is
   zeroed via _mm_setzero_ps().  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_mul_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_mulss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) _mm_setzero_ps (),
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar multiply: __R supplies the
   rounding control. */
#define _mm_mul_round_ss(__A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })

#define _mm_mask_mul_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) __W, (__mmask8) __U, __R); })

#define _mm_maskz_mul_round_ss(__U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_mulss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) __U, __R); })
/* Merge-masked scalar double-precision multiply; the element not selected
   by __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_mul_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) __W,
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar double-precision multiply; the unselected element is
   zeroed via _mm_setzero_pd().  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) _mm_setzero_pd (),
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar double-precision multiply: __R
   supplies the rounding control. */
#define _mm_mul_round_sd(__A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })

#define _mm_mask_mul_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) __W, (__mmask8) __U, __R); })

#define _mm_maskz_mul_round_sd(__U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_mulsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) __U, __R); })
static __inline__ __m512d __DEFAULT_FN_ATTRS
/* Merge-masked scalar single-precision divide; the element not selected
   by __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_div_ss(__m128 __W, __mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) __W,
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar single-precision divide; the unselected element is
   zeroed via _mm_setzero_ps().  Uses the current MXCSR rounding mode. */
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_maskz_div_ss(__mmask8 __U,__m128 __A, __m128 __B) {
  return (__m128) __builtin_ia32_divss_round ((__v4sf) __A,
                                              (__v4sf) __B,
                                              (__v4sf) _mm_setzero_ps (),
                                              (__mmask8) __U,
                                              _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar divide: __R supplies the rounding
   control. */
#define _mm_div_round_ss(__A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) -1, __R); })

#define _mm_mask_div_round_ss(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) __W, (__mmask8) __U, __R); })

#define _mm_maskz_div_round_ss(__U, __A, __B, __R) __extension__ ({ \
  (__m128) __builtin_ia32_divss_round ((__v4sf) __A, (__v4sf) __B, \
                                       (__v4sf) _mm_setzero_ps(), (__mmask8) __U, __R); })
/* Merge-masked scalar double-precision divide; the element not selected
   by __U is taken from __W.  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_div_sd(__m128d __W, __mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) __W,
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Zero-masked scalar double-precision divide; the unselected element is
   zeroed via _mm_setzero_pd().  Uses the current MXCSR rounding mode. */
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) {
  return (__m128d) __builtin_ia32_divsd_round ((__v2df) __A,
                                               (__v2df) __B,
                                               (__v2df) _mm_setzero_pd (),
                                               (__mmask8) __U,
                                               _MM_FROUND_CUR_DIRECTION);
}
/* Explicit-rounding variants of scalar double-precision divide: __R
   supplies the rounding control. */
#define _mm_div_round_sd(__A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) -1, __R); })

#define _mm_mask_div_round_sd(__W, __U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) __W, (__mmask8) __U, __R); })

#define _mm_maskz_div_round_sd(__U, __A, __B, __R) __extension__ ({ \
  (__m128d) __builtin_ia32_divsd_round ((__v2df) __A, (__v2df) __B, \
                                        (__v2df) _mm_setzero_pd(), (__mmask8) __U, __R); })
static __inline__ __m512d __DEFAULT_FN_ATTRS