TARGET_BUILTIN(__builtin_ia32_stmxcsr, "Ui", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si, "iV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si64, "LLiV4f", "", "sse")
-TARGET_BUILTIN(__builtin_ia32_storeups, "vf*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_storehps, "vV2i*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_storelps, "vV2i*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_movmskps, "iV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_sqrtss, "V4fV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_maskmovdqu, "vV16cV16cc*", "", "sse2")
-TARGET_BUILTIN(__builtin_ia32_storeupd, "vd*V2d", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_movmskpd, "iV2d", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmovmskb128, "iV16c", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_movnti, "vi*i", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_lfence, "v", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_mfence, "v", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_pause, "v", "", "sse2")
-TARGET_BUILTIN(__builtin_ia32_storedqu, "vc*V16c", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_pmuludq128, "V2LLiV4iV4i", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_vzeroupper, "v", "", "avx")
TARGET_BUILTIN(__builtin_ia32_vbroadcastf128_pd256, "V4dV2dC*", "", "avx")
TARGET_BUILTIN(__builtin_ia32_vbroadcastf128_ps256, "V8fV4fC*", "", "avx")
-TARGET_BUILTIN(__builtin_ia32_storeupd256, "vd*V4d", "", "avx")
-TARGET_BUILTIN(__builtin_ia32_storeups256, "vf*V8f", "", "avx")
-TARGET_BUILTIN(__builtin_ia32_storedqu256, "vc*V32c", "", "avx")
TARGET_BUILTIN(__builtin_ia32_lddqu256, "V32ccC*", "", "avx")
TARGET_BUILTIN(__builtin_ia32_movntdq256, "vV4LLi*V4LLi", "", "avx")
TARGET_BUILTIN(__builtin_ia32_movntpd256, "vd*V4d", "", "avx")
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
- __builtin_ia32_storeupd256(__p, (__v4df)__a);
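+ /* The packed struct gives its member alignment 1, so this assignment
+    compiles to an unaligned 256-bit store; __may_alias__ exempts it
+    from strict-aliasing (TBAA) assumptions about what __p points to. */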
+ struct __storeu_pd {
+ __m256d __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_pd*)__p)->__v = __a;
}
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
- __builtin_ia32_storeups256(__p, (__v8sf)__a);
+ struct __storeu_ps {
+ __m256 __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ps*)__p)->__v = __a;
}
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
- __builtin_ia32_storedqu256((char *)__p, (__v32qi)__a);
+ struct __storeu_si256 {
+ __m256i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si256*)__p)->__v = __a;
}
/* Conditional load ops */
__m128 __v128;
__v128 = _mm256_castps256_ps128(__a);
- __builtin_ia32_storeups(__addr_lo, __v128);
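+ /* Route each 128-bit half through _mm_storeu_ps, which now carries
+    the unaligned-store semantics itself. */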
+ _mm_storeu_ps(__addr_lo, __v128);
__v128 = _mm256_extractf128_ps(__a, 1);
- __builtin_ia32_storeups(__addr_hi, __v128);
+ _mm_storeu_ps(__addr_hi, __v128);
}
static __inline void __DEFAULT_FN_ATTRS
__m128d __v128;
__v128 = _mm256_castpd256_pd128(__a);
- __builtin_ia32_storeupd(__addr_lo, __v128);
+ _mm_storeu_pd(__addr_lo, __v128);
__v128 = _mm256_extractf128_pd(__a, 1);
- __builtin_ia32_storeupd(__addr_hi, __v128);
+ _mm_storeu_pd(__addr_hi, __v128);
}
static __inline void __DEFAULT_FN_ATTRS
__m128i __v128;
__v128 = _mm256_castsi256_si128(__a);
- __builtin_ia32_storedqu((char *)__addr_lo, (__v16qi)__v128);
+ _mm_storeu_si128(__addr_lo, __v128);
__v128 = _mm256_extractf128_si256(__a, 1);
- __builtin_ia32_storedqu((char *)__addr_hi, (__v16qi)__v128);
+ _mm_storeu_si128(__addr_hi, __v128);
}
static __inline __m256 __DEFAULT_FN_ATTRS
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_pd(double *__dp, __m128d __a)
{
- __builtin_ia32_storeupd(__dp, (__v2df)__a);
+ struct __storeu_pd {
+ __m128d __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_pd*)__dp)->__v = __a;
}
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_si128(__m128i *__p, __m128i __b)
{
- __builtin_ia32_storedqu((char *)__p, (__v16qi)__b);
+ struct __storeu_si128 {
+ __m128i __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_si128*)__p)->__v = __b;
}
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_ps(float *__p, __m128 __a)
{
- __builtin_ia32_storeups(__p, (__v4sf)__a);
+ struct __storeu_ps {
+ __m128 __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __storeu_ps*)__p)->__v = __a;
}
static __inline__ void __DEFAULT_FN_ATTRS
void test_mm256_storeu_pd(double* A, __m256d B) {
// CHECK-LABEL: test_mm256_storeu_pd
- // CHECK: call void @llvm.x86.avx.storeu.pd.256(i8* %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: store <4 x double> %{{.*}}, <4 x double>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
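+ // With the builtin gone, codegen emits a plain IR 'store' whose
+ // align 1 comes from the packed struct member.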
_mm256_storeu_pd(A, B);
}
void test_mm256_storeu_ps(float* A, __m256 B) {
// CHECK-LABEL: test_mm256_storeu_ps
- // CHECK: call void @llvm.x86.avx.storeu.ps.256(i8* %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: store <8 x float> %{{.*}}, <8 x float>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm256_storeu_ps(A, B);
}
void test_mm256_storeu_si256(__m256i* A, __m256i B) {
// CHECK-LABEL: test_mm256_storeu_si256
- // CHECK: call void @llvm.x86.avx.storeu.dq.256(i8* %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: store <4 x i64> %{{.*}}, <4 x i64>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm256_storeu_si256(A, B);
}
void test_mm256_storeu2_m128(float* A, float* B, __m256 C) {
// CHECK-LABEL: test_mm256_storeu2_m128
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128(A, B, C);
}
void test_mm256_storeu2_m128d(double* A, double* B, __m256d C) {
// CHECK-LABEL: test_mm256_storeu2_m128d
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 0, i32 1>
- // CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 2, i32 3>
- // CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128d(A, B, C);
}
void test_mm256_storeu2_m128i(__m128i* A, __m128i* B, __m256i C) {
// CHECK-LABEL: test_mm256_storeu2_m128i
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 0, i32 1>
- // CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 2, i32 3>
- // CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128i(A, B, C);
}
#endif
tmp_V2i = __builtin_ia32_cvttps2pi(tmp_V4f);
(void) __builtin_ia32_maskmovq(tmp_V8c, tmp_V8c, tmp_cp);
- (void) __builtin_ia32_storeups(tmp_fp, tmp_V4f);
(void) __builtin_ia32_storehps(tmp_V2ip, tmp_V4f);
(void) __builtin_ia32_storelps(tmp_V2ip, tmp_V4f);
tmp_i = __builtin_ia32_movmskps(tmp_V4f);
tmp_V4f = __builtin_ia32_sqrtps(tmp_V4f);
tmp_V4f = __builtin_ia32_sqrtss(tmp_V4f);
(void) __builtin_ia32_maskmovdqu(tmp_V16c, tmp_V16c, tmp_cp);
- (void) __builtin_ia32_storeupd(tmp_dp, tmp_V2d);
tmp_i = __builtin_ia32_movmskpd(tmp_V2d);
tmp_i = __builtin_ia32_pmovmskb128(tmp_V16c);
(void) __builtin_ia32_movnti(tmp_ip, tmp_i);
(void) __builtin_ia32_clflush(tmp_vCp);
(void) __builtin_ia32_lfence();
(void) __builtin_ia32_mfence();
- (void) __builtin_ia32_storedqu(tmp_cp, tmp_V16c);
tmp_V4s = __builtin_ia32_psllwi(tmp_V4s, tmp_i);
tmp_V2i = __builtin_ia32_pslldi(tmp_V2i, tmp_i);
tmp_V1LLi = __builtin_ia32_psllqi(tmp_V1LLi, tmp_i);
__builtin_ia32_vzeroupper();
tmp_V4d = __builtin_ia32_vbroadcastf128_pd256(tmp_V2dCp);
tmp_V8f = __builtin_ia32_vbroadcastf128_ps256(tmp_V4fCp);
- __builtin_ia32_storeupd256(tmp_dp, tmp_V4d);
- __builtin_ia32_storeups256(tmp_fp, tmp_V8f);
- __builtin_ia32_storedqu256(tmp_cp, tmp_V32c);
tmp_V32c = __builtin_ia32_lddqu256(tmp_cCp);
__builtin_ia32_movntdq256(tmp_V4LLip, tmp_V4LLi);
__builtin_ia32_movntpd256(tmp_dp, tmp_V4d);
void test_mm_store_ps1(float* x, __m128 y) {
// CHECK-LABEL: test_mm_store_ps1
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm_store_ps1(x, y);
}
void test_mm_store1_ps(float* x, __m128 y) {
// CHECK-LABEL: test_mm_store1_ps
// CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> zeroinitializer
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm_store1_ps(x, y);
}
void test_mm_storeu_ps(float* x, __m128 y) {
// CHECK-LABEL: test_mm_storeu_ps
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm_storeu_ps(x, y);
}
void test_mm_storeu_pd(double* A, __m128d B) {
// CHECK-LABEL: test_mm_storeu_pd
- // CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm_storeu_pd(A, B);
}
void test_mm_storeu_si128(__m128i* A, __m128i B) {
// CHECK-LABEL: test_mm_storeu_si128
- // CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm_storeu_si128(A, B);
}
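
For reference, the pattern the headers now rely on can be reproduced in isolation; the function and struct names below are illustrative sketches, not part of the patch:

#include <immintrin.h>

/* Illustrative only (names are not from the patch). __packed__ drops the
   member's alignment to 1, so the assignment lowers to an unaligned
   vector store (e.g. vmovups on x86); __may_alias__ keeps TBAA from
   assuming *p is really a vector object. Compile with -mavx. */
static inline void sketch_storeu_ps256(float *p, __m256 v) {
  struct sketch_storeu {
    __m256 v;
  } __attribute__((__packed__, __may_alias__));
  ((struct sketch_storeu *)p)->v = v;
}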