TARGET_BUILTIN(__builtin_ia32_pternlogq128_maskz, "V2LLiV2LLiV2LLiV2LLiIiUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogq256_mask, "V4LLiV4LLiV4LLiV4LLiIiUc", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_pternlogq256_maskz, "V4LLiV4LLiV4LLiV4LLiIiUc", "nc", "avx512vl")
+TARGET_BUILTIN(__builtin_ia32_shuf_f32x4, "V16fV16fV16fIi", "nc", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_shuf_f64x2, "V8dV8dV8dIi", "nc", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_shuf_i32x4, "V16iV16iV16iIi", "nc", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_shuf_i64x2, "V8LLiV8LLiV8LLiIi", "nc", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_shuf_f32x4_256, "V8fV8fV8fIi", "nc", "avx512vl")
+TARGET_BUILTIN(__builtin_ia32_shuf_f64x2_256, "V4dV4dV4dIi", "nc", "avx512vl")
+TARGET_BUILTIN(__builtin_ia32_shuf_i32x4_256, "V8iV8iV8iIi", "nc", "avx512vl")
+TARGET_BUILTIN(__builtin_ia32_shuf_i64x2_256, "V4LLiV4LLiV4LLiIi", "nc", "avx512vl")
TARGET_BUILTIN(__builtin_ia32_sqrtsd_round_mask, "V2dV2dV2dV2dUcIi", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_sqrtss_round_mask, "V4fV4fV4fV4fUcIi", "nc", "avx512f")
TARGET_BUILTIN(__builtin_ia32_rsqrt14pd128_mask, "V2dV2dV2dUc", "nc", "avx512vl")
makeArrayRef(Indices, NumElts),
"valign");
}
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2: {
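+ // Decode the immediate into a shuffle mask over both sources: each
+ // destination 128-bit lane is selected by a log2(NumLanes)-bit field of
+ // the immediate, with the low half of the result drawn from Ops[0] and
+ // the high half from Ops[1].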
+ unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+ llvm::Type *Ty = Ops[0]->getType();
+ unsigned NumElts = Ty->getVectorNumElements();
+ unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
+ unsigned NumLaneElts = NumElts / NumLanes;
+
+ uint32_t Indices[16];
+ for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
+ unsigned Index = (Imm % NumLanes) * NumLaneElts;
+ Imm /= NumLanes; // Discard the bits we just used.
+ if (l >= (NumElts / 2))
+ Index += NumElts; // Switch to other source.
+ for (unsigned i = 0; i != NumLaneElts; ++i) {
+ Indices[l + i] = Index + i;
+ }
+ }
+
+ return Builder.CreateShuffleVector(Ops[0], Ops[1],
+ makeArrayRef(Indices, NumElts),
+ "shuf");
+ }
case X86::BI__builtin_ia32_vperm2f128_pd256:
case X86::BI__builtin_ia32_vperm2f128_ps256:
}
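+/* The 512-bit lane shuffles build the result's low half from A and its high
+   half from B: imm bits [1:0] and [3:2] pick the 128-bit lanes of A, and
+   bits [5:4] and [7:6] pick the lanes of B. */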
#define _mm512_shuffle_f32x4(A, B, imm) \
- (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
- (__v16sf)(__m512)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 4), \
- 1 + ((((imm) >> 0) & 0x3) * 4), \
- 2 + ((((imm) >> 0) & 0x3) * 4), \
- 3 + ((((imm) >> 0) & 0x3) * 4), \
- 0 + ((((imm) >> 2) & 0x3) * 4), \
- 1 + ((((imm) >> 2) & 0x3) * 4), \
- 2 + ((((imm) >> 2) & 0x3) * 4), \
- 3 + ((((imm) >> 2) & 0x3) * 4), \
- 16 + ((((imm) >> 4) & 0x3) * 4), \
- 17 + ((((imm) >> 4) & 0x3) * 4), \
- 18 + ((((imm) >> 4) & 0x3) * 4), \
- 19 + ((((imm) >> 4) & 0x3) * 4), \
- 16 + ((((imm) >> 6) & 0x3) * 4), \
- 17 + ((((imm) >> 6) & 0x3) * 4), \
- 18 + ((((imm) >> 6) & 0x3) * 4), \
- 19 + ((((imm) >> 6) & 0x3) * 4))
+ (__m512)__builtin_ia32_shuf_f32x4((__v16sf)(__m512)(A), \
+ (__v16sf)(__m512)(B), (int)(imm))
#define _mm512_mask_shuffle_f32x4(W, U, A, B, imm) \
 (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
                                     (__v16sf)_mm512_shuffle_f32x4((A), (B), (imm)), \
                                     (__v16sf)(__m512)(W))
#define _mm512_shuffle_f64x2(A, B, imm) \
- (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
- (__v8df)(__m512d)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 2), \
- 1 + ((((imm) >> 0) & 0x3) * 2), \
- 0 + ((((imm) >> 2) & 0x3) * 2), \
- 1 + ((((imm) >> 2) & 0x3) * 2), \
- 8 + ((((imm) >> 4) & 0x3) * 2), \
- 9 + ((((imm) >> 4) & 0x3) * 2), \
- 8 + ((((imm) >> 6) & 0x3) * 2), \
- 9 + ((((imm) >> 6) & 0x3) * 2))
+ (__m512d)__builtin_ia32_shuf_f64x2((__v8df)(__m512d)(A), \
+ (__v8df)(__m512d)(B), (int)(imm))
#define _mm512_mask_shuffle_f64x2(W, U, A, B, imm) \
 (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
                                      (__v8df)_mm512_shuffle_f64x2((A), (B), (imm)), \
                                      (__v8df)(__m512d)(W))
#define _mm512_shuffle_i32x4(A, B, imm) \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 2), \
- 1 + ((((imm) >> 0) & 0x3) * 2), \
- 0 + ((((imm) >> 2) & 0x3) * 2), \
- 1 + ((((imm) >> 2) & 0x3) * 2), \
- 8 + ((((imm) >> 4) & 0x3) * 2), \
- 9 + ((((imm) >> 4) & 0x3) * 2), \
- 8 + ((((imm) >> 6) & 0x3) * 2), \
- 9 + ((((imm) >> 6) & 0x3) * 2))
+ (__m512i)__builtin_ia32_shuf_i32x4((__v16si)(__m512i)(A), \
+ (__v16si)(__m512i)(B), (int)(imm))
#define _mm512_mask_shuffle_i32x4(W, U, A, B, imm) \
 (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
                                     (__v16si)_mm512_shuffle_i32x4((A), (B), (imm)), \
                                     (__v16si)(__m512i)(W))
#define _mm512_shuffle_i64x2(A, B, imm) \
- (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- 0 + ((((imm) >> 0) & 0x3) * 2), \
- 1 + ((((imm) >> 0) & 0x3) * 2), \
- 0 + ((((imm) >> 2) & 0x3) * 2), \
- 1 + ((((imm) >> 2) & 0x3) * 2), \
- 8 + ((((imm) >> 4) & 0x3) * 2), \
- 9 + ((((imm) >> 4) & 0x3) * 2), \
- 8 + ((((imm) >> 6) & 0x3) * 2), \
- 9 + ((((imm) >> 6) & 0x3) * 2))
+ (__m512i)__builtin_ia32_shuf_i64x2((__v8di)(__m512i)(A), \
+ (__v8di)(__m512i)(B), (int)(imm))
#define _mm512_mask_shuffle_i64x2(W, U, A, B, imm) \
 (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
                                     (__v8di)_mm512_shuffle_i64x2((A), (B), (imm)), \
                                     (__v8di)(__m512i)(W))
#define _mm256_shuffle_f32x4(A, B, imm) \
- (__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
- (__v8sf)(__m256)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 4), \
- 1 + ((((imm) >> 0) & 0x1) * 4), \
- 2 + ((((imm) >> 0) & 0x1) * 4), \
- 3 + ((((imm) >> 0) & 0x1) * 4), \
- 8 + ((((imm) >> 1) & 0x1) * 4), \
- 9 + ((((imm) >> 1) & 0x1) * 4), \
- 10 + ((((imm) >> 1) & 0x1) * 4), \
- 11 + ((((imm) >> 1) & 0x1) * 4))
+ (__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
+ (__v8sf)(__m256)(B), (int)(imm))
#define _mm256_mask_shuffle_f32x4(W, U, A, B, imm) \
 (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
                                     (__v8sf)_mm256_shuffle_f32x4((A), (B), (imm)), \
                                     (__v8sf)(__m256)(W))
#define _mm256_shuffle_f64x2(A, B, imm) \
- (__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
- (__v4df)(__m256d)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 2), \
- 1 + ((((imm) >> 0) & 0x1) * 2), \
- 4 + ((((imm) >> 1) & 0x1) * 2), \
- 5 + ((((imm) >> 1) & 0x1) * 2))
+ (__m256d)__builtin_ia32_shuf_f64x2_256((__v4df)(__m256d)(A), \
+ (__v4df)(__m256d)(B), (int)(imm))
#define _mm256_mask_shuffle_f64x2(W, U, A, B, imm) \
 (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
                                      (__v4df)_mm256_shuffle_f64x2((A), (B), (imm)), \
                                      (__v4df)(__m256d)(W))
#define _mm256_shuffle_i32x4(A, B, imm) \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 2), \
- 1 + ((((imm) >> 0) & 0x1) * 2), \
- 4 + ((((imm) >> 1) & 0x1) * 2), \
- 5 + ((((imm) >> 1) & 0x1) * 2))
+ (__m256i)__builtin_ia32_shuf_i32x4_256((__v8si)(__m256i)(A), \
+ (__v8si)(__m256i)(B), (int)(imm))
#define _mm256_mask_shuffle_i32x4(W, U, A, B, imm) \
 (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
                                     (__v8si)_mm256_shuffle_i32x4((A), (B), (imm)), \
                                     (__v8si)(__m256i)(W))
#define _mm256_shuffle_i64x2(A, B, imm) \
- (__m256i)__builtin_shufflevector((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- 0 + ((((imm) >> 0) & 0x1) * 2), \
- 1 + ((((imm) >> 0) & 0x1) * 2), \
- 4 + ((((imm) >> 1) & 0x1) * 2), \
- 5 + ((((imm) >> 1) & 0x1) * 2))
+ (__m256i)__builtin_ia32_shuf_i64x2_256((__v4di)(__m256i)(A), \
+ (__v4di)(__m256i)(B), (int)(imm))
#define _mm256_mask_shuffle_i64x2(W, U, A, B, imm) \
 (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
                                     (__v4di)_mm256_shuffle_i64x2((A), (B), (imm)), \
                                     (__v4di)(__m256i)(W))
case X86::BI__builtin_ia32_vec_set_v4hi:
case X86::BI__builtin_ia32_vec_set_v4si:
case X86::BI__builtin_ia32_vec_set_v4di:
+ case X86::BI__builtin_ia32_shuf_f32x4_256:
+ case X86::BI__builtin_ia32_shuf_f64x2_256:
+ case X86::BI__builtin_ia32_shuf_i32x4_256:
+ case X86::BI__builtin_ia32_shuf_i64x2_256:
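+ // The 256-bit shuffles use only two immediate bits (one per destination
+ // lane), so the valid range is [0, 3].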
i = 2; l = 0; u = 3;
break;
case X86::BI__builtin_ia32_vpermil2pd:
case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_vcomisd:
case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_shuf_f32x4:
+ case X86::BI__builtin_ia32_shuf_f64x2:
+ case X86::BI__builtin_ia32_shuf_i32x4:
+ case X86::BI__builtin_ia32_shuf_i64x2:
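+ // The 512-bit shuffles encode four 2-bit lane selectors, so all eight
+ // immediate bits are meaningful.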
case X86::BI__builtin_ia32_dbpsadbw128_mask:
case X86::BI__builtin_ia32_dbpsadbw256_mask:
case X86::BI__builtin_ia32_dbpsadbw512_mask:
__m512i test_mm512_shuffle_i32x4(__m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_shuffle_i32x4
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
return _mm512_shuffle_i32x4(__A, __B, 4);
}
__m512i test_mm512_mask_shuffle_i32x4(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_mask_shuffle_i32x4
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_mask_shuffle_i32x4(__W, __U, __A, __B, 4);
}
__m512i test_mm512_maskz_shuffle_i32x4(__mmask16 __U, __m512i __A, __m512i __B) {
// CHECK-LABEL: @test_mm512_maskz_shuffle_i32x4
- // CHECK: shufflevector <8 x i64> %{{.*}}, <8 x i64> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 8, i32 9>
+ // CHECK: shufflevector <16 x i32> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 16, i32 17, i32 18, i32 19>
// CHECK: select <16 x i1> %{{.*}}, <16 x i32> %{{.*}}, <16 x i32> %{{.*}}
return _mm512_maskz_shuffle_i32x4(__U, __A, __B, 4);
}
__m256i test_mm256_shuffle_i32x4(__m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_shuffle_i32x4
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
return _mm256_shuffle_i32x4(__A, __B, 3);
}
__m256i test_mm256_mask_shuffle_i32x4(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_mask_shuffle_i32x4
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_mask_shuffle_i32x4(__W, __U, __A, __B, 3);
}
__m256i test_mm256_maskz_shuffle_i32x4(__mmask8 __U, __m256i __A, __m256i __B) {
// CHECK-LABEL: @test_mm256_maskz_shuffle_i32x4
- // CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+ // CHECK: shufflevector <8 x i32> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
// CHECK: select <8 x i1> %{{.*}}, <8 x i32> %{{.*}}, <8 x i32> %{{.*}}
return _mm256_maskz_shuffle_i32x4(__U, __A, __B, 3);
}