Intrinsic<[llvm_v16f32_ty],
[llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
[IntrNoMem]>;
-
- def int_x86_avx512_mask_movshdup_128 :
- GCCBuiltin<"__builtin_ia32_movshdup128_mask">,
- Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movshdup_256 :
- GCCBuiltin<"__builtin_ia32_movshdup256_mask">,
- Intrinsic<[llvm_v8f32_ty],
- [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movshdup_512 :
- GCCBuiltin<"__builtin_ia32_movshdup512_mask">,
- Intrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movsldup_128 :
- GCCBuiltin<"__builtin_ia32_movsldup128_mask">,
- Intrinsic<[llvm_v4f32_ty],
- [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movsldup_256 :
- GCCBuiltin<"__builtin_ia32_movsldup256_mask">,
- Intrinsic<[llvm_v8f32_ty],
- [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movsldup_512 :
- GCCBuiltin<"__builtin_ia32_movsldup512_mask">,
- Intrinsic<[llvm_v16f32_ty],
- [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movddup_128 :
- GCCBuiltin<"__builtin_ia32_movddup128_mask">,
- Intrinsic<[llvm_v2f64_ty],
- [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movddup_256 :
- GCCBuiltin<"__builtin_ia32_movddup256_mask">,
- Intrinsic<[llvm_v4f64_ty],
- [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
-
- def int_x86_avx512_mask_movddup_512 :
- GCCBuiltin<"__builtin_ia32_movddup512_mask">,
- Intrinsic<[llvm_v8f64_ty],
- [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
- [IntrNoMem]>;
}
// Vector blend
; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512f-builtins.c
+; Unmasked movddup: duplicate each even-indexed double lane (mask 0,0,2,2,4,4,6,6).
+; Uses a plain shufflevector rather than an x86 intrinsic; presumably this matches
+; the IR clang emits for _mm512_movddup_pd — confirm against the builtin test.
+define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
+; X32-LABEL: test_mm512_movddup_pd:
+; X32: # BB#0:
+; X32-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_movddup_pd:
+; X64: # BB#0:
+; X64-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X64-NEXT: retq
+ %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ ret <8 x double> %res
+}
+
+; Merge-masked movddup: %a1 (i8) is bitcast to <8 x i1>; lanes with a set mask bit
+; take the duplicated even elements of %a2, cleared lanes keep passthrough %a0.
+; NOTE(review): the expected-asm lines show no {%k1} write-mask decoration on the
+; vmovddup destination — verify these lines were produced by the check-update script.
+define <8 x double> @test_mm512_mask_movddup_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
+; X32-LABEL: test_mm512_mask_movddup_pd:
+; X32: # BB#0:
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: vmovddup {{.*#+}} zmm0 = zmm1[0,0,2,2,4,4,6,6]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_movddup_pd:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovddup {{.*#+}} zmm0 = zmm1[0,0,2,2,4,4,6,6]
+; X64-NEXT: retq
+ %arg1 = bitcast i8 %a1 to <8 x i1>
+ %res0 = shufflevector <8 x double> %a2, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %res1 = select <8 x i1> %arg1, <8 x double> %res0, <8 x double> %a0
+ ret <8 x double> %res1
+}
+
+; Zero-masked movddup: same shuffle as the merge-masked form, but cleared mask
+; lanes select zeroinitializer instead of a passthrough operand.
+; NOTE(review): expected asm shows no {%k1} {z} decoration on vmovddup — confirm
+; the check lines were generated by the check-update script for this codegen.
+define <8 x double> @test_mm512_maskz_movddup_pd(i8 %a0, <8 x double> %a1) {
+; X32-LABEL: test_mm512_maskz_movddup_pd:
+; X32: # BB#0:
+; X32-NEXT: movb {{[0-9]+}}(%esp), %al
+; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_movddup_pd:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X64-NEXT: retq
+ %arg0 = bitcast i8 %a0 to <8 x i1>
+ %res0 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+ %res1 = select <8 x i1> %arg0, <8 x double> %res0, <8 x double> zeroinitializer
+ ret <8 x double> %res1
+}
+
+; Unmasked movshdup: duplicate each odd-indexed float lane
+; (mask 1,1,3,3,...,15,15), expressed as a plain shufflevector.
+define <16 x float> @test_mm512_movehdup_ps(<16 x float> %a0) {
+; X32-LABEL: test_mm512_movehdup_ps:
+; X32: # BB#0:
+; X32-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_movehdup_ps:
+; X64: # BB#0:
+; X64-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT: retq
+ %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+ ret <16 x float> %res
+}
+
+; Merge-masked movshdup: %a1 (i16) is bitcast to <16 x i1>; set lanes take the
+; odd-element duplication of %a2, cleared lanes keep passthrough %a0.
+; NOTE(review): expected asm shows no {%k1} decoration on vmovshdup — confirm the
+; check lines match actual masked codegen.
+define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
+; X32-LABEL: test_mm512_mask_movehdup_ps:
+; X32: # BB#0:
+; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: vmovshdup {{.*#+}} zmm0 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_movehdup_ps:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovshdup {{.*#+}} zmm0 = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT: retq
+ %arg1 = bitcast i16 %a1 to <16 x i1>
+ %res0 = shufflevector <16 x float> %a2, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+ %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
+ ret <16 x float> %res1
+}
+
+; Zero-masked movshdup: cleared mask lanes select zeroinitializer rather than a
+; passthrough operand.
+; NOTE(review): expected asm shows no {%k1} {z} decoration on vmovshdup — verify
+; against script-generated check lines.
+define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
+; X32-LABEL: test_mm512_maskz_movehdup_ps:
+; X32: # BB#0:
+; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_movehdup_ps:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT: retq
+ %arg0 = bitcast i16 %a0 to <16 x i1>
+ %res0 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+ %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
+ ret <16 x float> %res1
+}
+
+; Unmasked movsldup: duplicate each even-indexed float lane
+; (mask 0,0,2,2,...,14,14), expressed as a plain shufflevector.
+define <16 x float> @test_mm512_moveldup_ps(<16 x float> %a0) {
+; X32-LABEL: test_mm512_moveldup_ps:
+; X32: # BB#0:
+; X32-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_moveldup_ps:
+; X64: # BB#0:
+; X64-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT: retq
+ %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
+ ret <16 x float> %res
+}
+
+; Merge-masked movsldup: %a1 (i16) is bitcast to <16 x i1>; set lanes take the
+; even-element duplication of %a2, cleared lanes keep passthrough %a0.
+; NOTE(review): expected asm shows no {%k1} decoration on vmovsldup — confirm the
+; check lines match actual masked codegen.
+define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
+; X32-LABEL: test_mm512_mask_moveldup_ps:
+; X32: # BB#0:
+; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: vmovsldup {{.*#+}} zmm0 = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_mask_moveldup_ps:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovsldup {{.*#+}} zmm0 = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT: retq
+ %arg1 = bitcast i16 %a1 to <16 x i1>
+ %res0 = shufflevector <16 x float> %a2, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
+ %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
+ ret <16 x float> %res1
+}
+
+; Zero-masked movsldup: cleared mask lanes select zeroinitializer rather than a
+; passthrough operand.
+; NOTE(review): expected asm shows no {%k1} {z} decoration on vmovsldup — verify
+; against script-generated check lines.
+define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
+; X32-LABEL: test_mm512_maskz_moveldup_ps:
+; X32: # BB#0:
+; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT: kmovw %eax, %k1
+; X32-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_mm512_maskz_moveldup_ps:
+; X64: # BB#0:
+; X64-NEXT: kmovw %edi, %k1
+; X64-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT: retq
+ %arg0 = bitcast i16 %a0 to <16 x i1>
+ %res0 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
+ %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
+ ret <16 x float> %res1
+}
+
define <8 x i64> @test_mm512_unpackhi_epi32(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpackhi_epi32:
; X32: # BB#0: