From: Simon Pilgrim
Date: Sat, 2 Jul 2016 17:16:41 +0000 (+0000)
Subject: [X86][AVX512] Converted the MOVDDUP/MOVSLDUP/MOVSHDUP masked intrinsics to generic IR
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=1dcb0785026125c8e17aa8dca6127807501f1221;p=llvm

[X86][AVX512] Converted the MOVDDUP/MOVSLDUP/MOVSHDUP masked intrinsics to generic IR

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@274443 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index b861588df5b..92241f318f5 100644
--- a/include/llvm/IR/IntrinsicsX86.td
+++ b/include/llvm/IR/IntrinsicsX86.td
@@ -1527,60 +1527,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
           Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
                      llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movshdup_128 :
-          GCCBuiltin<"__builtin_ia32_movshdup128_mask">,
-          Intrinsic<[llvm_v4f32_ty],
-          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movshdup_256 :
-          GCCBuiltin<"__builtin_ia32_movshdup256_mask">,
-          Intrinsic<[llvm_v8f32_ty],
-          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movshdup_512 :
-          GCCBuiltin<"__builtin_ia32_movshdup512_mask">,
-          Intrinsic<[llvm_v16f32_ty],
-          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movsldup_128 :
-          GCCBuiltin<"__builtin_ia32_movsldup128_mask">,
-          Intrinsic<[llvm_v4f32_ty],
-          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movsldup_256 :
-          GCCBuiltin<"__builtin_ia32_movsldup256_mask">,
-          Intrinsic<[llvm_v8f32_ty],
-          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movsldup_512 :
-          GCCBuiltin<"__builtin_ia32_movsldup512_mask">,
-          Intrinsic<[llvm_v16f32_ty],
-          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movddup_128 :
-          GCCBuiltin<"__builtin_ia32_movddup128_mask">,
-          Intrinsic<[llvm_v2f64_ty],
-          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movddup_256 :
-          GCCBuiltin<"__builtin_ia32_movddup256_mask">,
-          Intrinsic<[llvm_v4f64_ty],
-          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
-          [IntrNoMem]>;
-
-  def int_x86_avx512_mask_movddup_512 :
-          GCCBuiltin<"__builtin_ia32_movddup512_mask">,
-          Intrinsic<[llvm_v8f64_ty],
-          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
-          [IntrNoMem]>;
 }
 
 // Vector blend
diff --git a/lib/Target/X86/X86IntrinsicsInfo.h b/lib/Target/X86/X86IntrinsicsInfo.h
index 341775cf4eb..9a4af90ff15 100644
--- a/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/lib/Target/X86/X86IntrinsicsInfo.h
@@ -818,28 +818,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
                      X86ISD::FMIN, X86ISD::FMIN_RND),
   X86_INTRINSIC_DATA(avx512_mask_min_ss_round, INTR_TYPE_SCALAR_MASK_RM,
                      X86ISD::FMIN, X86ISD::FMIN_RND),
-  X86_INTRINSIC_DATA(avx512_mask_movddup_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVDDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movddup_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVDDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movddup_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVDDUP, 0),
   X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
                      X86ISD::MOVSD, 0),
   X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
                      X86ISD::MOVSS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movshdup_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVSHDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movshdup_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVSHDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movshdup_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVSHDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movsldup_128, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVSLDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movsldup_256, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVSLDUP, 0),
-  X86_INTRINSIC_DATA(avx512_mask_movsldup_512, INTR_TYPE_1OP_MASK,
-                     X86ISD::MOVSLDUP, 0),
   X86_INTRINSIC_DATA(avx512_mask_mul_pd_128, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
   X86_INTRINSIC_DATA(avx512_mask_mul_pd_256, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
   X86_INTRINSIC_DATA(avx512_mask_mul_pd_512, INTR_TYPE_2OP_MASK, ISD::FMUL,
diff --git a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 6cca286f03c..f81fe585c7d 100644
--- a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -4,6 +4,162 @@
 
 ; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512f-builtins.c
 
+define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
+; X32-LABEL: test_mm512_movddup_pd:
+; X32:       # BB#0:
+; X32-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_movddup_pd:
+; X64:       # BB#0:
+; X64-NEXT:    vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
+  %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mm512_mask_movddup_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
+; X32-LABEL: test_mm512_mask_movddup_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_movddup_pd:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
+  %arg1 = bitcast i8 %a1 to <8 x i1>
+  %res0 = shufflevector <8 x double> %a2, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+  %res1 = select <8 x i1> %arg1, <8 x double> %res0, <8 x double> %a0
+  ret <8 x double> %res1
+}
+
+define <8 x double> @test_mm512_maskz_movddup_pd(i8 %a0, <8 x double> %a1) {
+; X32-LABEL: test_mm512_maskz_movddup_pd:
+; X32:       # BB#0:
+; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_movddup_pd:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
+; X64-NEXT:    retq
+  %arg0 = bitcast i8 %a0 to <8 x i1>
+  %res0 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
+  %res1 = select <8 x i1> %arg0, <8 x double> %res0, <8 x double> zeroinitializer
+  ret <8 x double> %res1
+}
+
+define <16 x float> @test_mm512_movehdup_ps(<16 x float> %a0) {
+; X32-LABEL: test_mm512_movehdup_ps:
+; X32:       # BB#0:
+; X32-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_movehdup_ps:
+; X64:       # BB#0:
+; X64-NEXT:    vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT:    retq
+  %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
+; X32-LABEL: test_mm512_mask_movehdup_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_movehdup_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT:    retq
+  %arg1 = bitcast i16 %a1 to <16 x i1>
+  %res0 = shufflevector <16 x float> %a2, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+  %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
+  ret <16 x float> %res1
+}
+
+define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
+; X32-LABEL: test_mm512_maskz_movehdup_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_movehdup_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
+; X64-NEXT:    retq
+  %arg0 = bitcast i16 %a0 to <16 x i1>
+  %res0 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
+  %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
+  ret <16 x float> %res1
+}
+
+define <16 x float> @test_mm512_moveldup_ps(<16 x float> %a0) {
+; X32-LABEL: test_mm512_moveldup_ps:
+; X32:       # BB#0:
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_moveldup_ps:
+; X64:       # BB#0:
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
+  %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
+; X32-LABEL: test_mm512_mask_moveldup_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_mask_moveldup_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
+  %arg1 = bitcast i16 %a1 to <16 x i1>
+  %res0 = shufflevector <16 x float> %a2, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
+  %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
+  ret <16 x float> %res1
+}
+
+define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
+; X32-LABEL: test_mm512_maskz_moveldup_ps:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    kmovw %eax, %k1
+; X32-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_maskz_moveldup_ps:
+; X64:       # BB#0:
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
+; X64-NEXT:    retq
+  %arg0 = bitcast i16 %a0 to <16 x i1>
+  %res0 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
+  %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
+  ret <16 x float> %res1
+}
+
 define <8 x i64> @test_mm512_unpackhi_epi32(<8 x i64> %a0, <8 x i64> %a1) {
 ; X32-LABEL: test_mm512_unpackhi_epi32:
 ; X32:       # BB#0:
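
For reference, a minimal sketch (not part of the patch) of the mapping this commit relies on, shown for the 512-bit movsldup case: the removed masked intrinsic, whose TableGen signature appears above, is replaced by the generic shufflevector + select pattern exercised by the new tests. The function name below is illustrative only, and the (src, passthru, mask) operand order is assumed from the usual convention for these mask builtins.

; Removed intrinsic form (no longer available after this commit):
;   declare <16 x float> @llvm.x86.avx512.mask.movsldup.512(<16 x float>, <16 x float>, i16)
;
; Equivalent generic IR:
define <16 x float> @movsldup512_mask_sketch(<16 x float> %src, <16 x float> %passthru, i16 %mask) {
  ; duplicate the even-indexed (low) element of each pair, matching vmovsldup
  %dup = shufflevector <16 x float> %src, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
  ; apply the write-mask: keep the passthru lane wherever the corresponding mask bit is clear
  %m = bitcast i16 %mask to <16 x i1>
  %res = select <16 x i1> %m, <16 x float> %dup, <16 x float> %passthru
  ret <16 x float> %res
}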