From: Simon Pilgrim
Date: Sat, 14 Jan 2017 17:13:52 +0000 (+0000)
Subject: [X86][XOP] Added support for VPMACSWW/VPMACSDD 'lossy' IFMA patterns
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=08c5cbd394891cebb4d6292d213824bfbf035471;p=llvm

[X86][XOP] Added support for VPMACSWW/VPMACSDD 'lossy' IFMA patterns

VPMACSWW/VPMACSDD act as add( mul( x, y ), z ) - ignoring any upper bits
from both the multiply and add stages

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292019 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td
index 2b296e1e5b8..efd627d6b86 100644
--- a/lib/Target/X86/X86InstrXOP.td
+++ b/lib/Target/X86/X86InstrXOP.td
@@ -183,6 +183,17 @@ let ExeDomain = SSEPackedInt in {
   defm VPMACSDD : xop4opm2<0x9E, "vpmacsdd", int_x86_xop_vpmacsdd>;
 }
 
+// IFMA patterns - for cases where we can safely ignore the overflow bits from
+// the multiply.
+let Predicates = [HasXOP] in {
+  def : Pat<(v8i16 (add (mul (v8i16 VR128:$src1), (v8i16 VR128:$src2)),
+                        (v8i16 VR128:$src3))),
+            (VPMACSWWrr VR128:$src1, VR128:$src2, VR128:$src3)>;
+  def : Pat<(v4i32 (add (mul (v4i32 VR128:$src1), (v4i32 VR128:$src2)),
+                        (v4i32 VR128:$src3))),
+            (VPMACSDDrr VR128:$src1, VR128:$src2, VR128:$src3)>;
+}
+
 // Instruction where second source can be memory, third must be imm8
 multiclass xopvpcom<bits<8> opc, string Suffix, SDNode OpNode, ValueType vt128> {
   let isCommutable = 1 in
diff --git a/test/CodeGen/X86/xop-ifma.ll b/test/CodeGen/X86/xop-ifma.ll
index 8e1572fdf93..088d6795814 100644
--- a/test/CodeGen/X86/xop-ifma.ll
+++ b/test/CodeGen/X86/xop-ifma.ll
@@ -5,8 +5,7 @@
 define <8 x i16> @test_mul_v8i16_add_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2) {
 ; XOP-LABEL: test_mul_v8i16_add_v8i16:
 ; XOP:       # BB#0:
-; XOP-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
-; XOP-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
   %1 = mul <8 x i16> %a0, %a1
   %2 = add <8 x i16> %1, %a2
@@ -16,14 +15,12 @@ define <8 x i16> @test_mul_v8i16_add_v8i16(<8 x i16> %a0, <8 x i16> %a1, <8 x i1
 define <16 x i16> @test_mul_v16i16_add_v16i16(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2) {
 ; XOP-AVX1-LABEL: test_mul_v16i16_add_v16i16:
 ; XOP-AVX1:       # BB#0:
-; XOP-AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm3
-; XOP-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; XOP-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; XOP-AVX1-NEXT:    vpmullw %xmm1, %xmm0, %xmm0
-; XOP-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
-; XOP-AVX1-NEXT:    vpaddw %xmm0, %xmm1, %xmm0
-; XOP-AVX1-NEXT:    vpaddw %xmm3, %xmm2, %xmm1
-; XOP-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOP-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; XOP-AVX1-NEXT:    vpmacsww %xmm5, %xmm3, %xmm4, %xmm3
+; XOP-AVX1-NEXT:    vpmacsww %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; XOP-AVX1-NEXT:    retq
 ;
 ; XOP-AVX2-LABEL: test_mul_v16i16_add_v16i16:
@@ -39,8 +36,7 @@ define <16 x i16> @test_mul_v16i16_add_v16i16(<16 x i16> %a0, <16 x i16> %a1, <1
 define <4 x i32> @test_mul_v4i32_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
 ; XOP-LABEL: test_mul_v4i32_add_v4i32:
 ; XOP:       # BB#0:
-; XOP-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; XOP-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
+; XOP-NEXT:    vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
 ; XOP-NEXT:    retq
   %1 = mul <4 x i32> %a0, %a1
   %2 = add <4 x i32> %1, %a2
@@ -50,14 +46,12 @@ define <4 x i32> @test_mul_v4i32_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i3
 define <8 x i32> @test_mul_v8i32_add_v8i32(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
 ; XOP-AVX1-LABEL: test_mul_v8i32_add_v8i32:
 ; XOP-AVX1:       # BB#0:
-; XOP-AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm3
-; XOP-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm1
-; XOP-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; XOP-AVX1-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
-; XOP-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
-; XOP-AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
-; XOP-AVX1-NEXT:    vpaddd %xmm3, %xmm2, %xmm1
-; XOP-AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOP-AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; XOP-AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm4
+; XOP-AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; XOP-AVX1-NEXT:    vpmacsdd %xmm5, %xmm3, %xmm4, %xmm3
+; XOP-AVX1-NEXT:    vpmacsdd %xmm2, %xmm1, %xmm0, %xmm0
+; XOP-AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm0
 ; XOP-AVX1-NEXT:    retq
 ;
 ; XOP-AVX2-LABEL: test_mul_v8i32_add_v8i32: