From ad27fdae895df1b9ad11a93102de6622f63e1220 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 14 Nov 2016 15:54:24 +0000
Subject: [PATCH] [CostModel][X86] Added mul costs for vXi8 vectors

More realistic v16i8/v32i8/v64i8 MUL costs - we have to extend to vXi16, use PMULLW and then truncate the result

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@286838 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86TargetTransformInfo.cpp | 26 +++++++++++++----
 test/Analysis/CostModel/X86/arith.ll      | 34 ++++++++++++-----------
 2 files changed, 39 insertions(+), 21 deletions(-)

diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index 5b3091eed3c..a2e4b3e61cc 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -218,15 +218,19 @@ int X86TTIImpl::getArithmeticInstrCost(
   }
 
   static const CostTblEntry AVX512BWCostTable[] = {
+    { ISD::MUL,  MVT::v64i8,     11 }, // extend/pmullw/trunc sequence.
+    { ISD::MUL,  MVT::v32i8,      4 }, // extend/pmullw/trunc sequence.
+    { ISD::MUL,  MVT::v16i8,      4 }, // extend/pmullw/trunc sequence.
+
     // Vectorizing division is a bad idea. See the SSE2 table for more comments.
     { ISD::SDIV, MVT::v64i8,  64*20 },
     { ISD::SDIV, MVT::v32i16, 32*20 },
     { ISD::SDIV, MVT::v16i32, 16*20 },
-    { ISD::SDIV, MVT::v8i64,  8*20 },
+    { ISD::SDIV, MVT::v8i64,   8*20 },
     { ISD::UDIV, MVT::v64i8,  64*20 },
     { ISD::UDIV, MVT::v32i16, 32*20 },
     { ISD::UDIV, MVT::v16i32, 16*20 },
-    { ISD::UDIV, MVT::v8i64,  8*20 },
+    { ISD::UDIV, MVT::v8i64,   8*20 },
   };
 
   // Look for AVX512BW lowering tricks for custom cases.
@@ -240,9 +244,12 @@ int X86TTIImpl::getArithmeticInstrCost(
     { ISD::SHL,  MVT::v16i32,    1 },
     { ISD::SRL,  MVT::v16i32,    1 },
     { ISD::SRA,  MVT::v16i32,    1 },
-    { ISD::SHL,  MVT::v8i64,    1 },
-    { ISD::SRL,  MVT::v8i64,    1 },
-    { ISD::SRA,  MVT::v8i64,    1 },
+    { ISD::SHL,  MVT::v8i64,     1 },
+    { ISD::SRL,  MVT::v8i64,     1 },
+    { ISD::SRA,  MVT::v8i64,     1 },
+
+    { ISD::MUL,  MVT::v32i8,    13 }, // extend/pmullw/trunc sequence.
+    { ISD::MUL,  MVT::v16i8,     5 }, // extend/pmullw/trunc sequence.
   };
 
   if (ST->hasAVX512()) {
@@ -324,6 +331,10 @@ int X86TTIImpl::getArithmeticInstrCost(
     { ISD::SRA,  MVT::v16i16,   10 }, // extend/vpsravd/pack sequence.
     { ISD::SRA,  MVT::v2i64,     4 }, // srl/xor/sub sequence.
     { ISD::SRA,  MVT::v4i64,     4 }, // srl/xor/sub sequence.
+
+    { ISD::MUL,  MVT::v32i8,    17 }, // extend/pmullw/trunc sequence.
+    { ISD::MUL,  MVT::v16i8,     7 }, // extend/pmullw/trunc sequence.
+
     { ISD::FDIV, MVT::f32,       7 }, // Haswell from http://www.agner.org/
     { ISD::FDIV, MVT::v4f32,     7 }, // Haswell from http://www.agner.org/
     { ISD::FDIV, MVT::v8f32,    14 }, // Haswell from http://www.agner.org/
@@ -340,12 +351,15 @@ int X86TTIImpl::getArithmeticInstrCost(
   }
 
   static const CostTblEntry AVXCustomCostTable[] = {
+    { ISD::MUL,  MVT::v32i8,    26 }, // extend/pmullw/trunc sequence.
+
     { ISD::FDIV, MVT::f32,      14 }, // SNB from http://www.agner.org/
     { ISD::FDIV, MVT::v4f32,    14 }, // SNB from http://www.agner.org/
     { ISD::FDIV, MVT::v8f32,    28 }, // SNB from http://www.agner.org/
     { ISD::FDIV, MVT::f64,      22 }, // SNB from http://www.agner.org/
     { ISD::FDIV, MVT::v2f64,    22 }, // SNB from http://www.agner.org/
     { ISD::FDIV, MVT::v4f64,    44 }, // SNB from http://www.agner.org/
+
     // Vectorizing division is a bad idea. See the SSE2 table for more comments.
     { ISD::SDIV, MVT::v32i8,  32*20 },
     { ISD::SDIV, MVT::v16i16, 16*20 },
@@ -494,6 +508,8 @@ int X86TTIImpl::getArithmeticInstrCost(
     { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
     { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.
 
+    { ISD::MUL,  MVT::v16i8,    12 }, // extend/pmullw/trunc sequence.
+
     { ISD::FDIV, MVT::f32,      23 }, // Pentium IV from http://www.agner.org/
     { ISD::FDIV, MVT::v4f32,    39 }, // Pentium IV from http://www.agner.org/
     { ISD::FDIV, MVT::f64,      38 }, // Pentium IV from http://www.agner.org/
diff --git a/test/Analysis/CostModel/X86/arith.ll b/test/Analysis/CostModel/X86/arith.ll
index aa204db3042..55766e25cc4 100644
--- a/test/Analysis/CostModel/X86/arith.ll
+++ b/test/Analysis/CostModel/X86/arith.ll
@@ -490,24 +490,26 @@ define i32 @mul(i32 %arg) {
   ; AVX512BW: cost of 1 {{.*}} %I = mul
   %I = mul <32 x i16> undef, undef
 
-  ; SSSE3: cost of 2 {{.*}} %J = mul
-  ; SSE42: cost of 2 {{.*}} %J = mul
-  ; AVX: cost of 2 {{.*}} %J = mul
-  ; AVX2: cost of 2 {{.*}} %J = mul
-  ; AVX512: cost of 2 {{.*}} %J = mul
+  ; SSSE3: cost of 12 {{.*}} %J = mul
+  ; SSE42: cost of 12 {{.*}} %J = mul
+  ; AVX: cost of 12 {{.*}} %J = mul
+  ; AVX2: cost of 7 {{.*}} %J = mul
+  ; AVX512F: cost of 5 {{.*}} %J = mul
+  ; AVX512BW: cost of 4 {{.*}} %J = mul
   %J = mul <16 x i8> undef, undef
-  ; SSSE3: cost of 4 {{.*}} %K = mul
-  ; SSE42: cost of 4 {{.*}} %K = mul
-  ; AVX: cost of 2 {{.*}} %K = mul
-  ; AVX2: cost of 2 {{.*}} %K = mul
-  ; AVX512: cost of 2 {{.*}} %K = mul
+  ; SSSE3: cost of 24 {{.*}} %K = mul
+  ; SSE42: cost of 24 {{.*}} %K = mul
+  ; AVX: cost of 26 {{.*}} %K = mul
+  ; AVX2: cost of 17 {{.*}} %K = mul
+  ; AVX512F: cost of 13 {{.*}} %K = mul
+  ; AVX512BW: cost of 4 {{.*}} %K = mul
   %K = mul <32 x i8> undef, undef
-  ; SSSE3: cost of 8 {{.*}} %L = mul
-  ; SSE42: cost of 8 {{.*}} %L = mul
-  ; AVX: cost of 4 {{.*}} %L = mul
-  ; AVX2: cost of 4 {{.*}} %L = mul
-  ; AVX512F: cost of 4 {{.*}} %L = mul
-  ; AVX512BW: cost of 2 {{.*}} %L = mul
+  ; SSSE3: cost of 48 {{.*}} %L = mul
+  ; SSE42: cost of 48 {{.*}} %L = mul
+  ; AVX: cost of 52 {{.*}} %L = mul
+  ; AVX2: cost of 34 {{.*}} %L = mul
+  ; AVX512F: cost of 26 {{.*}} %L = mul
+  ; AVX512BW: cost of 11 {{.*}} %L = mul
   %L = mul <64 x i8> undef, undef
 
   ret i32 undef
-- 
2.50.1
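
Note (reviewer sketch, not part of the patch): the "extend/pmullw/trunc sequence" the new cost-table comments refer to is, roughly, the shape below for a <16 x i8> multiply: zero-extend each half of the operands to <8 x i16>, multiply with PMULLW, then pack the low bytes of the products back into a <16 x i8>. The SSE2 intrinsic code and the helper name mul_v16i8 are my own illustrative assumptions of that shape; the backend's legalizer chooses the actual instructions, and this is not the code the cost model measures.

// A minimal sketch, assuming plain SSE2, of the extend/pmullw/trunc
// lowering for a <16 x i8> multiply.
#include <emmintrin.h>

static __m128i mul_v16i8(__m128i a, __m128i b) {
  // Zero-extend each half of the v16i8 operands to v8i16 by interleaving
  // with zero bytes (PUNPCKLBW / PUNPCKHBW).
  const __m128i zero = _mm_setzero_si128();
  __m128i a_lo = _mm_unpacklo_epi8(a, zero);
  __m128i a_hi = _mm_unpackhi_epi8(a, zero);
  __m128i b_lo = _mm_unpacklo_epi8(b, zero);
  __m128i b_hi = _mm_unpackhi_epi8(b, zero);

  // 16-bit multiplies (PMULLW); the low 8 bits of each 16-bit product are
  // the correct i8 result regardless of signedness.
  __m128i lo = _mm_mullo_epi16(a_lo, b_lo);
  __m128i hi = _mm_mullo_epi16(a_hi, b_hi);

  // Truncate back to v16i8: mask off the high bytes and pack the two
  // halves together (PAND + PACKUSWB).
  const __m128i mask = _mm_set1_epi16(0x00FF);
  lo = _mm_and_si128(lo, mask);
  hi = _mm_and_si128(hi, mask);
  return _mm_packus_epi16(lo, hi);
}

Counting the unpacks, multiplies, masks and pack gives on the order of a dozen instructions, which lines up with the new SSE2 cost of 12 for v16i8. The AVX2 and AVX512BW costs are lower because wider extend/truncate instructions (e.g. VPMOVZXBW, and VPMOVWB on AVX512BW) handle more lanes per instruction.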