From bbc7752ec413b130acf534b3d5f55ed857b5497a Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo@fhahn.com>
Date: Mon, 15 Jul 2019 08:48:47 +0000
Subject: [PATCH] [LoopVectorize] Pass unfiltered list of arguments to
 getIntrinsicInstrCost.

We do not compute the scalarization overhead in getVectorIntrinsicCost,
and TTI::getIntrinsicInstrCost requires the full argument list.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@366049 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Vectorize/LoopVectorize.cpp    |  7 ++---
 .../vector-intrinsic-call-cost.ll             | 30 +++++++++++++++++++
 2 files changed, 32 insertions(+), 5 deletions(-)
 create mode 100644 test/Transforms/LoopVectorize/vector-intrinsic-call-cost.ll

diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp
index 22cf9c7db94..46265e3f3e1 100644
--- a/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3149,11 +3149,8 @@ unsigned LoopVectorizationCostModel::getVectorIntrinsicCost(CallInst *CI,
   if (auto *FPMO = dyn_cast<FPMathOperator>(CI))
     FMF = FPMO->getFastMathFlags();
 
-  // Skip operands that do not require extraction/scalarization and do not incur
-  // any overhead.
-  return TTI.getIntrinsicInstrCost(
-      ID, CI->getType(), filterExtractingOperands(CI->arg_operands(), VF), FMF,
-      VF);
+  SmallVector<Value *, 4> Operands(CI->arg_operands());
+  return TTI.getIntrinsicInstrCost(ID, CI->getType(), Operands, FMF, VF);
 }
 
 static Type *smallestIntegerVectorType(Type *T1, Type *T2) {
diff --git a/test/Transforms/LoopVectorize/vector-intrinsic-call-cost.ll b/test/Transforms/LoopVectorize/vector-intrinsic-call-cost.ll
new file mode 100644
index 00000000000..fce4d56c2e6
--- /dev/null
+++ b/test/Transforms/LoopVectorize/vector-intrinsic-call-cost.ll
@@ -0,0 +1,30 @@
+; RUN: opt -S -loop-vectorize -force-vector-width=4 %s | FileCheck %s
+
+; CHECK-LABEL: @test_fshl
+; CHECK-LABEL: vector.body:
+; CHECK-NEXT:   %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+; CHECK-NEXT:   %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
+; CHECK-NEXT:   %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT:   %induction = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:   %0 = add i32 %index, 0
+; CHECK-NEXT:   %1 = call <4 x i16> @llvm.fshl.v4i16(<4 x i16> undef, <4 x i16> undef, <4 x i16> <i16 15, i16 15, i16 15, i16 15>)
+; CHECK-NEXT:   %index.next = add i32 %index, 4
+; CHECK-NEXT:   %2 = icmp eq i32 %index.next, %n.vec
+; CHECK-NEXT:   br i1 %2, label %middle.block, label %vector.body, !llvm.loop !0
+;
+define void @test_fshl(i32 %width) {
+entry:
+  br label %for.body9.us.us
+
+for.cond6.for.cond.cleanup8_crit_edge.us.us:      ; preds = %for.body9.us.us
+  ret void
+
+for.body9.us.us:                                  ; preds = %for.body9.us.us, %entry
+  %x.020.us.us = phi i32 [ 0, %entry ], [ %inc.us.us, %for.body9.us.us ]
+  %conv4.i.us.us = tail call i16 @llvm.fshl.i16(i16 undef, i16 undef, i16 15)
+  %inc.us.us = add nuw i32 %x.020.us.us, 1
+  %exitcond50 = icmp eq i32 %inc.us.us, %width
+  br i1 %exitcond50, label %for.cond6.for.cond.cleanup8_crit_edge.us.us, label %for.body9.us.us
+}
+
+declare i16 @llvm.fshl.i16(i16, i16, i16)
-- 
2.50.0