From: Simon Pilgrim
Date: Tue, 16 Apr 2019 20:57:28 +0000 (+0000)
Subject: [TargetLowering] Rename preferShiftsToClearExtremeBits and shouldFoldShiftPairToMask...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=29d0764b94ee70258a6cc7f553a698a4bb079ef3;p=llvm

[TargetLowering] Rename preferShiftsToClearExtremeBits and shouldFoldShiftPairToMask (PR41359)

As discussed on PR41359, this patch renames the pair of shift-mask target
feature functions to make their purposes more obvious.

shouldFoldShiftPairToMask -> shouldFoldConstantShiftPairToMask

preferShiftsToClearExtremeBits -> shouldFoldMaskToVariableShiftPair

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@358526 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/CodeGen/TargetLowering.h b/include/llvm/CodeGen/TargetLowering.h
index a5e41962a12..cbbf1ccd70a 100644
--- a/include/llvm/CodeGen/TargetLowering.h
+++ b/include/llvm/CodeGen/TargetLowering.h
@@ -527,9 +527,9 @@ public:
   /// There are two ways to clear extreme bits (either low or high):
   ///   Mask:    x &  (-1 << y)  (the instcombine canonical form)
   ///   Shifts:  x >> y << y
-  /// Return true if the variant with 2 shifts is preferred.
+  /// Return true if the variant with 2 variable shifts is preferred.
   /// Return false if there is no preference.
-  virtual bool preferShiftsToClearExtremeBits(SDValue X) const {
+  virtual bool shouldFoldMaskToVariableShiftPair(SDValue X) const {
     // By default, let's assume that no one prefers shifts.
     return false;
   }
@@ -538,8 +538,8 @@ public:
   /// This is usually true on most targets. But some targets, like Thumb1,
   /// have immediate shift instructions, but no immediate "and" instruction;
   /// this makes the fold unprofitable.
-  virtual bool shouldFoldShiftPairToMask(const SDNode *N,
-                                         CombineLevel Level) const {
+  virtual bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+                                                 CombineLevel Level) const {
     return true;
   }
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b03c65ccf2c..51f7d731a48 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4677,7 +4677,7 @@ SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {
   SDValue N1 = N->getOperand(1);

   // Do we actually prefer shifts over mask?
-  if (!TLI.preferShiftsToClearExtremeBits(N0))
+  if (!TLI.shouldFoldMaskToVariableShiftPair(N0))
     return SDValue();

   // Try to match  (-1 '[outer] logical shift' y)
@@ -6850,7 +6850,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // Only fold this if the inner shift has no other uses -- if it does, folding
   // this will increase the total number of instructions.
   if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse() &&
-      TLI.shouldFoldShiftPairToMask(N, Level)) {
+      TLI.shouldFoldConstantShiftPairToMask(N, Level)) {
     if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
       if (N0C1->getAPIntValue().ult(OpSizeInBits)) {
         uint64_t c1 = N0C1->getZExtValue();
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index cbf47965a43..0281d68fcfd 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -10483,9 +10483,8 @@ ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
   return false;
 }

-bool
-ARMTargetLowering::shouldFoldShiftPairToMask(const SDNode *N,
-                                             CombineLevel Level) const {
+bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
+    const SDNode *N, CombineLevel Level) const {
   if (!Subtarget->isThumb1Only())
     return true;
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 97283831913..911db14e52e 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -598,8 +598,8 @@ class VectorType;
     bool isDesirableToCommuteWithShift(const SDNode *N,
                                        CombineLevel Level) const override;
-    bool shouldFoldShiftPairToMask(const SDNode *N,
-                                   CombineLevel Level) const override;
+    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+                                           CombineLevel Level) const override;

   protected:
     std::pair<const TargetRegisterClass *, uint8_t>
     findRepresentativeClass(const TargetRegisterInfo *TRI,
diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index f942d3f97ae..99de079788e 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -1190,8 +1190,8 @@ bool MipsTargetLowering::isCheapToSpeculateCtlz() const {
   return Subtarget.hasMips32();
 }

-bool MipsTargetLowering::shouldFoldShiftPairToMask(const SDNode *N,
-                                                   CombineLevel Level) const {
+bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
+    const SDNode *N, CombineLevel Level) const {
   if (N->getOperand(0).getValueType().isVector())
     return false;
   return true;
diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h
index 431387b5ed0..b93e2c31acb 100644
--- a/lib/Target/Mips/MipsISelLowering.h
+++ b/lib/Target/Mips/MipsISelLowering.h
@@ -284,8 +284,8 @@ class TargetRegisterClass;
     bool isCheapToSpeculateCttz() const override;
     bool isCheapToSpeculateCtlz() const override;
-    bool shouldFoldShiftPairToMask(const SDNode *N,
-                                   CombineLevel Level) const override;
+    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
+                                           CombineLevel Level) const override;

     /// Return the register type for a given MVT, ensuring vectors are treated
     /// as a series of gpr sized integers.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e379c350ee2..f51d3570405 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -5007,7 +5007,7 @@ bool X86TargetLowering::hasAndNot(SDValue Y) const {
   return Subtarget.hasSSE2();
 }

-bool X86TargetLowering::preferShiftsToClearExtremeBits(SDValue Y) const {
+bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
   EVT VT = Y.getValueType();

   // For vectors, we don't have a preference, but we probably want a mask.
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 193d04094e2..1742e743f05 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -814,7 +814,7 @@ namespace llvm {
     bool hasAndNot(SDValue Y) const override;

-    bool preferShiftsToClearExtremeBits(SDValue Y) const override;
+    bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override;

     bool shouldTransformSignedTruncationCheck(EVT XVT,
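
A note on what the two renamed hooks actually gate: shouldFoldMaskToVariableShiftPair lets
DAGCombiner::unfoldExtremeBitClearingToShifts rewrite the canonical mask form into a pair of
variable shifts, while shouldFoldConstantShiftPairToMask lets visitSHL collapse a constant shift
pair back into a mask (Thumb1, and vector types on Mips, decline it above). The snippet below is
a minimal standalone sketch of the underlying equivalence described in the TargetLowering.h
comment; it is plain C++ rather than LLVM code, the helper names are made up for illustration,
and ~0u stands in for -1 so the shift stays well-defined.

#include <cassert>
#include <cstdint>

// Clear the low Y bits of X using the mask form, x & (-1 << y),
// which is the instcombine canonical form.
uint32_t clearLowBitsWithMask(uint32_t X, uint32_t Y) {
  return X & (~0u << Y);
}

// Clear the low Y bits of X using the two-variable-shift form, x >> y << y.
uint32_t clearLowBitsWithShifts(uint32_t X, uint32_t Y) {
  return (X >> Y) << Y;
}

int main() {
  // Both forms produce identical results for every in-range shift amount.
  for (uint32_t Y = 0; Y < 32; ++Y)
    assert(clearLowBitsWithMask(0xDEADBEEFu, Y) == clearLowBitsWithShifts(0xDEADBEEFu, Y));
  return 0;
}

Which form is cheaper depends on how the target handles immediate masks and variable shift
amounts, and that is exactly the information the two hooks report back to DAGCombiner.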