-//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file defines the interfaces that NVPTX uses to lower LLVM code into a\r
-// selection DAG.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-#include "NVPTXISelLowering.h"\r
-#include "MCTargetDesc/NVPTXBaseInfo.h"\r
-#include "NVPTX.h"\r
-#include "NVPTXSection.h"\r
-#include "NVPTXSubtarget.h"\r
-#include "NVPTXTargetMachine.h"\r
-#include "NVPTXTargetObjectFile.h"\r
-#include "NVPTXUtilities.h"\r
-#include "llvm/ADT/APInt.h"\r
-#include "llvm/ADT/SmallVector.h"\r
-#include "llvm/ADT/StringRef.h"\r
-#include "llvm/CodeGen/Analysis.h"\r
-#include "llvm/CodeGen/MachineFunction.h"\r
-#include "llvm/CodeGen/MachineMemOperand.h"\r
-#include "llvm/CodeGen/MachineValueType.h"\r
-#include "llvm/CodeGen/SelectionDAG.h"\r
-#include "llvm/CodeGen/SelectionDAGNodes.h"\r
-#include "llvm/CodeGen/ValueTypes.h"\r
-#include "llvm/IR/Argument.h"\r
-#include "llvm/IR/Attributes.h"\r
-#include "llvm/IR/CallSite.h"\r
-#include "llvm/IR/Constants.h"\r
-#include "llvm/IR/DataLayout.h"\r
-#include "llvm/IR/DerivedTypes.h"\r
-#include "llvm/IR/Function.h"\r
-#include "llvm/IR/GlobalValue.h"\r
-#include "llvm/IR/Instruction.h"\r
-#include "llvm/IR/Instructions.h"\r
-#include "llvm/IR/Module.h"\r
-#include "llvm/IR/Type.h"\r
-#include "llvm/IR/Value.h"\r
-#include "llvm/Support/Casting.h"\r
-#include "llvm/Support/CodeGen.h"\r
-#include "llvm/Support/CommandLine.h"\r
-#include "llvm/Support/ErrorHandling.h"\r
-#include "llvm/Support/MathExtras.h"\r
-#include "llvm/Support/raw_ostream.h"\r
-#include "llvm/Target/TargetCallingConv.h"\r
-#include "llvm/Target/TargetLowering.h"\r
-#include "llvm/Target/TargetMachine.h"\r
-#include "llvm/Target/TargetOptions.h"\r
-#include <algorithm>\r
-#include <cassert>\r
-#include <cstdint>\r
-#include <iterator>\r
-#include <sstream>\r
-#include <string>\r
-#include <utility>\r
-#include <vector>\r
-\r
-#define DEBUG_TYPE "nvptx-lower"\r
-\r
-using namespace llvm;\r
-\r
-static unsigned int uniqueCallSite = 0;\r
-\r
-static cl::opt<bool> sched4reg(\r
- "nvptx-sched4reg",\r
- cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));\r
-\r
-static cl::opt<unsigned>\r
-FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,\r
- cl::desc("NVPTX Specific: FMA contraction (0: don't do it"\r
- " 1: do it 2: do it aggressively"),\r
- cl::init(2));\r
-\r
-static cl::opt<int> UsePrecDivF32(\r
- "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,\r
- cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"\r
- " IEEE Compliant F32 div.rnd if available."),\r
- cl::init(2));\r
-\r
-static cl::opt<bool> UsePrecSqrtF32(\r
- "nvptx-prec-sqrtf32", cl::Hidden,\r
- cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),\r
- cl::init(true));\r
-\r
-static cl::opt<bool> FtzEnabled(\r
- "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,\r
- cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),\r
- cl::init(false));\r
-\r
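-// Illustrative use of the flags above (hypothetical llc invocation; the exact\r
-// behavior is decided by the helpers below):\r
-//   llc -march=nvptx64 -nvptx-prec-divf32=0 -nvptx-prec-sqrtf32=0 in.ll\r
-// requests the approximate div/sqrt forms regardless of fast-math settings,\r
-// because explicitly passed flags are always honored.\r
-\r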
-int NVPTXTargetLowering::getDivF32Level() const {\r
- if (UsePrecDivF32.getNumOccurrences() > 0) {\r
- // If nvptx-prec-divf32=N is used on the command-line, always honor it\r
- return UsePrecDivF32;\r
- } else {\r
- // Otherwise, use div.approx if fast math is enabled\r
- if (getTargetMachine().Options.UnsafeFPMath)\r
- return 0;\r
- else\r
- return 2;\r
- }\r
-}\r
-\r
-bool NVPTXTargetLowering::usePrecSqrtF32() const {\r
- if (UsePrecSqrtF32.getNumOccurrences() > 0) {\r
- // If nvptx-prec-sqrtf32 is used on the command-line, always honor it\r
- return UsePrecSqrtF32;\r
- } else {\r
- // Otherwise, use sqrt.approx if fast math is enabled\r
- return !getTargetMachine().Options.UnsafeFPMath;\r
- }\r
-}\r
-\r
-bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {\r
- // TODO: Get rid of this flag; there can be only one way to do this.\r
- if (FtzEnabled.getNumOccurrences() > 0) {\r
- // If nvptx-f32ftz is used on the command-line, always honor it\r
- return FtzEnabled;\r
- } else {\r
- const Function *F = MF.getFunction();\r
- // Otherwise, check for an nvptx-f32ftz attribute on the function\r
- if (F->hasFnAttribute("nvptx-f32ftz"))\r
- return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";\r
- else\r
- return false;\r
- }\r
-}\r
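-\r
-// For example (illustrative IR), a function carrying\r
-//   attributes #0 = { "nvptx-f32ftz"="true" }\r
-// makes useF32FTZ() return true when the nvptx-f32ftz flag is not given on\r
-// the command line.\r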
-\r
-static bool IsPTXVectorType(MVT VT) {\r
- switch (VT.SimpleTy) {\r
- default:\r
- return false;\r
- case MVT::v2i1:\r
- case MVT::v4i1:\r
- case MVT::v2i8:\r
- case MVT::v4i8:\r
- case MVT::v2i16:\r
- case MVT::v4i16:\r
- case MVT::v2i32:\r
- case MVT::v4i32:\r
- case MVT::v2i64:\r
- case MVT::v2f16:\r
- case MVT::v4f16:\r
- case MVT::v8f16: // <4 x f16x2>\r
- case MVT::v2f32:\r
- case MVT::v4f32:\r
- case MVT::v2f64:\r
- return true;\r
- }\r
-}\r
-\r
-/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive\r
-/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors\r
-/// into their primitive components.\r
-/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the\r
-/// same number of types as the Ins/Outs arrays in LowerFormalArguments,\r
-/// LowerCall, and LowerReturn.\r
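-///\r
-/// For example (a sketch; offsets ultimately come from the DataLayout):\r
-///   <4 x float> -> f32, f32, f32, f32 at offsets 0, 4, 8, 12\r
-///   <4 x half>  -> v2f16, v2f16      at offsets 0, 4\r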
-static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,\r
- Type *Ty, SmallVectorImpl<EVT> &ValueVTs,\r
- SmallVectorImpl<uint64_t> *Offsets = nullptr,\r
- uint64_t StartingOffset = 0) {\r
- SmallVector<EVT, 16> TempVTs;\r
- SmallVector<uint64_t, 16> TempOffsets;\r
-\r
- ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);\r
- for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {\r
- EVT VT = TempVTs[i];\r
- uint64_t Off = TempOffsets[i];\r
- // Split vectors into individual elements, except for v2f16, which\r
- // we will pass as a single scalar.\r
- if (VT.isVector()) {\r
- unsigned NumElts = VT.getVectorNumElements();\r
- EVT EltVT = VT.getVectorElementType();\r
- // Vectors with an even number of f16 elements will be passed to\r
- // us as an array of v2f16 elements. We must match this so we\r
- // stay in sync with Ins/Outs.\r
- if (EltVT == MVT::f16 && NumElts % 2 == 0) {\r
- EltVT = MVT::v2f16;\r
- NumElts /= 2;\r
- }\r
- for (unsigned j = 0; j != NumElts; ++j) {\r
- ValueVTs.push_back(EltVT);\r
- if (Offsets)\r
- Offsets->push_back(Off + j * EltVT.getStoreSize());\r
- }\r
- } else {\r
- ValueVTs.push_back(VT);\r
- if (Offsets)\r
- Offsets->push_back(Off);\r
- }\r
- }\r
-}\r
-\r
-// Check whether we can merge loads/stores of some of the pieces of a\r
-// flattened function parameter or return value into a single vector\r
-// load/store.\r
-//\r
-// The flattened parameter is represented as a list of EVTs and\r
-// offsets, and the whole structure is aligned to ParamAlignment. This\r
-// function determines whether we can load/store pieces of the\r
-// parameter starting at index Idx using a single vectorized op of\r
-// size AccessSize. If so, it returns the number of param pieces\r
-// covered by the vector op. Otherwise, it returns 1.\r
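-//\r
-// For example (illustrative): four contiguous f32 pieces at offsets\r
-// 0, 4, 8, 12 with ParamAlignment >= 16 are covered by a single 16-byte\r
-// access starting at Idx 0, so the function returns 4; with\r
-// ParamAlignment == 8 that access is rejected and the caller retries\r
-// with a smaller AccessSize.\r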
-static unsigned CanMergeParamLoadStoresStartingAt(\r
- unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,\r
- const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {\r
- assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");\r
-\r
- // Can't vectorize if param alignment is not sufficient.\r
- if (AccessSize > ParamAlignment)\r
- return 1;\r
- // Can't vectorize if offset is not aligned.\r
- if (Offsets[Idx] & (AccessSize - 1))\r
- return 1;\r
-\r
- EVT EltVT = ValueVTs[Idx];\r
- unsigned EltSize = EltVT.getStoreSize();\r
-\r
- // Element is too large to vectorize.\r
- if (EltSize >= AccessSize)\r
- return 1;\r
-\r
- unsigned NumElts = AccessSize / EltSize;\r
- // Can't vectorize if AccessSize is not a multiple of EltSize.\r
- if (AccessSize != EltSize * NumElts)\r
- return 1;\r
-\r
- // We don't have enough elements to vectorize.\r
- if (Idx + NumElts > ValueVTs.size())\r
- return 1;\r
-\r
- // PTX ISA can only deal with 2- and 4-element vector ops.\r
- if (NumElts != 4 && NumElts != 2)\r
- return 1;\r
-\r
- for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {\r
- // Types do not match.\r
- if (ValueVTs[j] != EltVT)\r
- return 1;\r
-\r
- // Elements are not contiguous.\r
- if (Offsets[j] - Offsets[j - 1] != EltSize)\r
- return 1;\r
- }\r
- // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).\r
- return NumElts;\r
-}\r
-\r
-// Flags for tracking per-element vectorization state of loads/stores\r
-// of a flattened function parameter or return value.\r
-enum ParamVectorizationFlags {\r
- PVF_INNER = 0x0, // Middle elements of a vector.\r
- PVF_FIRST = 0x1, // First element of the vector.\r
- PVF_LAST = 0x2, // Last element of the vector.\r
- // Scalar is effectively a 1-element vector.\r
- PVF_SCALAR = PVF_FIRST | PVF_LAST\r
-};\r
-\r
-// Computes whether and how we can vectorize the loads/stores of a\r
-// flattened function parameter or return value.\r
-//\r
-// The flattened parameter is represented as the list of ValueVTs and\r
-// Offsets, and is aligned to ParamAlignment bytes. We return a vector\r
-// of the same size as ValueVTs indicating how each piece should be\r
-// loaded/stored (i.e. as a scalar, or as part of a vector\r
-// load/store).\r
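-//\r
-// For example (illustrative): eight f32 pieces at offsets 0..28 with\r
-// 16-byte alignment become two 4-element groups, i.e. the returned flags\r
-// are { PVF_FIRST, PVF_INNER, PVF_INNER, PVF_LAST } twice.\r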
-static SmallVector<ParamVectorizationFlags, 16>\r
-VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,\r
- const SmallVectorImpl<uint64_t> &Offsets,\r
- unsigned ParamAlignment) {\r
- // Set vector size to match ValueVTs and mark all elements as\r
- // scalars by default.\r
- SmallVector<ParamVectorizationFlags, 16> VectorInfo;\r
- VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);\r
-\r
- // Check what we can vectorize using 128/64/32/16-bit accesses.\r
- for (int I = 0, E = ValueVTs.size(); I != E; ++I) {\r
- // Skip elements we've already processed.\r
- assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");\r
- for (unsigned AccessSize : {16, 8, 4, 2}) {\r
- unsigned NumElts = CanMergeParamLoadStoresStartingAt(\r
- I, AccessSize, ValueVTs, Offsets, ParamAlignment);\r
- // Mark vectorized elements.\r
- switch (NumElts) {\r
- default:\r
- llvm_unreachable("Unexpected return value");\r
- case 1:\r
- // Can't vectorize using this size, try next smaller size.\r
- continue;\r
- case 2:\r
- assert(I + 1 < E && "Not enough elements.");\r
- VectorInfo[I] = PVF_FIRST;\r
- VectorInfo[I + 1] = PVF_LAST;\r
- I += 1;\r
- break;\r
- case 4:\r
- assert(I + 3 < E && "Not enough elements.");\r
- VectorInfo[I] = PVF_FIRST;\r
- VectorInfo[I + 1] = PVF_INNER;\r
- VectorInfo[I + 2] = PVF_INNER;\r
- VectorInfo[I + 3] = PVF_LAST;\r
- I += 3;\r
- break;\r
- }\r
- // Break out of the inner loop because we've already succeeded\r
- // using the largest possible AccessSize.\r
- break;\r
- }\r
- }\r
- return VectorInfo;\r
-}\r
-\r
-// NVPTXTargetLowering Constructor.\r
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,\r
- const NVPTXSubtarget &STI)\r
- : TargetLowering(TM), nvTM(&TM), STI(STI) {\r
- // Always lower memset, memcpy, and memmove intrinsics to load/store\r
- // instructions rather than generating calls to memset, memcpy, or memmove.\r
- MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;\r
- MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;\r
- MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;\r
-\r
- setBooleanContents(ZeroOrNegativeOneBooleanContent);\r
- setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);\r
-\r
- // Jump is Expensive. Don't create extra control flow for 'and', 'or'\r
- // condition branches.\r
- setJumpIsExpensive(true);\r
-\r
- // Wide divides are _very_ slow. Try to reduce the width of the divide if\r
- // possible.\r
- addBypassSlowDiv(64, 32);\r
-\r
- // By default, use Source scheduling.\r
- if (sched4reg)\r
- setSchedulingPreference(Sched::RegPressure);\r
- else\r
- setSchedulingPreference(Sched::Source);\r
-\r
- auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,\r
- LegalizeAction NoF16Action) {\r
- setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);\r
- };\r
-\r
- addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);\r
- addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);\r
- addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);\r
- addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);\r
- addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);\r
- addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);\r
- addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);\r
- addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);\r
-\r
- // Conversion to/from FP16/FP16x2 is always legal.\r
- setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);\r
- setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);\r
- setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);\r
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);\r
-\r
- setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);\r
- setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);\r
-\r
- // Operations not directly supported by NVPTX.\r
- setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);\r
- setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::f16, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::f32, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::f64, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::i1, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::i8, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::i16, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::i32, Expand);\r
- setOperationAction(ISD::BR_CC, MVT::i64, Expand);\r
- // Some SIGN_EXTEND_INREG can be done using cvt instruction.\r
- // For others we will expand to a SHL/SRA pair.\r
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);\r
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);\r
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);\r
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);\r
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);\r
-\r
- setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);\r
- setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);\r
- setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);\r
- setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);\r
- setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);\r
- setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);\r
-\r
- setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);\r
- setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);\r
-\r
- if (STI.hasROT64()) {\r
- setOperationAction(ISD::ROTL, MVT::i64, Legal);\r
- setOperationAction(ISD::ROTR, MVT::i64, Legal);\r
- } else {\r
- setOperationAction(ISD::ROTL, MVT::i64, Expand);\r
- setOperationAction(ISD::ROTR, MVT::i64, Expand);\r
- }\r
- if (STI.hasROT32()) {\r
- setOperationAction(ISD::ROTL, MVT::i32, Legal);\r
- setOperationAction(ISD::ROTR, MVT::i32, Legal);\r
- } else {\r
- setOperationAction(ISD::ROTL, MVT::i32, Expand);\r
- setOperationAction(ISD::ROTR, MVT::i32, Expand);\r
- }\r
-\r
- setOperationAction(ISD::ROTL, MVT::i16, Expand);\r
- setOperationAction(ISD::ROTR, MVT::i16, Expand);\r
- setOperationAction(ISD::ROTL, MVT::i8, Expand);\r
- setOperationAction(ISD::ROTR, MVT::i8, Expand);\r
- setOperationAction(ISD::BSWAP, MVT::i16, Expand);\r
- setOperationAction(ISD::BSWAP, MVT::i32, Expand);\r
- setOperationAction(ISD::BSWAP, MVT::i64, Expand);\r
-\r
- // Indirect branch is not supported.\r
- // This also disables Jump Table creation.\r
- setOperationAction(ISD::BR_JT, MVT::Other, Expand);\r
- setOperationAction(ISD::BRIND, MVT::Other, Expand);\r
-\r
- setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);\r
- setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);\r
-\r
- // We want to legalize constant-related memmove and memcpy\r
- // intrinsics.\r
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);\r
-\r
- // Turn FP extload into load/fpextend\r
- setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);\r
- setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);\r
- // Turn FP truncstore into trunc + store.\r
- // FIXME: vector types should also be expanded\r
- setTruncStoreAction(MVT::f32, MVT::f16, Expand);\r
- setTruncStoreAction(MVT::f64, MVT::f16, Expand);\r
- setTruncStoreAction(MVT::f64, MVT::f32, Expand);\r
-\r
- // PTX does not support load / store predicate registers\r
- setOperationAction(ISD::LOAD, MVT::i1, Custom);\r
- setOperationAction(ISD::STORE, MVT::i1, Custom);\r
-\r
- for (MVT VT : MVT::integer_valuetypes()) {\r
- setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);\r
- setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);\r
- setTruncStoreAction(VT, MVT::i1, Expand);\r
- }\r
-\r
- // This is legal in NVPTX\r
- setOperationAction(ISD::ConstantFP, MVT::f64, Legal);\r
- setOperationAction(ISD::ConstantFP, MVT::f32, Legal);\r
- setOperationAction(ISD::ConstantFP, MVT::f16, Legal);\r
-\r
- // TRAP can be lowered to PTX trap\r
- setOperationAction(ISD::TRAP, MVT::Other, Legal);\r
-\r
- setOperationAction(ISD::ADDC, MVT::i64, Expand);\r
- setOperationAction(ISD::ADDE, MVT::i64, Expand);\r
-\r
- // Register custom handling for vector loads/stores\r
- for (MVT VT : MVT::vector_valuetypes()) {\r
- if (IsPTXVectorType(VT)) {\r
- setOperationAction(ISD::LOAD, VT, Custom);\r
- setOperationAction(ISD::STORE, VT, Custom);\r
- setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);\r
- }\r
- }\r
-\r
- // Custom handling for i8 intrinsics\r
- setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);\r
-\r
- for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {\r
- setOperationAction(ISD::ABS, Ty, Legal);\r
- setOperationAction(ISD::SMIN, Ty, Legal);\r
- setOperationAction(ISD::SMAX, Ty, Legal);\r
- setOperationAction(ISD::UMIN, Ty, Legal);\r
- setOperationAction(ISD::UMAX, Ty, Legal);\r
-\r
- setOperationAction(ISD::CTPOP, Ty, Legal);\r
- setOperationAction(ISD::CTLZ, Ty, Legal);\r
- }\r
-\r
- setOperationAction(ISD::CTTZ, MVT::i16, Expand);\r
- setOperationAction(ISD::CTTZ, MVT::i32, Expand);\r
- setOperationAction(ISD::CTTZ, MVT::i64, Expand);\r
-\r
- // PTX does not directly support SELP of i1, so promote to i32 first\r
- setOperationAction(ISD::SELECT, MVT::i1, Custom);\r
-\r
- // PTX cannot multiply two i64s in a single instruction.\r
- setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);\r
- setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);\r
-\r
- // We have some custom DAG combine patterns for these nodes\r
- setTargetDAGCombine(ISD::ADD);\r
- setTargetDAGCombine(ISD::AND);\r
- setTargetDAGCombine(ISD::FADD);\r
- setTargetDAGCombine(ISD::MUL);\r
- setTargetDAGCombine(ISD::SHL);\r
- setTargetDAGCombine(ISD::SREM);\r
- setTargetDAGCombine(ISD::UREM);\r
-\r
- // setcc for f16x2 needs special handling to prevent the legalizer's\r
- // attempt to scalarize it, since v2i1 is not legal.\r
- if (STI.allowFP16Math())\r
- setTargetDAGCombine(ISD::SETCC);\r
-\r
- // Promote fp16 arithmetic if fp16 hardware isn't available or the\r
- // user passed --nvptx-no-fp16-math. The flag is useful because,\r
- // although sm_53+ GPUs have some sort of FP16 support in hardware,\r
- // only sm_53 and sm_60 have a full implementation. Others have only\r
- // a token amount of hardware and are likely to run faster by using\r
- // fp32 units instead.\r
- for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {\r
- setFP16OperationAction(Op, MVT::f16, Legal, Promote);\r
- setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);\r
- }\r
-\r
- // There's no neg.f16 instruction. Expand to (0-x).\r
- setOperationAction(ISD::FNEG, MVT::f16, Expand);\r
- setOperationAction(ISD::FNEG, MVT::v2f16, Expand);\r
-\r
- // (would be) Library functions.\r
-\r
- // These map to conversion instructions for scalar FP types.\r
- for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,\r
- ISD::FROUND, ISD::FTRUNC}) {\r
- setOperationAction(Op, MVT::f16, Legal);\r
- setOperationAction(Op, MVT::f32, Legal);\r
- setOperationAction(Op, MVT::f64, Legal);\r
- setOperationAction(Op, MVT::v2f16, Expand);\r
- }\r
-\r
- // 'Expand' implements FCOPYSIGN without calling an external library.\r
- setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);\r
- setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);\r
- setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);\r
- setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);\r
-\r
- // These map to corresponding instructions for f32/f64. f16 must be\r
- // promoted to f32. v2f16 is expanded to f16, which is then promoted\r
- // to f32.\r
- for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,\r
- ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {\r
- setOperationAction(Op, MVT::f16, Promote);\r
- setOperationAction(Op, MVT::f32, Legal);\r
- setOperationAction(Op, MVT::f64, Legal);\r
- setOperationAction(Op, MVT::v2f16, Expand);\r
- }\r
- setOperationAction(ISD::FMINNUM, MVT::f16, Promote);\r
- setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);\r
- setOperationAction(ISD::FMINNAN, MVT::f16, Promote);\r
- setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);\r
-\r
- // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.\r
- // No FPOW or FREM in PTX.\r
-\r
- // Now deduce the information based on the above-mentioned actions.\r
- computeRegisterProperties(STI.getRegisterInfo());\r
-}\r
-\r
-const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {\r
- switch ((NVPTXISD::NodeType)Opcode) {\r
- case NVPTXISD::FIRST_NUMBER:\r
- break;\r
- case NVPTXISD::CALL:\r
- return "NVPTXISD::CALL";\r
- case NVPTXISD::RET_FLAG:\r
- return "NVPTXISD::RET_FLAG";\r
- case NVPTXISD::LOAD_PARAM:\r
- return "NVPTXISD::LOAD_PARAM";\r
- case NVPTXISD::Wrapper:\r
- return "NVPTXISD::Wrapper";\r
- case NVPTXISD::DeclareParam:\r
- return "NVPTXISD::DeclareParam";\r
- case NVPTXISD::DeclareScalarParam:\r
- return "NVPTXISD::DeclareScalarParam";\r
- case NVPTXISD::DeclareRet:\r
- return "NVPTXISD::DeclareRet";\r
- case NVPTXISD::DeclareScalarRet:\r
- return "NVPTXISD::DeclareScalarRet";\r
- case NVPTXISD::DeclareRetParam:\r
- return "NVPTXISD::DeclareRetParam";\r
- case NVPTXISD::PrintCall:\r
- return "NVPTXISD::PrintCall";\r
- case NVPTXISD::PrintConvergentCall:\r
- return "NVPTXISD::PrintConvergentCall";\r
- case NVPTXISD::PrintCallUni:\r
- return "NVPTXISD::PrintCallUni";\r
- case NVPTXISD::PrintConvergentCallUni:\r
- return "NVPTXISD::PrintConvergentCallUni";\r
- case NVPTXISD::LoadParam:\r
- return "NVPTXISD::LoadParam";\r
- case NVPTXISD::LoadParamV2:\r
- return "NVPTXISD::LoadParamV2";\r
- case NVPTXISD::LoadParamV4:\r
- return "NVPTXISD::LoadParamV4";\r
- case NVPTXISD::StoreParam:\r
- return "NVPTXISD::StoreParam";\r
- case NVPTXISD::StoreParamV2:\r
- return "NVPTXISD::StoreParamV2";\r
- case NVPTXISD::StoreParamV4:\r
- return "NVPTXISD::StoreParamV4";\r
- case NVPTXISD::StoreParamS32:\r
- return "NVPTXISD::StoreParamS32";\r
- case NVPTXISD::StoreParamU32:\r
- return "NVPTXISD::StoreParamU32";\r
- case NVPTXISD::CallArgBegin:\r
- return "NVPTXISD::CallArgBegin";\r
- case NVPTXISD::CallArg:\r
- return "NVPTXISD::CallArg";\r
- case NVPTXISD::LastCallArg:\r
- return "NVPTXISD::LastCallArg";\r
- case NVPTXISD::CallArgEnd:\r
- return "NVPTXISD::CallArgEnd";\r
- case NVPTXISD::CallVoid:\r
- return "NVPTXISD::CallVoid";\r
- case NVPTXISD::CallVal:\r
- return "NVPTXISD::CallVal";\r
- case NVPTXISD::CallSymbol:\r
- return "NVPTXISD::CallSymbol";\r
- case NVPTXISD::Prototype:\r
- return "NVPTXISD::Prototype";\r
- case NVPTXISD::MoveParam:\r
- return "NVPTXISD::MoveParam";\r
- case NVPTXISD::StoreRetval:\r
- return "NVPTXISD::StoreRetval";\r
- case NVPTXISD::StoreRetvalV2:\r
- return "NVPTXISD::StoreRetvalV2";\r
- case NVPTXISD::StoreRetvalV4:\r
- return "NVPTXISD::StoreRetvalV4";\r
- case NVPTXISD::PseudoUseParam:\r
- return "NVPTXISD::PseudoUseParam";\r
- case NVPTXISD::RETURN:\r
- return "NVPTXISD::RETURN";\r
- case NVPTXISD::CallSeqBegin:\r
- return "NVPTXISD::CallSeqBegin";\r
- case NVPTXISD::CallSeqEnd:\r
- return "NVPTXISD::CallSeqEnd";\r
- case NVPTXISD::CallPrototype:\r
- return "NVPTXISD::CallPrototype";\r
- case NVPTXISD::LoadV2:\r
- return "NVPTXISD::LoadV2";\r
- case NVPTXISD::LoadV4:\r
- return "NVPTXISD::LoadV4";\r
- case NVPTXISD::LDGV2:\r
- return "NVPTXISD::LDGV2";\r
- case NVPTXISD::LDGV4:\r
- return "NVPTXISD::LDGV4";\r
- case NVPTXISD::LDUV2:\r
- return "NVPTXISD::LDUV2";\r
- case NVPTXISD::LDUV4:\r
- return "NVPTXISD::LDUV4";\r
- case NVPTXISD::StoreV2:\r
- return "NVPTXISD::StoreV2";\r
- case NVPTXISD::StoreV4:\r
- return "NVPTXISD::StoreV4";\r
- case NVPTXISD::FUN_SHFL_CLAMP:\r
- return "NVPTXISD::FUN_SHFL_CLAMP";\r
- case NVPTXISD::FUN_SHFR_CLAMP:\r
- return "NVPTXISD::FUN_SHFR_CLAMP";\r
- case NVPTXISD::IMAD:\r
- return "NVPTXISD::IMAD";\r
- case NVPTXISD::SETP_F16X2:\r
- return "NVPTXISD::SETP_F16X2";\r
- case NVPTXISD::Dummy:\r
- return "NVPTXISD::Dummy";\r
- case NVPTXISD::MUL_WIDE_SIGNED:\r
- return "NVPTXISD::MUL_WIDE_SIGNED";\r
- case NVPTXISD::MUL_WIDE_UNSIGNED:\r
- return "NVPTXISD::MUL_WIDE_UNSIGNED";\r
- case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";\r
- case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";\r
- case NVPTXISD::Tex1DFloatFloatLevel:\r
- return "NVPTXISD::Tex1DFloatFloatLevel";\r
- case NVPTXISD::Tex1DFloatFloatGrad:\r
- return "NVPTXISD::Tex1DFloatFloatGrad";\r
- case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";\r
- case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";\r
- case NVPTXISD::Tex1DS32FloatLevel:\r
- return "NVPTXISD::Tex1DS32FloatLevel";\r
- case NVPTXISD::Tex1DS32FloatGrad:\r
- return "NVPTXISD::Tex1DS32FloatGrad";\r
- case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";\r
- case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";\r
- case NVPTXISD::Tex1DU32FloatLevel:\r
- return "NVPTXISD::Tex1DU32FloatLevel";\r
- case NVPTXISD::Tex1DU32FloatGrad:\r
- return "NVPTXISD::Tex1DU32FloatGrad";\r
- case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";\r
- case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";\r
- case NVPTXISD::Tex1DArrayFloatFloatLevel:\r
- return "NVPTXISD::Tex1DArrayFloatFloatLevel";\r
- case NVPTXISD::Tex1DArrayFloatFloatGrad:\r
- return "NVPTXISD::Tex1DArrayFloatFloatGrad";\r
- case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";\r
- case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";\r
- case NVPTXISD::Tex1DArrayS32FloatLevel:\r
- return "NVPTXISD::Tex1DArrayS32FloatLevel";\r
- case NVPTXISD::Tex1DArrayS32FloatGrad:\r
- return "NVPTXISD::Tex1DArrayS32FloatGrad";\r
- case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";\r
- case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";\r
- case NVPTXISD::Tex1DArrayU32FloatLevel:\r
- return "NVPTXISD::Tex1DArrayU32FloatLevel";\r
- case NVPTXISD::Tex1DArrayU32FloatGrad:\r
- return "NVPTXISD::Tex1DArrayU32FloatGrad";\r
- case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";\r
- case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";\r
- case NVPTXISD::Tex2DFloatFloatLevel:\r
- return "NVPTXISD::Tex2DFloatFloatLevel";\r
- case NVPTXISD::Tex2DFloatFloatGrad:\r
- return "NVPTXISD::Tex2DFloatFloatGrad";\r
- case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";\r
- case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";\r
- case NVPTXISD::Tex2DS32FloatLevel:\r
- return "NVPTXISD::Tex2DS32FloatLevel";\r
- case NVPTXISD::Tex2DS32FloatGrad:\r
- return "NVPTXISD::Tex2DS32FloatGrad";\r
- case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";\r
- case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";\r
- case NVPTXISD::Tex2DU32FloatLevel:\r
- return "NVPTXISD::Tex2DU32FloatLevel";\r
- case NVPTXISD::Tex2DU32FloatGrad:\r
- return "NVPTXISD::Tex2DU32FloatGrad";\r
- case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";\r
- case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";\r
- case NVPTXISD::Tex2DArrayFloatFloatLevel:\r
- return "NVPTXISD::Tex2DArrayFloatFloatLevel";\r
- case NVPTXISD::Tex2DArrayFloatFloatGrad:\r
- return "NVPTXISD::Tex2DArrayFloatFloatGrad";\r
- case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";\r
- case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";\r
- case NVPTXISD::Tex2DArrayS32FloatLevel:\r
- return "NVPTXISD::Tex2DArrayS32FloatLevel";\r
- case NVPTXISD::Tex2DArrayS32FloatGrad:\r
- return "NVPTXISD::Tex2DArrayS32FloatGrad";\r
- case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";\r
- case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";\r
- case NVPTXISD::Tex2DArrayU32FloatLevel:\r
- return "NVPTXISD::Tex2DArrayU32FloatLevel";\r
- case NVPTXISD::Tex2DArrayU32FloatGrad:\r
- return "NVPTXISD::Tex2DArrayU32FloatGrad";\r
- case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";\r
- case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";\r
- case NVPTXISD::Tex3DFloatFloatLevel:\r
- return "NVPTXISD::Tex3DFloatFloatLevel";\r
- case NVPTXISD::Tex3DFloatFloatGrad:\r
- return "NVPTXISD::Tex3DFloatFloatGrad";\r
- case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";\r
- case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";\r
- case NVPTXISD::Tex3DS32FloatLevel:\r
- return "NVPTXISD::Tex3DS32FloatLevel";\r
- case NVPTXISD::Tex3DS32FloatGrad:\r
- return "NVPTXISD::Tex3DS32FloatGrad";\r
- case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";\r
- case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";\r
- case NVPTXISD::Tex3DU32FloatLevel:\r
- return "NVPTXISD::Tex3DU32FloatLevel";\r
- case NVPTXISD::Tex3DU32FloatGrad:\r
- return "NVPTXISD::Tex3DU32FloatGrad";\r
- case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";\r
- case NVPTXISD::TexCubeFloatFloatLevel:\r
- return "NVPTXISD::TexCubeFloatFloatLevel";\r
- case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";\r
- case NVPTXISD::TexCubeS32FloatLevel:\r
- return "NVPTXISD::TexCubeS32FloatLevel";\r
- case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";\r
- case NVPTXISD::TexCubeU32FloatLevel:\r
- return "NVPTXISD::TexCubeU32FloatLevel";\r
- case NVPTXISD::TexCubeArrayFloatFloat:\r
- return "NVPTXISD::TexCubeArrayFloatFloat";\r
- case NVPTXISD::TexCubeArrayFloatFloatLevel:\r
- return "NVPTXISD::TexCubeArrayFloatFloatLevel";\r
- case NVPTXISD::TexCubeArrayS32Float:\r
- return "NVPTXISD::TexCubeArrayS32Float";\r
- case NVPTXISD::TexCubeArrayS32FloatLevel:\r
- return "NVPTXISD::TexCubeArrayS32FloatLevel";\r
- case NVPTXISD::TexCubeArrayU32Float:\r
- return "NVPTXISD::TexCubeArrayU32Float";\r
- case NVPTXISD::TexCubeArrayU32FloatLevel:\r
- return "NVPTXISD::TexCubeArrayU32FloatLevel";\r
- case NVPTXISD::Tld4R2DFloatFloat:\r
- return "NVPTXISD::Tld4R2DFloatFloat";\r
- case NVPTXISD::Tld4G2DFloatFloat:\r
- return "NVPTXISD::Tld4G2DFloatFloat";\r
- case NVPTXISD::Tld4B2DFloatFloat:\r
- return "NVPTXISD::Tld4B2DFloatFloat";\r
- case NVPTXISD::Tld4A2DFloatFloat:\r
- return "NVPTXISD::Tld4A2DFloatFloat";\r
- case NVPTXISD::Tld4R2DS64Float:\r
- return "NVPTXISD::Tld4R2DS64Float";\r
- case NVPTXISD::Tld4G2DS64Float:\r
- return "NVPTXISD::Tld4G2DS64Float";\r
- case NVPTXISD::Tld4B2DS64Float:\r
- return "NVPTXISD::Tld4B2DS64Float";\r
- case NVPTXISD::Tld4A2DS64Float:\r
- return "NVPTXISD::Tld4A2DS64Float";\r
- case NVPTXISD::Tld4R2DU64Float:\r
- return "NVPTXISD::Tld4R2DU64Float";\r
- case NVPTXISD::Tld4G2DU64Float:\r
- return "NVPTXISD::Tld4G2DU64Float";\r
- case NVPTXISD::Tld4B2DU64Float:\r
- return "NVPTXISD::Tld4B2DU64Float";\r
- case NVPTXISD::Tld4A2DU64Float:\r
- return "NVPTXISD::Tld4A2DU64Float";\r
-\r
- case NVPTXISD::TexUnified1DFloatS32:\r
- return "NVPTXISD::TexUnified1DFloatS32";\r
- case NVPTXISD::TexUnified1DFloatFloat:\r
- return "NVPTXISD::TexUnified1DFloatFloat";\r
- case NVPTXISD::TexUnified1DFloatFloatLevel:\r
- return "NVPTXISD::TexUnified1DFloatFloatLevel";\r
- case NVPTXISD::TexUnified1DFloatFloatGrad:\r
- return "NVPTXISD::TexUnified1DFloatFloatGrad";\r
- case NVPTXISD::TexUnified1DS32S32:\r
- return "NVPTXISD::TexUnified1DS32S32";\r
- case NVPTXISD::TexUnified1DS32Float:\r
- return "NVPTXISD::TexUnified1DS32Float";\r
- case NVPTXISD::TexUnified1DS32FloatLevel:\r
- return "NVPTXISD::TexUnified1DS32FloatLevel";\r
- case NVPTXISD::TexUnified1DS32FloatGrad:\r
- return "NVPTXISD::TexUnified1DS32FloatGrad";\r
- case NVPTXISD::TexUnified1DU32S32:\r
- return "NVPTXISD::TexUnified1DU32S32";\r
- case NVPTXISD::TexUnified1DU32Float:\r
- return "NVPTXISD::TexUnified1DU32Float";\r
- case NVPTXISD::TexUnified1DU32FloatLevel:\r
- return "NVPTXISD::TexUnified1DU32FloatLevel";\r
- case NVPTXISD::TexUnified1DU32FloatGrad:\r
- return "NVPTXISD::TexUnified1DU32FloatGrad";\r
- case NVPTXISD::TexUnified1DArrayFloatS32:\r
- return "NVPTXISD::TexUnified1DArrayFloatS32";\r
- case NVPTXISD::TexUnified1DArrayFloatFloat:\r
- return "NVPTXISD::TexUnified1DArrayFloatFloat";\r
- case NVPTXISD::TexUnified1DArrayFloatFloatLevel:\r
- return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";\r
- case NVPTXISD::TexUnified1DArrayFloatFloatGrad:\r
- return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";\r
- case NVPTXISD::TexUnified1DArrayS32S32:\r
- return "NVPTXISD::TexUnified1DArrayS32S32";\r
- case NVPTXISD::TexUnified1DArrayS32Float:\r
- return "NVPTXISD::TexUnified1DArrayS32Float";\r
- case NVPTXISD::TexUnified1DArrayS32FloatLevel:\r
- return "NVPTXISD::TexUnified1DArrayS32FloatLevel";\r
- case NVPTXISD::TexUnified1DArrayS32FloatGrad:\r
- return "NVPTXISD::TexUnified1DArrayS32FloatGrad";\r
- case NVPTXISD::TexUnified1DArrayU32S32:\r
- return "NVPTXISD::TexUnified1DArrayU32S32";\r
- case NVPTXISD::TexUnified1DArrayU32Float:\r
- return "NVPTXISD::TexUnified1DArrayU32Float";\r
- case NVPTXISD::TexUnified1DArrayU32FloatLevel:\r
- return "NVPTXISD::TexUnified1DArrayU32FloatLevel";\r
- case NVPTXISD::TexUnified1DArrayU32FloatGrad:\r
- return "NVPTXISD::TexUnified1DArrayU32FloatGrad";\r
- case NVPTXISD::TexUnified2DFloatS32:\r
- return "NVPTXISD::TexUnified2DFloatS32";\r
- case NVPTXISD::TexUnified2DFloatFloat:\r
- return "NVPTXISD::TexUnified2DFloatFloat";\r
- case NVPTXISD::TexUnified2DFloatFloatLevel:\r
- return "NVPTXISD::TexUnified2DFloatFloatLevel";\r
- case NVPTXISD::TexUnified2DFloatFloatGrad:\r
- return "NVPTXISD::TexUnified2DFloatFloatGrad";\r
- case NVPTXISD::TexUnified2DS32S32:\r
- return "NVPTXISD::TexUnified2DS32S32";\r
- case NVPTXISD::TexUnified2DS32Float:\r
- return "NVPTXISD::TexUnified2DS32Float";\r
- case NVPTXISD::TexUnified2DS32FloatLevel:\r
- return "NVPTXISD::TexUnified2DS32FloatLevel";\r
- case NVPTXISD::TexUnified2DS32FloatGrad:\r
- return "NVPTXISD::TexUnified2DS32FloatGrad";\r
- case NVPTXISD::TexUnified2DU32S32:\r
- return "NVPTXISD::TexUnified2DU32S32";\r
- case NVPTXISD::TexUnified2DU32Float:\r
- return "NVPTXISD::TexUnified2DU32Float";\r
- case NVPTXISD::TexUnified2DU32FloatLevel:\r
- return "NVPTXISD::TexUnified2DU32FloatLevel";\r
- case NVPTXISD::TexUnified2DU32FloatGrad:\r
- return "NVPTXISD::TexUnified2DU32FloatGrad";\r
- case NVPTXISD::TexUnified2DArrayFloatS32:\r
- return "NVPTXISD::TexUnified2DArrayFloatS32";\r
- case NVPTXISD::TexUnified2DArrayFloatFloat:\r
- return "NVPTXISD::TexUnified2DArrayFloatFloat";\r
- case NVPTXISD::TexUnified2DArrayFloatFloatLevel:\r
- return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";\r
- case NVPTXISD::TexUnified2DArrayFloatFloatGrad:\r
- return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";\r
- case NVPTXISD::TexUnified2DArrayS32S32:\r
- return "NVPTXISD::TexUnified2DArrayS32S32";\r
- case NVPTXISD::TexUnified2DArrayS32Float:\r
- return "NVPTXISD::TexUnified2DArrayS32Float";\r
- case NVPTXISD::TexUnified2DArrayS32FloatLevel:\r
- return "NVPTXISD::TexUnified2DArrayS32FloatLevel";\r
- case NVPTXISD::TexUnified2DArrayS32FloatGrad:\r
- return "NVPTXISD::TexUnified2DArrayS32FloatGrad";\r
- case NVPTXISD::TexUnified2DArrayU32S32:\r
- return "NVPTXISD::TexUnified2DArrayU32S32";\r
- case NVPTXISD::TexUnified2DArrayU32Float:\r
- return "NVPTXISD::TexUnified2DArrayU32Float";\r
- case NVPTXISD::TexUnified2DArrayU32FloatLevel:\r
- return "NVPTXISD::TexUnified2DArrayU32FloatLevel";\r
- case NVPTXISD::TexUnified2DArrayU32FloatGrad:\r
- return "NVPTXISD::TexUnified2DArrayU32FloatGrad";\r
- case NVPTXISD::TexUnified3DFloatS32:\r
- return "NVPTXISD::TexUnified3DFloatS32";\r
- case NVPTXISD::TexUnified3DFloatFloat:\r
- return "NVPTXISD::TexUnified3DFloatFloat";\r
- case NVPTXISD::TexUnified3DFloatFloatLevel:\r
- return "NVPTXISD::TexUnified3DFloatFloatLevel";\r
- case NVPTXISD::TexUnified3DFloatFloatGrad:\r
- return "NVPTXISD::TexUnified3DFloatFloatGrad";\r
- case NVPTXISD::TexUnified3DS32S32:\r
- return "NVPTXISD::TexUnified3DS32S32";\r
- case NVPTXISD::TexUnified3DS32Float:\r
- return "NVPTXISD::TexUnified3DS32Float";\r
- case NVPTXISD::TexUnified3DS32FloatLevel:\r
- return "NVPTXISD::TexUnified3DS32FloatLevel";\r
- case NVPTXISD::TexUnified3DS32FloatGrad:\r
- return "NVPTXISD::TexUnified3DS32FloatGrad";\r
- case NVPTXISD::TexUnified3DU32S32:\r
- return "NVPTXISD::TexUnified3DU32S32";\r
- case NVPTXISD::TexUnified3DU32Float:\r
- return "NVPTXISD::TexUnified3DU32Float";\r
- case NVPTXISD::TexUnified3DU32FloatLevel:\r
- return "NVPTXISD::TexUnified3DU32FloatLevel";\r
- case NVPTXISD::TexUnified3DU32FloatGrad:\r
- return "NVPTXISD::TexUnified3DU32FloatGrad";\r
- case NVPTXISD::TexUnifiedCubeFloatFloat:\r
- return "NVPTXISD::TexUnifiedCubeFloatFloat";\r
- case NVPTXISD::TexUnifiedCubeFloatFloatLevel:\r
- return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";\r
- case NVPTXISD::TexUnifiedCubeS32Float:\r
- return "NVPTXISD::TexUnifiedCubeS32Float";\r
- case NVPTXISD::TexUnifiedCubeS32FloatLevel:\r
- return "NVPTXISD::TexUnifiedCubeS32FloatLevel";\r
- case NVPTXISD::TexUnifiedCubeU32Float:\r
- return "NVPTXISD::TexUnifiedCubeU32Float";\r
- case NVPTXISD::TexUnifiedCubeU32FloatLevel:\r
- return "NVPTXISD::TexUnifiedCubeU32FloatLevel";\r
- case NVPTXISD::TexUnifiedCubeArrayFloatFloat:\r
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";\r
- case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:\r
- return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";\r
- case NVPTXISD::TexUnifiedCubeArrayS32Float:\r
- return "NVPTXISD::TexUnifiedCubeArrayS32Float";\r
- case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:\r
- return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";\r
- case NVPTXISD::TexUnifiedCubeArrayU32Float:\r
- return "NVPTXISD::TexUnifiedCubeArrayU32Float";\r
- case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:\r
- return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";\r
- case NVPTXISD::Tld4UnifiedR2DFloatFloat:\r
- return "NVPTXISD::Tld4UnifiedR2DFloatFloat";\r
- case NVPTXISD::Tld4UnifiedG2DFloatFloat:\r
- return "NVPTXISD::Tld4UnifiedG2DFloatFloat";\r
- case NVPTXISD::Tld4UnifiedB2DFloatFloat:\r
- return "NVPTXISD::Tld4UnifiedB2DFloatFloat";\r
- case NVPTXISD::Tld4UnifiedA2DFloatFloat:\r
- return "NVPTXISD::Tld4UnifiedA2DFloatFloat";\r
- case NVPTXISD::Tld4UnifiedR2DS64Float:\r
- return "NVPTXISD::Tld4UnifiedR2DS64Float";\r
- case NVPTXISD::Tld4UnifiedG2DS64Float:\r
- return "NVPTXISD::Tld4UnifiedG2DS64Float";\r
- case NVPTXISD::Tld4UnifiedB2DS64Float:\r
- return "NVPTXISD::Tld4UnifiedB2DS64Float";\r
- case NVPTXISD::Tld4UnifiedA2DS64Float:\r
- return "NVPTXISD::Tld4UnifiedA2DS64Float";\r
- case NVPTXISD::Tld4UnifiedR2DU64Float:\r
- return "NVPTXISD::Tld4UnifiedR2DU64Float";\r
- case NVPTXISD::Tld4UnifiedG2DU64Float:\r
- return "NVPTXISD::Tld4UnifiedG2DU64Float";\r
- case NVPTXISD::Tld4UnifiedB2DU64Float:\r
- return "NVPTXISD::Tld4UnifiedB2DU64Float";\r
- case NVPTXISD::Tld4UnifiedA2DU64Float:\r
- return "NVPTXISD::Tld4UnifiedA2DU64Float";\r
-\r
- case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";\r
- case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";\r
- case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";\r
- case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";\r
- case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";\r
- case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";\r
- case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";\r
- case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";\r
- case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";\r
- case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";\r
- case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";\r
-\r
- case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";\r
- case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";\r
- case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";\r
- case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";\r
- case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";\r
- case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";\r
- case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";\r
- case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";\r
- case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";\r
- case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";\r
- case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";\r
-\r
- case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";\r
- case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";\r
- case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";\r
- case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";\r
- case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";\r
- case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";\r
- case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";\r
- case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";\r
- case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";\r
- case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";\r
- case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";\r
-\r
- case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";\r
- case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";\r
- case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";\r
- case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";\r
- case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";\r
- case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";\r
- case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";\r
- case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";\r
- case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";\r
- case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";\r
- case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";\r
-\r
- case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";\r
- case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";\r
- case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";\r
- case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";\r
- case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";\r
- case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";\r
- case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";\r
- case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";\r
- case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";\r
- case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";\r
- case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";\r
-\r
- case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";\r
- case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";\r
- case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";\r
- case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";\r
- case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";\r
- case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";\r
- case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";\r
- case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";\r
- case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";\r
- case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";\r
- case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";\r
-\r
- case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";\r
- case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";\r
- case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";\r
- case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";\r
- case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";\r
- case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";\r
- case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";\r
- case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";\r
- case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";\r
- case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";\r
- case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";\r
-\r
- case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";\r
- case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";\r
- case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";\r
- case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";\r
- case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";\r
- case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";\r
- case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";\r
- case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";\r
- case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";\r
- case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";\r
- case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";\r
-\r
- case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";\r
- case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";\r
- case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";\r
- case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";\r
- case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";\r
- case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";\r
- case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";\r
- case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";\r
- case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";\r
- case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";\r
- case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";\r
-\r
- case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";\r
- case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";\r
- case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";\r
- case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";\r
- case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";\r
- case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";\r
- case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";\r
- case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";\r
- case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";\r
- case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";\r
- case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";\r
-\r
- case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";\r
- case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";\r
- case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";\r
- case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";\r
- case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";\r
- case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";\r
- case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";\r
- case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";\r
- case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";\r
- case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";\r
- case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";\r
-\r
- case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";\r
- case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";\r
- case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";\r
- case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";\r
- case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";\r
- case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";\r
- case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";\r
- case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";\r
- case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";\r
- case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";\r
- case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";\r
-\r
- case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";\r
- case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";\r
- case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";\r
- case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";\r
- case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";\r
- case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";\r
- case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";\r
- case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";\r
- case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";\r
- case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";\r
- case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";\r
-\r
- case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";\r
- case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";\r
- case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";\r
- case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";\r
- case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";\r
- case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";\r
- case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";\r
- case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";\r
- case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";\r
- case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";\r
- case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";\r
-\r
- case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";\r
- case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";\r
- case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";\r
- case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";\r
- case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";\r
- case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";\r
- case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";\r
- case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";\r
- case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";\r
- case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";\r
- case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";\r
- }\r
- return nullptr;\r
-}\r
-\r
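-// For example (illustrative): <4 x i1> is split rather than widened or\r
-// promoted, while v2f16 is kept legal so it can be handled as a single\r
-// f16x2 value.\r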
-TargetLoweringBase::LegalizeTypeAction\r
-NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {\r
- if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)\r
- return TypeSplitVector;\r
- if (VT == MVT::v2f16)\r
- return TypeLegal;\r
- return TargetLoweringBase::getPreferredVectorAction(VT);\r
-}\r
-\r
-SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,\r
- int Enabled, int &ExtraSteps,\r
- bool &UseOneConst,\r
- bool Reciprocal) const {\r
- if (!(Enabled == ReciprocalEstimate::Enabled ||\r
- (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))\r
- return SDValue();\r
-\r
- if (ExtraSteps == ReciprocalEstimate::Unspecified)\r
- ExtraSteps = 0;\r
-\r
- SDLoc DL(Operand);\r
- EVT VT = Operand.getValueType();\r
- bool Ftz = useF32FTZ(DAG.getMachineFunction());\r
-\r
- auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {\r
- return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,\r
- DAG.getConstant(IID, DL, MVT::i32), Operand);\r
- };\r
-\r
- // The sqrt and rsqrt refinement processes assume we always start out with an\r
- // approximation of the rsqrt. Therefore, if we're going to do any refinement\r
- // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing\r
- // any refinement, we must return a regular sqrt.\r
- if (Reciprocal || ExtraSteps > 0) {\r
- if (VT == MVT::f32)\r
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f\r
- : Intrinsic::nvvm_rsqrt_approx_f);\r
- else if (VT == MVT::f64)\r
- return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);\r
- else\r
- return SDValue();\r
- } else {\r
- if (VT == MVT::f32)\r
- return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f\r
- : Intrinsic::nvvm_sqrt_approx_f);\r
- else {\r
- // There's no sqrt.approx.f64 instruction, so we emit\r
- // reciprocal(rsqrt(x)). This is faster than\r
- // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain\r
- // x * rsqrt(x).)\r
- return DAG.getNode(\r
- ISD::INTRINSIC_WO_CHAIN, DL, VT,\r
- DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),\r
- MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));\r
- }\r
- }\r
-}\r
-\r
-SDValue\r
-NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {\r
- SDLoc dl(Op);\r
- const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();\r
- auto PtrVT = getPointerTy(DAG.getDataLayout());\r
- Op = DAG.getTargetGlobalAddress(GV, dl, PtrVT);\r
- return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);\r
-}\r
-\r
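-// getPrototype builds the PTX .callprototype string used for indirect calls.\r
-// For a callee returning i32 and taking (i32, 16-byte aggregate aligned to 4),\r
-// the result looks roughly like (illustrative):\r
-//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _, .param .align 4 .b8 _[16]);\r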
-std::string NVPTXTargetLowering::getPrototype(\r
- const DataLayout &DL, Type *retTy, const ArgListTy &Args,\r
- const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,\r
- const ImmutableCallSite *CS) const {\r
- auto PtrVT = getPointerTy(DL);\r
-\r
- bool isABI = (STI.getSmVersion() >= 20);\r
- assert(isABI && "Non-ABI compilation is not supported");\r
- if (!isABI)\r
- return "";\r
-\r
- std::stringstream O;\r
- O << "prototype_" << uniqueCallSite << " : .callprototype ";\r
-\r
- if (retTy->getTypeID() == Type::VoidTyID) {\r
- O << "()";\r
- } else {\r
- O << "(";\r
- if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {\r
- unsigned size = 0;\r
- if (auto *ITy = dyn_cast<IntegerType>(retTy)) {\r
- size = ITy->getBitWidth();\r
- } else {\r
- assert(retTy->isFloatingPointTy() &&\r
- "Floating point type expected here");\r
- size = retTy->getPrimitiveSizeInBits();\r
- }\r
- // PTX ABI requires all scalar return values to be at least 32\r
- // bits in size. fp16 normally uses .b16 as its storage type in\r
- // PTX, so its size must be adjusted here, too.\r
- if (size < 32)\r
- size = 32;\r
-\r
- O << ".param .b" << size << " _";\r
- } else if (isa<PointerType>(retTy)) {\r
- O << ".param .b" << PtrVT.getSizeInBits() << " _";\r
- } else if (retTy->isAggregateType() || retTy->isVectorTy()) {\r
- auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();\r
- O << ".param .align " << retAlignment << " .b8 _["\r
- << DL.getTypeAllocSize(retTy) << "]";\r
- } else {\r
- llvm_unreachable("Unknown return type");\r
- }\r
- O << ") ";\r
- }\r
- O << "_ (";\r
-\r
- bool first = true;\r
-\r
- unsigned OIdx = 0;\r
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {\r
- Type *Ty = Args[i].Ty;\r
- if (!first) {\r
- O << ", ";\r
- }\r
- first = false;\r
-\r
- if (!Outs[OIdx].Flags.isByVal()) {\r
- if (Ty->isAggregateType() || Ty->isVectorTy()) {\r
- unsigned align = 0;\r
- const CallInst *CallI = cast<CallInst>(CS->getInstruction());\r
- // +1 because index 0 is reserved for return type alignment\r
- if (!getAlign(*CallI, i + 1, align))\r
- align = DL.getABITypeAlignment(Ty);\r
- unsigned sz = DL.getTypeAllocSize(Ty);\r
- O << ".param .align " << align << " .b8 ";\r
- O << "_";\r
- O << "[" << sz << "]";\r
- // update the index for Outs\r
- SmallVector<EVT, 16> vtparts;\r
- ComputeValueVTs(*this, DL, Ty, vtparts);\r
- if (unsigned len = vtparts.size())\r
- OIdx += len - 1;\r
- continue;\r
- }\r
- // i8 types in IR will be i16 types in SDAG\r
- assert((getValueType(DL, Ty) == Outs[OIdx].VT ||\r
- (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&\r
- "type mismatch between callee prototype and arguments");\r
- // scalar type\r
- unsigned sz = 0;\r
- if (isa<IntegerType>(Ty)) {\r
- sz = cast<IntegerType>(Ty)->getBitWidth();\r
- if (sz < 32)\r
- sz = 32;\r
- } else if (isa<PointerType>(Ty)) {\r
- sz = PtrVT.getSizeInBits();\r
- } else if (Ty->isHalfTy())\r
- // PTX ABI requires all scalar parameters to be at least 32\r
- // bits in size. fp16 normally uses .b16 as its storage type\r
- // in PTX, so its size must be adjusted here, too.\r
- sz = 32;\r
- else\r
- sz = Ty->getPrimitiveSizeInBits();\r
- O << ".param .b" << sz << " ";\r
- O << "_";\r
- continue;\r
- }\r
- auto *PTy = dyn_cast<PointerType>(Ty);\r
- assert(PTy && "Param with byval attribute should be a pointer type");\r
- Type *ETy = PTy->getElementType();\r
-\r
- unsigned align = Outs[OIdx].Flags.getByValAlign();\r
- unsigned sz = DL.getTypeAllocSize(ETy);\r
- O << ".param .align " << align << " .b8 ";\r
- O << "_";\r
- O << "[" << sz << "]";\r
- }\r
- O << ");";\r
- return O.str();\r
-}\r
-\r
-unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,\r
- const ImmutableCallSite *CS,\r
- Type *Ty, unsigned Idx,\r
- const DataLayout &DL) const {\r
- if (!CS) {\r
- // CallSite is null; fall back to the ABI type alignment.\r
- return DL.getABITypeAlignment(Ty);\r
- }\r
-\r
- unsigned Align = 0;\r
- const Value *DirectCallee = CS->getCalledFunction();\r
-\r
- if (!DirectCallee) {\r
- // We don't have a direct function symbol, but that may be because of\r
- // constant cast instructions in the call.\r
- const Instruction *CalleeI = CS->getInstruction();\r
- assert(CalleeI && "Call target is not a function or derived value?");\r
-\r
- // With bitcast'd call targets, the instruction will be the call\r
- if (isa<CallInst>(CalleeI)) {\r
- // Check if we have call alignment metadata\r
- if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))\r
- return Align;\r
-\r
- const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();\r
- // Ignore any bitcast instructions\r
- while (isa<ConstantExpr>(CalleeV)) {\r
- const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);\r
- if (!CE->isCast())\r
- break;\r
- // Look through the bitcast\r
- CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);\r
- }\r
-\r
- // We have now looked past all of the bitcasts. Do we finally have a\r
- // Function?\r
- if (isa<Function>(CalleeV))\r
- DirectCallee = CalleeV;\r
- }\r
- }\r
-\r
- // Check for function alignment information if we found that the\r
- // ultimate target is a Function\r
- if (DirectCallee)\r
- if (getAlign(*cast<Function>(DirectCallee), Idx, Align))\r
- return Align;\r
-\r
- // The call is indirect, or alignment information is not available; fall\r
- // back to the ABI type alignment.\r
- return DL.getABITypeAlignment(Ty);\r
-}\r
-\r
-SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,\r
- SmallVectorImpl<SDValue> &InVals) const {\r
- SelectionDAG &DAG = CLI.DAG;\r
- SDLoc dl = CLI.DL;\r
- SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;\r
- SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;\r
- SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;\r
- SDValue Chain = CLI.Chain;\r
- SDValue Callee = CLI.Callee;\r
- bool &isTailCall = CLI.IsTailCall;\r
- ArgListTy &Args = CLI.getArgs();\r
- Type *RetTy = CLI.RetTy;\r
- ImmutableCallSite *CS = CLI.CS;\r
- const DataLayout &DL = DAG.getDataLayout();\r
-\r
- bool isABI = (STI.getSmVersion() >= 20);\r
- assert(isABI && "Non-ABI compilation is not supported");\r
- if (!isABI)\r
- return Chain;\r
-\r
- SDValue tempChain = Chain;\r
- Chain = DAG.getCALLSEQ_START(Chain, uniqueCallSite, 0, dl);\r
- SDValue InFlag = Chain.getValue(1);\r
-\r
- unsigned paramCount = 0;\r
- // Args.size() and Outs.size() need not match.\r
- // Outs.size() will be larger\r
- // * if there is an aggregate argument with multiple fields (each field\r
- // showing up separately in Outs)\r
- // * if there is a vector argument with more than typical vector-length\r
- // elements (generally if more than 4) where each vector element is\r
- // individually present in Outs.\r
- // So a different index should be used for indexing into Outs/OutVals.\r
- // See similar issue in LowerFormalArguments.\r
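- // For example (illustrative), an aggregate argument of type { i32, float }\r
- // contributes one entry to Args but two consecutive entries to Outs/OutVals.\r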
- unsigned OIdx = 0;\r
- // Declare the .params or .reg needed to pass values\r
- // to the function.\r
- for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {\r
- EVT VT = Outs[OIdx].VT;\r
- Type *Ty = Args[i].Ty;\r
-\r
- if (!Outs[OIdx].Flags.isByVal()) {\r
- SmallVector<EVT, 16> VTs;\r
- SmallVector<uint64_t, 16> Offsets;\r
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);\r
- unsigned ArgAlign =\r
- getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);\r
- unsigned AllocSize = DL.getTypeAllocSize(Ty);\r
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- bool NeedAlign; // Does argument declaration specify alignment?\r
- if (Ty->isAggregateType() || Ty->isVectorTy()) {\r
- // declare .param .align <align> .b8 .param<n>[<size>];\r
- SDValue DeclareParamOps[] = {\r
- Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),\r
- DAG.getConstant(paramCount, dl, MVT::i32),\r
- DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};\r
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,\r
- DeclareParamOps);\r
- NeedAlign = true;\r
- } else {\r
- // declare .param .b<size> .param<n>;\r
- if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {\r
- // PTX ABI requires integral types to be at least 32 bits in\r
- // size. FP16 is loaded/stored using i16, so it's handled\r
- // here as well.\r
- AllocSize = 4;\r
- }\r
- SDValue DeclareScalarParamOps[] = {\r
- Chain, DAG.getConstant(paramCount, dl, MVT::i32),\r
- DAG.getConstant(AllocSize * 8, dl, MVT::i32),\r
- DAG.getConstant(0, dl, MVT::i32), InFlag};\r
- Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,\r
- DeclareScalarParamOps);\r
- NeedAlign = false;\r
- }\r
- InFlag = Chain.getValue(1);\r
-\r
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter\r
- // than 32-bits are sign extended or zero extended, depending on\r
- // whether they are signed or unsigned types. This case applies\r
- // only to scalar parameters and not to aggregate values.\r
- bool ExtendIntegerParam =\r
- Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;\r
-\r
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);\r
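- // VectorInfo tags each element with PVF_FIRST/PVF_LAST so that runs of\r
- // elements can be flushed below as a single StoreParam, StoreParamV2 or\r
- // StoreParamV4. For example (depending on alignment), a 16-byte aligned\r
- // <4 x i32> argument is typically emitted as one StoreParamV4, while a\r
- // poorly aligned one degrades to scalar StoreParam ops.\r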
- SmallVector<SDValue, 6> StoreOperands;\r
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {\r
- // New store.\r
- if (VectorInfo[j] & PVF_FIRST) {\r
- assert(StoreOperands.empty() && "Unfinished preceding store.");\r
- StoreOperands.push_back(Chain);\r
- StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));\r
- StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));\r
- }\r
-\r
- EVT EltVT = VTs[j];\r
- SDValue StVal = OutVals[OIdx];\r
- if (ExtendIntegerParam) {\r
- assert(VTs.size() == 1 && "Scalar can't have multiple parts.");\r
- // zext/sext to i32\r
- StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND\r
- : ISD::ZERO_EXTEND,\r
- dl, MVT::i32, StVal);\r
- } else if (EltVT.getSizeInBits() < 16) {\r
- // Use 16-bit registers for small stores as it's the\r
- // smallest general purpose register size supported by NVPTX.\r
- StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);\r
- }\r
-\r
- // Record the value to store.\r
- StoreOperands.push_back(StVal);\r
-\r
- if (VectorInfo[j] & PVF_LAST) {\r
- unsigned NumElts = StoreOperands.size() - 3;\r
- NVPTXISD::NodeType Op;\r
- switch (NumElts) {\r
- case 1:\r
- Op = NVPTXISD::StoreParam;\r
- break;\r
- case 2:\r
- Op = NVPTXISD::StoreParamV2;\r
- break;\r
- case 4:\r
- Op = NVPTXISD::StoreParamV4;\r
- break;\r
- default:\r
- llvm_unreachable("Invalid vector info.");\r
- }\r
-\r
- StoreOperands.push_back(InFlag);\r
-\r
- // Adjust the type of the store op if we've extended the scalar\r
- // parameter value.\r
- EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];\r
- unsigned EltAlign =\r
- NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;\r
-\r
- Chain = DAG.getMemIntrinsicNode(\r
- Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,\r
- TheStoreType, MachinePointerInfo(), EltAlign,\r
- /* Volatile */ false, /* ReadMem */ false,\r
- /* WriteMem */ true, /* Size */ 0);\r
- InFlag = Chain.getValue(1);\r
-\r
- // Cleanup.\r
- StoreOperands.clear();\r
- }\r
- ++OIdx;\r
- }\r
- assert(StoreOperands.empty() && "Unfinished parameter store.");\r
- if (VTs.size() > 0)\r
- --OIdx;\r
- ++paramCount;\r
- continue;\r
- }\r
-\r
- // ByVal arguments\r
- SmallVector<EVT, 16> VTs;\r
- SmallVector<uint64_t, 16> Offsets;\r
- auto *PTy = dyn_cast<PointerType>(Args[i].Ty);\r
- assert(PTy && "Type of a byval parameter should be pointer");\r
- ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);\r
-\r
- // declare .param .align <align> .b8 .param<n>[<size>];\r
- unsigned sz = Outs[OIdx].Flags.getByValSize();\r
- SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();\r
- // The ByValAlign in Outs[OIdx].Flags is always set at this point, so we\r
- // don't need to worry about whether it is the natural alignment or not.\r
- // See TargetLowering::LowerCallTo().\r
-\r
- // Enforce minimum alignment of 4 to work around ptxas miscompile\r
- // for sm_50+. See corresponding alignment adjustment in\r
- // emitFunctionParamList() for details.\r
- if (ArgAlign < 4)\r
- ArgAlign = 4;\r
- SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),\r
- DAG.getConstant(paramCount, dl, MVT::i32),\r
- DAG.getConstant(sz, dl, MVT::i32), InFlag};\r
- Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,\r
- DeclareParamOps);\r
- InFlag = Chain.getValue(1);\r
- for (unsigned j = 0, je = VTs.size(); j != je; ++j) {\r
- EVT elemtype = VTs[j];\r
- int curOffset = Offsets[j];\r
- unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);\r
- auto PtrVT = getPointerTy(DL);\r
- SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],\r
- DAG.getConstant(curOffset, dl, PtrVT));\r
- SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,\r
- MachinePointerInfo(), PartAlign);\r
- if (elemtype.getSizeInBits() < 16) {\r
- theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);\r
- }\r
- SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue CopyParamOps[] = { Chain,\r
- DAG.getConstant(paramCount, dl, MVT::i32),\r
- DAG.getConstant(curOffset, dl, MVT::i32),\r
- theVal, InFlag };\r
- Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,\r
- CopyParamOps, elemtype,\r
- MachinePointerInfo(), /* Align */ 0,\r
- /* Volatile */ false, /* ReadMem */ false,\r
- /* WriteMem */ true, /* Size */ 0);\r
-\r
- InFlag = Chain.getValue(1);\r
- }\r
- ++paramCount;\r
- }\r
-\r
- GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());\r
- unsigned retAlignment = 0;\r
-\r
- // Handle Result\r
- if (Ins.size() > 0) {\r
- SmallVector<EVT, 16> resvtparts;\r
- ComputeValueVTs(*this, DL, RetTy, resvtparts);\r
-\r
- // Declare\r
- // .param .align 16 .b8 retval0[<size-in-bytes>], or\r
- // .param .b<size-in-bits> retval0\r
- unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);\r
- // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for\r
- // these three types to match the logic in\r
- // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.\r
- // Plus, this behavior is consistent with nvcc's.\r
- if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||\r
- RetTy->isPointerTy()) {\r
- // Scalar needs to be at least 32 bits wide\r
- if (resultsz < 32)\r
- resultsz = 32;\r
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),\r
- DAG.getConstant(resultsz, dl, MVT::i32),\r
- DAG.getConstant(0, dl, MVT::i32), InFlag };\r
- Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,\r
- DeclareRetOps);\r
- InFlag = Chain.getValue(1);\r
- } else {\r
- retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);\r
- SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue DeclareRetOps[] = { Chain,\r
- DAG.getConstant(retAlignment, dl, MVT::i32),\r
- DAG.getConstant(resultsz / 8, dl, MVT::i32),\r
- DAG.getConstant(0, dl, MVT::i32), InFlag };\r
- Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,\r
- DeclareRetOps);\r
- InFlag = Chain.getValue(1);\r
- }\r
- }\r
-\r
- if (!Func) {\r
- // This is the indirect function call case: PTX requires a prototype of the\r
- // form\r
- // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);\r
- // to be emitted, and the label has to be used as the last arg of the call\r
- // instruction.\r
- // The prototype is embedded in a string and passed as the operand of a\r
- // CallPrototype SDNode, which is printed out as the contents of the string.\r
- SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);\r
- const char *ProtoStr =\r
- nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();\r
- SDValue ProtoOps[] = {\r
- Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,\r
- };\r
- Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);\r
- InFlag = Chain.getValue(1);\r
- }\r
- // Op to just print "call"\r
- SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue PrintCallOps[] = {\r
- Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag\r
- };\r
- // We model convergent calls as separate opcodes.\r
- unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;\r
- if (CLI.IsConvergent)\r
- Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni\r
- : NVPTXISD::PrintConvergentCall;\r
- Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);\r
- InFlag = Chain.getValue(1);\r
-\r
- // Ops to print out the function name\r
- SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue CallVoidOps[] = { Chain, Callee, InFlag };\r
- Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);\r
- InFlag = Chain.getValue(1);\r
-\r
- // Ops to print out the param list\r
- SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue CallArgBeginOps[] = { Chain, InFlag };\r
- Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,\r
- CallArgBeginOps);\r
- InFlag = Chain.getValue(1);\r
-\r
- for (unsigned i = 0, e = paramCount; i != e; ++i) {\r
- unsigned opcode;\r
- if (i == (e - 1))\r
- opcode = NVPTXISD::LastCallArg;\r
- else\r
- opcode = NVPTXISD::CallArg;\r
- SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),\r
- DAG.getConstant(i, dl, MVT::i32), InFlag };\r
- Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);\r
- InFlag = Chain.getValue(1);\r
- }\r
- SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue CallArgEndOps[] = { Chain,\r
- DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),\r
- InFlag };\r
- Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);\r
- InFlag = Chain.getValue(1);\r
-\r
- if (!Func) {\r
- SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);\r
- SDValue PrototypeOps[] = { Chain,\r
- DAG.getConstant(uniqueCallSite, dl, MVT::i32),\r
- InFlag };\r
- Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);\r
- InFlag = Chain.getValue(1);\r
- }\r
-\r
- // Generate loads from param memory/moves from registers for result\r
- if (Ins.size() > 0) {\r
- SmallVector<EVT, 16> VTs;\r
- SmallVector<uint64_t, 16> Offsets;\r
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);\r
- assert(VTs.size() == Ins.size() && "Bad value decomposition");\r
-\r
- unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);\r
- auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);\r
-\r
- SmallVector<EVT, 6> LoadVTs;\r
- int VecIdx = -1; // Index of the first element of the vector.\r
-\r
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than\r
- // 32-bits are sign extended or zero extended, depending on whether\r
- // they are signed or unsigned types.\r
- bool ExtendIntegerRetVal =\r
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;\r
-\r
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {\r
- bool needTruncate = false;\r
- EVT TheLoadType = VTs[i];\r
- EVT EltType = Ins[i].VT;\r
- unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);\r
- if (ExtendIntegerRetVal) {\r
- TheLoadType = MVT::i32;\r
- EltType = MVT::i32;\r
- needTruncate = true;\r
- } else if (TheLoadType.getSizeInBits() < 16) {\r
- if (VTs[i].isInteger())\r
- needTruncate = true;\r
- EltType = MVT::i16;\r
- }\r
-\r
- // Record index of the very first element of the vector.\r
- if (VectorInfo[i] & PVF_FIRST) {\r
- assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");\r
- VecIdx = i;\r
- }\r
-\r
- LoadVTs.push_back(EltType);\r
-\r
- if (VectorInfo[i] & PVF_LAST) {\r
- unsigned NumElts = LoadVTs.size();\r
- LoadVTs.push_back(MVT::Other);\r
- LoadVTs.push_back(MVT::Glue);\r
- NVPTXISD::NodeType Op;\r
- switch (NumElts) {\r
- case 1:\r
- Op = NVPTXISD::LoadParam;\r
- break;\r
- case 2:\r
- Op = NVPTXISD::LoadParamV2;\r
- break;\r
- case 4:\r
- Op = NVPTXISD::LoadParamV4;\r
- break;\r
- default:\r
- llvm_unreachable("Invalid vector info.");\r
- }\r
-\r
- SDValue LoadOperands[] = {\r
- Chain, DAG.getConstant(1, dl, MVT::i32),\r
- DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};\r
- SDValue RetVal = DAG.getMemIntrinsicNode(\r
- Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,\r
- MachinePointerInfo(), EltAlign, /* Volatile */ false,\r
- /* ReadMem */ true, /* WriteMem */ false, /* Size */ 0);\r
-\r
- for (unsigned j = 0; j < NumElts; ++j) {\r
- SDValue Ret = RetVal.getValue(j);\r
- if (needTruncate)\r
- Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);\r
- InVals.push_back(Ret);\r
- }\r
- Chain = RetVal.getValue(NumElts);\r
- InFlag = RetVal.getValue(NumElts + 1);\r
-\r
- // Cleanup\r
- VecIdx = -1;\r
- LoadVTs.clear();\r
- }\r
- }\r
- }\r
-\r
- Chain = DAG.getCALLSEQ_END(Chain,\r
- DAG.getIntPtrConstant(uniqueCallSite, dl, true),\r
- DAG.getIntPtrConstant(uniqueCallSite + 1, dl,\r
- true),\r
- InFlag, dl);\r
- uniqueCallSite++;\r
-\r
- // set isTailCall to false for now, until we figure out how to express\r
- // tail call optimization in PTX\r
- isTailCall = false;\r
- return Chain;\r
-}\r
-\r
-// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()\r
-// (see LegalizeDAG.cpp). This is slow and uses local memory.\r
-// We instead use extract/insert/build-vector, just as LegalizeOp() did in LLVM 2.5.\r
-SDValue\r
-NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {\r
- SDNode *Node = Op.getNode();\r
- SDLoc dl(Node);\r
- SmallVector<SDValue, 8> Ops;\r
- unsigned NumOperands = Node->getNumOperands();\r
- for (unsigned i = 0; i < NumOperands; ++i) {\r
- SDValue SubOp = Node->getOperand(i);\r
- EVT VVT = SubOp.getNode()->getValueType(0);\r
- EVT EltVT = VVT.getVectorElementType();\r
- unsigned NumSubElem = VVT.getVectorNumElements();\r
- for (unsigned j = 0; j < NumSubElem; ++j) {\r
- Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,\r
- DAG.getIntPtrConstant(j, dl)));\r
- }\r
- }\r
- return DAG.getBuildVector(Node->getValueType(0), dl, Ops);\r
-}\r
-\r
-// We can initialize a constant f16x2 with a single .b32 move. Normally it\r
-// would get lowered as two constant loads and a vector-packing move:\r
-// mov.b16 %h1, 0x4000;\r
-// mov.b16 %h2, 0x3C00;\r
-// mov.b32 %hh2, {%h2, %h1};\r
-// Instead we want just a constant move:\r
-// mov.b32 %hh2, 0x40003C00\r
-//\r
-// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0\r
-// generates good SASS in both cases.\r
-SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,\r
- SelectionDAG &DAG) const {\r
- if (!(Op->getValueType(0) == MVT::v2f16 &&\r
- isa<ConstantFPSDNode>(Op->getOperand(0)) &&\r
- isa<ConstantFPSDNode>(Op->getOperand(1))))\r
- return Op;\r
-\r
- APInt E0 =\r
- cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();\r
- APInt E1 =\r
- cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();\r
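- // E.g., matching the example above: {1.0h, 2.0h} gives E0 = 0x3C00 and\r
- // E1 = 0x4000, so the packed constant is (0x4000 << 16) | 0x3C00 = 0x40003C00.\r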
- SDValue Const =\r
- DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);\r
- return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);\r
-}\r
-\r
-SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,\r
- SelectionDAG &DAG) const {\r
- SDValue Index = Op->getOperand(1);\r
- // Constant index will be matched by tablegen.\r
- if (isa<ConstantSDNode>(Index.getNode()))\r
- return Op;\r
-\r
- // Extract individual elements and select one of them.\r
- SDValue Vector = Op->getOperand(0);\r
- EVT VectorVT = Vector.getValueType();\r
- assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");\r
- EVT EltVT = VectorVT.getVectorElementType();\r
-\r
- SDLoc dl(Op.getNode());\r
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,\r
- DAG.getIntPtrConstant(0, dl));\r
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,\r
- DAG.getIntPtrConstant(1, dl));\r
- return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,\r
- ISD::CondCode::SETEQ);\r
-}\r
-\r
-/// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which\r
-/// 1) return two i32 values and take a 2 x i32 value to shift plus a shift\r
-/// amount, or\r
-/// 2) return two i64 values and take a 2 x i64 value to shift plus a shift\r
-/// amount.\r
-SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,\r
- SelectionDAG &DAG) const {\r
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");\r
- assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);\r
-\r
- EVT VT = Op.getValueType();\r
- unsigned VTBits = VT.getSizeInBits();\r
- SDLoc dl(Op);\r
- SDValue ShOpLo = Op.getOperand(0);\r
- SDValue ShOpHi = Op.getOperand(1);\r
- SDValue ShAmt = Op.getOperand(2);\r
- unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;\r
-\r
- if (VTBits == 32 && STI.getSmVersion() >= 35) {\r
- // For 32-bit values on sm_35+, we can use the funnel shift 'shf' instruction.\r
- // {dHi, dLo} = {aHi, aLo} >> Amt\r
- // dHi = aHi >> Amt\r
- // dLo = shf.r.clamp aLo, aHi, Amt\r
-\r
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);\r
- SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,\r
- ShAmt);\r
-\r
- SDValue Ops[2] = { Lo, Hi };\r
- return DAG.getMergeValues(Ops, dl);\r
- } else {\r
- // {dHi, dLo} = {aHi, aLo} >> Amt\r
- // - if (Amt>=size) then\r
- // dLo = aHi >> (Amt-size)\r
- // dHi = aHi >> Amt (this is either all 0 or all 1)\r
- // else\r
- // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))\r
- // dHi = aHi >> Amt\r
-\r
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,\r
- DAG.getConstant(VTBits, dl, MVT::i32),\r
- ShAmt);\r
- SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);\r
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,\r
- DAG.getConstant(VTBits, dl, MVT::i32));\r
- SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);\r
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);\r
- SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);\r
-\r
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,\r
- DAG.getConstant(VTBits, dl, MVT::i32),\r
- ISD::SETGE);\r
- SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);\r
- SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);\r
-\r
- SDValue Ops[2] = { Lo, Hi };\r
- return DAG.getMergeValues(Ops, dl);\r
- }\r
-}\r
-\r
-/// LowerShiftLeftParts - Lower SHL_PARTS, which\r
-/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift\r
-/// amount, or\r
-/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift\r
-/// amount.\r
-SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,\r
- SelectionDAG &DAG) const {\r
- assert(Op.getNumOperands() == 3 && "Not a double-shift!");\r
- assert(Op.getOpcode() == ISD::SHL_PARTS);\r
-\r
- EVT VT = Op.getValueType();\r
- unsigned VTBits = VT.getSizeInBits();\r
- SDLoc dl(Op);\r
- SDValue ShOpLo = Op.getOperand(0);\r
- SDValue ShOpHi = Op.getOperand(1);\r
- SDValue ShAmt = Op.getOperand(2);\r
-\r
- if (VTBits == 32 && STI.getSmVersion() >= 35) {\r
- // For 32-bit values on sm_35+, we can use the funnel shift 'shf' instruction.\r
- // {dHi, dLo} = {aHi, aLo} << Amt\r
- // dHi = shf.l.clamp aLo, aHi, Amt\r
- // dLo = aLo << Amt\r
-\r
- SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,\r
- ShAmt);\r
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);\r
-\r
- SDValue Ops[2] = { Lo, Hi };\r
- return DAG.getMergeValues(Ops, dl);\r
- } else {\r
- // {dHi, dLo} = {aHi, aLo} << Amt\r
- // - if (Amt>=size) then\r
- // dLo = aLo << Amt (all 0)\r
- // dHi = aLo << (Amt-size)\r
- // else\r
- // dLo = aLo << Amt\r
- // dHi = (aHi << Amt) | (aLo >> (size-Amt))\r
-\r
- SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,\r
- DAG.getConstant(VTBits, dl, MVT::i32),\r
- ShAmt);\r
- SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);\r
- SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,\r
- DAG.getConstant(VTBits, dl, MVT::i32));\r
- SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);\r
- SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);\r
- SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);\r
-\r
- SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,\r
- DAG.getConstant(VTBits, dl, MVT::i32),\r
- ISD::SETGE);\r
- SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);\r
- SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);\r
-\r
- SDValue Ops[2] = { Lo, Hi };\r
- return DAG.getMergeValues(Ops, dl);\r
- }\r
-}\r
-\r
-SDValue\r
-NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {\r
- switch (Op.getOpcode()) {\r
- case ISD::RETURNADDR:\r
- return SDValue();\r
- case ISD::FRAMEADDR:\r
- return SDValue();\r
- case ISD::GlobalAddress:\r
- return LowerGlobalAddress(Op, DAG);\r
- case ISD::INTRINSIC_W_CHAIN:\r
- return Op;\r
- case ISD::BUILD_VECTOR:\r
- return LowerBUILD_VECTOR(Op, DAG);\r
- case ISD::EXTRACT_SUBVECTOR:\r
- return Op;\r
- case ISD::EXTRACT_VECTOR_ELT:\r
- return LowerEXTRACT_VECTOR_ELT(Op, DAG);\r
- case ISD::CONCAT_VECTORS:\r
- return LowerCONCAT_VECTORS(Op, DAG);\r
- case ISD::STORE:\r
- return LowerSTORE(Op, DAG);\r
- case ISD::LOAD:\r
- return LowerLOAD(Op, DAG);\r
- case ISD::SHL_PARTS:\r
- return LowerShiftLeftParts(Op, DAG);\r
- case ISD::SRA_PARTS:\r
- case ISD::SRL_PARTS:\r
- return LowerShiftRightParts(Op, DAG);\r
- case ISD::SELECT:\r
- return LowerSelect(Op, DAG);\r
- default:\r
- llvm_unreachable("Custom lowering not defined for operation");\r
- }\r
-}\r
-\r
-SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {\r
- SDValue Op0 = Op->getOperand(0);\r
- SDValue Op1 = Op->getOperand(1);\r
- SDValue Op2 = Op->getOperand(2);\r
- SDLoc DL(Op.getNode());\r
-\r
- assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");\r
-\r
- Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);\r
- Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);\r
- SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);\r
- SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);\r
-\r
- return Trunc;\r
-}\r
-\r
-SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {\r
- if (Op.getValueType() == MVT::i1)\r
- return LowerLOADi1(Op, DAG);\r
-\r
- // v2f16 is legal, so we can't rely on the legalizer to handle unaligned\r
- // loads; we have to handle them here.\r
- if (Op.getValueType() == MVT::v2f16) {\r
- LoadSDNode *Load = cast<LoadSDNode>(Op);\r
- EVT MemVT = Load->getMemoryVT();\r
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,\r
- Load->getAddressSpace(), Load->getAlignment())) {\r
- SDValue Ops[2];\r
- std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);\r
- return DAG.getMergeValues(Ops, SDLoc(Op));\r
- }\r
- }\r
-\r
- return SDValue();\r
-}\r
-\r
-// v = ld i1* addr\r
-// =>\r
-// v1 = ld i8* addr (-> i16)\r
-// v = trunc i16 to i1\r
-SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {\r
- SDNode *Node = Op.getNode();\r
- LoadSDNode *LD = cast<LoadSDNode>(Node);\r
- SDLoc dl(Node);\r
- assert(LD->getExtensionType() == ISD::NON_EXTLOAD);\r
- assert(Node->getValueType(0) == MVT::i1 &&\r
- "Custom lowering for i1 load only");\r
- SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),\r
- LD->getPointerInfo(), LD->getAlignment(),\r
- LD->getMemOperand()->getFlags());\r
- SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);\r
- // The legalizer (the caller) is expecting two values from the legalized\r
- // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()\r
- // in LegalizeDAG.cpp which also uses MergeValues.\r
- SDValue Ops[] = { result, LD->getChain() };\r
- return DAG.getMergeValues(Ops, dl);\r
-}\r
-\r
-SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {\r
- StoreSDNode *Store = cast<StoreSDNode>(Op);\r
- EVT VT = Store->getMemoryVT();\r
-\r
- if (VT == MVT::i1)\r
- return LowerSTOREi1(Op, DAG);\r
-\r
- // v2f16 is legal, so we can't rely on the legalizer to handle unaligned\r
- // stores; we have to handle them here.\r
- if (VT == MVT::v2f16 &&\r
- !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,\r
- Store->getAddressSpace(), Store->getAlignment()))\r
- return expandUnalignedStore(Store, DAG);\r
-\r
- if (VT.isVector())\r
- return LowerSTOREVector(Op, DAG);\r
-\r
- return SDValue();\r
-}\r
-\r
-SDValue\r
-NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {\r
- SDNode *N = Op.getNode();\r
- SDValue Val = N->getOperand(1);\r
- SDLoc DL(N);\r
- EVT ValVT = Val.getValueType();\r
-\r
- if (ValVT.isVector()) {\r
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not\r
- // legal. We can (and should) split that into 2 stores of <2 x double> here\r
- // but I'm leaving that as a TODO for now.\r
- if (!ValVT.isSimple())\r
- return SDValue();\r
- switch (ValVT.getSimpleVT().SimpleTy) {\r
- default:\r
- return SDValue();\r
- case MVT::v2i8:\r
- case MVT::v2i16:\r
- case MVT::v2i32:\r
- case MVT::v2i64:\r
- case MVT::v2f16:\r
- case MVT::v2f32:\r
- case MVT::v2f64:\r
- case MVT::v4i8:\r
- case MVT::v4i16:\r
- case MVT::v4i32:\r
- case MVT::v4f16:\r
- case MVT::v4f32:\r
- case MVT::v8f16: // <4 x f16x2>\r
- // This is a "native" vector type\r
- break;\r
- }\r
-\r
- MemSDNode *MemSD = cast<MemSDNode>(N);\r
- const DataLayout &TD = DAG.getDataLayout();\r
-\r
- unsigned Align = MemSD->getAlignment();\r
- unsigned PrefAlign =\r
- TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));\r
- if (Align < PrefAlign) {\r
- // This store is not sufficiently aligned, so bail out and let this vector\r
- // store be scalarized. Note that we may still be able to emit smaller\r
- // vector stores. For example, if we are storing a <4 x float> with an\r
- // alignment of 8, this check will fail but the legalizer will try again\r
- // with 2 x <2 x float>, which will succeed with an alignment of 8.\r
- return SDValue();\r
- }\r
-\r
- unsigned Opcode = 0;\r
- EVT EltVT = ValVT.getVectorElementType();\r
- unsigned NumElts = ValVT.getVectorNumElements();\r
-\r
- // Since StoreV2 is a target node, we cannot rely on DAG type legalization.\r
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the\r
- // stored type to i16 and propagate the "real" type as the memory type.\r
- bool NeedExt = false;\r
- if (EltVT.getSizeInBits() < 16)\r
- NeedExt = true;\r
-\r
- bool StoreF16x2 = false;\r
- switch (NumElts) {\r
- default:\r
- return SDValue();\r
- case 2:\r
- Opcode = NVPTXISD::StoreV2;\r
- break;\r
- case 4:\r
- Opcode = NVPTXISD::StoreV4;\r
- break;\r
- case 8:\r
- // v8f16 is a special case. PTX doesn't have an st.v8.f16\r
- // instruction. Instead, we split the vector into v2f16 chunks and\r
- // store them with st.v4.b32.\r
- assert(EltVT == MVT::f16 && "Wrong type for the vector.");\r
- Opcode = NVPTXISD::StoreV4;\r
- StoreF16x2 = true;\r
- break;\r
- }\r
-\r
- SmallVector<SDValue, 8> Ops;\r
-\r
- // First is the chain\r
- Ops.push_back(N->getOperand(0));\r
-\r
- if (StoreF16x2) {\r
- // Combine f16,f16 -> v2f16\r
- NumElts /= 2;\r
- for (unsigned i = 0; i < NumElts; ++i) {\r
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,\r
- DAG.getIntPtrConstant(i * 2, DL));\r
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,\r
- DAG.getIntPtrConstant(i * 2 + 1, DL));\r
- SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);\r
- Ops.push_back(V2);\r
- }\r
- } else {\r
- // Then the split values\r
- for (unsigned i = 0; i < NumElts; ++i) {\r
- SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,\r
- DAG.getIntPtrConstant(i, DL));\r
- if (NeedExt)\r
- ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);\r
- Ops.push_back(ExtVal);\r
- }\r
- }\r
-\r
- // Then any remaining arguments\r
- Ops.append(N->op_begin() + 2, N->op_end());\r
-\r
- SDValue NewSt =\r
- DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,\r
- MemSD->getMemoryVT(), MemSD->getMemOperand());\r
-\r
- return NewSt;\r
- }\r
-\r
- return SDValue();\r
-}\r
-\r
-// st i1 v, addr\r
-// =>\r
-// v1 = zxt v to i16\r
-// st.u8 i16, addr\r
-SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {\r
- SDNode *Node = Op.getNode();\r
- SDLoc dl(Node);\r
- StoreSDNode *ST = cast<StoreSDNode>(Node);\r
- SDValue Tmp1 = ST->getChain();\r
- SDValue Tmp2 = ST->getBasePtr();\r
- SDValue Tmp3 = ST->getValue();\r
- assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");\r
- Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);\r
- SDValue Result =\r
- DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,\r
- ST->getAlignment(), ST->getMemOperand()->getFlags());\r
- return Result;\r
-}\r
-\r
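-// Returns a TargetExternalSymbol naming a formal parameter's param-space\r
-// symbol, "<function-name>_param_<idx>"; e.g. for a (hypothetical) kernel\r
-// `foo`, its first parameter is "foo_param_0". The string is kept alive in\r
-// the target machine's managed string pool.\r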
-SDValue\r
-NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {\r
- std::string ParamSym;\r
- raw_string_ostream ParamStr(ParamSym);\r
-\r
- ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;\r
- ParamStr.flush();\r
-\r
- std::string *SavedStr =\r
- nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());\r
- return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);\r
-}\r
-\r
-// Check to see if the kernel argument is image*_t or sampler_t\r
-\r
-static bool isImageOrSamplerVal(const Value *arg, const Module *context) {\r
- static const char *const specialTypes[] = { "struct._image2d_t",\r
- "struct._image3d_t",\r
- "struct._sampler_t" };\r
-\r
- Type *Ty = arg->getType();\r
- auto *PTy = dyn_cast<PointerType>(Ty);\r
-\r
- if (!PTy)\r
- return false;\r
-\r
- if (!context)\r
- return false;\r
-\r
- auto *STy = dyn_cast<StructType>(PTy->getElementType());\r
- if (!STy || STy->isLiteral())\r
- return false;\r
-\r
- return std::find(std::begin(specialTypes), std::end(specialTypes),\r
- STy->getName()) != std::end(specialTypes);\r
-}\r
-\r
-SDValue NVPTXTargetLowering::LowerFormalArguments(\r
- SDValue Chain, CallingConv::ID CallConv, bool isVarArg,\r
- const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,\r
- SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {\r
- MachineFunction &MF = DAG.getMachineFunction();\r
- const DataLayout &DL = DAG.getDataLayout();\r
- auto PtrVT = getPointerTy(DAG.getDataLayout());\r
-\r
- const Function *F = MF.getFunction();\r
- const AttributeList &PAL = F->getAttributes();\r
- const TargetLowering *TLI = STI.getTargetLowering();\r
-\r
- SDValue Root = DAG.getRoot();\r
- std::vector<SDValue> OutChains;\r
-\r
- bool isABI = (STI.getSmVersion() >= 20);\r
- assert(isABI && "Non-ABI compilation is not supported");\r
- if (!isABI)\r
- return Chain;\r
-\r
- std::vector<Type *> argTypes;\r
- std::vector<const Argument *> theArgs;\r
- for (const Argument &I : F->args()) {\r
- theArgs.push_back(&I);\r
- argTypes.push_back(I.getType());\r
- }\r
- // argTypes.size() (or theArgs.size()) and Ins.size() need not match.\r
- // Ins.size() will be larger\r
- // * if there is an aggregate argument with multiple fields (each field\r
- // showing up separately in Ins)\r
- // * if there is a vector argument with more than typical vector-length\r
- // elements (generally if more than 4) where each vector element is\r
- // individually present in Ins.\r
- // So a different index should be used for indexing into Ins.\r
- // See similar issue in LowerCall.\r
- unsigned InsIdx = 0;\r
-\r
- int idx = 0;\r
- for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {\r
- Type *Ty = argTypes[i];\r
-\r
- // If the kernel argument is image*_t or sampler_t, convert it to\r
- // an i32 constant holding the parameter position. This can later be\r
- // matched in the AsmPrinter to output the correct mangled name.\r
- if (isImageOrSamplerVal(\r
- theArgs[i],\r
- (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()\r
- : nullptr))) {\r
- assert(isKernelFunction(*F) &&\r
- "Only kernels can have image/sampler params");\r
- InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));\r
- continue;\r
- }\r
-\r
- if (theArgs[i]->use_empty()) {\r
- // argument is dead\r
- if (Ty->isAggregateType()) {\r
- SmallVector<EVT, 16> vtparts;\r
-\r
- ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);\r
- assert(vtparts.size() > 0 && "empty aggregate type not expected");\r
- for (unsigned parti = 0, parte = vtparts.size(); parti != parte;\r
- ++parti) {\r
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));\r
- ++InsIdx;\r
- }\r
- if (vtparts.size() > 0)\r
- --InsIdx;\r
- continue;\r
- }\r
- if (Ty->isVectorTy()) {\r
- EVT ObjectVT = getValueType(DL, Ty);\r
- unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);\r
- for (unsigned parti = 0; parti < NumRegs; ++parti) {\r
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));\r
- ++InsIdx;\r
- }\r
- if (NumRegs > 0)\r
- --InsIdx;\r
- continue;\r
- }\r
- InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));\r
- continue;\r
- }\r
-\r
- // In the following cases, assign a node order of "idx+1"\r
- // to newly created nodes. The SDNodes for params have to\r
- // appear in the same order as their order of appearance\r
- // in the original function. "idx+1" holds that order.\r
- if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {\r
- bool aggregateIsPacked = false;\r
- if (StructType *STy = dyn_cast<StructType>(Ty))\r
- aggregateIsPacked = STy->isPacked();\r
-\r
- SmallVector<EVT, 16> VTs;\r
- SmallVector<uint64_t, 16> Offsets;\r
- ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);\r
- assert(VTs.size() > 0 && "Unexpected empty type.");\r
- auto VectorInfo =\r
- VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));\r
-\r
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);\r
- int VecIdx = -1; // Index of the first element of the current vector.\r
- for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {\r
- if (VectorInfo[parti] & PVF_FIRST) {\r
- assert(VecIdx == -1 && "Orphaned vector.");\r
- VecIdx = parti;\r
- }\r
-\r
- // That's the last element of this load op.\r
- if (VectorInfo[parti] & PVF_LAST) {\r
- unsigned NumElts = parti - VecIdx + 1;\r
- EVT EltVT = VTs[parti];\r
- // i1 is loaded/stored as i8.\r
- EVT LoadVT = EltVT;\r
- if (EltVT == MVT::i1)\r
- LoadVT = MVT::i8;\r
- else if (EltVT == MVT::v2f16)\r
- // getLoad needs a vector type, but it can't handle\r
- // vectors which contain v2f16 elements. So we must load\r
- // using i32 here and then bitcast back.\r
- LoadVT = MVT::i32;\r
-\r
- EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);\r
- SDValue VecAddr =\r
- DAG.getNode(ISD::ADD, dl, PtrVT, Arg,\r
- DAG.getConstant(Offsets[VecIdx], dl, PtrVT));\r
- Value *srcValue = Constant::getNullValue(PointerType::get(\r
- EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));\r
- SDValue P =\r
- DAG.getLoad(VecVT, dl, Root, VecAddr,\r
- MachinePointerInfo(srcValue), aggregateIsPacked,\r
- MachineMemOperand::MODereferenceable |\r
- MachineMemOperand::MOInvariant);\r
- if (P.getNode())\r
- P.getNode()->setIROrder(idx + 1);\r
- for (unsigned j = 0; j < NumElts; ++j) {\r
- SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,\r
- DAG.getIntPtrConstant(j, dl));\r
- // We've loaded i1 as an i8 and now must truncate it back to i1\r
- if (EltVT == MVT::i1)\r
- Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);\r
- // v2f16 was loaded as an i32. Now we must bitcast it back.\r
- else if (EltVT == MVT::v2f16)\r
- Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);\r
- // Extend the element if necessary (e.g. an i8 is loaded\r
- // into an i16 register)\r
- if (Ins[InsIdx].VT.isInteger() &&\r
- Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {\r
- unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND\r
- : ISD::ZERO_EXTEND;\r
- Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);\r
- }\r
- InVals.push_back(Elt);\r
- }\r
-\r
- // Reset vector tracking state.\r
- VecIdx = -1;\r
- }\r
- ++InsIdx;\r
- }\r
- if (VTs.size() > 0)\r
- --InsIdx;\r
- continue;\r
- }\r
-\r
- // Param has ByVal attribute\r
- // Return MoveParam(param symbol).\r
- // Ideally, the param symbol could be returned directly, but when the\r
- // SDNode builder decides to use it in a CopyToReg(), the machine\r
- // instruction fails because TargetExternalSymbol (not lowered) is\r
- // target dependent, and CopyToReg assumes the source is lowered.\r
- EVT ObjectVT = getValueType(DL, Ty);\r
- assert(ObjectVT == Ins[InsIdx].VT &&\r
- "Ins type did not match function type");\r
- SDValue Arg = getParamSymbol(DAG, idx, PtrVT);\r
- SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);\r
- if (p.getNode())\r
- p.getNode()->setIROrder(idx + 1);\r
- InVals.push_back(p);\r
- }\r
-\r
- // Clang will check for explicit varargs and issue an error if any are\r
- // present. However, Clang will let code with an implicit vararg, such as\r
- // f(), pass. See bug 617733.\r
- // We treat this case as if the arg list is empty.\r
- // if (F.isVarArg()) {\r
- // assert(0 && "VarArg not supported yet!");\r
- //}\r
-\r
- if (!OutChains.empty())\r
- DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));\r
-\r
- return Chain;\r
-}\r
-\r
-SDValue\r
-NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,\r
- bool isVarArg,\r
- const SmallVectorImpl<ISD::OutputArg> &Outs,\r
- const SmallVectorImpl<SDValue> &OutVals,\r
- const SDLoc &dl, SelectionDAG &DAG) const {\r
- MachineFunction &MF = DAG.getMachineFunction();\r
- Type *RetTy = MF.getFunction()->getReturnType();\r
-\r
- bool isABI = (STI.getSmVersion() >= 20);\r
- assert(isABI && "Non-ABI compilation is not supported");\r
- if (!isABI)\r
- return Chain;\r
-\r
- const DataLayout DL = DAG.getDataLayout();\r
- SmallVector<EVT, 16> VTs;\r
- SmallVector<uint64_t, 16> Offsets;\r
- ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);\r
- assert(VTs.size() == OutVals.size() && "Bad return value decomposition");\r
-\r
- auto VectorInfo = VectorizePTXValueVTs(\r
- VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);\r
-\r
- // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than\r
- // 32-bits are sign extended or zero extended, depending on whether\r
- // they are signed or unsigned types.\r
- bool ExtendIntegerRetVal =\r
- RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;\r
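- // E.g. (illustrative) an i8 return value is zero- or sign-extended and\r
- // stored back as a 32-bit .param value, matching the >= 32-bit retval\r
- // declaration the caller emits.\r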
-\r
- SmallVector<SDValue, 6> StoreOperands;\r
- for (unsigned i = 0, e = VTs.size(); i != e; ++i) {\r
- // New load/store. Record chain and offset operands.\r
- if (VectorInfo[i] & PVF_FIRST) {\r
- assert(StoreOperands.empty() && "Orphaned operand list.");\r
- StoreOperands.push_back(Chain);\r
- StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));\r
- }\r
-\r
- SDValue RetVal = OutVals[i];\r
- if (ExtendIntegerRetVal) {\r
- RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND\r
- : ISD::ZERO_EXTEND,\r
- dl, MVT::i32, RetVal);\r
- } else if (RetVal.getValueSizeInBits() < 16) {\r
- // Use 16-bit registers for small load-stores as it's the\r
- // smallest general purpose register size supported by NVPTX.\r
- RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);\r
- }\r
-\r
- // Record the value to return.\r
- StoreOperands.push_back(RetVal);\r
-\r
- // That's the last element of this store op.\r
- if (VectorInfo[i] & PVF_LAST) {\r
- NVPTXISD::NodeType Op;\r
- unsigned NumElts = StoreOperands.size() - 2;\r
- switch (NumElts) {\r
- case 1:\r
- Op = NVPTXISD::StoreRetval;\r
- break;\r
- case 2:\r
- Op = NVPTXISD::StoreRetvalV2;\r
- break;\r
- case 4:\r
- Op = NVPTXISD::StoreRetvalV4;\r
- break;\r
- default:\r
- llvm_unreachable("Invalid vector info.");\r
- }\r
-\r
- // Adjust type of load/store op if we've extended the scalar\r
- // return value.\r
- EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];\r
- Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),\r
- StoreOperands, TheStoreType,\r
- MachinePointerInfo(), /* Align */ 1,\r
- /* Volatile */ false, /* ReadMem */ false,\r
- /* WriteMem */ true, /* Size */ 0);\r
- // Cleanup vector state.\r
- StoreOperands.clear();\r
- }\r
- }\r
-\r
- return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);\r
-}\r
-\r
-void NVPTXTargetLowering::LowerAsmOperandForConstraint(\r
- SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,\r
- SelectionDAG &DAG) const {\r
- if (Constraint.length() > 1)\r
- return;\r
- else\r
- TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);\r
-}\r
-\r
-static unsigned getOpcForTextureInstr(unsigned Intrinsic) {\r
- switch (Intrinsic) {\r
- default:\r
- return 0;\r
-\r
- case Intrinsic::nvvm_tex_1d_v4f32_s32:\r
- return NVPTXISD::Tex1DFloatS32;\r
- case Intrinsic::nvvm_tex_1d_v4f32_f32:\r
- return NVPTXISD::Tex1DFloatFloat;\r
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:\r
- return NVPTXISD::Tex1DFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:\r
- return NVPTXISD::Tex1DFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_1d_v4s32_s32:\r
- return NVPTXISD::Tex1DS32S32;\r
- case Intrinsic::nvvm_tex_1d_v4s32_f32:\r
- return NVPTXISD::Tex1DS32Float;\r
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:\r
- return NVPTXISD::Tex1DS32FloatLevel;\r
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:\r
- return NVPTXISD::Tex1DS32FloatGrad;\r
- case Intrinsic::nvvm_tex_1d_v4u32_s32:\r
- return NVPTXISD::Tex1DU32S32;\r
- case Intrinsic::nvvm_tex_1d_v4u32_f32:\r
- return NVPTXISD::Tex1DU32Float;\r
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:\r
- return NVPTXISD::Tex1DU32FloatLevel;\r
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:\r
- return NVPTXISD::Tex1DU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:\r
- return NVPTXISD::Tex1DArrayFloatS32;\r
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:\r
- return NVPTXISD::Tex1DArrayFloatFloat;\r
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:\r
- return NVPTXISD::Tex1DArrayFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:\r
- return NVPTXISD::Tex1DArrayFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:\r
- return NVPTXISD::Tex1DArrayS32S32;\r
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:\r
- return NVPTXISD::Tex1DArrayS32Float;\r
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:\r
- return NVPTXISD::Tex1DArrayS32FloatLevel;\r
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:\r
- return NVPTXISD::Tex1DArrayS32FloatGrad;\r
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:\r
- return NVPTXISD::Tex1DArrayU32S32;\r
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:\r
- return NVPTXISD::Tex1DArrayU32Float;\r
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:\r
- return NVPTXISD::Tex1DArrayU32FloatLevel;\r
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:\r
- return NVPTXISD::Tex1DArrayU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_2d_v4f32_s32:\r
- return NVPTXISD::Tex2DFloatS32;\r
- case Intrinsic::nvvm_tex_2d_v4f32_f32:\r
- return NVPTXISD::Tex2DFloatFloat;\r
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:\r
- return NVPTXISD::Tex2DFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:\r
- return NVPTXISD::Tex2DFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_2d_v4s32_s32:\r
- return NVPTXISD::Tex2DS32S32;\r
- case Intrinsic::nvvm_tex_2d_v4s32_f32:\r
- return NVPTXISD::Tex2DS32Float;\r
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:\r
- return NVPTXISD::Tex2DS32FloatLevel;\r
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:\r
- return NVPTXISD::Tex2DS32FloatGrad;\r
- case Intrinsic::nvvm_tex_2d_v4u32_s32:\r
- return NVPTXISD::Tex2DU32S32;\r
- case Intrinsic::nvvm_tex_2d_v4u32_f32:\r
- return NVPTXISD::Tex2DU32Float;\r
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:\r
- return NVPTXISD::Tex2DU32FloatLevel;\r
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:\r
- return NVPTXISD::Tex2DU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:\r
- return NVPTXISD::Tex2DArrayFloatS32;\r
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:\r
- return NVPTXISD::Tex2DArrayFloatFloat;\r
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:\r
- return NVPTXISD::Tex2DArrayFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:\r
- return NVPTXISD::Tex2DArrayFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:\r
- return NVPTXISD::Tex2DArrayS32S32;\r
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:\r
- return NVPTXISD::Tex2DArrayS32Float;\r
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:\r
- return NVPTXISD::Tex2DArrayS32FloatLevel;\r
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:\r
- return NVPTXISD::Tex2DArrayS32FloatGrad;\r
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:\r
- return NVPTXISD::Tex2DArrayU32S32;\r
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:\r
- return NVPTXISD::Tex2DArrayU32Float;\r
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:\r
- return NVPTXISD::Tex2DArrayU32FloatLevel;\r
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:\r
- return NVPTXISD::Tex2DArrayU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_3d_v4f32_s32:\r
- return NVPTXISD::Tex3DFloatS32;\r
- case Intrinsic::nvvm_tex_3d_v4f32_f32:\r
- return NVPTXISD::Tex3DFloatFloat;\r
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:\r
- return NVPTXISD::Tex3DFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:\r
- return NVPTXISD::Tex3DFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_3d_v4s32_s32:\r
- return NVPTXISD::Tex3DS32S32;\r
- case Intrinsic::nvvm_tex_3d_v4s32_f32:\r
- return NVPTXISD::Tex3DS32Float;\r
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:\r
- return NVPTXISD::Tex3DS32FloatLevel;\r
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:\r
- return NVPTXISD::Tex3DS32FloatGrad;\r
- case Intrinsic::nvvm_tex_3d_v4u32_s32:\r
- return NVPTXISD::Tex3DU32S32;\r
- case Intrinsic::nvvm_tex_3d_v4u32_f32:\r
- return NVPTXISD::Tex3DU32Float;\r
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:\r
- return NVPTXISD::Tex3DU32FloatLevel;\r
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:\r
- return NVPTXISD::Tex3DU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_cube_v4f32_f32:\r
- return NVPTXISD::TexCubeFloatFloat;\r
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:\r
- return NVPTXISD::TexCubeFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_cube_v4s32_f32:\r
- return NVPTXISD::TexCubeS32Float;\r
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:\r
- return NVPTXISD::TexCubeS32FloatLevel;\r
- case Intrinsic::nvvm_tex_cube_v4u32_f32:\r
- return NVPTXISD::TexCubeU32Float;\r
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:\r
- return NVPTXISD::TexCubeU32FloatLevel;\r
-\r
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:\r
- return NVPTXISD::TexCubeArrayFloatFloat;\r
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:\r
- return NVPTXISD::TexCubeArrayFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:\r
- return NVPTXISD::TexCubeArrayS32Float;\r
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:\r
- return NVPTXISD::TexCubeArrayS32FloatLevel;\r
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:\r
- return NVPTXISD::TexCubeArrayU32Float;\r
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:\r
- return NVPTXISD::TexCubeArrayU32FloatLevel;\r
-\r
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:\r
- return NVPTXISD::Tld4R2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:\r
- return NVPTXISD::Tld4G2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:\r
- return NVPTXISD::Tld4B2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:\r
- return NVPTXISD::Tld4A2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:\r
- return NVPTXISD::Tld4R2DS64Float;\r
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:\r
- return NVPTXISD::Tld4G2DS64Float;\r
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:\r
- return NVPTXISD::Tld4B2DS64Float;\r
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:\r
- return NVPTXISD::Tld4A2DS64Float;\r
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:\r
- return NVPTXISD::Tld4R2DU64Float;\r
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:\r
- return NVPTXISD::Tld4G2DU64Float;\r
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:\r
- return NVPTXISD::Tld4B2DU64Float;\r
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:\r
- return NVPTXISD::Tld4A2DU64Float;\r
-\r
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:\r
- return NVPTXISD::TexUnified1DFloatS32;\r
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:\r
- return NVPTXISD::TexUnified1DFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:\r
- return NVPTXISD::TexUnified1DFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:\r
- return NVPTXISD::TexUnified1DFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:\r
- return NVPTXISD::TexUnified1DS32S32;\r
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:\r
- return NVPTXISD::TexUnified1DS32Float;\r
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:\r
- return NVPTXISD::TexUnified1DS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:\r
- return NVPTXISD::TexUnified1DS32FloatGrad;\r
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:\r
- return NVPTXISD::TexUnified1DU32S32;\r
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:\r
- return NVPTXISD::TexUnified1DU32Float;\r
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:\r
- return NVPTXISD::TexUnified1DU32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:\r
- return NVPTXISD::TexUnified1DU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:\r
- return NVPTXISD::TexUnified1DArrayFloatS32;\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:\r
- return NVPTXISD::TexUnified1DArrayFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:\r
- return NVPTXISD::TexUnified1DArrayFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:\r
- return NVPTXISD::TexUnified1DArrayFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:\r
- return NVPTXISD::TexUnified1DArrayS32S32;\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:\r
- return NVPTXISD::TexUnified1DArrayS32Float;\r
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:\r
- return NVPTXISD::TexUnified1DArrayS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:\r
- return NVPTXISD::TexUnified1DArrayS32FloatGrad;\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:\r
- return NVPTXISD::TexUnified1DArrayU32S32;\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:\r
- return NVPTXISD::TexUnified1DArrayU32Float;\r
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:\r
- return NVPTXISD::TexUnified1DArrayU32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:\r
- return NVPTXISD::TexUnified1DArrayU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:\r
- return NVPTXISD::TexUnified2DFloatS32;\r
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:\r
- return NVPTXISD::TexUnified2DFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:\r
- return NVPTXISD::TexUnified2DFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:\r
- return NVPTXISD::TexUnified2DFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:\r
- return NVPTXISD::TexUnified2DS32S32;\r
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:\r
- return NVPTXISD::TexUnified2DS32Float;\r
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:\r
- return NVPTXISD::TexUnified2DS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:\r
- return NVPTXISD::TexUnified2DS32FloatGrad;\r
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:\r
- return NVPTXISD::TexUnified2DU32S32;\r
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:\r
- return NVPTXISD::TexUnified2DU32Float;\r
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:\r
- return NVPTXISD::TexUnified2DU32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:\r
- return NVPTXISD::TexUnified2DU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:\r
- return NVPTXISD::TexUnified2DArrayFloatS32;\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:\r
- return NVPTXISD::TexUnified2DArrayFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:\r
- return NVPTXISD::TexUnified2DArrayFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:\r
- return NVPTXISD::TexUnified2DArrayFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:\r
- return NVPTXISD::TexUnified2DArrayS32S32;\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:\r
- return NVPTXISD::TexUnified2DArrayS32Float;\r
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:\r
- return NVPTXISD::TexUnified2DArrayS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:\r
- return NVPTXISD::TexUnified2DArrayS32FloatGrad;\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:\r
- return NVPTXISD::TexUnified2DArrayU32S32;\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:\r
- return NVPTXISD::TexUnified2DArrayU32Float;\r
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:\r
- return NVPTXISD::TexUnified2DArrayU32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:\r
- return NVPTXISD::TexUnified2DArrayU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:\r
- return NVPTXISD::TexUnified3DFloatS32;\r
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:\r
- return NVPTXISD::TexUnified3DFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:\r
- return NVPTXISD::TexUnified3DFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:\r
- return NVPTXISD::TexUnified3DFloatFloatGrad;\r
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:\r
- return NVPTXISD::TexUnified3DS32S32;\r
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:\r
- return NVPTXISD::TexUnified3DS32Float;\r
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:\r
- return NVPTXISD::TexUnified3DS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:\r
- return NVPTXISD::TexUnified3DS32FloatGrad;\r
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:\r
- return NVPTXISD::TexUnified3DU32S32;\r
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:\r
- return NVPTXISD::TexUnified3DU32Float;\r
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:\r
- return NVPTXISD::TexUnified3DU32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:\r
- return NVPTXISD::TexUnified3DU32FloatGrad;\r
-\r
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:\r
- return NVPTXISD::TexUnifiedCubeFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:\r
- return NVPTXISD::TexUnifiedCubeFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:\r
- return NVPTXISD::TexUnifiedCubeS32Float;\r
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:\r
- return NVPTXISD::TexUnifiedCubeS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:\r
- return NVPTXISD::TexUnifiedCubeU32Float;\r
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:\r
- return NVPTXISD::TexUnifiedCubeU32FloatLevel;\r
-\r
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:\r
- return NVPTXISD::TexUnifiedCubeArrayFloatFloat;\r
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:\r
- return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;\r
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:\r
- return NVPTXISD::TexUnifiedCubeArrayS32Float;\r
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:\r
- return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;\r
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:\r
- return NVPTXISD::TexUnifiedCubeArrayU32Float;\r
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:\r
- return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;\r
-\r
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:\r
- return NVPTXISD::Tld4UnifiedR2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:\r
- return NVPTXISD::Tld4UnifiedG2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:\r
- return NVPTXISD::Tld4UnifiedB2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:\r
- return NVPTXISD::Tld4UnifiedA2DFloatFloat;\r
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:\r
- return NVPTXISD::Tld4UnifiedR2DS64Float;\r
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:\r
- return NVPTXISD::Tld4UnifiedG2DS64Float;\r
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:\r
- return NVPTXISD::Tld4UnifiedB2DS64Float;\r
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:\r
- return NVPTXISD::Tld4UnifiedA2DS64Float;\r
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:\r
- return NVPTXISD::Tld4UnifiedR2DU64Float;\r
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:\r
- return NVPTXISD::Tld4UnifiedG2DU64Float;\r
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:\r
- return NVPTXISD::Tld4UnifiedB2DU64Float;\r
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:\r
- return NVPTXISD::Tld4UnifiedA2DU64Float;\r
- }\r
-}\r
-\r
-static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {\r
- switch (Intrinsic) {\r
- default:\r
- return 0;\r
- case Intrinsic::nvvm_suld_1d_i8_clamp:\r
- return NVPTXISD::Suld1DI8Clamp;\r
- case Intrinsic::nvvm_suld_1d_i16_clamp:\r
- return NVPTXISD::Suld1DI16Clamp;\r
- case Intrinsic::nvvm_suld_1d_i32_clamp:\r
- return NVPTXISD::Suld1DI32Clamp;\r
- case Intrinsic::nvvm_suld_1d_i64_clamp:\r
- return NVPTXISD::Suld1DI64Clamp;\r
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:\r
- return NVPTXISD::Suld1DV2I8Clamp;\r
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:\r
- return NVPTXISD::Suld1DV2I16Clamp;\r
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:\r
- return NVPTXISD::Suld1DV2I32Clamp;\r
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:\r
- return NVPTXISD::Suld1DV2I64Clamp;\r
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:\r
- return NVPTXISD::Suld1DV4I8Clamp;\r
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:\r
- return NVPTXISD::Suld1DV4I16Clamp;\r
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:\r
- return NVPTXISD::Suld1DV4I32Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:\r
- return NVPTXISD::Suld1DArrayI8Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:\r
- return NVPTXISD::Suld1DArrayI16Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:\r
- return NVPTXISD::Suld1DArrayI32Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:\r
- return NVPTXISD::Suld1DArrayI64Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:\r
- return NVPTXISD::Suld1DArrayV2I8Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:\r
- return NVPTXISD::Suld1DArrayV2I16Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:\r
- return NVPTXISD::Suld1DArrayV2I32Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:\r
- return NVPTXISD::Suld1DArrayV2I64Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:\r
- return NVPTXISD::Suld1DArrayV4I8Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:\r
- return NVPTXISD::Suld1DArrayV4I16Clamp;\r
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:\r
- return NVPTXISD::Suld1DArrayV4I32Clamp;\r
- case Intrinsic::nvvm_suld_2d_i8_clamp:\r
- return NVPTXISD::Suld2DI8Clamp;\r
- case Intrinsic::nvvm_suld_2d_i16_clamp:\r
- return NVPTXISD::Suld2DI16Clamp;\r
- case Intrinsic::nvvm_suld_2d_i32_clamp:\r
- return NVPTXISD::Suld2DI32Clamp;\r
- case Intrinsic::nvvm_suld_2d_i64_clamp:\r
- return NVPTXISD::Suld2DI64Clamp;\r
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:\r
- return NVPTXISD::Suld2DV2I8Clamp;\r
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:\r
- return NVPTXISD::Suld2DV2I16Clamp;\r
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:\r
- return NVPTXISD::Suld2DV2I32Clamp;\r
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:\r
- return NVPTXISD::Suld2DV2I64Clamp;\r
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:\r
- return NVPTXISD::Suld2DV4I8Clamp;\r
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:\r
- return NVPTXISD::Suld2DV4I16Clamp;\r
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:\r
- return NVPTXISD::Suld2DV4I32Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:\r
- return NVPTXISD::Suld2DArrayI8Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:\r
- return NVPTXISD::Suld2DArrayI16Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:\r
- return NVPTXISD::Suld2DArrayI32Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:\r
- return NVPTXISD::Suld2DArrayI64Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:\r
- return NVPTXISD::Suld2DArrayV2I8Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:\r
- return NVPTXISD::Suld2DArrayV2I16Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:\r
- return NVPTXISD::Suld2DArrayV2I32Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:\r
- return NVPTXISD::Suld2DArrayV2I64Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:\r
- return NVPTXISD::Suld2DArrayV4I8Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:\r
- return NVPTXISD::Suld2DArrayV4I16Clamp;\r
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:\r
- return NVPTXISD::Suld2DArrayV4I32Clamp;\r
- case Intrinsic::nvvm_suld_3d_i8_clamp:\r
- return NVPTXISD::Suld3DI8Clamp;\r
- case Intrinsic::nvvm_suld_3d_i16_clamp:\r
- return NVPTXISD::Suld3DI16Clamp;\r
- case Intrinsic::nvvm_suld_3d_i32_clamp:\r
- return NVPTXISD::Suld3DI32Clamp;\r
- case Intrinsic::nvvm_suld_3d_i64_clamp:\r
- return NVPTXISD::Suld3DI64Clamp;\r
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:\r
- return NVPTXISD::Suld3DV2I8Clamp;\r
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:\r
- return NVPTXISD::Suld3DV2I16Clamp;\r
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:\r
- return NVPTXISD::Suld3DV2I32Clamp;\r
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:\r
- return NVPTXISD::Suld3DV2I64Clamp;\r
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:\r
- return NVPTXISD::Suld3DV4I8Clamp;\r
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:\r
- return NVPTXISD::Suld3DV4I16Clamp;\r
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:\r
- return NVPTXISD::Suld3DV4I32Clamp;\r
- case Intrinsic::nvvm_suld_1d_i8_trap:\r
- return NVPTXISD::Suld1DI8Trap;\r
- case Intrinsic::nvvm_suld_1d_i16_trap:\r
- return NVPTXISD::Suld1DI16Trap;\r
- case Intrinsic::nvvm_suld_1d_i32_trap:\r
- return NVPTXISD::Suld1DI32Trap;\r
- case Intrinsic::nvvm_suld_1d_i64_trap:\r
- return NVPTXISD::Suld1DI64Trap;\r
- case Intrinsic::nvvm_suld_1d_v2i8_trap:\r
- return NVPTXISD::Suld1DV2I8Trap;\r
- case Intrinsic::nvvm_suld_1d_v2i16_trap:\r
- return NVPTXISD::Suld1DV2I16Trap;\r
- case Intrinsic::nvvm_suld_1d_v2i32_trap:\r
- return NVPTXISD::Suld1DV2I32Trap;\r
- case Intrinsic::nvvm_suld_1d_v2i64_trap:\r
- return NVPTXISD::Suld1DV2I64Trap;\r
- case Intrinsic::nvvm_suld_1d_v4i8_trap:\r
- return NVPTXISD::Suld1DV4I8Trap;\r
- case Intrinsic::nvvm_suld_1d_v4i16_trap:\r
- return NVPTXISD::Suld1DV4I16Trap;\r
- case Intrinsic::nvvm_suld_1d_v4i32_trap:\r
- return NVPTXISD::Suld1DV4I32Trap;\r
- case Intrinsic::nvvm_suld_1d_array_i8_trap:\r
- return NVPTXISD::Suld1DArrayI8Trap;\r
- case Intrinsic::nvvm_suld_1d_array_i16_trap:\r
- return NVPTXISD::Suld1DArrayI16Trap;\r
- case Intrinsic::nvvm_suld_1d_array_i32_trap:\r
- return NVPTXISD::Suld1DArrayI32Trap;\r
- case Intrinsic::nvvm_suld_1d_array_i64_trap:\r
- return NVPTXISD::Suld1DArrayI64Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:\r
- return NVPTXISD::Suld1DArrayV2I8Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:\r
- return NVPTXISD::Suld1DArrayV2I16Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:\r
- return NVPTXISD::Suld1DArrayV2I32Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:\r
- return NVPTXISD::Suld1DArrayV2I64Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:\r
- return NVPTXISD::Suld1DArrayV4I8Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:\r
- return NVPTXISD::Suld1DArrayV4I16Trap;\r
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:\r
- return NVPTXISD::Suld1DArrayV4I32Trap;\r
- case Intrinsic::nvvm_suld_2d_i8_trap:\r
- return NVPTXISD::Suld2DI8Trap;\r
- case Intrinsic::nvvm_suld_2d_i16_trap:\r
- return NVPTXISD::Suld2DI16Trap;\r
- case Intrinsic::nvvm_suld_2d_i32_trap:\r
- return NVPTXISD::Suld2DI32Trap;\r
- case Intrinsic::nvvm_suld_2d_i64_trap:\r
- return NVPTXISD::Suld2DI64Trap;\r
- case Intrinsic::nvvm_suld_2d_v2i8_trap:\r
- return NVPTXISD::Suld2DV2I8Trap;\r
- case Intrinsic::nvvm_suld_2d_v2i16_trap:\r
- return NVPTXISD::Suld2DV2I16Trap;\r
- case Intrinsic::nvvm_suld_2d_v2i32_trap:\r
- return NVPTXISD::Suld2DV2I32Trap;\r
- case Intrinsic::nvvm_suld_2d_v2i64_trap:\r
- return NVPTXISD::Suld2DV2I64Trap;\r
- case Intrinsic::nvvm_suld_2d_v4i8_trap:\r
- return NVPTXISD::Suld2DV4I8Trap;\r
- case Intrinsic::nvvm_suld_2d_v4i16_trap:\r
- return NVPTXISD::Suld2DV4I16Trap;\r
- case Intrinsic::nvvm_suld_2d_v4i32_trap:\r
- return NVPTXISD::Suld2DV4I32Trap;\r
- case Intrinsic::nvvm_suld_2d_array_i8_trap:\r
- return NVPTXISD::Suld2DArrayI8Trap;\r
- case Intrinsic::nvvm_suld_2d_array_i16_trap:\r
- return NVPTXISD::Suld2DArrayI16Trap;\r
- case Intrinsic::nvvm_suld_2d_array_i32_trap:\r
- return NVPTXISD::Suld2DArrayI32Trap;\r
- case Intrinsic::nvvm_suld_2d_array_i64_trap:\r
- return NVPTXISD::Suld2DArrayI64Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:\r
- return NVPTXISD::Suld2DArrayV2I8Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:\r
- return NVPTXISD::Suld2DArrayV2I16Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:\r
- return NVPTXISD::Suld2DArrayV2I32Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:\r
- return NVPTXISD::Suld2DArrayV2I64Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:\r
- return NVPTXISD::Suld2DArrayV4I8Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:\r
- return NVPTXISD::Suld2DArrayV4I16Trap;\r
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:\r
- return NVPTXISD::Suld2DArrayV4I32Trap;\r
- case Intrinsic::nvvm_suld_3d_i8_trap:\r
- return NVPTXISD::Suld3DI8Trap;\r
- case Intrinsic::nvvm_suld_3d_i16_trap:\r
- return NVPTXISD::Suld3DI16Trap;\r
- case Intrinsic::nvvm_suld_3d_i32_trap:\r
- return NVPTXISD::Suld3DI32Trap;\r
- case Intrinsic::nvvm_suld_3d_i64_trap:\r
- return NVPTXISD::Suld3DI64Trap;\r
- case Intrinsic::nvvm_suld_3d_v2i8_trap:\r
- return NVPTXISD::Suld3DV2I8Trap;\r
- case Intrinsic::nvvm_suld_3d_v2i16_trap:\r
- return NVPTXISD::Suld3DV2I16Trap;\r
- case Intrinsic::nvvm_suld_3d_v2i32_trap:\r
- return NVPTXISD::Suld3DV2I32Trap;\r
- case Intrinsic::nvvm_suld_3d_v2i64_trap:\r
- return NVPTXISD::Suld3DV2I64Trap;\r
- case Intrinsic::nvvm_suld_3d_v4i8_trap:\r
- return NVPTXISD::Suld3DV4I8Trap;\r
- case Intrinsic::nvvm_suld_3d_v4i16_trap:\r
- return NVPTXISD::Suld3DV4I16Trap;\r
- case Intrinsic::nvvm_suld_3d_v4i32_trap:\r
- return NVPTXISD::Suld3DV4I32Trap;\r
- case Intrinsic::nvvm_suld_1d_i8_zero:\r
- return NVPTXISD::Suld1DI8Zero;\r
- case Intrinsic::nvvm_suld_1d_i16_zero:\r
- return NVPTXISD::Suld1DI16Zero;\r
- case Intrinsic::nvvm_suld_1d_i32_zero:\r
- return NVPTXISD::Suld1DI32Zero;\r
- case Intrinsic::nvvm_suld_1d_i64_zero:\r
- return NVPTXISD::Suld1DI64Zero;\r
- case Intrinsic::nvvm_suld_1d_v2i8_zero:\r
- return NVPTXISD::Suld1DV2I8Zero;\r
- case Intrinsic::nvvm_suld_1d_v2i16_zero:\r
- return NVPTXISD::Suld1DV2I16Zero;\r
- case Intrinsic::nvvm_suld_1d_v2i32_zero:\r
- return NVPTXISD::Suld1DV2I32Zero;\r
- case Intrinsic::nvvm_suld_1d_v2i64_zero:\r
- return NVPTXISD::Suld1DV2I64Zero;\r
- case Intrinsic::nvvm_suld_1d_v4i8_zero:\r
- return NVPTXISD::Suld1DV4I8Zero;\r
- case Intrinsic::nvvm_suld_1d_v4i16_zero:\r
- return NVPTXISD::Suld1DV4I16Zero;\r
- case Intrinsic::nvvm_suld_1d_v4i32_zero:\r
- return NVPTXISD::Suld1DV4I32Zero;\r
- case Intrinsic::nvvm_suld_1d_array_i8_zero:\r
- return NVPTXISD::Suld1DArrayI8Zero;\r
- case Intrinsic::nvvm_suld_1d_array_i16_zero:\r
- return NVPTXISD::Suld1DArrayI16Zero;\r
- case Intrinsic::nvvm_suld_1d_array_i32_zero:\r
- return NVPTXISD::Suld1DArrayI32Zero;\r
- case Intrinsic::nvvm_suld_1d_array_i64_zero:\r
- return NVPTXISD::Suld1DArrayI64Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:\r
- return NVPTXISD::Suld1DArrayV2I8Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:\r
- return NVPTXISD::Suld1DArrayV2I16Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:\r
- return NVPTXISD::Suld1DArrayV2I32Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:\r
- return NVPTXISD::Suld1DArrayV2I64Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:\r
- return NVPTXISD::Suld1DArrayV4I8Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:\r
- return NVPTXISD::Suld1DArrayV4I16Zero;\r
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:\r
- return NVPTXISD::Suld1DArrayV4I32Zero;\r
- case Intrinsic::nvvm_suld_2d_i8_zero:\r
- return NVPTXISD::Suld2DI8Zero;\r
- case Intrinsic::nvvm_suld_2d_i16_zero:\r
- return NVPTXISD::Suld2DI16Zero;\r
- case Intrinsic::nvvm_suld_2d_i32_zero:\r
- return NVPTXISD::Suld2DI32Zero;\r
- case Intrinsic::nvvm_suld_2d_i64_zero:\r
- return NVPTXISD::Suld2DI64Zero;\r
- case Intrinsic::nvvm_suld_2d_v2i8_zero:\r
- return NVPTXISD::Suld2DV2I8Zero;\r
- case Intrinsic::nvvm_suld_2d_v2i16_zero:\r
- return NVPTXISD::Suld2DV2I16Zero;\r
- case Intrinsic::nvvm_suld_2d_v2i32_zero:\r
- return NVPTXISD::Suld2DV2I32Zero;\r
- case Intrinsic::nvvm_suld_2d_v2i64_zero:\r
- return NVPTXISD::Suld2DV2I64Zero;\r
- case Intrinsic::nvvm_suld_2d_v4i8_zero:\r
- return NVPTXISD::Suld2DV4I8Zero;\r
- case Intrinsic::nvvm_suld_2d_v4i16_zero:\r
- return NVPTXISD::Suld2DV4I16Zero;\r
- case Intrinsic::nvvm_suld_2d_v4i32_zero:\r
- return NVPTXISD::Suld2DV4I32Zero;\r
- case Intrinsic::nvvm_suld_2d_array_i8_zero:\r
- return NVPTXISD::Suld2DArrayI8Zero;\r
- case Intrinsic::nvvm_suld_2d_array_i16_zero:\r
- return NVPTXISD::Suld2DArrayI16Zero;\r
- case Intrinsic::nvvm_suld_2d_array_i32_zero:\r
- return NVPTXISD::Suld2DArrayI32Zero;\r
- case Intrinsic::nvvm_suld_2d_array_i64_zero:\r
- return NVPTXISD::Suld2DArrayI64Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:\r
- return NVPTXISD::Suld2DArrayV2I8Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:\r
- return NVPTXISD::Suld2DArrayV2I16Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:\r
- return NVPTXISD::Suld2DArrayV2I32Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:\r
- return NVPTXISD::Suld2DArrayV2I64Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:\r
- return NVPTXISD::Suld2DArrayV4I8Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:\r
- return NVPTXISD::Suld2DArrayV4I16Zero;\r
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:\r
- return NVPTXISD::Suld2DArrayV4I32Zero;\r
- case Intrinsic::nvvm_suld_3d_i8_zero:\r
- return NVPTXISD::Suld3DI8Zero;\r
- case Intrinsic::nvvm_suld_3d_i16_zero:\r
- return NVPTXISD::Suld3DI16Zero;\r
- case Intrinsic::nvvm_suld_3d_i32_zero:\r
- return NVPTXISD::Suld3DI32Zero;\r
- case Intrinsic::nvvm_suld_3d_i64_zero:\r
- return NVPTXISD::Suld3DI64Zero;\r
- case Intrinsic::nvvm_suld_3d_v2i8_zero:\r
- return NVPTXISD::Suld3DV2I8Zero;\r
- case Intrinsic::nvvm_suld_3d_v2i16_zero:\r
- return NVPTXISD::Suld3DV2I16Zero;\r
- case Intrinsic::nvvm_suld_3d_v2i32_zero:\r
- return NVPTXISD::Suld3DV2I32Zero;\r
- case Intrinsic::nvvm_suld_3d_v2i64_zero:\r
- return NVPTXISD::Suld3DV2I64Zero;\r
- case Intrinsic::nvvm_suld_3d_v4i8_zero:\r
- return NVPTXISD::Suld3DV4I8Zero;\r
- case Intrinsic::nvvm_suld_3d_v4i16_zero:\r
- return NVPTXISD::Suld3DV4I16Zero;\r
- case Intrinsic::nvvm_suld_3d_v4i32_zero:\r
- return NVPTXISD::Suld3DV4I32Zero;\r
- }\r
-}\r
-\r
- // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as\r
- // TgtMemIntrinsic because we need the information that is only available\r
- // in the "Value" type of the destination pointer. In particular, the\r
- // address space information.\r
-bool NVPTXTargetLowering::getTgtMemIntrinsic(\r
- IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {\r
- switch (Intrinsic) {\r
- default:\r
- return false;\r
-\r
- case Intrinsic::nvvm_atomic_load_add_f32:\r
- case Intrinsic::nvvm_atomic_load_inc_32:\r
- case Intrinsic::nvvm_atomic_load_dec_32:\r
-\r
- case Intrinsic::nvvm_atomic_add_gen_f_cta:\r
- case Intrinsic::nvvm_atomic_add_gen_f_sys:\r
- case Intrinsic::nvvm_atomic_add_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_add_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_and_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_and_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_cas_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_cas_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_dec_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_dec_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_inc_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_inc_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_max_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_max_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_min_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_min_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_or_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_or_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_exch_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_exch_gen_i_sys:\r
- case Intrinsic::nvvm_atomic_xor_gen_i_cta:\r
- case Intrinsic::nvvm_atomic_xor_gen_i_sys: {\r
- auto &DL = I.getModule()->getDataLayout();\r
- Info.opc = ISD::INTRINSIC_W_CHAIN;\r
- Info.memVT = getValueType(DL, I.getType());\r
- Info.ptrVal = I.getArgOperand(0);\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = true;\r
- Info.align = 0;\r
- return true;\r
- }\r
-\r
- case Intrinsic::nvvm_ldu_global_i:\r
- case Intrinsic::nvvm_ldu_global_f:\r
- case Intrinsic::nvvm_ldu_global_p: {\r
- auto &DL = I.getModule()->getDataLayout();\r
- Info.opc = ISD::INTRINSIC_W_CHAIN;\r
- if (Intrinsic == Intrinsic::nvvm_ldu_global_p)\r
- Info.memVT = getPointerTy(DL);\r
- else\r
- Info.memVT = getValueType(DL, I.getType());\r
- Info.ptrVal = I.getArgOperand(0);\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();\r
-\r
- return true;\r
- }\r
- case Intrinsic::nvvm_ldg_global_i:\r
- case Intrinsic::nvvm_ldg_global_f:\r
- case Intrinsic::nvvm_ldg_global_p: {\r
- auto &DL = I.getModule()->getDataLayout();\r
-\r
- Info.opc = ISD::INTRINSIC_W_CHAIN;\r
- if (Intrinsic == Intrinsic::nvvm_ldg_global_p)\r
- Info.memVT = getPointerTy(DL);\r
- else\r
- Info.memVT = getValueType(DL, I.getType());\r
- Info.ptrVal = I.getArgOperand(0);\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();\r
-\r
- return true;\r
- }\r
-\r
- case Intrinsic::nvvm_tex_1d_v4f32_s32:\r
- case Intrinsic::nvvm_tex_1d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_1d_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_v4f32_s32:\r
- case Intrinsic::nvvm_tex_1d_array_v4f32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_2d_v4f32_s32:\r
- case Intrinsic::nvvm_tex_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_2d_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_v4f32_s32:\r
- case Intrinsic::nvvm_tex_2d_array_v4f32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_3d_v4f32_s32:\r
- case Intrinsic::nvvm_tex_3d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_3d_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_cube_v4f32_f32:\r
- case Intrinsic::nvvm_tex_cube_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_cube_array_v4f32_f32:\r
- case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:\r
- case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:\r
- case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:\r
- case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:\r
- case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:\r
- Info.opc = getOpcForTextureInstr(Intrinsic);\r
- Info.memVT = MVT::v4f32;\r
- Info.ptrVal = nullptr;\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = 16;\r
- return true;\r
-\r
- case Intrinsic::nvvm_tex_1d_v4s32_s32:\r
- case Intrinsic::nvvm_tex_1d_v4s32_f32:\r
- case Intrinsic::nvvm_tex_1d_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_v4s32_s32:\r
- case Intrinsic::nvvm_tex_1d_array_v4s32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_2d_v4s32_s32:\r
- case Intrinsic::nvvm_tex_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tex_2d_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_v4s32_s32:\r
- case Intrinsic::nvvm_tex_2d_array_v4s32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_3d_v4s32_s32:\r
- case Intrinsic::nvvm_tex_3d_v4s32_f32:\r
- case Intrinsic::nvvm_tex_3d_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_cube_v4s32_f32:\r
- case Intrinsic::nvvm_tex_cube_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_cube_array_v4s32_f32:\r
- case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_cube_v4u32_f32:\r
- case Intrinsic::nvvm_tex_cube_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_cube_array_v4u32_f32:\r
- case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_1d_v4u32_s32:\r
- case Intrinsic::nvvm_tex_1d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_1d_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_v4u32_s32:\r
- case Intrinsic::nvvm_tex_1d_array_v4u32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_2d_v4u32_s32:\r
- case Intrinsic::nvvm_tex_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_2d_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_v4u32_s32:\r
- case Intrinsic::nvvm_tex_2d_array_v4u32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_3d_v4u32_s32:\r
- case Intrinsic::nvvm_tex_3d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_3d_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:\r
- case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:\r
- case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:\r
- case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:\r
- case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:\r
- case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:\r
- case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:\r
- case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:\r
- case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:\r
- case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:\r
- Info.opc = getOpcForTextureInstr(Intrinsic);\r
- Info.memVT = MVT::v4i32;\r
- Info.ptrVal = nullptr;\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = 16;\r
- return true;\r
-\r
- case Intrinsic::nvvm_suld_1d_i8_clamp:\r
- case Intrinsic::nvvm_suld_1d_v2i8_clamp:\r
- case Intrinsic::nvvm_suld_1d_v4i8_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_i8_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:\r
- case Intrinsic::nvvm_suld_2d_i8_clamp:\r
- case Intrinsic::nvvm_suld_2d_v2i8_clamp:\r
- case Intrinsic::nvvm_suld_2d_v4i8_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_i8_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:\r
- case Intrinsic::nvvm_suld_3d_i8_clamp:\r
- case Intrinsic::nvvm_suld_3d_v2i8_clamp:\r
- case Intrinsic::nvvm_suld_3d_v4i8_clamp:\r
- case Intrinsic::nvvm_suld_1d_i8_trap:\r
- case Intrinsic::nvvm_suld_1d_v2i8_trap:\r
- case Intrinsic::nvvm_suld_1d_v4i8_trap:\r
- case Intrinsic::nvvm_suld_1d_array_i8_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v2i8_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v4i8_trap:\r
- case Intrinsic::nvvm_suld_2d_i8_trap:\r
- case Intrinsic::nvvm_suld_2d_v2i8_trap:\r
- case Intrinsic::nvvm_suld_2d_v4i8_trap:\r
- case Intrinsic::nvvm_suld_2d_array_i8_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v2i8_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v4i8_trap:\r
- case Intrinsic::nvvm_suld_3d_i8_trap:\r
- case Intrinsic::nvvm_suld_3d_v2i8_trap:\r
- case Intrinsic::nvvm_suld_3d_v4i8_trap:\r
- case Intrinsic::nvvm_suld_1d_i8_zero:\r
- case Intrinsic::nvvm_suld_1d_v2i8_zero:\r
- case Intrinsic::nvvm_suld_1d_v4i8_zero:\r
- case Intrinsic::nvvm_suld_1d_array_i8_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v2i8_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v4i8_zero:\r
- case Intrinsic::nvvm_suld_2d_i8_zero:\r
- case Intrinsic::nvvm_suld_2d_v2i8_zero:\r
- case Intrinsic::nvvm_suld_2d_v4i8_zero:\r
- case Intrinsic::nvvm_suld_2d_array_i8_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v2i8_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v4i8_zero:\r
- case Intrinsic::nvvm_suld_3d_i8_zero:\r
- case Intrinsic::nvvm_suld_3d_v2i8_zero:\r
- case Intrinsic::nvvm_suld_3d_v4i8_zero:\r
- Info.opc = getOpcForSurfaceInstr(Intrinsic);\r
- Info.memVT = MVT::i8;\r
- Info.ptrVal = nullptr;\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = 16;\r
- return true;\r
-\r
- case Intrinsic::nvvm_suld_1d_i16_clamp:\r
- case Intrinsic::nvvm_suld_1d_v2i16_clamp:\r
- case Intrinsic::nvvm_suld_1d_v4i16_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_i16_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:\r
- case Intrinsic::nvvm_suld_2d_i16_clamp:\r
- case Intrinsic::nvvm_suld_2d_v2i16_clamp:\r
- case Intrinsic::nvvm_suld_2d_v4i16_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_i16_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:\r
- case Intrinsic::nvvm_suld_3d_i16_clamp:\r
- case Intrinsic::nvvm_suld_3d_v2i16_clamp:\r
- case Intrinsic::nvvm_suld_3d_v4i16_clamp:\r
- case Intrinsic::nvvm_suld_1d_i16_trap:\r
- case Intrinsic::nvvm_suld_1d_v2i16_trap:\r
- case Intrinsic::nvvm_suld_1d_v4i16_trap:\r
- case Intrinsic::nvvm_suld_1d_array_i16_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v2i16_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v4i16_trap:\r
- case Intrinsic::nvvm_suld_2d_i16_trap:\r
- case Intrinsic::nvvm_suld_2d_v2i16_trap:\r
- case Intrinsic::nvvm_suld_2d_v4i16_trap:\r
- case Intrinsic::nvvm_suld_2d_array_i16_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v2i16_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v4i16_trap:\r
- case Intrinsic::nvvm_suld_3d_i16_trap:\r
- case Intrinsic::nvvm_suld_3d_v2i16_trap:\r
- case Intrinsic::nvvm_suld_3d_v4i16_trap:\r
- case Intrinsic::nvvm_suld_1d_i16_zero:\r
- case Intrinsic::nvvm_suld_1d_v2i16_zero:\r
- case Intrinsic::nvvm_suld_1d_v4i16_zero:\r
- case Intrinsic::nvvm_suld_1d_array_i16_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v2i16_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v4i16_zero:\r
- case Intrinsic::nvvm_suld_2d_i16_zero:\r
- case Intrinsic::nvvm_suld_2d_v2i16_zero:\r
- case Intrinsic::nvvm_suld_2d_v4i16_zero:\r
- case Intrinsic::nvvm_suld_2d_array_i16_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v2i16_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v4i16_zero:\r
- case Intrinsic::nvvm_suld_3d_i16_zero:\r
- case Intrinsic::nvvm_suld_3d_v2i16_zero:\r
- case Intrinsic::nvvm_suld_3d_v4i16_zero:\r
- Info.opc = getOpcForSurfaceInstr(Intrinsic);\r
- Info.memVT = MVT::i16;\r
- Info.ptrVal = nullptr;\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = 16;\r
- return true;\r
-\r
- case Intrinsic::nvvm_suld_1d_i32_clamp:\r
- case Intrinsic::nvvm_suld_1d_v2i32_clamp:\r
- case Intrinsic::nvvm_suld_1d_v4i32_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_i32_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:\r
- case Intrinsic::nvvm_suld_2d_i32_clamp:\r
- case Intrinsic::nvvm_suld_2d_v2i32_clamp:\r
- case Intrinsic::nvvm_suld_2d_v4i32_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_i32_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:\r
- case Intrinsic::nvvm_suld_3d_i32_clamp:\r
- case Intrinsic::nvvm_suld_3d_v2i32_clamp:\r
- case Intrinsic::nvvm_suld_3d_v4i32_clamp:\r
- case Intrinsic::nvvm_suld_1d_i32_trap:\r
- case Intrinsic::nvvm_suld_1d_v2i32_trap:\r
- case Intrinsic::nvvm_suld_1d_v4i32_trap:\r
- case Intrinsic::nvvm_suld_1d_array_i32_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v2i32_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v4i32_trap:\r
- case Intrinsic::nvvm_suld_2d_i32_trap:\r
- case Intrinsic::nvvm_suld_2d_v2i32_trap:\r
- case Intrinsic::nvvm_suld_2d_v4i32_trap:\r
- case Intrinsic::nvvm_suld_2d_array_i32_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v2i32_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v4i32_trap:\r
- case Intrinsic::nvvm_suld_3d_i32_trap:\r
- case Intrinsic::nvvm_suld_3d_v2i32_trap:\r
- case Intrinsic::nvvm_suld_3d_v4i32_trap:\r
- case Intrinsic::nvvm_suld_1d_i32_zero:\r
- case Intrinsic::nvvm_suld_1d_v2i32_zero:\r
- case Intrinsic::nvvm_suld_1d_v4i32_zero:\r
- case Intrinsic::nvvm_suld_1d_array_i32_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v2i32_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v4i32_zero:\r
- case Intrinsic::nvvm_suld_2d_i32_zero:\r
- case Intrinsic::nvvm_suld_2d_v2i32_zero:\r
- case Intrinsic::nvvm_suld_2d_v4i32_zero:\r
- case Intrinsic::nvvm_suld_2d_array_i32_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v2i32_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v4i32_zero:\r
- case Intrinsic::nvvm_suld_3d_i32_zero:\r
- case Intrinsic::nvvm_suld_3d_v2i32_zero:\r
- case Intrinsic::nvvm_suld_3d_v4i32_zero:\r
- Info.opc = getOpcForSurfaceInstr(Intrinsic);\r
- Info.memVT = MVT::i32;\r
- Info.ptrVal = nullptr;\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = 16;\r
- return true;\r
-\r
- case Intrinsic::nvvm_suld_1d_i64_clamp:\r
- case Intrinsic::nvvm_suld_1d_v2i64_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_i64_clamp:\r
- case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:\r
- case Intrinsic::nvvm_suld_2d_i64_clamp:\r
- case Intrinsic::nvvm_suld_2d_v2i64_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_i64_clamp:\r
- case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:\r
- case Intrinsic::nvvm_suld_3d_i64_clamp:\r
- case Intrinsic::nvvm_suld_3d_v2i64_clamp:\r
- case Intrinsic::nvvm_suld_1d_i64_trap:\r
- case Intrinsic::nvvm_suld_1d_v2i64_trap:\r
- case Intrinsic::nvvm_suld_1d_array_i64_trap:\r
- case Intrinsic::nvvm_suld_1d_array_v2i64_trap:\r
- case Intrinsic::nvvm_suld_2d_i64_trap:\r
- case Intrinsic::nvvm_suld_2d_v2i64_trap:\r
- case Intrinsic::nvvm_suld_2d_array_i64_trap:\r
- case Intrinsic::nvvm_suld_2d_array_v2i64_trap:\r
- case Intrinsic::nvvm_suld_3d_i64_trap:\r
- case Intrinsic::nvvm_suld_3d_v2i64_trap:\r
- case Intrinsic::nvvm_suld_1d_i64_zero:\r
- case Intrinsic::nvvm_suld_1d_v2i64_zero:\r
- case Intrinsic::nvvm_suld_1d_array_i64_zero:\r
- case Intrinsic::nvvm_suld_1d_array_v2i64_zero:\r
- case Intrinsic::nvvm_suld_2d_i64_zero:\r
- case Intrinsic::nvvm_suld_2d_v2i64_zero:\r
- case Intrinsic::nvvm_suld_2d_array_i64_zero:\r
- case Intrinsic::nvvm_suld_2d_array_v2i64_zero:\r
- case Intrinsic::nvvm_suld_3d_i64_zero:\r
- case Intrinsic::nvvm_suld_3d_v2i64_zero:\r
- Info.opc = getOpcForSurfaceInstr(Intrinsic);\r
- Info.memVT = MVT::i64;\r
- Info.ptrVal = nullptr;\r
- Info.offset = 0;\r
- Info.vol = false;\r
- Info.readMem = true;\r
- Info.writeMem = false;\r
- Info.align = 16;\r
- return true;\r
- }\r
- return false;\r
-}\r
-\r
-/// isLegalAddressingMode - Return true if the addressing mode represented\r
-/// by AM is legal for this target, for a load/store of the specified type.\r
-/// Used to guide target specific optimizations, like loop strength reduction\r
-/// (LoopStrengthReduce.cpp) and memory optimization for address mode\r
-/// (CodeGenPrepare.cpp)\r
-bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,\r
- const AddrMode &AM, Type *Ty,\r
- unsigned AS) const {\r
- // AddrMode - This represents an addressing mode of:\r
- // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg\r
- //\r
- // The legal address modes are\r
- // - [avar]\r
- // - [areg]\r
- // - [areg+immoff]\r
- // - [immAddr]\r
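- //\r
- // Illustrative PTX forms (added for clarity; assumed examples, not\r
- // exhaustive):\r
- // ld.global.f32 %f1, [gvar]; // [avar]\r
- // ld.global.f32 %f1, [%rd1]; // [areg]\r
- // ld.global.f32 %f1, [%rd1+8]; // [areg+immoff]\r
- // ld.global.f32 %f1, [4096]; // [immAddr]\r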
-\r
- if (AM.BaseGV) {\r
- return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;\r
- }\r
-\r
- switch (AM.Scale) {\r
- case 0: // "r", "r+i" or "i" is allowed\r
- break;\r
- case 1:\r
- if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.\r
- return false;\r
- // Otherwise we have r+i.\r
- break;\r
- default:\r
- // No scale > 1 is allowed\r
- return false;\r
- }\r
- return true;\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// NVPTX Inline Assembly Support\r
-//===----------------------------------------------------------------------===//\r
-\r
-/// getConstraintType - Given a constraint letter, return the type of\r
-/// constraint it is for this target.\r
-NVPTXTargetLowering::ConstraintType\r
-NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {\r
- if (Constraint.size() == 1) {\r
- switch (Constraint[0]) {\r
- default:\r
- break;\r
- case 'b':\r
- case 'r':\r
- case 'h':\r
- case 'c':\r
- case 'l':\r
- case 'f':\r
- case 'd':\r
- case '0':\r
- case 'N':\r
- return C_RegisterClass;\r
- }\r
- }\r
- return TargetLowering::getConstraintType(Constraint);\r
-}\r
-\r
-std::pair<unsigned, const TargetRegisterClass *>\r
-NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,\r
- StringRef Constraint,\r
- MVT VT) const {\r
- if (Constraint.size() == 1) {\r
- switch (Constraint[0]) {\r
- case 'b':\r
- return std::make_pair(0U, &NVPTX::Int1RegsRegClass);\r
- case 'c':\r
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);\r
- case 'h':\r
- return std::make_pair(0U, &NVPTX::Int16RegsRegClass);\r
- case 'r':\r
- return std::make_pair(0U, &NVPTX::Int32RegsRegClass);\r
- case 'l':\r
- case 'N':\r
- return std::make_pair(0U, &NVPTX::Int64RegsRegClass);\r
- case 'f':\r
- return std::make_pair(0U, &NVPTX::Float32RegsRegClass);\r
- case 'd':\r
- return std::make_pair(0U, &NVPTX::Float64RegsRegClass);\r
- }\r
- }\r
- return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// NVPTX DAG Combining\r
-//===----------------------------------------------------------------------===//\r
-\r
-bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,\r
- CodeGenOpt::Level OptLevel) const {\r
- // Always honor command-line argument\r
- if (FMAContractLevelOpt.getNumOccurrences() > 0)\r
- return FMAContractLevelOpt > 0;\r
-\r
- // Do not contract if we're not optimizing the code.\r
- if (OptLevel == 0)\r
- return false;\r
-\r
- // Honor TargetOptions flags that explicitly say fusion is okay.\r
- if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)\r
- return true;\r
-\r
- return allowUnsafeFPMath(MF);\r
-}\r
-\r
-bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {\r
- // Honor TargetOptions flags that explicitly say unsafe math is okay.\r
- if (MF.getTarget().Options.UnsafeFPMath)\r
- return true;\r
-\r
- // Allow unsafe math if unsafe-fp-math attribute explicitly says so.\r
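- // For illustration (added note): in IR this is the string function\r
- // attribute "unsafe-fp-math"="true".\r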
- const Function *F = MF.getFunction();\r
- if (F->hasFnAttribute("unsafe-fp-math")) {\r
- Attribute Attr = F->getFnAttribute("unsafe-fp-math");\r
- StringRef Val = Attr.getValueAsString();\r
- if (Val == "true")\r
- return true;\r
- }\r
-\r
- return false;\r
-}\r
-\r
-/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with\r
-/// operands N0 and N1. This is a helper for PerformADDCombine that is\r
-/// called with the default operands, and if that fails, with commuted\r
-/// operands.\r
-static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,\r
- TargetLowering::DAGCombinerInfo &DCI,\r
- const NVPTXSubtarget &Subtarget,\r
- CodeGenOpt::Level OptLevel) {\r
- SelectionDAG &DAG = DCI.DAG;\r
- // Skip the vector case; only scalar adds are handled here.\r
- EVT VT = N0.getValueType();\r
- if (VT.isVector())\r
- return SDValue();\r
-\r
- // fold (add (mul a, b), c) -> (mad a, b, c)\r
- //\r
- if (N0.getOpcode() == ISD::MUL) {\r
- assert(VT.isInteger());\r
- // For integer:\r
- // Integer multiply-add costs the same as an integer multiply but more\r
- // than an integer add, so only fuse when the add is the mul's sole use.\r
- if (OptLevel == CodeGenOpt::None || VT != MVT::i32 ||\r
- !N0.getNode()->hasOneUse())\r
- return SDValue();\r
-\r
- // Do the folding\r
- return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,\r
- N0.getOperand(0), N0.getOperand(1), N1);\r
- } else if (N0.getOpcode() == ISD::FMUL) {\r
- if (VT == MVT::f32 || VT == MVT::f64) {\r
- const auto *TLI = static_cast<const NVPTXTargetLowering *>(\r
- &DAG.getTargetLoweringInfo());\r
- if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))\r
- return SDValue();\r
-\r
- // For floating point:\r
- // Only fuse when the mul has fewer than 5 uses, all of which are adds.\r
- // The heuristic is that if a use is not an add, that use cannot be fused\r
- // into an fma, so the mul is still needed anyway; and if there are more\r
- // than 4 uses, fusing them all would increase register pressure.\r
- //\r
- int numUses = 0;\r
- int nonAddCount = 0;\r
- for (const SDNode *User : N0.getNode()->uses()) {\r
- numUses++;\r
- if (User->getOpcode() != ISD::FADD)\r
- ++nonAddCount;\r
- }\r
- if (numUses >= 5)\r
- return SDValue();\r
- if (nonAddCount) {\r
- int orderNo = N->getIROrder();\r
- int orderNo2 = N0.getNode()->getIROrder();\r
- // Simple heuristic for estimating potential register pressure: the IR\r
- // order difference approximates the distance between the def and this\r
- // use, and a longer distance is more likely to cause register pressure.\r
- if (orderNo - orderNo2 < 500)\r
- return SDValue();\r
-\r
- // Now, check if at least one of the FMUL's operands is live beyond the\r
- // node N, which guarantees that the FMA will not increase register\r
- // pressure at node N.\r
- bool opIsLive = false;\r
- const SDNode *left = N0.getOperand(0).getNode();\r
- const SDNode *right = N0.getOperand(1).getNode();\r
-\r
- if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))\r
- opIsLive = true;\r
-\r
- if (!opIsLive)\r
- for (const SDNode *User : left->uses()) {\r
- int orderNo3 = User->getIROrder();\r
- if (orderNo3 > orderNo) {\r
- opIsLive = true;\r
- break;\r
- }\r
- }\r
-\r
- if (!opIsLive)\r
- for (const SDNode *User : right->uses()) {\r
- int orderNo3 = User->getIROrder();\r
- if (orderNo3 > orderNo) {\r
- opIsLive = true;\r
- break;\r
- }\r
- }\r
-\r
- if (!opIsLive)\r
- return SDValue();\r
- }\r
-\r
- return DAG.getNode(ISD::FMA, SDLoc(N), VT,\r
- N0.getOperand(0), N0.getOperand(1), N1);\r
- }\r
- }\r
-\r
- return SDValue();\r
-}\r
-\r
-/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.\r
-///\r
-static SDValue PerformADDCombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI,\r
- const NVPTXSubtarget &Subtarget,\r
- CodeGenOpt::Level OptLevel) {\r
- SDValue N0 = N->getOperand(0);\r
- SDValue N1 = N->getOperand(1);\r
-\r
- // First try with the default operand order.\r
- if (SDValue Result =\r
- PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))\r
- return Result;\r
-\r
- // If that didn't work, try again with the operands commuted.\r
- return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);\r
-}\r
-\r
-static SDValue PerformANDCombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI) {\r
- // The type legalizer turns a vector load of i8 values into a zextload to i16\r
- // registers, optionally ANY_EXTENDs it (if target type is integer),\r
- // and ANDs off the high 8 bits. Since we turn this load into a\r
- // target-specific DAG node, the DAG combiner fails to eliminate these AND\r
- // nodes. Do that here.\r
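- //\r
- // Illustrative shape of the pattern (added for clarity; an assumed\r
- // sketch, not from the original comment):\r
- // (and (any_extend (NVPTXISD::LoadV4 ..., zextload v4i8)), 0xff)\r
- // --> the load itself (re-wrapped in a zero_extend if an any_extend\r
- // was present), since the load already zero-extends each element.\r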
- SDValue Val = N->getOperand(0);\r
- SDValue Mask = N->getOperand(1);\r
-\r
- if (isa<ConstantSDNode>(Val)) {\r
- std::swap(Val, Mask);\r
- }\r
-\r
- SDValue AExt;\r
- // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and\r
- if (Val.getOpcode() == ISD::ANY_EXTEND) {\r
- AExt = Val;\r
- Val = Val->getOperand(0);\r
- }\r
-\r
- if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {\r
- Val = Val->getOperand(0);\r
- }\r
-\r
- if (Val->getOpcode() == NVPTXISD::LoadV2 ||\r
- Val->getOpcode() == NVPTXISD::LoadV4) {\r
- ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);\r
- if (!MaskCnst) {\r
- // Not an AND with a constant\r
- return SDValue();\r
- }\r
-\r
- uint64_t MaskVal = MaskCnst->getZExtValue();\r
- if (MaskVal != 0xff) {\r
- // Not an AND that chops off top 8 bits\r
- return SDValue();\r
- }\r
-\r
- MemSDNode *Mem = dyn_cast<MemSDNode>(Val);\r
- if (!Mem) {\r
- // Not a MemSDNode?!?\r
- return SDValue();\r
- }\r
-\r
- EVT MemVT = Mem->getMemoryVT();\r
- if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {\r
- // We only handle the i8 case\r
- return SDValue();\r
- }\r
-\r
- unsigned ExtType =\r
- cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->\r
- getZExtValue();\r
- if (ExtType == ISD::SEXTLOAD) {\r
- // If for some reason the load is a sextload, the and is needed to zero\r
- // out the high 8 bits\r
- return SDValue();\r
- }\r
-\r
- bool AddTo = false;\r
- if (AExt.getNode() != nullptr) {\r
- // Re-insert the ext as a zext.\r
- Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),\r
- AExt.getValueType(), Val);\r
- AddTo = true;\r
- }\r
-\r
- // If we get here, the AND is unnecessary. Just replace it with the load\r
- DCI.CombineTo(N, Val, AddTo);\r
- }\r
-\r
- return SDValue();\r
-}\r
-\r
-static SDValue PerformREMCombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI,\r
- CodeGenOpt::Level OptLevel) {\r
- assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);\r
-\r
- // Don't do anything at less than -O2.\r
- if (OptLevel < CodeGenOpt::Default)\r
- return SDValue();\r
-\r
- SelectionDAG &DAG = DCI.DAG;\r
- SDLoc DL(N);\r
- EVT VT = N->getValueType(0);\r
- bool IsSigned = N->getOpcode() == ISD::SREM;\r
- unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;\r
-\r
- const SDValue &Num = N->getOperand(0);\r
- const SDValue &Den = N->getOperand(1);\r
-\r
- for (const SDNode *U : Num->uses()) {\r
- if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&\r
- U->getOperand(1) == Den) {\r
- // Num % Den -> Num - (Num / Den) * Den\r
- return DAG.getNode(ISD::SUB, DL, VT, Num,\r
- DAG.getNode(ISD::MUL, DL, VT,\r
- DAG.getNode(DivOpc, DL, VT, Num, Den),\r
- Den));\r
- }\r
- }\r
- return SDValue();\r
-}\r
-\r
-enum OperandSignedness {\r
- Signed = 0,\r
- Unsigned,\r
- Unknown\r
-};\r
-\r
-/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand\r
-/// that can be demoted to \p OptSize bits without loss of information. The\r
-/// signedness of the operand, if determinable, is placed in \p S.\r
-static bool IsMulWideOperandDemotable(SDValue Op,\r
- unsigned OptSize,\r
- OperandSignedness &S) {\r
- S = Unknown;\r
-\r
- if (Op.getOpcode() == ISD::SIGN_EXTEND ||\r
- Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {\r
- EVT OrigVT = Op.getOperand(0).getValueType();\r
- if (OrigVT.getSizeInBits() <= OptSize) {\r
- S = Signed;\r
- return true;\r
- }\r
- } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {\r
- EVT OrigVT = Op.getOperand(0).getValueType();\r
- if (OrigVT.getSizeInBits() <= OptSize) {\r
- S = Unsigned;\r
- return true;\r
- }\r
- }\r
-\r
- return false;\r
-}\r
-\r
-/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can\r
-/// be demoted to \p OptSize bits without loss of information. If the operands\r
-/// contain a constant, it should appear as the RHS operand. The signedness of\r
-/// the operands is placed in \p IsSigned.\r
-static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,\r
- unsigned OptSize,\r
- bool &IsSigned) {\r
- OperandSignedness LHSSign;\r
-\r
- // The LHS operand must be a demotable op\r
- if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))\r
- return false;\r
-\r
- // We should have been able to determine the signedness from the LHS\r
- if (LHSSign == Unknown)\r
- return false;\r
-\r
- IsSigned = (LHSSign == Signed);\r
-\r
- // The RHS can be a demotable op or a constant\r
- if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {\r
- const APInt &Val = CI->getAPIntValue();\r
- if (LHSSign == Unsigned) {\r
- return Val.isIntN(OptSize);\r
- } else {\r
- return Val.isSignedIntN(OptSize);\r
- }\r
- } else {\r
- OperandSignedness RHSSign;\r
- if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))\r
- return false;\r
-\r
- return LHSSign == RHSSign;\r
- }\r
-}\r
-\r
-/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply\r
-/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform\r
-/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift\r
-/// amount.\r
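- ///\r
- /// For illustration (added; a sketch, not from the original comment):\r
- /// (mul i32 (sext i16 %a), (sext i16 %b))\r
- /// becomes MUL_WIDE_SIGNED of the operands truncated back to i16, i.e.\r
- /// PTX mul.wide.s16, while (shl i32 (zext i16 %a), 4) is handled by\r
- /// first rewriting the shift amount as a multiply by 16.\r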
-static SDValue TryMULWIDECombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI) {\r
- EVT MulType = N->getValueType(0);\r
- if (MulType != MVT::i32 && MulType != MVT::i64) {\r
- return SDValue();\r
- }\r
-\r
- SDLoc DL(N);\r
- unsigned OptSize = MulType.getSizeInBits() >> 1;\r
- SDValue LHS = N->getOperand(0);\r
- SDValue RHS = N->getOperand(1);\r
-\r
- // Canonicalize the multiply so the constant (if any) is on the right\r
- if (N->getOpcode() == ISD::MUL) {\r
- if (isa<ConstantSDNode>(LHS)) {\r
- std::swap(LHS, RHS);\r
- }\r
- }\r
-\r
- // If we have a SHL, determine the actual multiply amount\r
- if (N->getOpcode() == ISD::SHL) {\r
- ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);\r
- if (!ShlRHS) {\r
- return SDValue();\r
- }\r
-\r
- APInt ShiftAmt = ShlRHS->getAPIntValue();\r
- unsigned BitWidth = MulType.getSizeInBits();\r
- if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {\r
- APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;\r
- RHS = DCI.DAG.getConstant(MulVal, DL, MulType);\r
- } else {\r
- return SDValue();\r
- }\r
- }\r
-\r
- bool Signed;\r
- // Verify that our operands are demotable\r
- if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {\r
- return SDValue();\r
- }\r
-\r
- EVT DemotedVT;\r
- if (MulType == MVT::i32) {\r
- DemotedVT = MVT::i16;\r
- } else {\r
- DemotedVT = MVT::i32;\r
- }\r
-\r
- // Truncate the operands to the correct size. Note that these are just for\r
- // type consistency and will (likely) be eliminated in later phases.\r
- SDValue TruncLHS =\r
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);\r
- SDValue TruncRHS =\r
- DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);\r
-\r
- unsigned Opc;\r
- if (Signed) {\r
- Opc = NVPTXISD::MUL_WIDE_SIGNED;\r
- } else {\r
- Opc = NVPTXISD::MUL_WIDE_UNSIGNED;\r
- }\r
-\r
- return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);\r
-}\r
-\r
-/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.\r
-static SDValue PerformMULCombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI,\r
- CodeGenOpt::Level OptLevel) {\r
- if (OptLevel > 0) {\r
- // Try mul.wide combining at OptLevel > 0\r
- if (SDValue Ret = TryMULWIDECombine(N, DCI))\r
- return Ret;\r
- }\r
-\r
- return SDValue();\r
-}\r
-\r
-/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.\r
-static SDValue PerformSHLCombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI,\r
- CodeGenOpt::Level OptLevel) {\r
- if (OptLevel > 0) {\r
- // Try mul.wide combining at OptLevel > 0\r
- if (SDValue Ret = TryMULWIDECombine(N, DCI))\r
- return Ret;\r
- }\r
-\r
- return SDValue();\r
-}\r
-\r
-static SDValue PerformSETCCCombine(SDNode *N,\r
- TargetLowering::DAGCombinerInfo &DCI) {\r
- EVT CCType = N->getValueType(0);\r
- SDValue A = N->getOperand(0);\r
- SDValue B = N->getOperand(1);\r
-\r
- if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)\r
- return SDValue();\r
-\r
- SDLoc DL(N);\r
- // setp.f16x2 returns two scalar predicates, which we need to\r
- // convert back to v2i1. The returned result will be scalarized by\r
- // the legalizer, but the comparison will remain a single vector\r
- // instruction.\r
- SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,\r
- DCI.DAG.getVTList(MVT::i1, MVT::i1),\r
- {A, B, N->getOperand(2)});\r
- return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),\r
- CCNode.getValue(1));\r
-}\r
-\r
-SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,\r
- DAGCombinerInfo &DCI) const {\r
- CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();\r
- switch (N->getOpcode()) {\r
- default: break;\r
- case ISD::ADD:\r
- case ISD::FADD:\r
- return PerformADDCombine(N, DCI, STI, OptLevel);\r
- case ISD::MUL:\r
- return PerformMULCombine(N, DCI, OptLevel);\r
- case ISD::SHL:\r
- return PerformSHLCombine(N, DCI, OptLevel);\r
- case ISD::AND:\r
- return PerformANDCombine(N, DCI);\r
- case ISD::UREM:\r
- case ISD::SREM:\r
- return PerformREMCombine(N, DCI, OptLevel);\r
- case ISD::SETCC:\r
- return PerformSETCCCombine(N, DCI);\r
- }\r
- return SDValue();\r
-}\r
-\r
-/// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.\r
-static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,\r
- SmallVectorImpl<SDValue> &Results) {\r
- EVT ResVT = N->getValueType(0);\r
- SDLoc DL(N);\r
-\r
- assert(ResVT.isVector() && "Vector load must have vector type");\r
-\r
- // We only handle "native" vector sizes for now, e.g. <4 x double> is not\r
- // legal. We can (and should) split that into 2 loads of <2 x double> here\r
- // but I'm leaving that as a TODO for now.\r
- assert(ResVT.isSimple() && "Can only handle simple types");\r
- switch (ResVT.getSimpleVT().SimpleTy) {\r
- default:\r
- return;\r
- case MVT::v2i8:\r
- case MVT::v2i16:\r
- case MVT::v2i32:\r
- case MVT::v2i64:\r
- case MVT::v2f16:\r
- case MVT::v2f32:\r
- case MVT::v2f64:\r
- case MVT::v4i8:\r
- case MVT::v4i16:\r
- case MVT::v4i32:\r
- case MVT::v4f16:\r
- case MVT::v4f32:\r
- case MVT::v8f16: // <4 x f16x2>\r
- // This is a "native" vector type\r
- break;\r
- }\r
-\r
- LoadSDNode *LD = cast<LoadSDNode>(N);\r
-\r
- unsigned Align = LD->getAlignment();\r
- auto &TD = DAG.getDataLayout();\r
- unsigned PrefAlign =\r
- TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));\r
- if (Align < PrefAlign) {\r
- // This load is not sufficiently aligned, so bail out and let this vector\r
- // load be scalarized. Note that we may still be able to emit smaller\r
- // vector loads. For example, if we are loading a <4 x float> with an\r
- // alignment of 8, this check will fail but the legalizer will try again\r
- // with 2 x <2 x float>, which will succeed with an alignment of 8.\r
- return;\r
- }\r
-\r
- EVT EltVT = ResVT.getVectorElementType();\r
- unsigned NumElts = ResVT.getVectorNumElements();\r
-\r
- // Since LoadV2 is a target node, we cannot rely on DAG type legalization.\r
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the\r
- // loaded type to i16 and propagate the "real" type as the memory type.\r
- bool NeedTrunc = false;\r
- if (EltVT.getSizeInBits() < 16) {\r
- EltVT = MVT::i16;\r
- NeedTrunc = true;\r
- }\r
-\r
- unsigned Opcode = 0;\r
- SDVTList LdResVTs;\r
- bool LoadF16x2 = false;\r
-\r
- switch (NumElts) {\r
- default:\r
- return;\r
- case 2:\r
- Opcode = NVPTXISD::LoadV2;\r
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);\r
- break;\r
- case 4: {\r
- Opcode = NVPTXISD::LoadV4;\r
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };\r
- LdResVTs = DAG.getVTList(ListVTs);\r
- break;\r
- }\r
- case 8: {\r
- // v8f16 is a special case. PTX doesn't have ld.v8.f16\r
- // instruction. Instead, we split the vector into v2f16 chunks and\r
- // load them with ld.v4.b32.\r
- assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");\r
- LoadF16x2 = true;\r
- Opcode = NVPTXISD::LoadV4;\r
- EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,\r
- MVT::Other};\r
- LdResVTs = DAG.getVTList(ListVTs);\r
- break;\r
- }\r
- }\r
-\r
- // Copy regular operands\r
- SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());\r
-\r
- // The select routine does not have access to the LoadSDNode instance, so\r
- // pass along the extension information\r
- OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));\r
-\r
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,\r
- LD->getMemoryVT(),\r
- LD->getMemOperand());\r
-\r
- SmallVector<SDValue, 8> ScalarRes;\r
- if (LoadF16x2) {\r
- // Split v2f16 subvectors back into individual elements.\r
- NumElts /= 2;\r
- for (unsigned i = 0; i < NumElts; ++i) {\r
- SDValue SubVector = NewLD.getValue(i);\r
- SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,\r
- DAG.getIntPtrConstant(0, DL));\r
- SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,\r
- DAG.getIntPtrConstant(1, DL));\r
- ScalarRes.push_back(E0);\r
- ScalarRes.push_back(E1);\r
- }\r
- } else {\r
- for (unsigned i = 0; i < NumElts; ++i) {\r
- SDValue Res = NewLD.getValue(i);\r
- if (NeedTrunc)\r
- Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);\r
- ScalarRes.push_back(Res);\r
- }\r
- }\r
-\r
- SDValue LoadChain = NewLD.getValue(NumElts);\r
-\r
- SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);\r
-\r
- Results.push_back(BuildVec);\r
- Results.push_back(LoadChain);\r
-}\r
-\r
-static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,\r
- SmallVectorImpl<SDValue> &Results) {\r
- SDValue Chain = N->getOperand(0);\r
- SDValue Intrin = N->getOperand(1);\r
- SDLoc DL(N);\r
-\r
- // Get the intrinsic ID\r
- unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();\r
- switch (IntrinNo) {\r
- default:\r
- return;\r
- case Intrinsic::nvvm_ldg_global_i:\r
- case Intrinsic::nvvm_ldg_global_f:\r
- case Intrinsic::nvvm_ldg_global_p:\r
- case Intrinsic::nvvm_ldu_global_i:\r
- case Intrinsic::nvvm_ldu_global_f:\r
- case Intrinsic::nvvm_ldu_global_p: {\r
- EVT ResVT = N->getValueType(0);\r
-\r
- if (ResVT.isVector()) {\r
- // Vector LDG/LDU\r
-\r
- unsigned NumElts = ResVT.getVectorNumElements();\r
- EVT EltVT = ResVT.getVectorElementType();\r
-\r
- // Since LDU/LDG are target nodes, we cannot rely on DAG type\r
- // legalization.\r
- // Therefore, we must ensure the type is legal. For i1 and i8, we set the\r
- // loaded type to i16 and propagate the "real" type as the memory type.\r
- bool NeedTrunc = false;\r
- if (EltVT.getSizeInBits() < 16) {\r
- EltVT = MVT::i16;\r
- NeedTrunc = true;\r
- }\r
-\r
- unsigned Opcode = 0;\r
- SDVTList LdResVTs;\r
-\r
- switch (NumElts) {\r
- default:\r
- return;\r
- case 2:\r
- switch (IntrinNo) {\r
- default:\r
- return;\r
- case Intrinsic::nvvm_ldg_global_i:\r
- case Intrinsic::nvvm_ldg_global_f:\r
- case Intrinsic::nvvm_ldg_global_p:\r
- Opcode = NVPTXISD::LDGV2;\r
- break;\r
- case Intrinsic::nvvm_ldu_global_i:\r
- case Intrinsic::nvvm_ldu_global_f:\r
- case Intrinsic::nvvm_ldu_global_p:\r
- Opcode = NVPTXISD::LDUV2;\r
- break;\r
- }\r
- LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);\r
- break;\r
- case 4: {\r
- switch (IntrinNo) {\r
- default:\r
- return;\r
- case Intrinsic::nvvm_ldg_global_i:\r
- case Intrinsic::nvvm_ldg_global_f:\r
- case Intrinsic::nvvm_ldg_global_p:\r
- Opcode = NVPTXISD::LDGV4;\r
- break;\r
- case Intrinsic::nvvm_ldu_global_i:\r
- case Intrinsic::nvvm_ldu_global_f:\r
- case Intrinsic::nvvm_ldu_global_p:\r
- Opcode = NVPTXISD::LDUV4;\r
- break;\r
- }\r
- EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };\r
- LdResVTs = DAG.getVTList(ListVTs);\r
- break;\r
- }\r
- }\r
-\r
- SmallVector<SDValue, 8> OtherOps;\r
-\r
- // Copy regular operands\r
-\r
- OtherOps.push_back(Chain); // Chain\r
- // Skip operand 1 (intrinsic ID)\r
- // Others\r
- OtherOps.append(N->op_begin() + 2, N->op_end());\r
-\r
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);\r
-\r
- SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,\r
- MemSD->getMemoryVT(),\r
- MemSD->getMemOperand());\r
-\r
- SmallVector<SDValue, 4> ScalarRes;\r
-\r
- for (unsigned i = 0; i < NumElts; ++i) {\r
- SDValue Res = NewLD.getValue(i);\r
- if (NeedTrunc)\r
- Res =\r
- DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);\r
- ScalarRes.push_back(Res);\r
- }\r
-\r
- SDValue LoadChain = NewLD.getValue(NumElts);\r
-\r
- SDValue BuildVec =\r
- DAG.getBuildVector(ResVT, DL, ScalarRes);\r
-\r
- Results.push_back(BuildVec);\r
- Results.push_back(LoadChain);\r
- } else {\r
- // i8 LDG/LDU\r
- assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&\r
- "Custom handling of non-i8 ldu/ldg?");\r
-\r
- // Just copy all operands as-is\r
- SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());\r
-\r
- // Force output to i16\r
- SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);\r
-\r
- MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);\r
-\r
- // We make sure the memory type is i8, which will be used during isel\r
- // to select the proper instruction.\r
- SDValue NewLD =\r
- DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,\r
- MVT::i8, MemSD->getMemOperand());\r
-\r
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,\r
- NewLD.getValue(0)));\r
- Results.push_back(NewLD.getValue(1));\r
- }\r
- }\r
- }\r
-}\r
-\r
-void NVPTXTargetLowering::ReplaceNodeResults(\r
- SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {\r
- switch (N->getOpcode()) {\r
- default:\r
- report_fatal_error("Unhandled custom legalization");\r
- case ISD::LOAD:\r
- ReplaceLoadVector(N, DAG, Results);\r
- return;\r
- case ISD::INTRINSIC_W_CHAIN:\r
- ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);\r
- return;\r
- }\r
-}\r
-\r
-// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.\r
-void NVPTXSection::anchor() {}\r
-\r
-NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {\r
- delete static_cast<NVPTXSection *>(TextSection);\r
- delete static_cast<NVPTXSection *>(DataSection);\r
- delete static_cast<NVPTXSection *>(BSSSection);\r
- delete static_cast<NVPTXSection *>(ReadOnlySection);\r
-\r
- delete static_cast<NVPTXSection *>(StaticCtorSection);\r
- delete static_cast<NVPTXSection *>(StaticDtorSection);\r
- delete static_cast<NVPTXSection *>(LSDASection);\r
- delete static_cast<NVPTXSection *>(EHFrameSection);\r
- delete static_cast<NVPTXSection *>(DwarfAbbrevSection);\r
- delete static_cast<NVPTXSection *>(DwarfInfoSection);\r
- delete static_cast<NVPTXSection *>(DwarfLineSection);\r
- delete static_cast<NVPTXSection *>(DwarfFrameSection);\r
- delete static_cast<NVPTXSection *>(DwarfPubTypesSection);\r
- delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);\r
- delete static_cast<NVPTXSection *>(DwarfStrSection);\r
- delete static_cast<NVPTXSection *>(DwarfLocSection);\r
- delete static_cast<NVPTXSection *>(DwarfARangesSection);\r
- delete static_cast<NVPTXSection *>(DwarfRangesSection);\r
- delete static_cast<NVPTXSection *>(DwarfMacinfoSection);\r
-}\r
-\r
-MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(\r
- const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {\r
- return getDataSection();\r
-}\r
+//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that NVPTX uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NVPTXISelLowering.h"
+#include "MCTargetDesc/NVPTXBaseInfo.h"
+#include "NVPTX.h"
+#include "NVPTXSection.h"
+#include "NVPTXSubtarget.h"
+#include "NVPTXTargetMachine.h"
+#include "NVPTXTargetObjectFile.h"
+#include "NVPTXUtilities.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetCallingConv.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#define DEBUG_TYPE "nvptx-lower"
+
+using namespace llvm;
+
+static unsigned int uniqueCallSite = 0;
+
+static cl::opt<bool> sched4reg(
+ "nvptx-sched4reg",
+ cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
+
+static cl::opt<unsigned>
+FMAContractLevelOpt("nvptx-fma-level", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
+ " 1: do it 2: do it aggressively"),
+ cl::init(2));
+
+static cl::opt<int> UsePrecDivF32(
+ "nvptx-prec-divf32", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specifies: 0 use div.approx, 1 use div.full, 2 use"
+ " IEEE Compliant F32 div.rnd if available."),
+ cl::init(2));
+
+static cl::opt<bool> UsePrecSqrtF32(
+ "nvptx-prec-sqrtf32", cl::Hidden,
+ cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
+ cl::init(true));
+
+static cl::opt<bool> FtzEnabled(
+ "nvptx-f32ftz", cl::ZeroOrMore, cl::Hidden,
+ cl::desc("NVPTX Specific: Flush f32 subnormals to sign-preserving zero."),
+ cl::init(false));
+
+int NVPTXTargetLowering::getDivF32Level() const {
+ if (UsePrecDivF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-divf32=N is used on the command-line, always honor it
+ return UsePrecDivF32;
+ } else {
+ // Otherwise, use div.approx if fast math is enabled
+ if (getTargetMachine().Options.UnsafeFPMath)
+ return 0;
+ else
+ return 2;
+ }
+}
+
+bool NVPTXTargetLowering::usePrecSqrtF32() const {
+ if (UsePrecSqrtF32.getNumOccurrences() > 0) {
+ // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
+ return UsePrecSqrtF32;
+ } else {
+ // Otherwise, use sqrt.approx if fast math is enabled
+ return !getTargetMachine().Options.UnsafeFPMath;
+ }
+}
+
+bool NVPTXTargetLowering::useF32FTZ(const MachineFunction &MF) const {
+ // TODO: Get rid of this flag; there can be only one way to do this.
+ if (FtzEnabled.getNumOccurrences() > 0) {
+ // If nvptx-f32ftz is used on the command-line, always honor it
+ return FtzEnabled;
+ } else {
+ const Function *F = MF.getFunction();
+ // Otherwise, check for an nvptx-f32ftz attribute on the function
+ if (F->hasFnAttribute("nvptx-f32ftz"))
+ return F->getFnAttribute("nvptx-f32ftz").getValueAsString() == "true";
+ else
+ return false;
+ }
+}
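+
+// For example (illustrative): a function whose IR carries the attribute
+// checked above, e.g.
+//   attributes #0 = { "nvptx-f32ftz"="true" }
+// is compiled with the .ftz variants of f32 instructions, unless the
+// nvptx-f32ftz command-line flag is given, in which case the flag wins.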
+
+static bool IsPTXVectorType(MVT VT) {
+ switch (VT.SimpleTy) {
+ default:
+ return false;
+ case MVT::v2i1:
+ case MVT::v4i1:
+ case MVT::v2i8:
+ case MVT::v4i8:
+ case MVT::v2i16:
+ case MVT::v4i16:
+ case MVT::v2i32:
+ case MVT::v4i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v4f16:
+ case MVT::v8f16: // <4 x f16x2>
+ case MVT::v2f32:
+ case MVT::v4f32:
+ case MVT::v2f64:
+ return true;
+ }
+}
+
+/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
+/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
+/// into their primitive components.
+/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
+/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
+/// LowerCall, and LowerReturn.
+static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
+ Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
+ uint64_t StartingOffset = 0) {
+ SmallVector<EVT, 16> TempVTs;
+ SmallVector<uint64_t, 16> TempOffsets;
+
+ ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
+ for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
+ EVT VT = TempVTs[i];
+ uint64_t Off = TempOffsets[i];
+ // Split vectors into individual elements, except for v2f16, which
+ // we will pass as a single scalar.
+ if (VT.isVector()) {
+ unsigned NumElts = VT.getVectorNumElements();
+ EVT EltVT = VT.getVectorElementType();
+ // Vectors with an even number of f16 elements will be passed to
+ // us as an array of v2f16 elements. We must match this so we
+ // stay in sync with Ins/Outs.
+ if (EltVT == MVT::f16 && NumElts % 2 == 0) {
+ EltVT = MVT::v2f16;
+ NumElts /= 2;
+ }
+ for (unsigned j = 0; j != NumElts; ++j) {
+ ValueVTs.push_back(EltVT);
+ if (Offsets)
+ Offsets->push_back(Off + j * EltVT.getStoreSize());
+ }
+ } else {
+ ValueVTs.push_back(VT);
+ if (Offsets)
+ Offsets->push_back(Off);
+ }
+ }
+}
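+
+// For example (illustrative): for a <4 x half> parameter, ComputeValueVTs
+// reports a single v4f16 piece at offset 0, while this helper records two
+// v2f16 pieces at offsets 0 and 4; a <4 x float> parameter is flattened into
+// four f32 pieces at offsets 0, 4, 8, and 12.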
+
+// Check whether we can merge loads/stores of some of the pieces of a
+// flattened function parameter or return value into a single vector
+// load/store.
+//
+// The flattened parameter is represented as a list of EVTs and
+// offsets, and the whole structure is aligned to ParamAlignment. This
+// function determines whether we can load/store pieces of the
+// parameter starting at index Idx using a single vectorized op of
+// size AccessSize. If so, it returns the number of param pieces
+// covered by the vector op. Otherwise, it returns 1.
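+//
+// Worked example (illustrative): with ValueVTs = {f32, f32, f32, f32},
+// Offsets = {0, 4, 8, 12} and ParamAlignment = 16, a query at Idx = 0 with
+// AccessSize = 16 returns 4 (one 128-bit vector access). If ParamAlignment
+// were only 8, AccessSize = 16 would be rejected (returns 1), but
+// AccessSize = 8 would still return 2.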
+static unsigned CanMergeParamLoadStoresStartingAt(
+ unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets, unsigned ParamAlignment) {
+ assert(isPowerOf2_32(AccessSize) && "must be a power of 2!");
+
+ // Can't vectorize if param alignment is not sufficient.
+ if (AccessSize > ParamAlignment)
+ return 1;
+ // Can't vectorize if offset is not aligned.
+ if (Offsets[Idx] & (AccessSize - 1))
+ return 1;
+
+ EVT EltVT = ValueVTs[Idx];
+ unsigned EltSize = EltVT.getStoreSize();
+
+ // Element is too large to vectorize.
+ if (EltSize >= AccessSize)
+ return 1;
+
+ unsigned NumElts = AccessSize / EltSize;
+ // Can't vectorize if AccessSize is not a multiple of EltSize.
+ if (AccessSize != EltSize * NumElts)
+ return 1;
+
+ // We don't have enough elements to vectorize.
+ if (Idx + NumElts > ValueVTs.size())
+ return 1;
+
+ // PTX ISA can only deal with 2- and 4-element vector ops.
+ if (NumElts != 4 && NumElts != 2)
+ return 1;
+
+ for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
+ // Types do not match.
+ if (ValueVTs[j] != EltVT)
+ return 1;
+
+ // Elements are not contiguous.
+ if (Offsets[j] - Offsets[j - 1] != EltSize)
+ return 1;
+ }
+ // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).
+ return NumElts;
+}
+
+// Flags for tracking per-element vectorization state of loads/stores
+// of a flattened function parameter or return value.
+enum ParamVectorizationFlags {
+ PVF_INNER = 0x0, // Middle elements of a vector.
+ PVF_FIRST = 0x1, // First element of the vector.
+ PVF_LAST = 0x2, // Last element of the vector.
+ // Scalar is effectively a 1-element vector.
+ PVF_SCALAR = PVF_FIRST | PVF_LAST
+};
+
+// Computes whether and how we can vectorize the loads/stores of a
+// flattened function parameter or return value.
+//
+// The flattened parameter is represented as the list of ValueVTs and
+// Offsets, and is aligned to ParamAlignment bytes. We return a vector
+// of the same size as ValueVTs indicating how each piece should be
+// loaded/stored (i.e. as a scalar, or as part of a vector
+// load/store).
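+//
+// For example (illustrative): five f32 pieces at offsets {0, 4, 8, 12, 16}
+// with 16-byte alignment produce
+// {PVF_FIRST, PVF_INNER, PVF_INNER, PVF_LAST, PVF_SCALAR}: the first four
+// pieces form one v4 access and the trailing piece stays a scalar.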
+static SmallVector<ParamVectorizationFlags, 16>
+VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
+ const SmallVectorImpl<uint64_t> &Offsets,
+ unsigned ParamAlignment) {
+ // Set vector size to match ValueVTs and mark all elements as
+ // scalars by default.
+ SmallVector<ParamVectorizationFlags, 16> VectorInfo;
+ VectorInfo.assign(ValueVTs.size(), PVF_SCALAR);
+
+ // Check what we can vectorize using 128/64/32-bit accesses.
+ for (int I = 0, E = ValueVTs.size(); I != E; ++I) {
+ // Skip elements we've already processed.
+ assert(VectorInfo[I] == PVF_SCALAR && "Unexpected vector info state.");
+ for (unsigned AccessSize : {16, 8, 4, 2}) {
+ unsigned NumElts = CanMergeParamLoadStoresStartingAt(
+ I, AccessSize, ValueVTs, Offsets, ParamAlignment);
+ // Mark vectorized elements.
+ switch (NumElts) {
+ default:
+ llvm_unreachable("Unexpected return value");
+ case 1:
+ // Can't vectorize using this size, try next smaller size.
+ continue;
+ case 2:
+ assert(I + 1 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_LAST;
+ I += 1;
+ break;
+ case 4:
+ assert(I + 3 < E && "Not enough elements.");
+ VectorInfo[I] = PVF_FIRST;
+ VectorInfo[I + 1] = PVF_INNER;
+ VectorInfo[I + 2] = PVF_INNER;
+ VectorInfo[I + 3] = PVF_LAST;
+ I += 3;
+ break;
+ }
+ // Break out of the inner loop because we've already succeeded
+ // using largest possible AccessSize.
+ break;
+ }
+ }
+ return VectorInfo;
+}
+
+// NVPTXTargetLowering Constructor.
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI)
+ : TargetLowering(TM), nvTM(&TM), STI(STI) {
+ // Always lower memset, memcpy, and memmove intrinsics to load/store
+ // instructions, rather than generating calls to memset, memcpy, or memmove.
+ MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
+ MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
+
+ setBooleanContents(ZeroOrNegativeOneBooleanContent);
+ setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+
+ // Jump is Expensive. Don't create extra control flow for 'and', 'or'
+ // condition branches.
+ setJumpIsExpensive(true);
+
+ // Wide divides are _very_ slow. Try to reduce the width of the divide if
+ // possible.
+ addBypassSlowDiv(64, 32);
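+ // (Illustrative note, relying on the generic BypassSlowDivision transform:
+ // a 64-bit divide whose operands happen to fit in 32 bits gets a run-time
+ // check and is dispatched to the much cheaper 32-bit divide.)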
+
+ // By default, use the Source scheduling
+ if (sched4reg)
+ setSchedulingPreference(Sched::RegPressure);
+ else
+ setSchedulingPreference(Sched::Source);
+
+ auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
+ LegalizeAction NoF16Action) {
+ setOperationAction(Op, VT, STI.allowFP16Math() ? Action : NoF16Action);
+ };
+
+ addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
+ addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
+ addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
+ addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
+ addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
+ addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
+ addRegisterClass(MVT::f16, &NVPTX::Float16RegsRegClass);
+ addRegisterClass(MVT::v2f16, &NVPTX::Float16x2RegsRegClass);
+
+ // Conversion to/from FP16/FP16x2 is always legal.
+ setOperationAction(ISD::SINT_TO_FP, MVT::f16, Legal);
+ setOperationAction(ISD::FP_TO_SINT, MVT::f16, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f16, Custom);
+
+ setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
+ setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
+
+ // Operations not directly supported by NVPTX.
+ setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::v2f16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::f64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i1, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i8, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i16, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Expand);
+ // Some SIGN_EXTEND_INREG can be done using cvt instruction.
+ // For others we will expand to a SHL/SRA pair.
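+ // (Illustrative assumption: sign_extend_inreg from i8 inside an i32 is
+ // typically selected as a single cvt.s32.s8, while the i1 case goes
+ // through the SHL/SRA expansion.)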
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
+
+ setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
+
+ if (STI.hasROT64()) {
+ setOperationAction(ISD::ROTL, MVT::i64, Legal);
+ setOperationAction(ISD::ROTR, MVT::i64, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i64, Expand);
+ setOperationAction(ISD::ROTR, MVT::i64, Expand);
+ }
+ if (STI.hasROT32()) {
+ setOperationAction(ISD::ROTL, MVT::i32, Legal);
+ setOperationAction(ISD::ROTR, MVT::i32, Legal);
+ } else {
+ setOperationAction(ISD::ROTL, MVT::i32, Expand);
+ setOperationAction(ISD::ROTR, MVT::i32, Expand);
+ }
+
+ setOperationAction(ISD::ROTL, MVT::i16, Expand);
+ setOperationAction(ISD::ROTR, MVT::i16, Expand);
+ setOperationAction(ISD::ROTL, MVT::i8, Expand);
+ setOperationAction(ISD::ROTR, MVT::i8, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i16, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+
+ // Indirect branch is not supported.
+ // This also disables Jump Table creation.
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+
+ setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
+ setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
+
+ // We want to legalize constant-related memmove and memcpy
+ // intrinsics.
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+
+ // Turn FP extload into load/fpextend
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
+ // Turn FP truncstore into trunc + store.
+ // FIXME: vector types should also be expanded
+ setTruncStoreAction(MVT::f32, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f16, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // PTX does not support load / store predicate registers
+ setOperationAction(ISD::LOAD, MVT::i1, Custom);
+ setOperationAction(ISD::STORE, MVT::i1, Custom);
+
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
+ setTruncStoreAction(VT, MVT::i1, Expand);
+ }
+
+ // This is legal in NVPTX
+ setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
+ setOperationAction(ISD::ConstantFP, MVT::f16, Legal);
+
+ // TRAP can be lowered to PTX trap
+ setOperationAction(ISD::TRAP, MVT::Other, Legal);
+
+ setOperationAction(ISD::ADDC, MVT::i64, Expand);
+ setOperationAction(ISD::ADDE, MVT::i64, Expand);
+
+ // Register custom handling for vector loads/stores
+ for (MVT VT : MVT::vector_valuetypes()) {
+ if (IsPTXVectorType(VT)) {
+ setOperationAction(ISD::LOAD, VT, Custom);
+ setOperationAction(ISD::STORE, VT, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
+ }
+ }
+
+ // Custom handling for i8 intrinsics
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
+
+ for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
+ setOperationAction(ISD::ABS, Ty, Legal);
+ setOperationAction(ISD::SMIN, Ty, Legal);
+ setOperationAction(ISD::SMAX, Ty, Legal);
+ setOperationAction(ISD::UMIN, Ty, Legal);
+ setOperationAction(ISD::UMAX, Ty, Legal);
+
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ }
+
+ setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i64, Expand);
+
+ // PTX does not directly support SELP of i1, so promote to i32 first
+ setOperationAction(ISD::SELECT, MVT::i1, Custom);
+
+ // PTX cannot multiply two i64s in a single instruction.
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+
+ // We have some custom DAG combine patterns for these nodes
+ setTargetDAGCombine(ISD::ADD);
+ setTargetDAGCombine(ISD::AND);
+ setTargetDAGCombine(ISD::FADD);
+ setTargetDAGCombine(ISD::MUL);
+ setTargetDAGCombine(ISD::SHL);
+ setTargetDAGCombine(ISD::SREM);
+ setTargetDAGCombine(ISD::UREM);
+
+ // setcc for f16x2 needs special handling to prevent the legalizer's
+ // attempt to scalarize it due to v2i1 not being legal.
+ if (STI.allowFP16Math())
+ setTargetDAGCombine(ISD::SETCC);
+
+ // Promote fp16 arithmetic if fp16 hardware isn't available or the
+ // user passed --nvptx-no-fp16-math. The flag is useful because,
+ // although sm_53+ GPUs have some sort of FP16 support in
+ // hardware, only sm_53 and sm_60 have a full implementation. Others
+ // have only a token amount of hardware and are likely to run faster
+ // by using fp32 units instead.
+ for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
+ setFP16OperationAction(Op, MVT::f16, Legal, Promote);
+ setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
+ }
+
+ // There's no neg.f16 instruction. Expand to (0-x).
+ setOperationAction(ISD::FNEG, MVT::f16, Expand);
+ setOperationAction(ISD::FNEG, MVT::v2f16, Expand);
+
+ // (would be) Library functions.
+
+ // These map to conversion instructions for scalar FP types.
+ for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
+ ISD::FROUND, ISD::FTRUNC}) {
+ setOperationAction(Op, MVT::f16, Legal);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+
+ // 'Expand' implements FCOPYSIGN without calling an external library.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::v2f16, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+
+ // These map to corresponding instructions for f32/f64. f16 must be
+ // promoted to f32. v2f16 is expanded to f16, which is then promoted
+ // to f32.
+ for (const auto &Op : {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS,
+ ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM}) {
+ setOperationAction(Op, MVT::f16, Promote);
+ setOperationAction(Op, MVT::f32, Legal);
+ setOperationAction(Op, MVT::f64, Legal);
+ setOperationAction(Op, MVT::v2f16, Expand);
+ }
+ setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
+ setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
+ setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);
+
+ // No FEXP2, FLOG2. The PTX ex2 and log2 functions are always approximate.
+ // No FPOW or FREM in PTX.
+
+ // Now deduce the information based on the above-mentioned actions.
+ computeRegisterProperties(STI.getRegisterInfo());
+}
+
+const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch ((NVPTXISD::NodeType)Opcode) {
+ case NVPTXISD::FIRST_NUMBER:
+ break;
+ case NVPTXISD::CALL:
+ return "NVPTXISD::CALL";
+ case NVPTXISD::RET_FLAG:
+ return "NVPTXISD::RET_FLAG";
+ case NVPTXISD::LOAD_PARAM:
+ return "NVPTXISD::LOAD_PARAM";
+ case NVPTXISD::Wrapper:
+ return "NVPTXISD::Wrapper";
+ case NVPTXISD::DeclareParam:
+ return "NVPTXISD::DeclareParam";
+ case NVPTXISD::DeclareScalarParam:
+ return "NVPTXISD::DeclareScalarParam";
+ case NVPTXISD::DeclareRet:
+ return "NVPTXISD::DeclareRet";
+ case NVPTXISD::DeclareScalarRet:
+ return "NVPTXISD::DeclareScalarRet";
+ case NVPTXISD::DeclareRetParam:
+ return "NVPTXISD::DeclareRetParam";
+ case NVPTXISD::PrintCall:
+ return "NVPTXISD::PrintCall";
+ case NVPTXISD::PrintConvergentCall:
+ return "NVPTXISD::PrintConvergentCall";
+ case NVPTXISD::PrintCallUni:
+ return "NVPTXISD::PrintCallUni";
+ case NVPTXISD::PrintConvergentCallUni:
+ return "NVPTXISD::PrintConvergentCallUni";
+ case NVPTXISD::LoadParam:
+ return "NVPTXISD::LoadParam";
+ case NVPTXISD::LoadParamV2:
+ return "NVPTXISD::LoadParamV2";
+ case NVPTXISD::LoadParamV4:
+ return "NVPTXISD::LoadParamV4";
+ case NVPTXISD::StoreParam:
+ return "NVPTXISD::StoreParam";
+ case NVPTXISD::StoreParamV2:
+ return "NVPTXISD::StoreParamV2";
+ case NVPTXISD::StoreParamV4:
+ return "NVPTXISD::StoreParamV4";
+ case NVPTXISD::StoreParamS32:
+ return "NVPTXISD::StoreParamS32";
+ case NVPTXISD::StoreParamU32:
+ return "NVPTXISD::StoreParamU32";
+ case NVPTXISD::CallArgBegin:
+ return "NVPTXISD::CallArgBegin";
+ case NVPTXISD::CallArg:
+ return "NVPTXISD::CallArg";
+ case NVPTXISD::LastCallArg:
+ return "NVPTXISD::LastCallArg";
+ case NVPTXISD::CallArgEnd:
+ return "NVPTXISD::CallArgEnd";
+ case NVPTXISD::CallVoid:
+ return "NVPTXISD::CallVoid";
+ case NVPTXISD::CallVal:
+ return "NVPTXISD::CallVal";
+ case NVPTXISD::CallSymbol:
+ return "NVPTXISD::CallSymbol";
+ case NVPTXISD::Prototype:
+ return "NVPTXISD::Prototype";
+ case NVPTXISD::MoveParam:
+ return "NVPTXISD::MoveParam";
+ case NVPTXISD::StoreRetval:
+ return "NVPTXISD::StoreRetval";
+ case NVPTXISD::StoreRetvalV2:
+ return "NVPTXISD::StoreRetvalV2";
+ case NVPTXISD::StoreRetvalV4:
+ return "NVPTXISD::StoreRetvalV4";
+ case NVPTXISD::PseudoUseParam:
+ return "NVPTXISD::PseudoUseParam";
+ case NVPTXISD::RETURN:
+ return "NVPTXISD::RETURN";
+ case NVPTXISD::CallSeqBegin:
+ return "NVPTXISD::CallSeqBegin";
+ case NVPTXISD::CallSeqEnd:
+ return "NVPTXISD::CallSeqEnd";
+ case NVPTXISD::CallPrototype:
+ return "NVPTXISD::CallPrototype";
+ case NVPTXISD::LoadV2:
+ return "NVPTXISD::LoadV2";
+ case NVPTXISD::LoadV4:
+ return "NVPTXISD::LoadV4";
+ case NVPTXISD::LDGV2:
+ return "NVPTXISD::LDGV2";
+ case NVPTXISD::LDGV4:
+ return "NVPTXISD::LDGV4";
+ case NVPTXISD::LDUV2:
+ return "NVPTXISD::LDUV2";
+ case NVPTXISD::LDUV4:
+ return "NVPTXISD::LDUV4";
+ case NVPTXISD::StoreV2:
+ return "NVPTXISD::StoreV2";
+ case NVPTXISD::StoreV4:
+ return "NVPTXISD::StoreV4";
+ case NVPTXISD::FUN_SHFL_CLAMP:
+ return "NVPTXISD::FUN_SHFL_CLAMP";
+ case NVPTXISD::FUN_SHFR_CLAMP:
+ return "NVPTXISD::FUN_SHFR_CLAMP";
+ case NVPTXISD::IMAD:
+ return "NVPTXISD::IMAD";
+ case NVPTXISD::SETP_F16X2:
+ return "NVPTXISD::SETP_F16X2";
+ case NVPTXISD::Dummy:
+ return "NVPTXISD::Dummy";
+ case NVPTXISD::MUL_WIDE_SIGNED:
+ return "NVPTXISD::MUL_WIDE_SIGNED";
+ case NVPTXISD::MUL_WIDE_UNSIGNED:
+ return "NVPTXISD::MUL_WIDE_UNSIGNED";
+ case NVPTXISD::Tex1DFloatS32: return "NVPTXISD::Tex1DFloatS32";
+ case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
+ case NVPTXISD::Tex1DFloatFloatLevel:
+ return "NVPTXISD::Tex1DFloatFloatLevel";
+ case NVPTXISD::Tex1DFloatFloatGrad:
+ return "NVPTXISD::Tex1DFloatFloatGrad";
+ case NVPTXISD::Tex1DS32S32: return "NVPTXISD::Tex1DS32S32";
+ case NVPTXISD::Tex1DS32Float: return "NVPTXISD::Tex1DS32Float";
+ case NVPTXISD::Tex1DS32FloatLevel:
+ return "NVPTXISD::Tex1DS32FloatLevel";
+ case NVPTXISD::Tex1DS32FloatGrad:
+ return "NVPTXISD::Tex1DS32FloatGrad";
+ case NVPTXISD::Tex1DU32S32: return "NVPTXISD::Tex1DU32S32";
+ case NVPTXISD::Tex1DU32Float: return "NVPTXISD::Tex1DU32Float";
+ case NVPTXISD::Tex1DU32FloatLevel:
+ return "NVPTXISD::Tex1DU32FloatLevel";
+ case NVPTXISD::Tex1DU32FloatGrad:
+ return "NVPTXISD::Tex1DU32FloatGrad";
+ case NVPTXISD::Tex1DArrayFloatS32: return "NVPTXISD::Tex1DArrayFloatS32";
+ case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
+ case NVPTXISD::Tex1DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex1DArrayFloatFloatLevel";
+ case NVPTXISD::Tex1DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex1DArrayFloatFloatGrad";
+ case NVPTXISD::Tex1DArrayS32S32: return "NVPTXISD::Tex1DArrayS32S32";
+ case NVPTXISD::Tex1DArrayS32Float: return "NVPTXISD::Tex1DArrayS32Float";
+ case NVPTXISD::Tex1DArrayS32FloatLevel:
+ return "NVPTXISD::Tex1DArrayS32FloatLevel";
+ case NVPTXISD::Tex1DArrayS32FloatGrad:
+ return "NVPTXISD::Tex1DArrayS32FloatGrad";
+ case NVPTXISD::Tex1DArrayU32S32: return "NVPTXISD::Tex1DArrayU32S32";
+ case NVPTXISD::Tex1DArrayU32Float: return "NVPTXISD::Tex1DArrayU32Float";
+ case NVPTXISD::Tex1DArrayU32FloatLevel:
+ return "NVPTXISD::Tex1DArrayU32FloatLevel";
+ case NVPTXISD::Tex1DArrayU32FloatGrad:
+ return "NVPTXISD::Tex1DArrayU32FloatGrad";
+ case NVPTXISD::Tex2DFloatS32: return "NVPTXISD::Tex2DFloatS32";
+ case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
+ case NVPTXISD::Tex2DFloatFloatLevel:
+ return "NVPTXISD::Tex2DFloatFloatLevel";
+ case NVPTXISD::Tex2DFloatFloatGrad:
+ return "NVPTXISD::Tex2DFloatFloatGrad";
+ case NVPTXISD::Tex2DS32S32: return "NVPTXISD::Tex2DS32S32";
+ case NVPTXISD::Tex2DS32Float: return "NVPTXISD::Tex2DS32Float";
+ case NVPTXISD::Tex2DS32FloatLevel:
+ return "NVPTXISD::Tex2DS32FloatLevel";
+ case NVPTXISD::Tex2DS32FloatGrad:
+ return "NVPTXISD::Tex2DS32FloatGrad";
+ case NVPTXISD::Tex2DU32S32: return "NVPTXISD::Tex2DU32S32";
+ case NVPTXISD::Tex2DU32Float: return "NVPTXISD::Tex2DU32Float";
+ case NVPTXISD::Tex2DU32FloatLevel:
+ return "NVPTXISD::Tex2DU32FloatLevel";
+ case NVPTXISD::Tex2DU32FloatGrad:
+ return "NVPTXISD::Tex2DU32FloatGrad";
+ case NVPTXISD::Tex2DArrayFloatS32: return "NVPTXISD::Tex2DArrayFloatS32";
+ case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
+ case NVPTXISD::Tex2DArrayFloatFloatLevel:
+ return "NVPTXISD::Tex2DArrayFloatFloatLevel";
+ case NVPTXISD::Tex2DArrayFloatFloatGrad:
+ return "NVPTXISD::Tex2DArrayFloatFloatGrad";
+ case NVPTXISD::Tex2DArrayS32S32: return "NVPTXISD::Tex2DArrayS32S32";
+ case NVPTXISD::Tex2DArrayS32Float: return "NVPTXISD::Tex2DArrayS32Float";
+ case NVPTXISD::Tex2DArrayS32FloatLevel:
+ return "NVPTXISD::Tex2DArrayS32FloatLevel";
+ case NVPTXISD::Tex2DArrayS32FloatGrad:
+ return "NVPTXISD::Tex2DArrayS32FloatGrad";
+ case NVPTXISD::Tex2DArrayU32S32: return "NVPTXISD::Tex2DArrayU32S32";
+ case NVPTXISD::Tex2DArrayU32Float: return "NVPTXISD::Tex2DArrayU32Float";
+ case NVPTXISD::Tex2DArrayU32FloatLevel:
+ return "NVPTXISD::Tex2DArrayU32FloatLevel";
+ case NVPTXISD::Tex2DArrayU32FloatGrad:
+ return "NVPTXISD::Tex2DArrayU32FloatGrad";
+ case NVPTXISD::Tex3DFloatS32: return "NVPTXISD::Tex3DFloatS32";
+ case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
+ case NVPTXISD::Tex3DFloatFloatLevel:
+ return "NVPTXISD::Tex3DFloatFloatLevel";
+ case NVPTXISD::Tex3DFloatFloatGrad:
+ return "NVPTXISD::Tex3DFloatFloatGrad";
+ case NVPTXISD::Tex3DS32S32: return "NVPTXISD::Tex3DS32S32";
+ case NVPTXISD::Tex3DS32Float: return "NVPTXISD::Tex3DS32Float";
+ case NVPTXISD::Tex3DS32FloatLevel:
+ return "NVPTXISD::Tex3DS32FloatLevel";
+ case NVPTXISD::Tex3DS32FloatGrad:
+ return "NVPTXISD::Tex3DS32FloatGrad";
+ case NVPTXISD::Tex3DU32S32: return "NVPTXISD::Tex3DU32S32";
+ case NVPTXISD::Tex3DU32Float: return "NVPTXISD::Tex3DU32Float";
+ case NVPTXISD::Tex3DU32FloatLevel:
+ return "NVPTXISD::Tex3DU32FloatLevel";
+ case NVPTXISD::Tex3DU32FloatGrad:
+ return "NVPTXISD::Tex3DU32FloatGrad";
+ case NVPTXISD::TexCubeFloatFloat: return "NVPTXISD::TexCubeFloatFloat";
+ case NVPTXISD::TexCubeFloatFloatLevel:
+ return "NVPTXISD::TexCubeFloatFloatLevel";
+ case NVPTXISD::TexCubeS32Float: return "NVPTXISD::TexCubeS32Float";
+ case NVPTXISD::TexCubeS32FloatLevel:
+ return "NVPTXISD::TexCubeS32FloatLevel";
+ case NVPTXISD::TexCubeU32Float: return "NVPTXISD::TexCubeU32Float";
+ case NVPTXISD::TexCubeU32FloatLevel:
+ return "NVPTXISD::TexCubeU32FloatLevel";
+ case NVPTXISD::TexCubeArrayFloatFloat:
+ return "NVPTXISD::TexCubeArrayFloatFloat";
+ case NVPTXISD::TexCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexCubeArrayS32Float:
+ return "NVPTXISD::TexCubeArrayS32Float";
+ case NVPTXISD::TexCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexCubeArrayS32FloatLevel";
+ case NVPTXISD::TexCubeArrayU32Float:
+ return "NVPTXISD::TexCubeArrayU32Float";
+ case NVPTXISD::TexCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4R2DFloatFloat:
+ return "NVPTXISD::Tld4R2DFloatFloat";
+ case NVPTXISD::Tld4G2DFloatFloat:
+ return "NVPTXISD::Tld4G2DFloatFloat";
+ case NVPTXISD::Tld4B2DFloatFloat:
+ return "NVPTXISD::Tld4B2DFloatFloat";
+ case NVPTXISD::Tld4A2DFloatFloat:
+ return "NVPTXISD::Tld4A2DFloatFloat";
+ case NVPTXISD::Tld4R2DS64Float:
+ return "NVPTXISD::Tld4R2DS64Float";
+ case NVPTXISD::Tld4G2DS64Float:
+ return "NVPTXISD::Tld4G2DS64Float";
+ case NVPTXISD::Tld4B2DS64Float:
+ return "NVPTXISD::Tld4B2DS64Float";
+ case NVPTXISD::Tld4A2DS64Float:
+ return "NVPTXISD::Tld4A2DS64Float";
+ case NVPTXISD::Tld4R2DU64Float:
+ return "NVPTXISD::Tld4R2DU64Float";
+ case NVPTXISD::Tld4G2DU64Float:
+ return "NVPTXISD::Tld4G2DU64Float";
+ case NVPTXISD::Tld4B2DU64Float:
+ return "NVPTXISD::Tld4B2DU64Float";
+ case NVPTXISD::Tld4A2DU64Float:
+ return "NVPTXISD::Tld4A2DU64Float";
+
+ case NVPTXISD::TexUnified1DFloatS32:
+ return "NVPTXISD::TexUnified1DFloatS32";
+ case NVPTXISD::TexUnified1DFloatFloat:
+ return "NVPTXISD::TexUnified1DFloatFloat";
+ case NVPTXISD::TexUnified1DFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DFloatFloatLevel";
+ case NVPTXISD::TexUnified1DFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DFloatFloatGrad";
+ case NVPTXISD::TexUnified1DS32S32:
+ return "NVPTXISD::TexUnified1DS32S32";
+ case NVPTXISD::TexUnified1DS32Float:
+ return "NVPTXISD::TexUnified1DS32Float";
+ case NVPTXISD::TexUnified1DS32FloatLevel:
+ return "NVPTXISD::TexUnified1DS32FloatLevel";
+ case NVPTXISD::TexUnified1DS32FloatGrad:
+ return "NVPTXISD::TexUnified1DS32FloatGrad";
+ case NVPTXISD::TexUnified1DU32S32:
+ return "NVPTXISD::TexUnified1DU32S32";
+ case NVPTXISD::TexUnified1DU32Float:
+ return "NVPTXISD::TexUnified1DU32Float";
+ case NVPTXISD::TexUnified1DU32FloatLevel:
+ return "NVPTXISD::TexUnified1DU32FloatLevel";
+ case NVPTXISD::TexUnified1DU32FloatGrad:
+ return "NVPTXISD::TexUnified1DU32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayFloatS32:
+ return "NVPTXISD::TexUnified1DArrayFloatS32";
+ case NVPTXISD::TexUnified1DArrayFloatFloat:
+ return "NVPTXISD::TexUnified1DArrayFloatFloat";
+ case NVPTXISD::TexUnified1DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified1DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified1DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified1DArrayS32S32:
+ return "NVPTXISD::TexUnified1DArrayS32S32";
+ case NVPTXISD::TexUnified1DArrayS32Float:
+ return "NVPTXISD::TexUnified1DArrayS32Float";
+ case NVPTXISD::TexUnified1DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified1DArrayU32S32:
+ return "NVPTXISD::TexUnified1DArrayU32S32";
+ case NVPTXISD::TexUnified1DArrayU32Float:
+ return "NVPTXISD::TexUnified1DArrayU32Float";
+ case NVPTXISD::TexUnified1DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified1DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified1DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified1DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified2DFloatS32:
+ return "NVPTXISD::TexUnified2DFloatS32";
+ case NVPTXISD::TexUnified2DFloatFloat:
+ return "NVPTXISD::TexUnified2DFloatFloat";
+ case NVPTXISD::TexUnified2DFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DFloatFloatLevel";
+ case NVPTXISD::TexUnified2DFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DFloatFloatGrad";
+ case NVPTXISD::TexUnified2DS32S32:
+ return "NVPTXISD::TexUnified2DS32S32";
+ case NVPTXISD::TexUnified2DS32Float:
+ return "NVPTXISD::TexUnified2DS32Float";
+ case NVPTXISD::TexUnified2DS32FloatLevel:
+ return "NVPTXISD::TexUnified2DS32FloatLevel";
+ case NVPTXISD::TexUnified2DS32FloatGrad:
+ return "NVPTXISD::TexUnified2DS32FloatGrad";
+ case NVPTXISD::TexUnified2DU32S32:
+ return "NVPTXISD::TexUnified2DU32S32";
+ case NVPTXISD::TexUnified2DU32Float:
+ return "NVPTXISD::TexUnified2DU32Float";
+ case NVPTXISD::TexUnified2DU32FloatLevel:
+ return "NVPTXISD::TexUnified2DU32FloatLevel";
+ case NVPTXISD::TexUnified2DU32FloatGrad:
+ return "NVPTXISD::TexUnified2DU32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayFloatS32:
+ return "NVPTXISD::TexUnified2DArrayFloatS32";
+ case NVPTXISD::TexUnified2DArrayFloatFloat:
+ return "NVPTXISD::TexUnified2DArrayFloatFloat";
+ case NVPTXISD::TexUnified2DArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatLevel";
+ case NVPTXISD::TexUnified2DArrayFloatFloatGrad:
+ return "NVPTXISD::TexUnified2DArrayFloatFloatGrad";
+ case NVPTXISD::TexUnified2DArrayS32S32:
+ return "NVPTXISD::TexUnified2DArrayS32S32";
+ case NVPTXISD::TexUnified2DArrayS32Float:
+ return "NVPTXISD::TexUnified2DArrayS32Float";
+ case NVPTXISD::TexUnified2DArrayS32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayS32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayS32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayS32FloatGrad";
+ case NVPTXISD::TexUnified2DArrayU32S32:
+ return "NVPTXISD::TexUnified2DArrayU32S32";
+ case NVPTXISD::TexUnified2DArrayU32Float:
+ return "NVPTXISD::TexUnified2DArrayU32Float";
+ case NVPTXISD::TexUnified2DArrayU32FloatLevel:
+ return "NVPTXISD::TexUnified2DArrayU32FloatLevel";
+ case NVPTXISD::TexUnified2DArrayU32FloatGrad:
+ return "NVPTXISD::TexUnified2DArrayU32FloatGrad";
+ case NVPTXISD::TexUnified3DFloatS32:
+ return "NVPTXISD::TexUnified3DFloatS32";
+ case NVPTXISD::TexUnified3DFloatFloat:
+ return "NVPTXISD::TexUnified3DFloatFloat";
+ case NVPTXISD::TexUnified3DFloatFloatLevel:
+ return "NVPTXISD::TexUnified3DFloatFloatLevel";
+ case NVPTXISD::TexUnified3DFloatFloatGrad:
+ return "NVPTXISD::TexUnified3DFloatFloatGrad";
+ case NVPTXISD::TexUnified3DS32S32:
+ return "NVPTXISD::TexUnified3DS32S32";
+ case NVPTXISD::TexUnified3DS32Float:
+ return "NVPTXISD::TexUnified3DS32Float";
+ case NVPTXISD::TexUnified3DS32FloatLevel:
+ return "NVPTXISD::TexUnified3DS32FloatLevel";
+ case NVPTXISD::TexUnified3DS32FloatGrad:
+ return "NVPTXISD::TexUnified3DS32FloatGrad";
+ case NVPTXISD::TexUnified3DU32S32:
+ return "NVPTXISD::TexUnified3DU32S32";
+ case NVPTXISD::TexUnified3DU32Float:
+ return "NVPTXISD::TexUnified3DU32Float";
+ case NVPTXISD::TexUnified3DU32FloatLevel:
+ return "NVPTXISD::TexUnified3DU32FloatLevel";
+ case NVPTXISD::TexUnified3DU32FloatGrad:
+ return "NVPTXISD::TexUnified3DU32FloatGrad";
+ case NVPTXISD::TexUnifiedCubeFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeFloatFloat";
+ case NVPTXISD::TexUnifiedCubeFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeS32Float:
+ return "NVPTXISD::TexUnifiedCubeS32Float";
+ case NVPTXISD::TexUnifiedCubeS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeU32Float:
+ return "NVPTXISD::TexUnifiedCubeU32Float";
+ case NVPTXISD::TexUnifiedCubeU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeU32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloat:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloat";
+ case NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayS32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayS32Float";
+ case NVPTXISD::TexUnifiedCubeArrayS32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayS32FloatLevel";
+ case NVPTXISD::TexUnifiedCubeArrayU32Float:
+ return "NVPTXISD::TexUnifiedCubeArrayU32Float";
+ case NVPTXISD::TexUnifiedCubeArrayU32FloatLevel:
+ return "NVPTXISD::TexUnifiedCubeArrayU32FloatLevel";
+ case NVPTXISD::Tld4UnifiedR2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedR2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedG2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedG2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedB2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedB2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedA2DFloatFloat:
+ return "NVPTXISD::Tld4UnifiedA2DFloatFloat";
+ case NVPTXISD::Tld4UnifiedR2DS64Float:
+ return "NVPTXISD::Tld4UnifiedR2DS64Float";
+ case NVPTXISD::Tld4UnifiedG2DS64Float:
+ return "NVPTXISD::Tld4UnifiedG2DS64Float";
+ case NVPTXISD::Tld4UnifiedB2DS64Float:
+ return "NVPTXISD::Tld4UnifiedB2DS64Float";
+ case NVPTXISD::Tld4UnifiedA2DS64Float:
+ return "NVPTXISD::Tld4UnifiedA2DS64Float";
+ case NVPTXISD::Tld4UnifiedR2DU64Float:
+ return "NVPTXISD::Tld4UnifiedR2DU64Float";
+ case NVPTXISD::Tld4UnifiedG2DU64Float:
+ return "NVPTXISD::Tld4UnifiedG2DU64Float";
+ case NVPTXISD::Tld4UnifiedB2DU64Float:
+ return "NVPTXISD::Tld4UnifiedB2DU64Float";
+ case NVPTXISD::Tld4UnifiedA2DU64Float:
+ return "NVPTXISD::Tld4UnifiedA2DU64Float";
+
+ case NVPTXISD::Suld1DI8Clamp: return "NVPTXISD::Suld1DI8Clamp";
+ case NVPTXISD::Suld1DI16Clamp: return "NVPTXISD::Suld1DI16Clamp";
+ case NVPTXISD::Suld1DI32Clamp: return "NVPTXISD::Suld1DI32Clamp";
+ case NVPTXISD::Suld1DI64Clamp: return "NVPTXISD::Suld1DI64Clamp";
+ case NVPTXISD::Suld1DV2I8Clamp: return "NVPTXISD::Suld1DV2I8Clamp";
+ case NVPTXISD::Suld1DV2I16Clamp: return "NVPTXISD::Suld1DV2I16Clamp";
+ case NVPTXISD::Suld1DV2I32Clamp: return "NVPTXISD::Suld1DV2I32Clamp";
+ case NVPTXISD::Suld1DV2I64Clamp: return "NVPTXISD::Suld1DV2I64Clamp";
+ case NVPTXISD::Suld1DV4I8Clamp: return "NVPTXISD::Suld1DV4I8Clamp";
+ case NVPTXISD::Suld1DV4I16Clamp: return "NVPTXISD::Suld1DV4I16Clamp";
+ case NVPTXISD::Suld1DV4I32Clamp: return "NVPTXISD::Suld1DV4I32Clamp";
+
+ case NVPTXISD::Suld1DArrayI8Clamp: return "NVPTXISD::Suld1DArrayI8Clamp";
+ case NVPTXISD::Suld1DArrayI16Clamp: return "NVPTXISD::Suld1DArrayI16Clamp";
+ case NVPTXISD::Suld1DArrayI32Clamp: return "NVPTXISD::Suld1DArrayI32Clamp";
+ case NVPTXISD::Suld1DArrayI64Clamp: return "NVPTXISD::Suld1DArrayI64Clamp";
+ case NVPTXISD::Suld1DArrayV2I8Clamp: return "NVPTXISD::Suld1DArrayV2I8Clamp";
+ case NVPTXISD::Suld1DArrayV2I16Clamp:return "NVPTXISD::Suld1DArrayV2I16Clamp";
+ case NVPTXISD::Suld1DArrayV2I32Clamp:return "NVPTXISD::Suld1DArrayV2I32Clamp";
+ case NVPTXISD::Suld1DArrayV2I64Clamp:return "NVPTXISD::Suld1DArrayV2I64Clamp";
+ case NVPTXISD::Suld1DArrayV4I8Clamp: return "NVPTXISD::Suld1DArrayV4I8Clamp";
+ case NVPTXISD::Suld1DArrayV4I16Clamp:return "NVPTXISD::Suld1DArrayV4I16Clamp";
+ case NVPTXISD::Suld1DArrayV4I32Clamp:return "NVPTXISD::Suld1DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld2DI8Clamp: return "NVPTXISD::Suld2DI8Clamp";
+ case NVPTXISD::Suld2DI16Clamp: return "NVPTXISD::Suld2DI16Clamp";
+ case NVPTXISD::Suld2DI32Clamp: return "NVPTXISD::Suld2DI32Clamp";
+ case NVPTXISD::Suld2DI64Clamp: return "NVPTXISD::Suld2DI64Clamp";
+ case NVPTXISD::Suld2DV2I8Clamp: return "NVPTXISD::Suld2DV2I8Clamp";
+ case NVPTXISD::Suld2DV2I16Clamp: return "NVPTXISD::Suld2DV2I16Clamp";
+ case NVPTXISD::Suld2DV2I32Clamp: return "NVPTXISD::Suld2DV2I32Clamp";
+ case NVPTXISD::Suld2DV2I64Clamp: return "NVPTXISD::Suld2DV2I64Clamp";
+ case NVPTXISD::Suld2DV4I8Clamp: return "NVPTXISD::Suld2DV4I8Clamp";
+ case NVPTXISD::Suld2DV4I16Clamp: return "NVPTXISD::Suld2DV4I16Clamp";
+ case NVPTXISD::Suld2DV4I32Clamp: return "NVPTXISD::Suld2DV4I32Clamp";
+
+ case NVPTXISD::Suld2DArrayI8Clamp: return "NVPTXISD::Suld2DArrayI8Clamp";
+ case NVPTXISD::Suld2DArrayI16Clamp: return "NVPTXISD::Suld2DArrayI16Clamp";
+ case NVPTXISD::Suld2DArrayI32Clamp: return "NVPTXISD::Suld2DArrayI32Clamp";
+ case NVPTXISD::Suld2DArrayI64Clamp: return "NVPTXISD::Suld2DArrayI64Clamp";
+ case NVPTXISD::Suld2DArrayV2I8Clamp: return "NVPTXISD::Suld2DArrayV2I8Clamp";
+ case NVPTXISD::Suld2DArrayV2I16Clamp:return "NVPTXISD::Suld2DArrayV2I16Clamp";
+ case NVPTXISD::Suld2DArrayV2I32Clamp:return "NVPTXISD::Suld2DArrayV2I32Clamp";
+ case NVPTXISD::Suld2DArrayV2I64Clamp:return "NVPTXISD::Suld2DArrayV2I64Clamp";
+ case NVPTXISD::Suld2DArrayV4I8Clamp: return "NVPTXISD::Suld2DArrayV4I8Clamp";
+ case NVPTXISD::Suld2DArrayV4I16Clamp:return "NVPTXISD::Suld2DArrayV4I16Clamp";
+ case NVPTXISD::Suld2DArrayV4I32Clamp:return "NVPTXISD::Suld2DArrayV4I32Clamp";
+
+ case NVPTXISD::Suld3DI8Clamp: return "NVPTXISD::Suld3DI8Clamp";
+ case NVPTXISD::Suld3DI16Clamp: return "NVPTXISD::Suld3DI16Clamp";
+ case NVPTXISD::Suld3DI32Clamp: return "NVPTXISD::Suld3DI32Clamp";
+ case NVPTXISD::Suld3DI64Clamp: return "NVPTXISD::Suld3DI64Clamp";
+ case NVPTXISD::Suld3DV2I8Clamp: return "NVPTXISD::Suld3DV2I8Clamp";
+ case NVPTXISD::Suld3DV2I16Clamp: return "NVPTXISD::Suld3DV2I16Clamp";
+ case NVPTXISD::Suld3DV2I32Clamp: return "NVPTXISD::Suld3DV2I32Clamp";
+ case NVPTXISD::Suld3DV2I64Clamp: return "NVPTXISD::Suld3DV2I64Clamp";
+ case NVPTXISD::Suld3DV4I8Clamp: return "NVPTXISD::Suld3DV4I8Clamp";
+ case NVPTXISD::Suld3DV4I16Clamp: return "NVPTXISD::Suld3DV4I16Clamp";
+ case NVPTXISD::Suld3DV4I32Clamp: return "NVPTXISD::Suld3DV4I32Clamp";
+
+ case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
+ case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
+ case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
+ case NVPTXISD::Suld1DI64Trap: return "NVPTXISD::Suld1DI64Trap";
+ case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
+ case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
+ case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
+ case NVPTXISD::Suld1DV2I64Trap: return "NVPTXISD::Suld1DV2I64Trap";
+ case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
+ case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
+ case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
+
+ case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
+ case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
+ case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
+ case NVPTXISD::Suld1DArrayI64Trap: return "NVPTXISD::Suld1DArrayI64Trap";
+ case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
+ case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
+ case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
+ case NVPTXISD::Suld1DArrayV2I64Trap: return "NVPTXISD::Suld1DArrayV2I64Trap";
+ case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
+ case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
+ case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
+
+ case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
+ case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
+ case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
+ case NVPTXISD::Suld2DI64Trap: return "NVPTXISD::Suld2DI64Trap";
+ case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
+ case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
+ case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
+ case NVPTXISD::Suld2DV2I64Trap: return "NVPTXISD::Suld2DV2I64Trap";
+ case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
+ case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
+ case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
+
+ case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
+ case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
+ case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
+ case NVPTXISD::Suld2DArrayI64Trap: return "NVPTXISD::Suld2DArrayI64Trap";
+ case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
+ case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
+ case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
+ case NVPTXISD::Suld2DArrayV2I64Trap: return "NVPTXISD::Suld2DArrayV2I64Trap";
+ case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
+ case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
+ case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
+
+ case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
+ case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
+ case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
+ case NVPTXISD::Suld3DI64Trap: return "NVPTXISD::Suld3DI64Trap";
+ case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
+ case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
+ case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
+ case NVPTXISD::Suld3DV2I64Trap: return "NVPTXISD::Suld3DV2I64Trap";
+ case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
+ case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
+ case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
+
+ case NVPTXISD::Suld1DI8Zero: return "NVPTXISD::Suld1DI8Zero";
+ case NVPTXISD::Suld1DI16Zero: return "NVPTXISD::Suld1DI16Zero";
+ case NVPTXISD::Suld1DI32Zero: return "NVPTXISD::Suld1DI32Zero";
+ case NVPTXISD::Suld1DI64Zero: return "NVPTXISD::Suld1DI64Zero";
+ case NVPTXISD::Suld1DV2I8Zero: return "NVPTXISD::Suld1DV2I8Zero";
+ case NVPTXISD::Suld1DV2I16Zero: return "NVPTXISD::Suld1DV2I16Zero";
+ case NVPTXISD::Suld1DV2I32Zero: return "NVPTXISD::Suld1DV2I32Zero";
+ case NVPTXISD::Suld1DV2I64Zero: return "NVPTXISD::Suld1DV2I64Zero";
+ case NVPTXISD::Suld1DV4I8Zero: return "NVPTXISD::Suld1DV4I8Zero";
+ case NVPTXISD::Suld1DV4I16Zero: return "NVPTXISD::Suld1DV4I16Zero";
+ case NVPTXISD::Suld1DV4I32Zero: return "NVPTXISD::Suld1DV4I32Zero";
+
+ case NVPTXISD::Suld1DArrayI8Zero: return "NVPTXISD::Suld1DArrayI8Zero";
+ case NVPTXISD::Suld1DArrayI16Zero: return "NVPTXISD::Suld1DArrayI16Zero";
+ case NVPTXISD::Suld1DArrayI32Zero: return "NVPTXISD::Suld1DArrayI32Zero";
+ case NVPTXISD::Suld1DArrayI64Zero: return "NVPTXISD::Suld1DArrayI64Zero";
+ case NVPTXISD::Suld1DArrayV2I8Zero: return "NVPTXISD::Suld1DArrayV2I8Zero";
+ case NVPTXISD::Suld1DArrayV2I16Zero: return "NVPTXISD::Suld1DArrayV2I16Zero";
+ case NVPTXISD::Suld1DArrayV2I32Zero: return "NVPTXISD::Suld1DArrayV2I32Zero";
+ case NVPTXISD::Suld1DArrayV2I64Zero: return "NVPTXISD::Suld1DArrayV2I64Zero";
+ case NVPTXISD::Suld1DArrayV4I8Zero: return "NVPTXISD::Suld1DArrayV4I8Zero";
+ case NVPTXISD::Suld1DArrayV4I16Zero: return "NVPTXISD::Suld1DArrayV4I16Zero";
+ case NVPTXISD::Suld1DArrayV4I32Zero: return "NVPTXISD::Suld1DArrayV4I32Zero";
+
+ case NVPTXISD::Suld2DI8Zero: return "NVPTXISD::Suld2DI8Zero";
+ case NVPTXISD::Suld2DI16Zero: return "NVPTXISD::Suld2DI16Zero";
+ case NVPTXISD::Suld2DI32Zero: return "NVPTXISD::Suld2DI32Zero";
+ case NVPTXISD::Suld2DI64Zero: return "NVPTXISD::Suld2DI64Zero";
+ case NVPTXISD::Suld2DV2I8Zero: return "NVPTXISD::Suld2DV2I8Zero";
+ case NVPTXISD::Suld2DV2I16Zero: return "NVPTXISD::Suld2DV2I16Zero";
+ case NVPTXISD::Suld2DV2I32Zero: return "NVPTXISD::Suld2DV2I32Zero";
+ case NVPTXISD::Suld2DV2I64Zero: return "NVPTXISD::Suld2DV2I64Zero";
+ case NVPTXISD::Suld2DV4I8Zero: return "NVPTXISD::Suld2DV4I8Zero";
+ case NVPTXISD::Suld2DV4I16Zero: return "NVPTXISD::Suld2DV4I16Zero";
+ case NVPTXISD::Suld2DV4I32Zero: return "NVPTXISD::Suld2DV4I32Zero";
+
+ case NVPTXISD::Suld2DArrayI8Zero: return "NVPTXISD::Suld2DArrayI8Zero";
+ case NVPTXISD::Suld2DArrayI16Zero: return "NVPTXISD::Suld2DArrayI16Zero";
+ case NVPTXISD::Suld2DArrayI32Zero: return "NVPTXISD::Suld2DArrayI32Zero";
+ case NVPTXISD::Suld2DArrayI64Zero: return "NVPTXISD::Suld2DArrayI64Zero";
+ case NVPTXISD::Suld2DArrayV2I8Zero: return "NVPTXISD::Suld2DArrayV2I8Zero";
+ case NVPTXISD::Suld2DArrayV2I16Zero: return "NVPTXISD::Suld2DArrayV2I16Zero";
+ case NVPTXISD::Suld2DArrayV2I32Zero: return "NVPTXISD::Suld2DArrayV2I32Zero";
+ case NVPTXISD::Suld2DArrayV2I64Zero: return "NVPTXISD::Suld2DArrayV2I64Zero";
+ case NVPTXISD::Suld2DArrayV4I8Zero: return "NVPTXISD::Suld2DArrayV4I8Zero";
+ case NVPTXISD::Suld2DArrayV4I16Zero: return "NVPTXISD::Suld2DArrayV4I16Zero";
+ case NVPTXISD::Suld2DArrayV4I32Zero: return "NVPTXISD::Suld2DArrayV4I32Zero";
+
+ case NVPTXISD::Suld3DI8Zero: return "NVPTXISD::Suld3DI8Zero";
+ case NVPTXISD::Suld3DI16Zero: return "NVPTXISD::Suld3DI16Zero";
+ case NVPTXISD::Suld3DI32Zero: return "NVPTXISD::Suld3DI32Zero";
+ case NVPTXISD::Suld3DI64Zero: return "NVPTXISD::Suld3DI64Zero";
+ case NVPTXISD::Suld3DV2I8Zero: return "NVPTXISD::Suld3DV2I8Zero";
+ case NVPTXISD::Suld3DV2I16Zero: return "NVPTXISD::Suld3DV2I16Zero";
+ case NVPTXISD::Suld3DV2I32Zero: return "NVPTXISD::Suld3DV2I32Zero";
+ case NVPTXISD::Suld3DV2I64Zero: return "NVPTXISD::Suld3DV2I64Zero";
+ case NVPTXISD::Suld3DV4I8Zero: return "NVPTXISD::Suld3DV4I8Zero";
+ case NVPTXISD::Suld3DV4I16Zero: return "NVPTXISD::Suld3DV4I16Zero";
+ case NVPTXISD::Suld3DV4I32Zero: return "NVPTXISD::Suld3DV4I32Zero";
+ }
+ return nullptr;
+}
+
+TargetLoweringBase::LegalizeTypeAction
+NVPTXTargetLowering::getPreferredVectorAction(EVT VT) const {
+ if (VT.getVectorNumElements() != 1 && VT.getScalarType() == MVT::i1)
+ return TypeSplitVector;
+ if (VT == MVT::v2f16)
+ return TypeLegal;
+ return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
+SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
+ int Enabled, int &ExtraSteps,
+ bool &UseOneConst,
+ bool Reciprocal) const {
+ if (!(Enabled == ReciprocalEstimate::Enabled ||
+ (Enabled == ReciprocalEstimate::Unspecified && !usePrecSqrtF32())))
+ return SDValue();
+
+ if (ExtraSteps == ReciprocalEstimate::Unspecified)
+ ExtraSteps = 0;
+
+ SDLoc DL(Operand);
+ EVT VT = Operand.getValueType();
+ bool Ftz = useF32FTZ(DAG.getMachineFunction());
+
+ auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
+ return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(IID, DL, MVT::i32), Operand);
+ };
+
+ // The sqrt and rsqrt refinement processes assume we always start out with an
+ // approximation of the rsqrt. Therefore, if we're going to do any refinement
+ // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
+ // any refinement, we must return a regular sqrt.
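+ // For example, with ExtraSteps == 1 on f32 we return rsqrt.approx.f32; the
+ // generic DAG combiner is then expected to perform the Newton-Raphson
+ // refinement and, for a non-reciprocal sqrt, multiply the result back by
+ // the original operand.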
+ if (Reciprocal || ExtraSteps > 0) {
+ if (VT == MVT::f32)
+ return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
+ : Intrinsic::nvvm_rsqrt_approx_f);
+ else if (VT == MVT::f64)
+ return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
+ else
+ return SDValue();
+ } else {
+ if (VT == MVT::f32)
+ return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
+ : Intrinsic::nvvm_sqrt_approx_f);
+ else {
+ // There's no sqrt.approx.f64 instruction, so we emit
+ // reciprocal(rsqrt(x)). This is faster than
+ // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
+ // x * rsqrt(x).)
+ return DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, DL, VT,
+ DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
+ MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
+ }
+ }
+}
+
+SDValue
+NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+ Op = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
+ return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
+}
+
+std::string NVPTXTargetLowering::getPrototype(
+ const DataLayout &DL, Type *retTy, const ArgListTy &Args,
+ const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
+ const ImmutableCallSite *CS) const {
+ auto PtrVT = getPointerTy(DL);
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return "";
+
+ std::stringstream O;
+ O << "prototype_" << uniqueCallSite << " : .callprototype ";
+
+ if (retTy->getTypeID() == Type::VoidTyID) {
+ O << "()";
+ } else {
+ O << "(";
+ if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
+ unsigned size = 0;
+ if (auto *ITy = dyn_cast<IntegerType>(retTy)) {
+ size = ITy->getBitWidth();
+ } else {
+ assert(retTy->isFloatingPointTy() &&
+ "Floating point type expected here");
+ size = retTy->getPrimitiveSizeInBits();
+ }
+ // PTX ABI requires all scalar return values to be at least 32
+ // bits in size. fp16 normally uses .b16 as its storage type in
+ // PTX, so its size must be adjusted here, too.
+ if (size < 32)
+ size = 32;
+
+ O << ".param .b" << size << " _";
+ } else if (isa<PointerType>(retTy)) {
+ O << ".param .b" << PtrVT.getSizeInBits() << " _";
+ } else if (retTy->isAggregateType() || retTy->isVectorTy()) {
+ auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
+ O << ".param .align " << retAlignment << " .b8 _["
+ << DL.getTypeAllocSize(retTy) << "]";
+ } else {
+ llvm_unreachable("Unknown return type");
+ }
+ O << ") ";
+ }
+ O << "_ (";
+
+ bool first = true;
+
+ unsigned OIdx = 0;
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ Type *Ty = Args[i].Ty;
+ if (!first) {
+ O << ", ";
+ }
+ first = false;
+
+ if (!Outs[OIdx].Flags.isByVal()) {
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ unsigned align = 0;
+ const CallInst *CallI = cast<CallInst>(CS->getInstruction());
+ // +1 because index 0 is reserved for return type alignment
+ if (!getAlign(*CallI, i + 1, align))
+ align = DL.getABITypeAlignment(Ty);
+ unsigned sz = DL.getTypeAllocSize(Ty);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ // update the index for Outs
+ SmallVector<EVT, 16> vtparts;
+ ComputeValueVTs(*this, DL, Ty, vtparts);
+ if (unsigned len = vtparts.size())
+ OIdx += len - 1;
+ continue;
+ }
+ // i8 types in IR will be i16 types in SDAG
+ assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
+ (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
+ "type mismatch between callee prototype and arguments");
+ // scalar type
+ unsigned sz = 0;
+ if (isa<IntegerType>(Ty)) {
+ sz = cast<IntegerType>(Ty)->getBitWidth();
+ if (sz < 32)
+ sz = 32;
+ } else if (isa<PointerType>(Ty)) {
+ sz = PtrVT.getSizeInBits();
+ } else if (Ty->isHalfTy())
+ // PTX ABI requires all scalar parameters to be at least 32
+ // bits in size. fp16 normally uses .b16 as its storage type
+ // in PTX, so its size must be adjusted here, too.
+ sz = 32;
+ else
+ sz = Ty->getPrimitiveSizeInBits();
+ O << ".param .b" << sz << " ";
+ O << "_";
+ continue;
+ }
+ auto *PTy = dyn_cast<PointerType>(Ty);
+ assert(PTy && "Param with byval attribute should be a pointer type");
+ Type *ETy = PTy->getElementType();
+
+ unsigned align = Outs[OIdx].Flags.getByValAlign();
+ unsigned sz = DL.getTypeAllocSize(ETy);
+ O << ".param .align " << align << " .b8 ";
+ O << "_";
+ O << "[" << sz << "]";
+ }
+ O << ");";
+ return O.str();
+}
+
+unsigned NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
+ const ImmutableCallSite *CS,
+ Type *Ty, unsigned Idx,
+ const DataLayout &DL) const {
+ if (!CS) {
+ // CallSite is zero, fall back to ABI type alignment
+ return DL.getABITypeAlignment(Ty);
+ }
+
+ unsigned Align = 0;
+ const Value *DirectCallee = CS->getCalledFunction();
+
+ if (!DirectCallee) {
+ // We don't have a direct function symbol, but that may be because of
+ // constant cast instructions in the call.
+ const Instruction *CalleeI = CS->getInstruction();
+ assert(CalleeI && "Call target is not a function or derived value?");
+
+ // With bitcast'd call targets, the instruction will be the call
+ if (isa<CallInst>(CalleeI)) {
+ // Check if we have call alignment metadata
+ if (getAlign(*cast<CallInst>(CalleeI), Idx, Align))
+ return Align;
+
+ const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
+ // Ignore any bitcast instructions
+ while (isa<ConstantExpr>(CalleeV)) {
+ const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
+ if (!CE->isCast())
+ break;
+ // Look through the bitcast
+ CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
+ }
+
+ // We have now looked past all of the bitcasts. Do we finally have a
+ // Function?
+ if (isa<Function>(CalleeV))
+ DirectCallee = CalleeV;
+ }
+ }
+
+ // Check for function alignment information if we found that the
+ // ultimate target is a Function
+ if (DirectCallee)
+ if (getAlign(*cast<Function>(DirectCallee), Idx, Align))
+ return Align;
+
+ // Call is indirect or alignment information is not available, fall back to
+ // the ABI type alignment
+ return DL.getABITypeAlignment(Ty);
+}
+
+SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ ArgListTy &Args = CLI.getArgs();
+ Type *RetTy = CLI.RetTy;
+ ImmutableCallSite *CS = CLI.CS;
+ const DataLayout &DL = DAG.getDataLayout();
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ SDValue tempChain = Chain;
+ Chain = DAG.getCALLSEQ_START(Chain, uniqueCallSite, 0, dl);
+ SDValue InFlag = Chain.getValue(1);
+
+ unsigned paramCount = 0;
+ // Args.size() and Outs.size() need not match.
+ // Outs.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Outs)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Outs.
+ // So a different index should be used for indexing into Outs/OutVals.
+ // See similar issue in LowerFormalArguments.
+ unsigned OIdx = 0;
+ // Declare the .params or .reg needed to pass values
+ // to the function
+ for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
+ EVT VT = Outs[OIdx].VT;
+ Type *Ty = Args[i].Ty;
+
+ if (!Outs[OIdx].Flags.isByVal()) {
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets);
+ unsigned ArgAlign =
+ getArgumentAlignment(Callee, CS, Ty, paramCount + 1, DL);
+ unsigned AllocSize = DL.getTypeAllocSize(Ty);
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ bool NeedAlign; // Does argument declaration specify alignment?
+ if (Ty->isAggregateType() || Ty->isVectorTy()) {
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ SDValue DeclareParamOps[] = {
+ Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(AllocSize, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps);
+ NeedAlign = true;
+ } else {
+ // declare .param .b<size> .param<n>;
+ if ((VT.isInteger() || VT.isFloatingPoint()) && AllocSize < 4) {
+ // PTX ABI requires integral types to be at least 32 bits in
+ // size. FP16 is loaded/stored using i16, so it's handled
+ // here as well.
+ AllocSize = 4;
+ }
+ SDValue DeclareScalarParamOps[] = {
+ Chain, DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(AllocSize * 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
+ DeclareScalarParamOps);
+ NeedAlign = false;
+ }
+ InFlag = Chain.getValue(1);
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
+ // than 32-bits are sign extended or zero extended, depending on
+ // whether they are signed or unsigned types. This case applies
+ // only to scalar parameters and not to aggregate values.
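+ // For example, an i8 scalar argument is declared as .param .b32 above, and
+ // the value stored into it below is first sign- or zero-extended to i32.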
+ bool ExtendIntegerParam =
+ Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Ty) < 32;
+
+ auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
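+ // For instance, a sufficiently aligned <4 x float> argument is typically
+ // marked as a single PVF_FIRST..PVF_LAST group here and emitted below as
+ // one StoreParamV4 covering all four elements.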
+ SmallVector<SDValue, 6> StoreOperands;
+ for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+ // New store.
+ if (VectorInfo[j] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Unfinished preceding store.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(paramCount, dl, MVT::i32));
+ StoreOperands.push_back(DAG.getConstant(Offsets[j], dl, MVT::i32));
+ }
+
+ EVT EltVT = VTs[j];
+ SDValue StVal = OutVals[OIdx];
+ if (ExtendIntegerParam) {
+ assert(VTs.size() == 1 && "Scalar can't have multiple parts.");
+ // zext/sext to i32
+ StVal = DAG.getNode(Outs[OIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND,
+ dl, MVT::i32, StVal);
+ } else if (EltVT.getSizeInBits() < 16) {
+ // Use 16-bit registers for small stores as it's the
+ // smallest general purpose register size supported by NVPTX.
+ StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
+ }
+
+ // Record the value to store.
+ StoreOperands.push_back(StVal);
+
+ if (VectorInfo[j] & PVF_LAST) {
+ unsigned NumElts = StoreOperands.size() - 3;
+ NVPTXISD::NodeType Op;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::StoreParam;
+ break;
+ case 2:
+ Op = NVPTXISD::StoreParamV2;
+ break;
+ case 4:
+ Op = NVPTXISD::StoreParamV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ StoreOperands.push_back(InFlag);
+
+ // Adjust type of the store op if we've extended the scalar
+ // parameter value.
+ EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : VTs[j];
+ unsigned EltAlign =
+ NeedAlign ? GreatestCommonDivisor64(ArgAlign, Offsets[j]) : 0;
+
+ Chain = DAG.getMemIntrinsicNode(
+ Op, dl, DAG.getVTList(MVT::Other, MVT::Glue), StoreOperands,
+ TheStoreType, MachinePointerInfo(), EltAlign,
+ /* Volatile */ false, /* ReadMem */ false,
+ /* WriteMem */ true, /* Size */ 0);
+ InFlag = Chain.getValue(1);
+
+ // Cleanup.
+ StoreOperands.clear();
+ }
+ ++OIdx;
+ }
+ assert(StoreOperands.empty() && "Unfinished parameter store.");
+ if (VTs.size() > 0)
+ --OIdx;
+ ++paramCount;
+ continue;
+ }
+
+ // ByVal arguments
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ auto *PTy = dyn_cast<PointerType>(Args[i].Ty);
+ assert(PTy && "Type of a byval parameter should be pointer");
+ ComputePTXValueVTs(*this, DL, PTy->getElementType(), VTs, &Offsets, 0);
+
+ // declare .param .align <align> .b8 .param<n>[<size>];
+ unsigned sz = Outs[OIdx].Flags.getByValSize();
+ SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
+ // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
+ // so we don't need to worry about natural alignment here.
+ // See TargetLowering::LowerCallTo().
+
+ // Enforce minimum alignment of 4 to work around ptxas miscompile
+ // for sm_50+. See corresponding alignment adjustment in
+ // emitFunctionParamList() for details.
+ if (ArgAlign < 4)
+ ArgAlign = 4;
+ SDValue DeclareParamOps[] = {Chain, DAG.getConstant(ArgAlign, dl, MVT::i32),
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(sz, dl, MVT::i32), InFlag};
+ Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
+ DeclareParamOps);
+ InFlag = Chain.getValue(1);
+ for (unsigned j = 0, je = VTs.size(); j != je; ++j) {
+ EVT elemtype = VTs[j];
+ int curOffset = Offsets[j];
+ unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
+ auto PtrVT = getPointerTy(DL);
+ SDValue srcAddr = DAG.getNode(ISD::ADD, dl, PtrVT, OutVals[OIdx],
+ DAG.getConstant(curOffset, dl, PtrVT));
+ SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
+ MachinePointerInfo(), PartAlign);
+ if (elemtype.getSizeInBits() < 16) {
+ theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
+ }
+ SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CopyParamOps[] = { Chain,
+ DAG.getConstant(paramCount, dl, MVT::i32),
+ DAG.getConstant(curOffset, dl, MVT::i32),
+ theVal, InFlag };
+ Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
+ CopyParamOps, elemtype,
+ MachinePointerInfo(), /* Align */ 0,
+ /* Volatile */ false, /* ReadMem */ false,
+ /* WriteMem */ true, /* Size */ 0);
+
+ InFlag = Chain.getValue(1);
+ }
+ ++paramCount;
+ }
+
+ GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
+ unsigned retAlignment = 0;
+
+ // Handle Result
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> resvtparts;
+ ComputeValueVTs(*this, DL, RetTy, resvtparts);
+
+ // Declare
+ // .param .align 16 .b8 retval0[<size-in-bytes>], or
+ // .param .b<size-in-bits> retval0
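+ // For example, an i32 return is declared as ".param .b32 retval0", while an
+ // aggregate return might be declared as ".param .align 4 .b8 retval0[12]"
+ // (the exact size and alignment shown here are illustrative).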
+ unsigned resultsz = DL.getTypeAllocSizeInBits(RetTy);
+ // Emit ".param .b<size-in-bits> retval0" instead of byte arrays only for
+ // these three types to match the logic in
+ // NVPTXAsmPrinter::printReturnValStr and NVPTXTargetLowering::getPrototype.
+ // Plus, this behavior is consistent with nvcc's.
+ if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy() ||
+ RetTy->isPointerTy()) {
+ // Scalar needs to be at least 32bit wide
+ if (resultsz < 32)
+ resultsz = 32;
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(resultsz, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
+ DeclareRetOps);
+ InFlag = Chain.getValue(1);
+ } else {
+ retAlignment = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue DeclareRetOps[] = { Chain,
+ DAG.getConstant(retAlignment, dl, MVT::i32),
+ DAG.getConstant(resultsz / 8, dl, MVT::i32),
+ DAG.getConstant(0, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
+ DeclareRetOps);
+ InFlag = Chain.getValue(1);
+ }
+ }
+
+ if (!Func) {
+ // This is the indirect function call case: PTX requires a prototype of the
+ // form
+ // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
+ // to be emitted, and the label has to be used as the last argument of the
+ // call instruction.
+ // The prototype is embedded in a string and put as the operand for a
+ // CallPrototype SDNode which will print out to the value of the string.
+ SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ std::string Proto = getPrototype(DL, RetTy, Args, Outs, retAlignment, CS);
+ const char *ProtoStr =
+ nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
+ SDValue ProtoOps[] = {
+ Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
+ };
+ Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
+ InFlag = Chain.getValue(1);
+ }
+ // Op to just print "call"
+ SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrintCallOps[] = {
+ Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, dl, MVT::i32), InFlag
+ };
+ // We model convergent calls as separate opcodes.
+ unsigned Opcode = Func ? NVPTXISD::PrintCallUni : NVPTXISD::PrintCall;
+ if (CLI.IsConvergent)
+ Opcode = Opcode == NVPTXISD::PrintCallUni ? NVPTXISD::PrintConvergentCallUni
+ : NVPTXISD::PrintConvergentCall;
+ Chain = DAG.getNode(Opcode, dl, PrintCallVTs, PrintCallOps);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the function name
+ SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallVoidOps[] = { Chain, Callee, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
+ InFlag = Chain.getValue(1);
+
+ // Ops to print out the param list
+ SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgBeginOps[] = { Chain, InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
+ CallArgBeginOps);
+ InFlag = Chain.getValue(1);
+
+ for (unsigned i = 0, e = paramCount; i != e; ++i) {
+ unsigned opcode;
+ if (i == (e - 1))
+ opcode = NVPTXISD::LastCallArg;
+ else
+ opcode = NVPTXISD::CallArg;
+ SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgOps[] = { Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(i, dl, MVT::i32), InFlag };
+ Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
+ InFlag = Chain.getValue(1);
+ }
+ SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue CallArgEndOps[] = { Chain,
+ DAG.getConstant(Func ? 1 : 0, dl, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
+ InFlag = Chain.getValue(1);
+
+ if (!Func) {
+ SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
+ SDValue PrototypeOps[] = { Chain,
+ DAG.getConstant(uniqueCallSite, dl, MVT::i32),
+ InFlag };
+ Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
+ InFlag = Chain.getValue(1);
+ }
+
+ // Generate loads from param memory/moves from registers for result
+ if (Ins.size() > 0) {
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets, 0);
+ assert(VTs.size() == Ins.size() && "Bad value decomposition");
+
+ unsigned RetAlign = getArgumentAlignment(Callee, CS, RetTy, 0, DL);
+ auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
+
+ SmallVector<EVT, 6> LoadVTs;
+ int VecIdx = -1; // Index of the first element of the vector.
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+ // 32-bits are sign extended or zero extended, depending on whether
+ // they are signed or unsigned types.
+ bool ExtendIntegerRetVal =
+ RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ bool needTruncate = false;
+ EVT TheLoadType = VTs[i];
+ EVT EltType = Ins[i].VT;
+ unsigned EltAlign = GreatestCommonDivisor64(RetAlign, Offsets[i]);
+ if (ExtendIntegerRetVal) {
+ TheLoadType = MVT::i32;
+ EltType = MVT::i32;
+ needTruncate = true;
+ } else if (TheLoadType.getSizeInBits() < 16) {
+ if (VTs[i].isInteger())
+ needTruncate = true;
+ EltType = MVT::i16;
+ }
+
+ // Record index of the very first element of the vector.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(VecIdx == -1 && LoadVTs.empty() && "Orphaned operand list.");
+ VecIdx = i;
+ }
+
+ LoadVTs.push_back(EltType);
+
+ if (VectorInfo[i] & PVF_LAST) {
+ unsigned NumElts = LoadVTs.size();
+ LoadVTs.push_back(MVT::Other);
+ LoadVTs.push_back(MVT::Glue);
+ NVPTXISD::NodeType Op;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::LoadParam;
+ break;
+ case 2:
+ Op = NVPTXISD::LoadParamV2;
+ break;
+ case 4:
+ Op = NVPTXISD::LoadParamV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ SDValue LoadOperands[] = {
+ Chain, DAG.getConstant(1, dl, MVT::i32),
+ DAG.getConstant(Offsets[VecIdx], dl, MVT::i32), InFlag};
+ SDValue RetVal = DAG.getMemIntrinsicNode(
+ Op, dl, DAG.getVTList(LoadVTs), LoadOperands, TheLoadType,
+ MachinePointerInfo(), EltAlign, /* Volatile */ false,
+ /* ReadMem */ true, /* WriteMem */ false, /* Size */ 0);
+
+ for (unsigned j = 0; j < NumElts; ++j) {
+ SDValue Ret = RetVal.getValue(j);
+ if (needTruncate)
+ Ret = DAG.getNode(ISD::TRUNCATE, dl, Ins[VecIdx + j].VT, Ret);
+ InVals.push_back(Ret);
+ }
+ Chain = RetVal.getValue(NumElts);
+ InFlag = RetVal.getValue(NumElts + 1);
+
+ // Cleanup
+ VecIdx = -1;
+ LoadVTs.clear();
+ }
+ }
+ }
+
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(uniqueCallSite, dl, true),
+ DAG.getIntPtrConstant(uniqueCallSite + 1, dl,
+ true),
+ InFlag, dl);
+ uniqueCallSite++;
+
+ // set isTailCall to false for now, until we figure out how to express
+ // tail call optimization in PTX
+ isTailCall = false;
+ return Chain;
+}
+
+// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
+// (see LegalizeDAG.cpp). This is slow and uses local memory.
+ // We use extract/insert/build vector, just as LegalizeOp() does in LLVM 2.5.
+SDValue
+NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ SDLoc dl(Node);
+ SmallVector<SDValue, 8> Ops;
+ unsigned NumOperands = Node->getNumOperands();
+ for (unsigned i = 0; i < NumOperands; ++i) {
+ SDValue SubOp = Node->getOperand(i);
+ EVT VVT = SubOp.getNode()->getValueType(0);
+ EVT EltVT = VVT.getVectorElementType();
+ unsigned NumSubElem = VVT.getVectorNumElements();
+ for (unsigned j = 0; j < NumSubElem; ++j) {
+ Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
+ DAG.getIntPtrConstant(j, dl)));
+ }
+ }
+ return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
+}
+
+ // We can initialize a constant f16x2 with a single .b32 move. Normally it
+ // would get lowered as two constant loads and a vector-packing move.
+// mov.b16 %h1, 0x4000;
+// mov.b16 %h2, 0x3C00;
+// mov.b32 %hh2, {%h2, %h1};
+// Instead we want just a constant move:
+// mov.b32 %hh2, 0x40003C00
+//
+// This results in better SASS code with CUDA 7.x. Ptxas in CUDA 8.0
+// generates good SASS in both cases.
+SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
+ SelectionDAG &DAG) const {
+ if (!(Op->getValueType(0) == MVT::v2f16 &&
+ isa<ConstantFPSDNode>(Op->getOperand(0)) &&
+ isa<ConstantFPSDNode>(Op->getOperand(1))))
+ return Op;
+
+ APInt E0 =
+ cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
+ APInt E1 =
+ cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
+ SDValue Const =
+ DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
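+ // For instance, the constant vector <half 1.0, half 2.0> gives E0 = 0x3C00
+ // and E1 = 0x4000, which pack into the single i32 0x40003C00, matching the
+ // mov.b32 shown in the comment above.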
+ return DAG.getNode(ISD::BITCAST, SDLoc(Op), MVT::v2f16, Const);
+}
+
+SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDValue Index = Op->getOperand(1);
+ // Constant index will be matched by tablegen.
+ if (isa<ConstantSDNode>(Index.getNode()))
+ return Op;
+
+ // Extract individual elements and select one of them.
+ SDValue Vector = Op->getOperand(0);
+ EVT VectorVT = Vector.getValueType();
+ assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
+ EVT EltVT = VectorVT.getVectorElementType();
+
+ SDLoc dl(Op.getNode());
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
+ DAG.getIntPtrConstant(0, dl));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
+ DAG.getIntPtrConstant(1, dl));
+ return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
+ ISD::CondCode::SETEQ);
+}
+
+ /// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which
+ /// 1) return two i32 values and take a 2 x i32 value to shift plus a shift
+ /// amount, or
+ /// 2) return two i64 values and take a 2 x i64 value to shift plus a shift
+ /// amount.
+SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
+
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+ unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
+
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
+ // For 32-bit shifts on sm_35 and later, we can use the funnel shift 'shf' instruction.
+ // {dHi, dLo} = {aHi, aLo} >> Amt
+ // dHi = aHi >> Amt
+ // dLo = shf.r.clamp aLo, aHi, Amt
+
+ SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
+ ShAmt);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+ else {
+ // {dHi, dLo} = {aHi, aLo} >> Amt
+ // - if (Amt>=size) then
+ // dLo = aHi >> (Amt-size)
+ // dHi = aHi >> Amt (this is either all 0 or all 1)
+ // else
+ // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
+ // dHi = aHi >> Amt
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32));
+ SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
+
+ SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ISD::SETGE);
+ SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
+ SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+}
+
+ /// LowerShiftLeftParts - Lower SHL_PARTS, which
+ /// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
+ /// amount, or
+ /// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
+ /// amount.
+SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+ assert(Op.getOpcode() == ISD::SHL_PARTS);
+
+ EVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
+ SDLoc dl(Op);
+ SDValue ShOpLo = Op.getOperand(0);
+ SDValue ShOpHi = Op.getOperand(1);
+ SDValue ShAmt = Op.getOperand(2);
+
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
+ // For 32-bit shifts on sm_35 and later, we can use the funnel shift 'shf' instruction.
+ // {dHi, dLo} = {aHi, aLo} << Amt
+ // dHi = shf.l.clamp aLo, aHi, Amt
+ // dLo = aLo << Amt
+
+ SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
+ ShAmt);
+ SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+ else {
+ // {dHi, dLo} = {aHi, aLo} << Amt
+ // - if (Amt>=size) then
+ //    dLo = aLo << Amt (all 0)
+ //    dHi = aLo << (Amt-size)
+ // else
+ // dLo = aLo << Amt
+ // dHi = (aHi << Amt) | (aLo >> (size-Amt))
+
+ SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ShAmt);
+ SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+ SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32));
+ SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+ SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+ SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
+ SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
+ DAG.getConstant(VTBits, dl, MVT::i32),
+ ISD::SETGE);
+ SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+ SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
+
+ SDValue Ops[2] = { Lo, Hi };
+ return DAG.getMergeValues(Ops, dl);
+ }
+}
+
+SDValue
+NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+ switch (Op.getOpcode()) {
+ case ISD::RETURNADDR:
+ return SDValue();
+ case ISD::FRAMEADDR:
+ return SDValue();
+ case ISD::GlobalAddress:
+ return LowerGlobalAddress(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN:
+ return Op;
+ case ISD::BUILD_VECTOR:
+ return LowerBUILD_VECTOR(Op, DAG);
+ case ISD::EXTRACT_SUBVECTOR:
+ return Op;
+ case ISD::EXTRACT_VECTOR_ELT:
+ return LowerEXTRACT_VECTOR_ELT(Op, DAG);
+ case ISD::CONCAT_VECTORS:
+ return LowerCONCAT_VECTORS(Op, DAG);
+ case ISD::STORE:
+ return LowerSTORE(Op, DAG);
+ case ISD::LOAD:
+ return LowerLOAD(Op, DAG);
+ case ISD::SHL_PARTS:
+ return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ case ISD::SRL_PARTS:
+ return LowerShiftRightParts(Op, DAG);
+ case ISD::SELECT:
+ return LowerSelect(Op, DAG);
+ default:
+ llvm_unreachable("Custom lowering not defined for operation");
+ }
+}
+
+SDValue NVPTXTargetLowering::LowerSelect(SDValue Op, SelectionDAG &DAG) const {
+ SDValue Op0 = Op->getOperand(0);
+ SDValue Op1 = Op->getOperand(1);
+ SDValue Op2 = Op->getOperand(2);
+ SDLoc DL(Op.getNode());
+
+ assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
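+ // PTX's selp instruction does not operate on predicate (i1) registers, which
+ // is presumably why i1 selects are widened here: extend both values to i32,
+ // select in a 32-bit register, then truncate the result back to i1.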
+
+ Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
+ Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
+ SDValue Select = DAG.getNode(ISD::SELECT, DL, MVT::i32, Op0, Op1, Op2);
+ SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
+
+ return Trunc;
+}
+
+SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
+ if (Op.getValueType() == MVT::i1)
+ return LowerLOADi1(Op, DAG);
+
+ // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
+ // loads; we have to handle them here.
+ if (Op.getValueType() == MVT::v2f16) {
+ LoadSDNode *Load = cast<LoadSDNode>(Op);
+ EVT MemVT = Load->getMemoryVT();
+ if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
+ Load->getAddressSpace(), Load->getAlignment())) {
+ SDValue Ops[2];
+ std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
+ return DAG.getMergeValues(Ops, SDLoc(Op));
+ }
+ }
+
+ return SDValue();
+}
+
+// v = ld i1* addr
+// =>
+// v1 = ld i8* addr (-> i16)
+// v = trunc i16 to i1
+SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ LoadSDNode *LD = cast<LoadSDNode>(Node);
+ SDLoc dl(Node);
+ assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
+ assert(Node->getValueType(0) == MVT::i1 &&
+ "Custom lowering for i1 load only");
+ SDValue newLD = DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
+ LD->getPointerInfo(), LD->getAlignment(),
+ LD->getMemOperand()->getFlags());
+ SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
+ // The legalizer (the caller) is expecting two values from the legalized
+ // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
+ // in LegalizeDAG.cpp which also uses MergeValues.
+ SDValue Ops[] = { result, LD->getChain() };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
+ StoreSDNode *Store = cast<StoreSDNode>(Op);
+ EVT VT = Store->getMemoryVT();
+
+ if (VT == MVT::i1)
+ return LowerSTOREi1(Op, DAG);
+
+ // v2f16 is legal, so we can't rely on the legalizer to handle unaligned
+ // stores; we have to handle them here.
+ if (VT == MVT::v2f16 &&
+ !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+ Store->getAddressSpace(), Store->getAlignment()))
+ return expandUnalignedStore(Store, DAG);
+
+ if (VT.isVector())
+ return LowerSTOREVector(Op, DAG);
+
+ return SDValue();
+}
+
+SDValue
+NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *N = Op.getNode();
+ SDValue Val = N->getOperand(1);
+ SDLoc DL(N);
+ EVT ValVT = Val.getValueType();
+
+ if (ValVT.isVector()) {
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 stores of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ if (!ValVT.isSimple())
+ return SDValue();
+ switch (ValVT.getSimpleVT().SimpleTy) {
+ default:
+ return SDValue();
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ MemSDNode *MemSD = cast<MemSDNode>(N);
+ const DataLayout &TD = DAG.getDataLayout();
+
+ unsigned Align = MemSD->getAlignment();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This store is not sufficiently aligned, so bail out and let this vector
+ // store be scalarized. Note that we may still be able to emit smaller
+ // vector stores. For example, if we are storing a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return SDValue();
+ }
+
+ unsigned Opcode = 0;
+ EVT EltVT = ValVT.getVectorElementType();
+ unsigned NumElts = ValVT.getVectorNumElements();
+
+ // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // stored type to i16 and propagate the "real" type as the memory type.
+ bool NeedExt = false;
+ if (EltVT.getSizeInBits() < 16)
+ NeedExt = true;
+
+ bool StoreF16x2 = false;
+ switch (NumElts) {
+ default:
+ return SDValue();
+ case 2:
+ Opcode = NVPTXISD::StoreV2;
+ break;
+ case 4:
+ Opcode = NVPTXISD::StoreV4;
+ break;
+ case 8:
+ // v8f16 is a special case. PTX doesn't have an st.v8.f16
+ // instruction. Instead, we split the vector into v2f16 chunks and
+ // store them with st.v4.b32.
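+ // In other words, an <8 x half> store ends up as roughly
+ //   st.v4.b32 [addr], {h01, h23, h45, h67};
+ // where each operand is a packed f16x2 pair (illustrative PTX).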
+ assert(EltVT == MVT::f16 && "Wrong type for the vector.");
+ Opcode = NVPTXISD::StoreV4;
+ StoreF16x2 = true;
+ break;
+ }
+
+ SmallVector<SDValue, 8> Ops;
+
+ // First is the chain
+ Ops.push_back(N->getOperand(0));
+
+ if (StoreF16x2) {
+ // Combine f16,f16 -> v2f16
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+ DAG.getIntPtrConstant(i * 2, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Val,
+ DAG.getIntPtrConstant(i * 2 + 1, DL));
+ SDValue V2 = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2f16, E0, E1);
+ Ops.push_back(V2);
+ }
+ } else {
+ // Then the split values
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
+ DAG.getIntPtrConstant(i, DL));
+ if (NeedExt)
+ ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
+ Ops.push_back(ExtVal);
+ }
+ }
+
+ // Then any remaining arguments
+ Ops.append(N->op_begin() + 2, N->op_end());
+
+ SDValue NewSt =
+ DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
+ MemSD->getMemoryVT(), MemSD->getMemOperand());
+
+ return NewSt;
+ }
+
+ return SDValue();
+}
+
+// st i1 v, addr
+// =>
+// v1 = zxt v to i16
+// st.u8 i16, addr
+SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
+ SDNode *Node = Op.getNode();
+ SDLoc dl(Node);
+ StoreSDNode *ST = cast<StoreSDNode>(Node);
+ SDValue Tmp1 = ST->getChain();
+ SDValue Tmp2 = ST->getBasePtr();
+ SDValue Tmp3 = ST->getValue();
+ assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
+ Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
+ SDValue Result =
+ DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
+ ST->getAlignment(), ST->getMemOperand()->getFlags());
+ return Result;
+}
+
+SDValue
+NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
+ std::string ParamSym;
+ raw_string_ostream ParamStr(ParamSym);
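+ // The symbol has the form "<function-name>_param_<idx>"; e.g. a hypothetical
+ // kernel "foo" gets "foo_param_0", "foo_param_1", and so on.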
+
+ ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
+ ParamStr.flush();
+
+ std::string *SavedStr =
+ nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
+ return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
+}
+
+// Check to see if the kernel argument is image*_t or sampler_t
+
+static bool isImageOrSamplerVal(const Value *arg, const Module *context) {
+ static const char *const specialTypes[] = { "struct._image2d_t",
+ "struct._image3d_t",
+ "struct._sampler_t" };
+
+ Type *Ty = arg->getType();
+ auto *PTy = dyn_cast<PointerType>(Ty);
+
+ if (!PTy)
+ return false;
+
+ if (!context)
+ return false;
+
+ auto *STy = dyn_cast<StructType>(PTy->getElementType());
+ if (!STy || STy->isLiteral())
+ return false;
+
+ return std::find(std::begin(specialTypes), std::end(specialTypes),
+ STy->getName()) != std::end(specialTypes);
+}
+
+SDValue NVPTXTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ const DataLayout &DL = DAG.getDataLayout();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ const Function *F = MF.getFunction();
+ const AttributeList &PAL = F->getAttributes();
+ const TargetLowering *TLI = STI.getTargetLowering();
+
+ SDValue Root = DAG.getRoot();
+ std::vector<SDValue> OutChains;
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ std::vector<Type *> argTypes;
+ std::vector<const Argument *> theArgs;
+ for (const Argument &I : F->args()) {
+ theArgs.push_back(&I);
+ argTypes.push_back(I.getType());
+ }
+ // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
+ // Ins.size() will be larger
+ // * if there is an aggregate argument with multiple fields (each field
+ // showing up separately in Ins)
+ // * if there is a vector argument with more than typical vector-length
+ // elements (generally if more than 4) where each vector element is
+ // individually present in Ins.
+ // So a different index should be used for indexing into Ins.
+ // See similar issue in LowerCall.
+ unsigned InsIdx = 0;
+
+ int idx = 0;
+ for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
+ Type *Ty = argTypes[i];
+
+ // If the kernel argument is image*_t or sampler_t, convert it to
+ // an i32 constant holding the parameter position. This can later be
+ // matched in the AsmPrinter to output the correct mangled name.
+ if (isImageOrSamplerVal(
+ theArgs[i],
+ (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
+ : nullptr))) {
+ assert(isKernelFunction(*F) &&
+ "Only kernels can have image/sampler params");
+ InVals.push_back(DAG.getConstant(i + 1, dl, MVT::i32));
+ continue;
+ }
+
+ if (theArgs[i]->use_empty()) {
+ // argument is dead
+ if (Ty->isAggregateType()) {
+ SmallVector<EVT, 16> vtparts;
+
+ ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
+ assert(vtparts.size() > 0 && "empty aggregate type not expected");
+ for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
+ ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ if (vtparts.size() > 0)
+ --InsIdx;
+ continue;
+ }
+ if (Ty->isVectorTy()) {
+ EVT ObjectVT = getValueType(DL, Ty);
+ unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
+ for (unsigned parti = 0; parti < NumRegs; ++parti) {
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ ++InsIdx;
+ }
+ if (NumRegs > 0)
+ --InsIdx;
+ continue;
+ }
+ InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
+ continue;
+ }
+
+ // In the following cases, assign a node order of "idx+1"
+ // to newly created nodes. The SDNodes for params have to
+ // appear in the same order as the parameters appear in the
+ // original function; "idx+1" preserves that order.
+ if (!PAL.hasParamAttribute(i, Attribute::ByVal)) {
+ bool aggregateIsPacked = false;
+ if (StructType *STy = dyn_cast<StructType>(Ty))
+ aggregateIsPacked = STy->isPacked();
+
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, Ty, VTs, &Offsets, 0);
+ assert(VTs.size() > 0 && "Unexpected empty type.");
+ auto VectorInfo =
+ VectorizePTXValueVTs(VTs, Offsets, DL.getABITypeAlignment(Ty));
+
+ SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+ int VecIdx = -1; // Index of the first element of the current vector.
+ for (unsigned parti = 0, parte = VTs.size(); parti != parte; ++parti) {
+ if (VectorInfo[parti] & PVF_FIRST) {
+ assert(VecIdx == -1 && "Orphaned vector.");
+ VecIdx = parti;
+ }
+
+ // That's the last element of this store op.
+ if (VectorInfo[parti] & PVF_LAST) {
+ unsigned NumElts = parti - VecIdx + 1;
+ EVT EltVT = VTs[parti];
+ // i1 is loaded/stored as i8.
+ EVT LoadVT = EltVT;
+ if (EltVT == MVT::i1)
+ LoadVT = MVT::i8;
+ else if (EltVT == MVT::v2f16)
+ // getLoad needs a vector type, but it can't handle
+ // vectors which contain v2f16 elements. So we must load
+ // using i32 here and then bitcast back.
+ LoadVT = MVT::i32;
+
+ EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
+ SDValue VecAddr =
+ DAG.getNode(ISD::ADD, dl, PtrVT, Arg,
+ DAG.getConstant(Offsets[VecIdx], dl, PtrVT));
+ Value *srcValue = Constant::getNullValue(PointerType::get(
+ EltVT.getTypeForEVT(F->getContext()), ADDRESS_SPACE_PARAM));
+ SDValue P =
+ DAG.getLoad(VecVT, dl, Root, VecAddr,
+ MachinePointerInfo(srcValue), aggregateIsPacked,
+ MachineMemOperand::MODereferenceable |
+ MachineMemOperand::MOInvariant);
+ if (P.getNode())
+ P.getNode()->setIROrder(idx + 1);
+ for (unsigned j = 0; j < NumElts; ++j) {
+ SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
+ DAG.getIntPtrConstant(j, dl));
+ // We've loaded i1 as an i8 and now must truncate it back to i1
+ if (EltVT == MVT::i1)
+ Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
+ // v2f16 was loaded as an i32. Now we must bitcast it back.
+ else if (EltVT == MVT::v2f16)
+ Elt = DAG.getNode(ISD::BITCAST, dl, MVT::v2f16, Elt);
+ // Extend the element if necessary (e.g. an i8 is loaded
+ // into an i16 register)
+ if (Ins[InsIdx].VT.isInteger() &&
+ Ins[InsIdx].VT.getSizeInBits() > LoadVT.getSizeInBits()) {
+ unsigned Extend = Ins[InsIdx].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND;
+ Elt = DAG.getNode(Extend, dl, Ins[InsIdx].VT, Elt);
+ }
+ InVals.push_back(Elt);
+ }
+
+ // Reset vector tracking state.
+ VecIdx = -1;
+ }
+ ++InsIdx;
+ }
+ if (VTs.size() > 0)
+ --InsIdx;
+ continue;
+ }
+
+ // Param has ByVal attribute
+ // Return MoveParam(param symbol).
+ // Ideally, the param symbol could be returned directly,
+ // but when the SDNode builder decides to use it in a CopyToReg(),
+ // the resulting machine instruction fails because a TargetExternalSymbol
+ // (not lowered) is target dependent, and CopyToReg assumes
+ // the source is lowered.
+ EVT ObjectVT = getValueType(DL, Ty);
+ assert(ObjectVT == Ins[InsIdx].VT &&
+ "Ins type did not match function type");
+ SDValue Arg = getParamSymbol(DAG, idx, PtrVT);
+ SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
+ if (p.getNode())
+ p.getNode()->setIROrder(idx + 1);
+ InVals.push_back(p);
+ }
+
+ // Clang will check for explicit varargs and issue an error if any are
+ // present. However, it will let code with an implicit vararg declaration
+ // like f() pass (see bug 617733). We treat this case as if the argument
+ // list is empty.
+ // if (F.isVarArg()) {
+ //   assert(0 && "VarArg not supported yet!");
+ // }
+
+ if (!OutChains.empty())
+ DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
+
+ return Chain;
+}
+
+SDValue
+NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ Type *RetTy = MF.getFunction()->getReturnType();
+
+ bool isABI = (STI.getSmVersion() >= 20);
+ assert(isABI && "Non-ABI compilation is not supported");
+ if (!isABI)
+ return Chain;
+
+ const DataLayout DL = DAG.getDataLayout();
+ SmallVector<EVT, 16> VTs;
+ SmallVector<uint64_t, 16> Offsets;
+ ComputePTXValueVTs(*this, DL, RetTy, VTs, &Offsets);
+ assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
+
+ auto VectorInfo = VectorizePTXValueVTs(
+ VTs, Offsets, RetTy->isSized() ? DL.getABITypeAlignment(RetTy) : 1);
+
+ // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
+ // 32-bits are sign extended or zero extended, depending on whether
+ // they are signed or unsigned types.
+ bool ExtendIntegerRetVal =
+ RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
+
+ SmallVector<SDValue, 6> StoreOperands;
+ for (unsigned i = 0, e = VTs.size(); i != e; ++i) {
+ // New load/store. Record chain and offset operands.
+ if (VectorInfo[i] & PVF_FIRST) {
+ assert(StoreOperands.empty() && "Orphaned operand list.");
+ StoreOperands.push_back(Chain);
+ StoreOperands.push_back(DAG.getConstant(Offsets[i], dl, MVT::i32));
+ }
+
+ SDValue RetVal = OutVals[i];
+ if (ExtendIntegerRetVal) {
+ RetVal = DAG.getNode(Outs[i].Flags.isSExt() ? ISD::SIGN_EXTEND
+ : ISD::ZERO_EXTEND,
+ dl, MVT::i32, RetVal);
+ } else if (RetVal.getValueSizeInBits() < 16) {
+ // Use 16-bit registers for small load-stores as it's the
+ // smallest general purpose register size supported by NVPTX.
+ RetVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, RetVal);
+ }
+
+ // Record the value to return.
+ StoreOperands.push_back(RetVal);
+
+ // That's the last element of this store op.
+ if (VectorInfo[i] & PVF_LAST) {
+ NVPTXISD::NodeType Op;
+ unsigned NumElts = StoreOperands.size() - 2;
+ switch (NumElts) {
+ case 1:
+ Op = NVPTXISD::StoreRetval;
+ break;
+ case 2:
+ Op = NVPTXISD::StoreRetvalV2;
+ break;
+ case 4:
+ Op = NVPTXISD::StoreRetvalV4;
+ break;
+ default:
+ llvm_unreachable("Invalid vector info.");
+ }
+
+ // Adjust type of load/store op if we've extended the scalar
+ // return value.
+ EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[i];
+ Chain = DAG.getMemIntrinsicNode(Op, dl, DAG.getVTList(MVT::Other),
+ StoreOperands, TheStoreType,
+ MachinePointerInfo(), /* Align */ 1,
+ /* Volatile */ false, /* ReadMem */ false,
+ /* WriteMem */ true, /* Size */ 0);
+ // Cleanup vector state.
+ StoreOperands.clear();
+ }
+ }
+
+ return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
+}
+
+void NVPTXTargetLowering::LowerAsmOperandForConstraint(
+ SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ if (Constraint.length() > 1)
+ return;
+ else
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ return NVPTXISD::Tex1DFloatS32;
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloat;
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ return NVPTXISD::Tex1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ return NVPTXISD::Tex1DS32S32;
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ return NVPTXISD::Tex1DS32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ return NVPTXISD::Tex1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ return NVPTXISD::Tex1DU32S32;
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ return NVPTXISD::Tex1DU32Float;
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ return NVPTXISD::Tex1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ return NVPTXISD::Tex1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ return NVPTXISD::Tex1DArrayS32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ return NVPTXISD::Tex1DArrayU32S32;
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32Float;
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ return NVPTXISD::Tex2DFloatS32;
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloat;
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ return NVPTXISD::Tex2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ return NVPTXISD::Tex2DS32S32;
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ return NVPTXISD::Tex2DS32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ return NVPTXISD::Tex2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ return NVPTXISD::Tex2DU32S32;
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ return NVPTXISD::Tex2DU32Float;
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ return NVPTXISD::Tex2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ return NVPTXISD::Tex2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ return NVPTXISD::Tex2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ return NVPTXISD::Tex2DArrayS32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ return NVPTXISD::Tex2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ return NVPTXISD::Tex2DArrayU32S32;
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32Float;
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ return NVPTXISD::Tex2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ return NVPTXISD::Tex3DFloatS32;
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloat;
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ return NVPTXISD::Tex3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ return NVPTXISD::Tex3DS32S32;
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ return NVPTXISD::Tex3DS32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ return NVPTXISD::Tex3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ return NVPTXISD::Tex3DU32S32;
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ return NVPTXISD::Tex3DU32Float;
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ return NVPTXISD::Tex3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloat;
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ return NVPTXISD::TexCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ return NVPTXISD::TexCubeS32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ return NVPTXISD::TexCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ return NVPTXISD::TexCubeU32Float;
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ return NVPTXISD::TexCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4R2DFloatFloat;
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4G2DFloatFloat;
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4B2DFloatFloat;
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4A2DFloatFloat;
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4R2DS64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4G2DS64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4B2DS64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4A2DS64Float;
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4R2DU64Float;
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4G2DU64Float;
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4B2DU64Float;
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4A2DU64Float;
+
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ return NVPTXISD::TexUnified1DFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ return NVPTXISD::TexUnified1DS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ return NVPTXISD::TexUnified1DU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ return NVPTXISD::TexUnified1DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified1DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ return NVPTXISD::TexUnified1DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified1DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ return NVPTXISD::TexUnified1DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified1DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ return NVPTXISD::TexUnified2DFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ return NVPTXISD::TexUnified2DS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ return NVPTXISD::TexUnified2DU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ return NVPTXISD::TexUnified2DArrayFloatS32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ return NVPTXISD::TexUnified2DArrayFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ return NVPTXISD::TexUnified2DArrayS32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ return NVPTXISD::TexUnified2DArrayS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ return NVPTXISD::TexUnified2DArrayU32S32;
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ return NVPTXISD::TexUnified2DArrayU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ return NVPTXISD::TexUnified3DFloatS32;
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloat;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ return NVPTXISD::TexUnified3DFloatFloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ return NVPTXISD::TexUnified3DS32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ return NVPTXISD::TexUnified3DS32FloatGrad;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ return NVPTXISD::TexUnified3DU32S32;
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32Float;
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ return NVPTXISD::TexUnified3DU32FloatGrad;
+
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeU32FloatLevel;
+
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloat;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayFloatFloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayS32FloatLevel;
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32Float;
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ return NVPTXISD::TexUnifiedCubeArrayU32FloatLevel;
+
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedR2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedG2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedB2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ return NVPTXISD::Tld4UnifiedA2DFloatFloat;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedR2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedG2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedB2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ return NVPTXISD::Tld4UnifiedA2DS64Float;
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedR2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedG2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedB2DU64Float;
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ return NVPTXISD::Tld4UnifiedA2DU64Float;
+ }
+}
+
+static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
+ switch (Intrinsic) {
+ default:
+ return 0;
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ return NVPTXISD::Suld1DI8Clamp;
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ return NVPTXISD::Suld1DI16Clamp;
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ return NVPTXISD::Suld1DI32Clamp;
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ return NVPTXISD::Suld1DI64Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ return NVPTXISD::Suld1DV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ return NVPTXISD::Suld1DV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ return NVPTXISD::Suld1DV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ return NVPTXISD::Suld1DV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ return NVPTXISD::Suld1DV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ return NVPTXISD::Suld1DV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ return NVPTXISD::Suld1DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ return NVPTXISD::Suld1DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ return NVPTXISD::Suld1DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ return NVPTXISD::Suld1DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ return NVPTXISD::Suld1DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ return NVPTXISD::Suld1DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ return NVPTXISD::Suld1DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ return NVPTXISD::Suld1DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ return NVPTXISD::Suld1DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ return NVPTXISD::Suld1DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ return NVPTXISD::Suld1DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ return NVPTXISD::Suld1DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ return NVPTXISD::Suld2DI8Clamp;
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ return NVPTXISD::Suld2DI16Clamp;
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ return NVPTXISD::Suld2DI32Clamp;
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ return NVPTXISD::Suld2DI64Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ return NVPTXISD::Suld2DV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ return NVPTXISD::Suld2DV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ return NVPTXISD::Suld2DV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ return NVPTXISD::Suld2DV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ return NVPTXISD::Suld2DV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ return NVPTXISD::Suld2DV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ return NVPTXISD::Suld2DV4I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ return NVPTXISD::Suld2DArrayI8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ return NVPTXISD::Suld2DArrayI16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ return NVPTXISD::Suld2DArrayI32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ return NVPTXISD::Suld2DArrayI64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ return NVPTXISD::Suld2DArrayV2I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ return NVPTXISD::Suld2DArrayV2I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ return NVPTXISD::Suld2DArrayV2I32Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ return NVPTXISD::Suld2DArrayV2I64Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ return NVPTXISD::Suld2DArrayV4I8Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ return NVPTXISD::Suld2DArrayV4I16Clamp;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ return NVPTXISD::Suld2DArrayV4I32Clamp;
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ return NVPTXISD::Suld3DI8Clamp;
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ return NVPTXISD::Suld3DI16Clamp;
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ return NVPTXISD::Suld3DI32Clamp;
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ return NVPTXISD::Suld3DI64Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ return NVPTXISD::Suld3DV2I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ return NVPTXISD::Suld3DV2I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ return NVPTXISD::Suld3DV2I32Clamp;
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ return NVPTXISD::Suld3DV2I64Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ return NVPTXISD::Suld3DV4I8Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ return NVPTXISD::Suld3DV4I16Clamp;
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ return NVPTXISD::Suld3DV4I32Clamp;
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ return NVPTXISD::Suld1DI8Trap;
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ return NVPTXISD::Suld1DI16Trap;
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ return NVPTXISD::Suld1DI32Trap;
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ return NVPTXISD::Suld1DI64Trap;
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ return NVPTXISD::Suld1DV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ return NVPTXISD::Suld1DV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ return NVPTXISD::Suld1DV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ return NVPTXISD::Suld1DV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ return NVPTXISD::Suld1DV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ return NVPTXISD::Suld1DV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ return NVPTXISD::Suld1DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ return NVPTXISD::Suld1DArrayI8Trap;
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ return NVPTXISD::Suld1DArrayI16Trap;
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ return NVPTXISD::Suld1DArrayI32Trap;
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ return NVPTXISD::Suld1DArrayI64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ return NVPTXISD::Suld1DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ return NVPTXISD::Suld1DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ return NVPTXISD::Suld1DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ return NVPTXISD::Suld1DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ return NVPTXISD::Suld1DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ return NVPTXISD::Suld1DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ return NVPTXISD::Suld1DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ return NVPTXISD::Suld2DI8Trap;
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ return NVPTXISD::Suld2DI16Trap;
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ return NVPTXISD::Suld2DI32Trap;
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ return NVPTXISD::Suld2DI64Trap;
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ return NVPTXISD::Suld2DV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ return NVPTXISD::Suld2DV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ return NVPTXISD::Suld2DV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ return NVPTXISD::Suld2DV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ return NVPTXISD::Suld2DV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ return NVPTXISD::Suld2DV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ return NVPTXISD::Suld2DV4I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ return NVPTXISD::Suld2DArrayI8Trap;
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ return NVPTXISD::Suld2DArrayI16Trap;
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ return NVPTXISD::Suld2DArrayI32Trap;
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ return NVPTXISD::Suld2DArrayI64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ return NVPTXISD::Suld2DArrayV2I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ return NVPTXISD::Suld2DArrayV2I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ return NVPTXISD::Suld2DArrayV2I32Trap;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ return NVPTXISD::Suld2DArrayV2I64Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ return NVPTXISD::Suld2DArrayV4I8Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ return NVPTXISD::Suld2DArrayV4I16Trap;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ return NVPTXISD::Suld2DArrayV4I32Trap;
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ return NVPTXISD::Suld3DI8Trap;
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ return NVPTXISD::Suld3DI16Trap;
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ return NVPTXISD::Suld3DI32Trap;
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ return NVPTXISD::Suld3DI64Trap;
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ return NVPTXISD::Suld3DV2I8Trap;
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ return NVPTXISD::Suld3DV2I16Trap;
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ return NVPTXISD::Suld3DV2I32Trap;
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ return NVPTXISD::Suld3DV2I64Trap;
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ return NVPTXISD::Suld3DV4I8Trap;
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ return NVPTXISD::Suld3DV4I16Trap;
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ return NVPTXISD::Suld3DV4I32Trap;
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ return NVPTXISD::Suld1DI8Zero;
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ return NVPTXISD::Suld1DI16Zero;
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ return NVPTXISD::Suld1DI32Zero;
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ return NVPTXISD::Suld1DI64Zero;
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ return NVPTXISD::Suld1DV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ return NVPTXISD::Suld1DV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ return NVPTXISD::Suld1DV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ return NVPTXISD::Suld1DV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ return NVPTXISD::Suld1DV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ return NVPTXISD::Suld1DV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ return NVPTXISD::Suld1DV4I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ return NVPTXISD::Suld1DArrayI8Zero;
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ return NVPTXISD::Suld1DArrayI16Zero;
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ return NVPTXISD::Suld1DArrayI32Zero;
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ return NVPTXISD::Suld1DArrayI64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ return NVPTXISD::Suld1DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ return NVPTXISD::Suld1DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ return NVPTXISD::Suld1DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ return NVPTXISD::Suld1DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ return NVPTXISD::Suld1DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ return NVPTXISD::Suld1DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ return NVPTXISD::Suld1DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ return NVPTXISD::Suld2DI8Zero;
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ return NVPTXISD::Suld2DI16Zero;
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ return NVPTXISD::Suld2DI32Zero;
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ return NVPTXISD::Suld2DI64Zero;
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ return NVPTXISD::Suld2DV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ return NVPTXISD::Suld2DV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ return NVPTXISD::Suld2DV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ return NVPTXISD::Suld2DV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ return NVPTXISD::Suld2DV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ return NVPTXISD::Suld2DV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ return NVPTXISD::Suld2DV4I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ return NVPTXISD::Suld2DArrayI8Zero;
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ return NVPTXISD::Suld2DArrayI16Zero;
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ return NVPTXISD::Suld2DArrayI32Zero;
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ return NVPTXISD::Suld2DArrayI64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ return NVPTXISD::Suld2DArrayV2I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ return NVPTXISD::Suld2DArrayV2I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ return NVPTXISD::Suld2DArrayV2I32Zero;
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ return NVPTXISD::Suld2DArrayV2I64Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ return NVPTXISD::Suld2DArrayV4I8Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ return NVPTXISD::Suld2DArrayV4I16Zero;
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ return NVPTXISD::Suld2DArrayV4I32Zero;
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ return NVPTXISD::Suld3DI8Zero;
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ return NVPTXISD::Suld3DI16Zero;
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ return NVPTXISD::Suld3DI32Zero;
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ return NVPTXISD::Suld3DI64Zero;
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ return NVPTXISD::Suld3DV2I8Zero;
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ return NVPTXISD::Suld3DV2I16Zero;
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ return NVPTXISD::Suld3DV2I32Zero;
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ return NVPTXISD::Suld3DV2I64Zero;
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ return NVPTXISD::Suld3DV4I8Zero;
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ return NVPTXISD::Suld3DV4I16Zero;
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ return NVPTXISD::Suld3DV4I32Zero;
+ }
+}
+
+// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
+// TgtMemIntrinsic because we need the information that is only available in
+// the "Value" type of the destination pointer. In particular, the address
+// space information.
+bool NVPTXTargetLowering::getTgtMemIntrinsic(
+ IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
+ switch (Intrinsic) {
+ default:
+ return false;
+
+ case Intrinsic::nvvm_atomic_load_add_f32:
+ case Intrinsic::nvvm_atomic_load_inc_32:
+ case Intrinsic::nvvm_atomic_load_dec_32:
+
+ case Intrinsic::nvvm_atomic_add_gen_f_cta:
+ case Intrinsic::nvvm_atomic_add_gen_f_sys:
+ case Intrinsic::nvvm_atomic_add_gen_i_cta:
+ case Intrinsic::nvvm_atomic_add_gen_i_sys:
+ case Intrinsic::nvvm_atomic_and_gen_i_cta:
+ case Intrinsic::nvvm_atomic_and_gen_i_sys:
+ case Intrinsic::nvvm_atomic_cas_gen_i_cta:
+ case Intrinsic::nvvm_atomic_cas_gen_i_sys:
+ case Intrinsic::nvvm_atomic_dec_gen_i_cta:
+ case Intrinsic::nvvm_atomic_dec_gen_i_sys:
+ case Intrinsic::nvvm_atomic_inc_gen_i_cta:
+ case Intrinsic::nvvm_atomic_inc_gen_i_sys:
+ case Intrinsic::nvvm_atomic_max_gen_i_cta:
+ case Intrinsic::nvvm_atomic_max_gen_i_sys:
+ case Intrinsic::nvvm_atomic_min_gen_i_cta:
+ case Intrinsic::nvvm_atomic_min_gen_i_sys:
+ case Intrinsic::nvvm_atomic_or_gen_i_cta:
+ case Intrinsic::nvvm_atomic_or_gen_i_sys:
+ case Intrinsic::nvvm_atomic_exch_gen_i_cta:
+ case Intrinsic::nvvm_atomic_exch_gen_i_sys:
+ case Intrinsic::nvvm_atomic_xor_gen_i_cta:
+ case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = true;
+ Info.align = 0;
+ return true;
+ }
+
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
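+ // The required alignment is supplied as the immediate second argument
+ // of the ldu intrinsic.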
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p: {
+ auto &DL = I.getModule()->getDataLayout();
+
+ Info.opc = ISD::INTRINSIC_W_CHAIN;
+ if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
+ Info.memVT = getValueType(DL, I.getType());
+ else if (Intrinsic == Intrinsic::nvvm_ldg_global_p)
+ Info.memVT = getPointerTy(DL);
+ else
+ Info.memVT = getValueType(DL, I.getType());
+ Info.ptrVal = I.getArgOperand(0);
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
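+ // As with ldu, the required alignment is the immediate second argument.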
+ Info.align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
+
+ return true;
+ }
+
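+ // Texture and tld4 fetches: memVT is the type of data the fetch returns
+ // (v4f32 for the float variants here, v4i32 for the signed/unsigned
+ // integer variants further below).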
+ case Intrinsic::nvvm_tex_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4f32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_tex_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
+ case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
+ case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
+ case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
+ case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
+ Info.opc = getOpcForTextureInstr(Intrinsic);
+ Info.memVT = MVT::v4i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
+ case Intrinsic::nvvm_suld_3d_i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i8_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i8_clamp:
+ case Intrinsic::nvvm_suld_1d_i8_trap:
+ case Intrinsic::nvvm_suld_1d_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_i8_trap:
+ case Intrinsic::nvvm_suld_2d_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_v4i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
+ case Intrinsic::nvvm_suld_3d_i8_trap:
+ case Intrinsic::nvvm_suld_3d_v2i8_trap:
+ case Intrinsic::nvvm_suld_3d_v4i8_trap:
+ case Intrinsic::nvvm_suld_1d_i8_zero:
+ case Intrinsic::nvvm_suld_1d_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_v4i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_i8_zero:
+ case Intrinsic::nvvm_suld_2d_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_v4i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
+ case Intrinsic::nvvm_suld_3d_i8_zero:
+ case Intrinsic::nvvm_suld_3d_v2i8_zero:
+ case Intrinsic::nvvm_suld_3d_v4i8_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i8;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
+ case Intrinsic::nvvm_suld_3d_i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i16_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i16_clamp:
+ case Intrinsic::nvvm_suld_1d_i16_trap:
+ case Intrinsic::nvvm_suld_1d_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_i16_trap:
+ case Intrinsic::nvvm_suld_2d_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_v4i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
+ case Intrinsic::nvvm_suld_3d_i16_trap:
+ case Intrinsic::nvvm_suld_3d_v2i16_trap:
+ case Intrinsic::nvvm_suld_3d_v4i16_trap:
+ case Intrinsic::nvvm_suld_1d_i16_zero:
+ case Intrinsic::nvvm_suld_1d_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_v4i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_i16_zero:
+ case Intrinsic::nvvm_suld_2d_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_v4i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
+ case Intrinsic::nvvm_suld_3d_i16_zero:
+ case Intrinsic::nvvm_suld_3d_v2i16_zero:
+ case Intrinsic::nvvm_suld_3d_v4i16_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i16;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
+ case Intrinsic::nvvm_suld_3d_i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i32_clamp:
+ case Intrinsic::nvvm_suld_3d_v4i32_clamp:
+ case Intrinsic::nvvm_suld_1d_i32_trap:
+ case Intrinsic::nvvm_suld_1d_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_i32_trap:
+ case Intrinsic::nvvm_suld_2d_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_v4i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
+ case Intrinsic::nvvm_suld_3d_i32_trap:
+ case Intrinsic::nvvm_suld_3d_v2i32_trap:
+ case Intrinsic::nvvm_suld_3d_v4i32_trap:
+ case Intrinsic::nvvm_suld_1d_i32_zero:
+ case Intrinsic::nvvm_suld_1d_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_v4i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_i32_zero:
+ case Intrinsic::nvvm_suld_2d_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_v4i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
+ case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
+ case Intrinsic::nvvm_suld_3d_i32_zero:
+ case Intrinsic::nvvm_suld_3d_v2i32_zero:
+ case Intrinsic::nvvm_suld_3d_v4i32_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i32;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+
+ case Intrinsic::nvvm_suld_1d_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_i64_clamp:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
+ case Intrinsic::nvvm_suld_3d_i64_clamp:
+ case Intrinsic::nvvm_suld_3d_v2i64_clamp:
+ case Intrinsic::nvvm_suld_1d_i64_trap:
+ case Intrinsic::nvvm_suld_1d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_i64_trap:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_i64_trap:
+ case Intrinsic::nvvm_suld_2d_v2i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_i64_trap:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
+ case Intrinsic::nvvm_suld_3d_i64_trap:
+ case Intrinsic::nvvm_suld_3d_v2i64_trap:
+ case Intrinsic::nvvm_suld_1d_i64_zero:
+ case Intrinsic::nvvm_suld_1d_v2i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_i64_zero:
+ case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_i64_zero:
+ case Intrinsic::nvvm_suld_2d_v2i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_i64_zero:
+ case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
+ case Intrinsic::nvvm_suld_3d_i64_zero:
+ case Intrinsic::nvvm_suld_3d_v2i64_zero:
+ Info.opc = getOpcForSurfaceInstr(Intrinsic);
+ Info.memVT = MVT::i64;
+ Info.ptrVal = nullptr;
+ Info.offset = 0;
+ Info.vol = false;
+ Info.readMem = true;
+ Info.writeMem = false;
+ Info.align = 16;
+ return true;
+ }
+ return false;
+}
+
+/// isLegalAddressingMode - Return true if the addressing mode represented
+/// by AM is legal for this target, for a load/store of the specified type.
+/// Used to guide target specific optimizations, like loop strength reduction
+/// (LoopStrengthReduce.cpp) and address-mode optimization
+/// (CodeGenPrepare.cpp).
+bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM, Type *Ty,
+ unsigned AS) const {
+ // AddrMode - This represents an addressing mode of:
+ // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+ //
+ // The legal address modes are
+ // - [avar]
+ // - [areg]
+ // - [areg+immoff]
+ // - [immAddr]
+
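+ // A symbolic address (avar) must stand alone: a GlobalValue base cannot
+ // be combined with an offset, a base register, or a scale.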
+ if (AM.BaseGV) {
+ return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
+ }
+
+ switch (AM.Scale) {
+ case 0: // "r", "r+i" or "i" is allowed
+ break;
+ case 1:
+ if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
+ return false;
+ // Otherwise we have r+i.
+ break;
+ default:
+ // No scale > 1 is allowed
+ return false;
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+NVPTXTargetLowering::ConstraintType
+NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'b':
+ case 'r':
+ case 'h':
+ case 'c':
+ case 'l':
+ case 'f':
+ case 'd':
+ case '0':
+ case 'N':
+ return C_RegisterClass;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
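+ // Map single-letter constraints onto NVPTX register classes:
+ // 'b' -> predicate (Int1), 'c'/'h' -> Int16, 'r' -> Int32,
+ // 'l'/'N' -> Int64, 'f' -> Float32, 'd' -> Float64.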
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'b':
+ return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
+ case 'c':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'h':
+ return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
+ case 'r':
+ return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
+ case 'l':
+ case 'N':
+ return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
+ case 'f':
+ return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
+ case 'd':
+ return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
+ }
+ }
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX DAG Combining
+//===----------------------------------------------------------------------===//
+
+bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
+ CodeGenOpt::Level OptLevel) const {
+ // Always honor command-line argument
+ if (FMAContractLevelOpt.getNumOccurrences() > 0)
+ return FMAContractLevelOpt > 0;
+
+ // Do not contract if we're not optimizing the code.
+ if (OptLevel == 0)
+ return false;
+
+ // Honor TargetOptions flags that explicitly say fusion is okay.
+ if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
+ return true;
+
+ return allowUnsafeFPMath(MF);
+}
+
+bool NVPTXTargetLowering::allowUnsafeFPMath(MachineFunction &MF) const {
+ // Honor TargetOptions flags that explicitly say unsafe math is okay.
+ if (MF.getTarget().Options.UnsafeFPMath)
+ return true;
+
+ // Allow unsafe math if unsafe-fp-math attribute explicitly says so.
+ const Function *F = MF.getFunction();
+ if (F->hasFnAttribute("unsafe-fp-math")) {
+ Attribute Attr = F->getFnAttribute("unsafe-fp-math");
+ StringRef Val = Attr.getValueAsString();
+ if (Val == "true")
+ return true;
+ }
+
+ return false;
+}
+
+/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
+/// operands N0 and N1. This is a helper for PerformADDCombine that is
+/// called with the default operands, and if that fails, with commuted
+/// operands.
+static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SelectionDAG &DAG = DCI.DAG;
+ // Skip the vector case; only scalar values are handled here.
+ EVT VT = N0.getValueType();
+ if (VT.isVector())
+ return SDValue();
+
+ // fold (add (mul a, b), c) -> (mad a, b, c)
+ //
+ if (N0.getOpcode() == ISD::MUL) {
+ assert(VT.isInteger());
+ // For integer: since integer multiply-add costs the same as integer
+ // multiply but more than integer add, do the fusion only when the mul
+ // has a single use and that use is this add.
+ if (OptLevel == CodeGenOpt::None || VT != MVT::i32 ||
+ !N0.getNode()->hasOneUse())
+ return SDValue();
+
+ // Do the folding
+ return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ } else if (N0.getOpcode() == ISD::FMUL) {
+ if (VT == MVT::f32 || VT == MVT::f64) {
+ const auto *TLI = static_cast<const NVPTXTargetLowering *>(
+ &DAG.getTargetLoweringInfo());
+ if (!TLI->allowFMA(DAG.getMachineFunction(), OptLevel))
+ return SDValue();
+
+ // For floating point:
+ // Do the fusion only when the mul has fewer than 5 uses and all of
+ // them are adds.
+ // The heuristic is that if a use is not an add, that use cannot be
+ // fused into an fma, so the mul is still needed anyway. If there are
+ // more than 4 uses, fusing them all would increase register pressure.
+ //
+ int numUses = 0;
+ int nonAddCount = 0;
+ for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
+ UE = N0.getNode()->use_end();
+ UI != UE; ++UI) {
+ numUses++;
+ SDNode *User = *UI;
+ if (User->getOpcode() != ISD::FADD)
+ ++nonAddCount;
+ }
+ if (numUses >= 5)
+ return SDValue();
+ if (nonAddCount) {
+ int orderNo = N->getIROrder();
+ int orderNo2 = N0.getNode()->getIROrder();
+ // Simple heuristic for estimating potential register pressure: the
+ // IR-order difference approximates the distance between def and use,
+ // and the longer that distance, the more likely the fusion is to
+ // increase register pressure.
+ if (orderNo - orderNo2 < 500)
+ return SDValue();
+
+ // Now, check if at least one of the FMUL's operands is live beyond
+ // node N, which guarantees that the FMA will not increase register
+ // pressure at node N.
+ bool opIsLive = false;
+ const SDNode *left = N0.getOperand(0).getNode();
+ const SDNode *right = N0.getOperand(1).getNode();
+
+ if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
+ opIsLive = true;
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end();
+ UI != UE; ++UI) {
+ SDNode *User = *UI;
+ int orderNo3 = User->getIROrder();
+ if (orderNo3 > orderNo) {
+ opIsLive = true;
+ break;
+ }
+ }
+
+ if (!opIsLive)
+ return SDValue();
+ }
+
+ return DAG.getNode(ISD::FMA, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1), N1);
+ }
+ }
+
+ return SDValue();
+}
+
+/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
+///
+static SDValue PerformADDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ const NVPTXSubtarget &Subtarget,
+ CodeGenOpt::Level OptLevel) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ // First try with the default operand order.
+ if (SDValue Result =
+ PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
+ return Result;
+
+ // If that didn't work, try again with the operands commuted.
+ return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
+}
+
+static SDValue PerformANDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ // The type legalizer turns a vector load of i8 values into a zextload to
+ // i16 registers, optionally ANY_EXTENDs it (if the target type is an
+ // integer), and ANDs off the high 8 bits. Since we turn this load into a
+ // target-specific DAG node, the DAG combiner fails to eliminate these AND
+ // nodes. Do that here.
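+ // A typical pattern handled here is
+ // (and (any_extend (NVPTXISD::LoadV4 <v4i8> ...)), 0xff),
+ // where the load has already zero-extended each i8 element into an i16
+ // register, making the mask redundant.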
+ SDValue Val = N->getOperand(0);
+ SDValue Mask = N->getOperand(1);
+
+ if (isa<ConstantSDNode>(Val)) {
+ std::swap(Val, Mask);
+ }
+
+ SDValue AExt;
+ // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
+ if (Val.getOpcode() == ISD::ANY_EXTEND) {
+ AExt = Val;
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
+ Val = Val->getOperand(0);
+ }
+
+ if (Val->getOpcode() == NVPTXISD::LoadV2 ||
+ Val->getOpcode() == NVPTXISD::LoadV4) {
+ ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
+ if (!MaskCnst) {
+ // Not an AND with a constant
+ return SDValue();
+ }
+
+ uint64_t MaskVal = MaskCnst->getZExtValue();
+ if (MaskVal != 0xff) {
+ // Not an AND that chops off top 8 bits
+ return SDValue();
+ }
+
+ MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
+ if (!Mem) {
+ // Not a MemSDNode?!?
+ return SDValue();
+ }
+
+ EVT MemVT = Mem->getMemoryVT();
+ if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
+ // We only handle the i8 case
+ return SDValue();
+ }
+
+ unsigned ExtType =
+ cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
+ getZExtValue();
+ if (ExtType == ISD::SEXTLOAD) {
+ // If for some reason the load is a sextload, the and is needed to zero
+ // out the high 8 bits
+ return SDValue();
+ }
+
+ bool AddTo = false;
+ if (AExt.getNode() != nullptr) {
+ // Re-insert the ext as a zext.
+ Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
+ AExt.getValueType(), Val);
+ AddTo = true;
+ }
+
+ // If we get here, the AND is unnecessary. Just replace it with the load
+ DCI.CombineTo(N, Val, AddTo);
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformREMCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
+
+ // Don't do anything at less than -O2.
+ if (OptLevel < CodeGenOpt::Default)
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc DL(N);
+ EVT VT = N->getValueType(0);
+ bool IsSigned = N->getOpcode() == ISD::SREM;
+ unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
+
+ const SDValue &Num = N->getOperand(0);
+ const SDValue &Den = N->getOperand(1);
+
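+ // Only rewrite the rem when a div with the same operands already exists;
+ // the div node created below is then CSE'd with it, so the division is
+ // computed only once and the rem becomes a mul and a sub.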
+ for (const SDNode *U : Num->uses()) {
+ if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
+ U->getOperand(1) == Den) {
+ // Num % Den -> Num - (Num / Den) * Den
+ return DAG.getNode(ISD::SUB, DL, VT, Num,
+ DAG.getNode(ISD::MUL, DL, VT,
+ DAG.getNode(DivOpc, DL, VT, Num, Den),
+ Den));
+ }
+ }
+ return SDValue();
+}
+
+enum OperandSignedness {
+ Signed = 0,
+ Unsigned,
+ Unknown
+};
+
+/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
+/// that can be demoted to \p OptSize bits without loss of information. The
+/// signedness of the operand, if determinable, is placed in \p S.
+static bool IsMulWideOperandDemotable(SDValue Op,
+ unsigned OptSize,
+ OperandSignedness &S) {
+ S = Unknown;
+
+ if (Op.getOpcode() == ISD::SIGN_EXTEND ||
+ Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() <= OptSize) {
+ S = Signed;
+ return true;
+ }
+ } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
+ EVT OrigVT = Op.getOperand(0).getValueType();
+ if (OrigVT.getSizeInBits() <= OptSize) {
+ S = Unsigned;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
+/// be demoted to \p OptSize bits without loss of information. If the operands
+/// contain a constant, it should appear as the RHS operand. The signedness of
+/// the operands is placed in \p IsSigned.
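+ /// For example, for a 32-bit multiply with OptSize = 16, an LHS of
+ /// (sext i16 %a) and an RHS constant that fits in a signed 16-bit value
+ /// are both demotable, with \p IsSigned set to true.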
+static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
+ unsigned OptSize,
+ bool &IsSigned) {
+ OperandSignedness LHSSign;
+
+ // The LHS operand must be a demotable op
+ if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
+ return false;
+
+ // We should have been able to determine the signedness from the LHS
+ if (LHSSign == Unknown)
+ return false;
+
+ IsSigned = (LHSSign == Signed);
+
+ // The RHS can be a demotable op or a constant
+ if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
+ const APInt &Val = CI->getAPIntValue();
+ if (LHSSign == Unsigned) {
+ return Val.isIntN(OptSize);
+ } else {
+ return Val.isSignedIntN(OptSize);
+ }
+ } else {
+ OperandSignedness RHSSign;
+ if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
+ return false;
+
+ return LHSSign == RHSSign;
+ }
+}
+
+/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
+/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
+/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
+/// amount.
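+ /// For example, a 32-bit (mul (sext i16 %a), (sext i16 %b)) is replaced by
+ /// NVPTXISD::MUL_WIDE_SIGNED of the two i16 operands (i.e. mul.wide.s16);
+ /// a SHL by a constant c is first treated as a multiply by (1 << c).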
+static SDValue TryMULWIDECombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT MulType = N->getValueType(0);
+ if (MulType != MVT::i32 && MulType != MVT::i64) {
+ return SDValue();
+ }
+
+ SDLoc DL(N);
+ unsigned OptSize = MulType.getSizeInBits() >> 1;
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+
+ // Canonicalize the multiply so the constant (if any) is on the right
+ if (N->getOpcode() == ISD::MUL) {
+ if (isa<ConstantSDNode>(LHS)) {
+ std::swap(LHS, RHS);
+ }
+ }
+
+ // If we have a SHL, determine the actual multiply amount
+ if (N->getOpcode() == ISD::SHL) {
+ ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
+ if (!ShlRHS) {
+ return SDValue();
+ }
+
+ APInt ShiftAmt = ShlRHS->getAPIntValue();
+ unsigned BitWidth = MulType.getSizeInBits();
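+ // A left shift by c is the same as a multiply by (1 << c) as long as
+ // 0 <= c < BitWidth, so materialize that constant as the RHS.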
+ if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
+ APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
+ RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
+ } else {
+ return SDValue();
+ }
+ }
+
+ bool Signed;
+ // Verify that our operands are demotable
+ if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
+ return SDValue();
+ }
+
+ EVT DemotedVT;
+ if (MulType == MVT::i32) {
+ DemotedVT = MVT::i16;
+ } else {
+ DemotedVT = MVT::i32;
+ }
+
+ // Truncate the operands to the correct size. Note that these are just for
+ // type consistency and will (likely) be eliminated in later phases.
+ SDValue TruncLHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
+ SDValue TruncRHS =
+ DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
+
+ unsigned Opc;
+ if (Signed) {
+ Opc = NVPTXISD::MUL_WIDE_SIGNED;
+ } else {
+ Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
+ }
+
+ return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
+}
+
+/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
+static SDValue PerformMULCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
+static SDValue PerformSHLCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI,
+ CodeGenOpt::Level OptLevel) {
+ if (OptLevel > 0) {
+ // Try mul.wide combining at OptLevel > 0
+ if (SDValue Ret = TryMULWIDECombine(N, DCI))
+ return Ret;
+ }
+
+ return SDValue();
+}
+
+static SDValue PerformSETCCCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
+ EVT CCType = N->getValueType(0);
+ SDValue A = N->getOperand(0);
+ SDValue B = N->getOperand(1);
+
+ if (CCType != MVT::v2i1 || A.getValueType() != MVT::v2f16)
+ return SDValue();
+
+ SDLoc DL(N);
+ // setp.f16x2 returns two scalar predicates, which we need to
+ // convert back to v2i1. The returned result will be scalarized by
+ // the legalizer, but the comparison will remain a single vector
+ // instruction.
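+ // i.e. (v2i1 (setcc v2f16:$a, v2f16:$b, cc)) becomes a BUILD_VECTOR of
+ // the two i1 results of (SETP_F16X2 $a, $b, cc).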
+ SDValue CCNode = DCI.DAG.getNode(NVPTXISD::SETP_F16X2, DL,
+ DCI.DAG.getVTList(MVT::i1, MVT::i1),
+ {A, B, N->getOperand(2)});
+ return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
+ CCNode.getValue(1));
+}
+
+SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
+ DAGCombinerInfo &DCI) const {
+ CodeGenOpt::Level OptLevel = getTargetMachine().getOptLevel();
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::ADD:
+ case ISD::FADD:
+ return PerformADDCombine(N, DCI, STI, OptLevel);
+ case ISD::MUL:
+ return PerformMULCombine(N, DCI, OptLevel);
+ case ISD::SHL:
+ return PerformSHLCombine(N, DCI, OptLevel);
+ case ISD::AND:
+ return PerformANDCombine(N, DCI);
+ case ISD::UREM:
+ case ISD::SREM:
+ return PerformREMCombine(N, DCI, OptLevel);
+ case ISD::SETCC:
+ return PerformSETCCCombine(N, DCI);
+ }
+ return SDValue();
+}
+
+ /// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
+static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ EVT ResVT = N->getValueType(0);
+ SDLoc DL(N);
+
+ assert(ResVT.isVector() && "Vector load must have vector type");
+
+ // We only handle "native" vector sizes for now, e.g. <4 x double> is not
+ // legal. We can (and should) split that into 2 loads of <2 x double> here
+ // but I'm leaving that as a TODO for now.
+ assert(ResVT.isSimple() && "Can only handle simple types");
+ switch (ResVT.getSimpleVT().SimpleTy) {
+ default:
+ return;
+ case MVT::v2i8:
+ case MVT::v2i16:
+ case MVT::v2i32:
+ case MVT::v2i64:
+ case MVT::v2f16:
+ case MVT::v2f32:
+ case MVT::v2f64:
+ case MVT::v4i8:
+ case MVT::v4i16:
+ case MVT::v4i32:
+ case MVT::v4f16:
+ case MVT::v4f32:
+ case MVT::v8f16: // <4 x f16x2>
+ // This is a "native" vector type
+ break;
+ }
+
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+
+ unsigned Align = LD->getAlignment();
+ auto &TD = DAG.getDataLayout();
+ unsigned PrefAlign =
+ TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
+ if (Align < PrefAlign) {
+ // This load is not sufficiently aligned, so bail out and let this vector
+ // load be scalarized. Note that we may still be able to emit smaller
+ // vector loads. For example, if we are loading a <4 x float> with an
+ // alignment of 8, this check will fail but the legalizer will try again
+ // with 2 x <2 x float>, which will succeed with an alignment of 8.
+ return;
+ }
+
+ EVT EltVT = ResVT.getVectorElementType();
+ unsigned NumElts = ResVT.getVectorNumElements();
+
+ // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
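+ // For example, a <4 x i8> load is emitted with four i16 result values and
+ // the elements are truncated back to i8 when the vector is rebuilt below.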
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+ bool LoadF16x2 = false;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ Opcode = NVPTXISD::LoadV2;
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ case 8: {
+ // v8f16 is a special case. PTX doesn't have an ld.v8.f16
+ // instruction. Instead, we split the vector into v2f16 chunks and
+ // load them with ld.v4.b32.
+ assert(EltVT == MVT::f16 && "Unsupported v8 vector type.");
+ LoadF16x2 = true;
+ Opcode = NVPTXISD::LoadV4;
+ EVT ListVTs[] = {MVT::v2f16, MVT::v2f16, MVT::v2f16, MVT::v2f16,
+ MVT::Other};
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ // Copy regular operands
+ SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
+
+ // The select routine does not have access to the LoadSDNode instance, so
+ // pass along the extension information
+ OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ LD->getMemoryVT(),
+ LD->getMemOperand());
+
+ SmallVector<SDValue, 8> ScalarRes;
+ if (LoadF16x2) {
+ // Split v2f16 subvectors back into individual elements.
+ NumElts /= 2;
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue SubVector = NewLD.getValue(i);
+ SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, SubVector,
+ DAG.getIntPtrConstant(1, DL));
+ ScalarRes.push_back(E0);
+ ScalarRes.push_back(E1);
+ }
+ } else {
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec = DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+}
+
+static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &Results) {
+ SDValue Chain = N->getOperand(0);
+ SDValue Intrin = N->getOperand(1);
+ SDLoc DL(N);
+
+ // Get the intrinsic ID
+ unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p: {
+ EVT ResVT = N->getValueType(0);
+
+ if (ResVT.isVector()) {
+ // Vector LDG/LDU
+
+ unsigned NumElts = ResVT.getVectorNumElements();
+ EVT EltVT = ResVT.getVectorElementType();
+
+ // Since LDU/LDG are target nodes, we cannot rely on DAG type
+ // legalization.
+ // Therefore, we must ensure the type is legal. For i1 and i8, we set the
+ // loaded type to i16 and propagate the "real" type as the memory type.
+ bool NeedTrunc = false;
+ if (EltVT.getSizeInBits() < 16) {
+ EltVT = MVT::i16;
+ NeedTrunc = true;
+ }
+
+ unsigned Opcode = 0;
+ SDVTList LdResVTs;
+
+ switch (NumElts) {
+ default:
+ return;
+ case 2:
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV2;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV2;
+ break;
+ }
+ LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
+ break;
+ case 4: {
+ switch (IntrinNo) {
+ default:
+ return;
+ case Intrinsic::nvvm_ldg_global_i:
+ case Intrinsic::nvvm_ldg_global_f:
+ case Intrinsic::nvvm_ldg_global_p:
+ Opcode = NVPTXISD::LDGV4;
+ break;
+ case Intrinsic::nvvm_ldu_global_i:
+ case Intrinsic::nvvm_ldu_global_f:
+ case Intrinsic::nvvm_ldu_global_p:
+ Opcode = NVPTXISD::LDUV4;
+ break;
+ }
+ EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
+ LdResVTs = DAG.getVTList(ListVTs);
+ break;
+ }
+ }
+
+ SmallVector<SDValue, 8> OtherOps;
+
+ // Copy regular operands
+
+ OtherOps.push_back(Chain); // Chain
+ // Skip operand 1 (intrinsic ID)
+ // Others
+ OtherOps.append(N->op_begin() + 2, N->op_end());
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
+ MemSD->getMemoryVT(),
+ MemSD->getMemOperand());
+
+ SmallVector<SDValue, 4> ScalarRes;
+
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Res = NewLD.getValue(i);
+ if (NeedTrunc)
+ Res =
+ DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
+ ScalarRes.push_back(Res);
+ }
+
+ SDValue LoadChain = NewLD.getValue(NumElts);
+
+ SDValue BuildVec =
+ DAG.getBuildVector(ResVT, DL, ScalarRes);
+
+ Results.push_back(BuildVec);
+ Results.push_back(LoadChain);
+ } else {
+ // i8 LDG/LDU
+ assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
+ "Custom handling of non-i8 ldu/ldg?");
+
+ // Just copy all operands as-is
+ SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
+
+ // Force output to i16
+ SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
+
+ MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
+
+ // We make sure the memory type is i8, which will be used during isel
+ // to select the proper instruction.
+ SDValue NewLD =
+ DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
+ MVT::i8, MemSD->getMemOperand());
+
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
+ NewLD.getValue(0)));
+ Results.push_back(NewLD.getValue(1));
+ }
+ }
+ }
+}
+
+void NVPTXTargetLowering::ReplaceNodeResults(
+ SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
+ switch (N->getOpcode()) {
+ default:
+ report_fatal_error("Unhandled custom legalization");
+ case ISD::LOAD:
+ ReplaceLoadVector(N, DAG, Results);
+ return;
+ case ISD::INTRINSIC_W_CHAIN:
+ ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
+ return;
+ }
+}
+
+// Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
+void NVPTXSection::anchor() {}
+
+NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
+ delete static_cast<NVPTXSection *>(TextSection);
+ delete static_cast<NVPTXSection *>(DataSection);
+ delete static_cast<NVPTXSection *>(BSSSection);
+ delete static_cast<NVPTXSection *>(ReadOnlySection);
+
+ delete static_cast<NVPTXSection *>(StaticCtorSection);
+ delete static_cast<NVPTXSection *>(StaticDtorSection);
+ delete static_cast<NVPTXSection *>(LSDASection);
+ delete static_cast<NVPTXSection *>(EHFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfAbbrevSection);
+ delete static_cast<NVPTXSection *>(DwarfInfoSection);
+ delete static_cast<NVPTXSection *>(DwarfLineSection);
+ delete static_cast<NVPTXSection *>(DwarfFrameSection);
+ delete static_cast<NVPTXSection *>(DwarfPubTypesSection);
+ delete static_cast<const NVPTXSection *>(DwarfDebugInlineSection);
+ delete static_cast<NVPTXSection *>(DwarfStrSection);
+ delete static_cast<NVPTXSection *>(DwarfLocSection);
+ delete static_cast<NVPTXSection *>(DwarfARangesSection);
+ delete static_cast<NVPTXSection *>(DwarfRangesSection);
+ delete static_cast<NVPTXSection *>(DwarfMacinfoSection);
+}
+
+MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
+ const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
+ return getDataSection();
+}
-//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file describes the PTX instructions in TableGen format.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-include "NVPTXInstrFormats.td"\r
-\r
-// A NOP instruction\r
-let hasSideEffects = 0 in {\r
- def NOP : NVPTXInst<(outs), (ins), "", []>;\r
-}\r
-\r
-let OperandType = "OPERAND_IMMEDIATE" in {\r
- def f16imm : Operand<f16>;\r
-}\r
-\r
-// List of vector specific properties\r
-def isVecLD : VecInstTypeEnum<1>;\r
-def isVecST : VecInstTypeEnum<2>;\r
-def isVecBuild : VecInstTypeEnum<3>;\r
-def isVecShuffle : VecInstTypeEnum<4>;\r
-def isVecExtract : VecInstTypeEnum<5>;\r
-def isVecInsert : VecInstTypeEnum<6>;\r
-def isVecDest : VecInstTypeEnum<7>;\r
-def isVecOther : VecInstTypeEnum<15>;\r
-\r
-//===----------------------------------------------------------------------===//\r
-// NVPTX Operand Definitions.\r
-//===----------------------------------------------------------------------===//\r
-\r
-def brtarget : Operand<OtherVT>;\r
-\r
-// CVT conversion modes\r
-// These must match the enum in NVPTX.h\r
-def CvtNONE : PatLeaf<(i32 0x0)>;\r
-def CvtRNI : PatLeaf<(i32 0x1)>;\r
-def CvtRZI : PatLeaf<(i32 0x2)>;\r
-def CvtRMI : PatLeaf<(i32 0x3)>;\r
-def CvtRPI : PatLeaf<(i32 0x4)>;\r
-def CvtRN : PatLeaf<(i32 0x5)>;\r
-def CvtRZ : PatLeaf<(i32 0x6)>;\r
-def CvtRM : PatLeaf<(i32 0x7)>;\r
-def CvtRP : PatLeaf<(i32 0x8)>;\r
-\r
-def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;\r
-def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;\r
-def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;\r
-def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;\r
-def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;\r
-def CvtRN_FTZ : PatLeaf<(i32 0x15)>;\r
-def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;\r
-def CvtRM_FTZ : PatLeaf<(i32 0x17)>;\r
-def CvtRP_FTZ : PatLeaf<(i32 0x18)>;\r
-\r
-def CvtSAT : PatLeaf<(i32 0x20)>;\r
-def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;\r
-\r
-def CvtMode : Operand<i32> {\r
- let PrintMethod = "printCvtMode";\r
-}\r
-\r
-// Compare modes\r
-// These must match the enum in NVPTX.h\r
-def CmpEQ : PatLeaf<(i32 0)>;\r
-def CmpNE : PatLeaf<(i32 1)>;\r
-def CmpLT : PatLeaf<(i32 2)>;\r
-def CmpLE : PatLeaf<(i32 3)>;\r
-def CmpGT : PatLeaf<(i32 4)>;\r
-def CmpGE : PatLeaf<(i32 5)>;\r
-def CmpEQU : PatLeaf<(i32 10)>;\r
-def CmpNEU : PatLeaf<(i32 11)>;\r
-def CmpLTU : PatLeaf<(i32 12)>;\r
-def CmpLEU : PatLeaf<(i32 13)>;\r
-def CmpGTU : PatLeaf<(i32 14)>;\r
-def CmpGEU : PatLeaf<(i32 15)>;\r
-def CmpNUM : PatLeaf<(i32 16)>;\r
-def CmpNAN : PatLeaf<(i32 17)>;\r
-\r
-def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;\r
-def CmpNE_FTZ : PatLeaf<(i32 0x101)>;\r
-def CmpLT_FTZ : PatLeaf<(i32 0x102)>;\r
-def CmpLE_FTZ : PatLeaf<(i32 0x103)>;\r
-def CmpGT_FTZ : PatLeaf<(i32 0x104)>;\r
-def CmpGE_FTZ : PatLeaf<(i32 0x105)>;\r
-def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;\r
-def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;\r
-def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;\r
-def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;\r
-def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;\r
-def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;\r
-def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;\r
-def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;\r
-\r
-def CmpMode : Operand<i32> {\r
- let PrintMethod = "printCmpMode";\r
-}\r
-def VecElement : Operand<i32> {\r
- let PrintMethod = "printVecElement";\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// NVPTX Instruction Predicate Definitions\r
-//===----------------------------------------------------------------------===//\r
-\r
-\r
-def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;\r
-def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;\r
-def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;\r
-def useAtomRedG32forGen32 :\r
- Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;\r
-def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;\r
-def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;\r
-def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;\r
-def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;\r
-def useAtomRedG64forGen64 :\r
- Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;\r
-def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;\r
-def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;\r
-def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;\r
-def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;\r
-def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;\r
-def hasVote : Predicate<"Subtarget->hasVote()">;\r
-def hasDouble : Predicate<"Subtarget->hasDouble()">;\r
-def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;\r
-def hasLDG : Predicate<"Subtarget->hasLDG()">;\r
-def hasLDU : Predicate<"Subtarget->hasLDU()">;\r
-def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;\r
-\r
-def doF32FTZ : Predicate<"useF32FTZ()">;\r
-def doNoF32FTZ : Predicate<"!useF32FTZ()">;\r
-\r
-def doMulWide : Predicate<"doMulWide">;\r
-\r
-def allowFMA : Predicate<"allowFMA()">;\r
-def noFMA : Predicate<"!allowFMA()">;\r
-def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;\r
-\r
-def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;\r
-def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;\r
-\r
-def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;\r
-def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;\r
-\r
-def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;\r
-def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;\r
-\r
-def true : Predicate<"true">;\r
-\r
-def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;\r
-\r
-def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;\r
-\r
-//===----------------------------------------------------------------------===//\r
-// Some Common Instruction Class Templates\r
-//===----------------------------------------------------------------------===//\r
-\r
-// Template for instructions which take three int64, int32, or int16 args.\r
-// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").\r
-multiclass I3<string OpcStr, SDNode OpNode> {\r
- def i64rr :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),\r
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),\r
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;\r
- def i64ri :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),\r
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),\r
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;\r
- def i32rr :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;\r
- def i32ri :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),\r
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;\r
- def i16rr :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),\r
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),\r
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;\r
- def i16ri :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),\r
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),\r
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;\r
-}\r
-\r
-// Template for instructions which take 3 int32 args. The instructions are\r
-// named "<OpcStr>.s32" (e.g. "addc.cc.s32").\r
-multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {\r
- def i32rr :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;\r
- def i32ri :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),\r
- !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;\r
-}\r
-\r
-// Template for instructions which take three fp64 or fp32 args. The\r
-// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").\r
-//\r
-// Also defines ftz (flush subnormal inputs and results to sign-preserving\r
-// zero) variants for fp32 functions.\r
-//\r
-// This multiclass should be used for nodes that cannot be folded into FMAs.\r
-// For nodes that can be folded into FMAs (i.e. adds and muls), use\r
-// F3_fma_component.\r
-multiclass F3<string OpcStr, SDNode OpNode> {\r
- def f64rr :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, Float64Regs:$b),\r
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;\r
- def f64ri :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, f64imm:$b),\r
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;\r
- def f32rr_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[doF32FTZ]>;\r
- def f32ri_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[doF32FTZ]>;\r
- def f32rr :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;\r
- def f32ri :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;\r
-}\r
-\r
-// Template for instructions which take three FP args. The\r
-// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").\r
-//\r
-// Also defines ftz (flush subnormal inputs and results to sign-preserving\r
-// zero) variants for fp32/fp16 functions.\r
-//\r
-// This multiclass should be used for nodes that can be folded to make fma ops.\r
-// In this case, we use the ".rn" variant when FMA is disabled, as this behaves\r
-// just like the non ".rn" op, but prevents ptxas from creating FMAs.\r
-multiclass F3_fma_component<string OpcStr, SDNode OpNode> {\r
- def f64rr :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, Float64Regs:$b),\r
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,\r
- Requires<[allowFMA]>;\r
- def f64ri :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, f64imm:$b),\r
- !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,\r
- Requires<[allowFMA]>;\r
- def f32rr_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[allowFMA, doF32FTZ]>;\r
- def f32ri_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[allowFMA, doF32FTZ]>;\r
- def f32rr :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[allowFMA]>;\r
- def f32ri :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[allowFMA]>;\r
-\r
- def f16rr_ftz :\r
- NVPTXInst<(outs Float16Regs:$dst),\r
- (ins Float16Regs:$a, Float16Regs:$b),\r
- !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),\r
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,\r
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;\r
- def f16rr :\r
- NVPTXInst<(outs Float16Regs:$dst),\r
- (ins Float16Regs:$a, Float16Regs:$b),\r
- !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),\r
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,\r
- Requires<[useFP16Math, allowFMA]>;\r
-\r
- def f16x2rr_ftz :\r
- NVPTXInst<(outs Float16x2Regs:$dst),\r
- (ins Float16x2Regs:$a, Float16x2Regs:$b),\r
- !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),\r
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,\r
- Requires<[useFP16Math, allowFMA, doF32FTZ]>;\r
- def f16x2rr :\r
- NVPTXInst<(outs Float16x2Regs:$dst),\r
- (ins Float16x2Regs:$a, Float16x2Regs:$b),\r
- !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),\r
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,\r
- Requires<[useFP16Math, allowFMA]>;\r
-\r
- // These have strange names so we don't perturb existing mir tests.\r
- def _rnf64rr :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, Float64Regs:$b),\r
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,\r
- Requires<[noFMA]>;\r
- def _rnf64ri :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, f64imm:$b),\r
- !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,\r
- Requires<[noFMA]>;\r
- def _rnf32rr_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[noFMA, doF32FTZ]>;\r
- def _rnf32ri_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[noFMA, doF32FTZ]>;\r
- def _rnf32rr :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[noFMA]>;\r
- def _rnf32ri :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[noFMA]>;\r
- def _rnf16rr_ftz :\r
- NVPTXInst<(outs Float16Regs:$dst),\r
- (ins Float16Regs:$a, Float16Regs:$b),\r
- !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),\r
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,\r
- Requires<[useFP16Math, noFMA, doF32FTZ]>;\r
- def _rnf16rr :\r
- NVPTXInst<(outs Float16Regs:$dst),\r
- (ins Float16Regs:$a, Float16Regs:$b),\r
- !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),\r
- [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,\r
- Requires<[useFP16Math, noFMA]>;\r
- def _rnf16x2rr_ftz :\r
- NVPTXInst<(outs Float16x2Regs:$dst),\r
- (ins Float16x2Regs:$a, Float16x2Regs:$b),\r
- !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),\r
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,\r
- Requires<[useFP16Math, noFMA, doF32FTZ]>;\r
- def _rnf16x2rr :\r
- NVPTXInst<(outs Float16x2Regs:$dst),\r
- (ins Float16x2Regs:$a, Float16x2Regs:$b),\r
- !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),\r
- [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,\r
- Requires<[useFP16Math, noFMA]>;\r
-}\r
-\r
-// Template for operations which take two f32 or f64 operands. Provides three\r
-// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush\r
-// subnormal inputs and results to zero).\r
-multiclass F2<string OpcStr, SDNode OpNode> {\r
- def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),\r
- !strconcat(OpcStr, ".f64 \t$dst, $a;"),\r
- [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;\r
- def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),\r
- !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,\r
- Requires<[doF32FTZ]>;\r
- def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),\r
- !strconcat(OpcStr, ".f32 \t$dst, $a;"),\r
- [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;\r
-}\r
-\r
-//===----------------------------------------------------------------------===//\r
-// NVPTX Instructions.\r
-//===----------------------------------------------------------------------===//\r
-\r
-//-----------------------------------\r
-// Type Conversion\r
-//-----------------------------------\r
-\r
-let hasSideEffects = 0 in {\r
- // Generate a cvt to the given type from all possible types. Each instance\r
- // takes a CvtMode immediate that defines the conversion mode to use. It can\r
- // be CvtNONE to omit a conversion mode.\r
- multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {\r
- def _s8 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int16Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".s8 \t$dst, $src;"), []>;\r
- def _u8 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int16Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".u8 \t$dst, $src;"), []>;\r
- def _s16 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int16Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".s16 \t$dst, $src;"), []>;\r
- def _u16 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int16Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".u16 \t$dst, $src;"), []>;\r
- def _s32 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int32Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".s32 \t$dst, $src;"), []>;\r
- def _u32 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int32Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".u32 \t$dst, $src;"), []>;\r
- def _s64 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int64Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".s64 \t$dst, $src;"), []>;\r
- def _u64 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Int64Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".u64 \t$dst, $src;"), []>;\r
- def _f16 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Float16Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".f16 \t$dst, $src;"), []>;\r
- def _f32 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Float32Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".f32 \t$dst, $src;"), []>;\r
- def _f64 :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins Float64Regs:$src, CvtMode:$mode),\r
- !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",\r
- FromName, ".f64 \t$dst, $src;"), []>;\r
- }\r
-\r
- // Generate cvts from all types to all types.\r
- defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;\r
- defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;\r
- defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;\r
- defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;\r
- defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;\r
- defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;\r
- defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;\r
- defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;\r
- defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;\r
- defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;\r
- defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;\r
-\r
- // These cvts are different from those above: The source and dest registers\r
- // are of the same type.\r
- def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),\r
- "cvt.s16.s8 \t$dst, $src;", []>;\r
- def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),\r
- "cvt.s32.s8 \t$dst, $src;", []>;\r
- def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),\r
- "cvt.s32.s16 \t$dst, $src;", []>;\r
- def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),\r
- "cvt.s64.s8 \t$dst, $src;", []>;\r
- def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),\r
- "cvt.s64.s16 \t$dst, $src;", []>;\r
- def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),\r
- "cvt.s64.s32 \t$dst, $src;", []>;\r
-}\r
-\r
-//-----------------------------------\r
-// Integer Arithmetic\r
-//-----------------------------------\r
-\r
-// Template for xor masquerading as int1 arithmetic.\r
-multiclass ADD_SUB_i1<SDNode OpNode> {\r
- def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),\r
- "xor.pred \t$dst, $a, $b;",\r
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;\r
- def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),\r
- "xor.pred \t$dst, $a, $b;",\r
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;\r
-}\r
-\r
-// int1 addition and subtraction are both just xor.\r
-defm ADD_i1 : ADD_SUB_i1<add>;\r
-defm SUB_i1 : ADD_SUB_i1<sub>;\r
-\r
-// int16, int32, and int64 signed addition. Since nvptx is 2's complement, we\r
-// also use these for unsigned arithmetic.\r
-defm ADD : I3<"add.s", add>;\r
-defm SUB : I3<"sub.s", sub>;\r
-\r
-// int32 addition and subtraction with carry-out.\r
-// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).\r
-defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;\r
-defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;\r
-\r
-// int32 addition and subtraction with carry-in and carry-out.\r
-defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;\r
-defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;\r
-\r
-defm MULT : I3<"mul.lo.s", mul>;\r
-\r
-defm MULTHS : I3<"mul.hi.s", mulhs>;\r
-defm MULTHU : I3<"mul.hi.u", mulhu>;\r
-\r
-defm SDIV : I3<"div.s", sdiv>;\r
-defm UDIV : I3<"div.u", udiv>;\r
-\r
-// The ri versions of rem.s and rem.u won't be selected; DAGCombiner::visitSREM\r
-// will lower it.\r
-defm SREM : I3<"rem.s", srem>;\r
-defm UREM : I3<"rem.u", urem>;\r
-\r
-// Integer absolute value. NumBits should be one minus the bit width of RC.\r
-// This idiom implements the algorithm at\r
-// http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.\r
-multiclass ABS<RegisterClass RC, string SizeName> {\r
- def : NVPTXInst<(outs RC:$dst), (ins RC:$a),\r
- !strconcat("abs", SizeName, " \t$dst, $a;"),\r
- [(set RC:$dst, (abs RC:$a))]>;\r
-}\r
-defm ABS_16 : ABS<Int16Regs, ".s16">;\r
-defm ABS_32 : ABS<Int32Regs, ".s32">;\r
-defm ABS_64 : ABS<Int64Regs, ".s64">;\r
-\r
-// Integer min/max.\r
-defm SMAX : I3<"max.s", smax>;\r
-defm UMAX : I3<"max.u", umax>;\r
-defm SMIN : I3<"min.s", smin>;\r
-defm UMIN : I3<"min.u", umin>;\r
-\r
-//\r
-// Wide multiplication\r
-//\r
-def MULWIDES64 :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
- "mul.wide.s32 \t$dst, $a, $b;", []>;\r
-def MULWIDES64Imm :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),\r
- "mul.wide.s32 \t$dst, $a, $b;", []>;\r
-def MULWIDES64Imm64 :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),\r
- "mul.wide.s32 \t$dst, $a, $b;", []>;\r
-\r
-def MULWIDEU64 :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
- "mul.wide.u32 \t$dst, $a, $b;", []>;\r
-def MULWIDEU64Imm :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),\r
- "mul.wide.u32 \t$dst, $a, $b;", []>;\r
-def MULWIDEU64Imm64 :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),\r
- "mul.wide.u32 \t$dst, $a, $b;", []>;\r
-\r
-def MULWIDES32 :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),\r
- "mul.wide.s16 \t$dst, $a, $b;", []>;\r
-def MULWIDES32Imm :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),\r
- "mul.wide.s16 \t$dst, $a, $b;", []>;\r
-def MULWIDES32Imm32 :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),\r
- "mul.wide.s16 \t$dst, $a, $b;", []>;\r
-\r
-def MULWIDEU32 :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),\r
- "mul.wide.u16 \t$dst, $a, $b;", []>;\r
-def MULWIDEU32Imm :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),\r
- "mul.wide.u16 \t$dst, $a, $b;", []>;\r
-def MULWIDEU32Imm32 :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),\r
- "mul.wide.u16 \t$dst, $a, $b;", []>;\r
-\r
-def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;\r
-def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;\r
-def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;\r
-\r
-// Matchers for signed, unsigned mul.wide ISD nodes.\r
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),\r
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),\r
- (MULWIDES32Imm Int16Regs:$a, imm:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),\r
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),\r
- (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,\r
- Requires<[doMulWide]>;\r
-\r
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),\r
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),\r
- (MULWIDES64Imm Int32Regs:$a, imm:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),\r
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),\r
- (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,\r
- Requires<[doMulWide]>;\r
-\r
-// Predicates used for converting some patterns to mul.wide.\r
-def SInt32Const : PatLeaf<(imm), [{\r
- const APInt &v = N->getAPIntValue();\r
- return v.isSignedIntN(32);\r
-}]>;\r
-\r
-def UInt32Const : PatLeaf<(imm), [{\r
- const APInt &v = N->getAPIntValue();\r
- return v.isIntN(32);\r
-}]>;\r
-\r
-def SInt16Const : PatLeaf<(imm), [{\r
- const APInt &v = N->getAPIntValue();\r
- return v.isSignedIntN(16);\r
-}]>;\r
-\r
-def UInt16Const : PatLeaf<(imm), [{\r
- const APInt &v = N->getAPIntValue();\r
- return v.isIntN(16);\r
-}]>;\r
-\r
-def Int5Const : PatLeaf<(imm), [{\r
- // Check if 0 <= v < 32; only then will the result of (x << v) be an int32.\r
- const APInt &v = N->getAPIntValue();\r
- return v.sge(0) && v.slt(32);\r
-}]>;\r
-\r
-def Int4Const : PatLeaf<(imm), [{\r
- // Check if 0 <= v < 16; only then will the result of (x << v) be an int16.\r
- const APInt &v = N->getAPIntValue();\r
- return v.sge(0) && v.slt(16);\r
-}]>;\r
-\r
-def SHL2MUL32 : SDNodeXForm<imm, [{\r
- const APInt &v = N->getAPIntValue();\r
- APInt temp(32, 1);\r
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);\r
-}]>;\r
-\r
-def SHL2MUL16 : SDNodeXForm<imm, [{\r
- const APInt &v = N->getAPIntValue();\r
- APInt temp(16, 1);\r
- return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);\r
-}]>;\r
-\r
-// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.\r
-def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),\r
- (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),\r
- (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,\r
- Requires<[doMulWide]>;\r
-\r
-def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),\r
- (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),\r
- (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,\r
- Requires<[doMulWide]>;\r
-\r
-// Convert "sign/zero-extend then multiply" to mul.wide.\r
-def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),\r
- (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),\r
- (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,\r
- Requires<[doMulWide]>;\r
-\r
-def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),\r
- (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),\r
- (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,\r
- Requires<[doMulWide]>;\r
-\r
-def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),\r
- (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),\r
- (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,\r
- Requires<[doMulWide]>;\r
-\r
-def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),\r
- (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,\r
- Requires<[doMulWide]>;\r
-def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),\r
- (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,\r
- Requires<[doMulWide]>;\r
-\r
-//\r
-// Integer multiply-add\r
-//\r
-def SDTIMAD :\r
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,\r
- SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;\r
-def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;\r
-\r
-def MAD16rrr :\r
- NVPTXInst<(outs Int16Regs:$dst),\r
- (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),\r
- "mad.lo.s16 \t$dst, $a, $b, $c;",\r
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;\r
-def MAD16rri :\r
- NVPTXInst<(outs Int16Regs:$dst),\r
- (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),\r
- "mad.lo.s16 \t$dst, $a, $b, $c;",\r
- [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;\r
-def MAD16rir :\r
- NVPTXInst<(outs Int16Regs:$dst),\r
- (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),\r
- "mad.lo.s16 \t$dst, $a, $b, $c;",\r
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;\r
-def MAD16rii :\r
- NVPTXInst<(outs Int16Regs:$dst),\r
- (ins Int16Regs:$a, i16imm:$b, i16imm:$c),\r
- "mad.lo.s16 \t$dst, $a, $b, $c;",\r
- [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;\r
-\r
-def MAD32rrr :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),\r
- "mad.lo.s32 \t$dst, $a, $b, $c;",\r
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;\r
-def MAD32rri :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),\r
- "mad.lo.s32 \t$dst, $a, $b, $c;",\r
- [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;\r
-def MAD32rir :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),\r
- "mad.lo.s32 \t$dst, $a, $b, $c;",\r
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;\r
-def MAD32rii :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$a, i32imm:$b, i32imm:$c),\r
- "mad.lo.s32 \t$dst, $a, $b, $c;",\r
- [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;\r
-\r
-def MAD64rrr :\r
- NVPTXInst<(outs Int64Regs:$dst),\r
- (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),\r
- "mad.lo.s64 \t$dst, $a, $b, $c;",\r
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;\r
-def MAD64rri :\r
- NVPTXInst<(outs Int64Regs:$dst),\r
- (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),\r
- "mad.lo.s64 \t$dst, $a, $b, $c;",\r
- [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;\r
-def MAD64rir :\r
- NVPTXInst<(outs Int64Regs:$dst),\r
- (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),\r
- "mad.lo.s64 \t$dst, $a, $b, $c;",\r
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;\r
-def MAD64rii :\r
- NVPTXInst<(outs Int64Regs:$dst),\r
- (ins Int64Regs:$a, i64imm:$b, i64imm:$c),\r
- "mad.lo.s64 \t$dst, $a, $b, $c;",\r
- [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;\r
-\r
-def INEG16 :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),\r
- "neg.s16 \t$dst, $src;",\r
- [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;\r
-def INEG32 :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),\r
- "neg.s32 \t$dst, $src;",\r
- [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;\r
-def INEG64 :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),\r
- "neg.s64 \t$dst, $src;",\r
- [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;\r
-\r
-//-----------------------------------\r
-// Floating Point Arithmetic\r
-//-----------------------------------\r
-\r
-// Constant 1.0f\r
-def FloatConst1 : PatLeaf<(fpimm), [{\r
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&\r
- N->getValueAPF().convertToFloat() == 1.0f;\r
-}]>;\r
-// Constant 1.0 (double)\r
-def DoubleConst1 : PatLeaf<(fpimm), [{\r
- return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&\r
- N->getValueAPF().convertToDouble() == 1.0;\r
-}]>;\r
-\r
-// Loads FP16 constant into a register.\r
-//\r
-// ptxas does not have hex representation for fp16, so we can't use\r
-// fp16 immediate values in .f16 instructions. Instead we have to load\r
-// the constant into a register using mov.b16.\r
-def LOAD_CONST_F16 :\r
- NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),\r
- "mov.b16 \t$dst, $a;", []>;\r
-\r
-defm FADD : F3_fma_component<"add", fadd>;\r
-defm FSUB : F3_fma_component<"sub", fsub>;\r
-defm FMUL : F3_fma_component<"mul", fmul>;\r
-\r
-defm FMIN : F3<"min", fminnum>;\r
-defm FMAX : F3<"max", fmaxnum>;\r
-\r
-defm FABS : F2<"abs", fabs>;\r
-defm FNEG : F2<"neg", fneg>;\r
-defm FSQRT : F2<"sqrt.rn", fsqrt>;\r
-\r
-//\r
-// F64 division\r
-//\r
-def FDIV641r :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins f64imm:$a, Float64Regs:$b),\r
- "rcp.rn.f64 \t$dst, $b;",\r
- [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;\r
-def FDIV64rr :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, Float64Regs:$b),\r
- "div.rn.f64 \t$dst, $a, $b;",\r
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;\r
-def FDIV64ri :\r
- NVPTXInst<(outs Float64Regs:$dst),\r
- (ins Float64Regs:$a, f64imm:$b),\r
- "div.rn.f64 \t$dst, $a, $b;",\r
- [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;\r
-\r
-//\r
-// F32 Approximate reciprocal\r
-//\r
-def FDIV321r_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins f32imm:$a, Float32Regs:$b),\r
- "rcp.approx.ftz.f32 \t$dst, $b;",\r
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;\r
-def FDIV321r :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins f32imm:$a, Float32Regs:$b),\r
- "rcp.approx.f32 \t$dst, $b;",\r
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_APPROX]>;\r
-//\r
-// F32 Approximate division\r
-//\r
-def FDIV32approxrr_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- "div.approx.ftz.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;\r
-def FDIV32approxri_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- "div.approx.ftz.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[do_DIVF32_APPROX, doF32FTZ]>;\r
-def FDIV32approxrr :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- "div.approx.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_APPROX]>;\r
-def FDIV32approxri :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- "div.approx.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[do_DIVF32_APPROX]>;\r
-//\r
-// F32 Semi-accurate reciprocal\r
-//\r
-// rcp.approx gives the same result as div.full(1.0f, a) and is faster.\r
-//\r
-def FDIV321r_approx_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins f32imm:$a, Float32Regs:$b),\r
- "rcp.approx.ftz.f32 \t$dst, $b;",\r
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_FULL, doF32FTZ]>;\r
-def FDIV321r_approx :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins f32imm:$a, Float32Regs:$b),\r
- "rcp.approx.f32 \t$dst, $b;",\r
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_FULL]>;\r
-//\r
-// F32 Semi-accurate division\r
-//\r
-def FDIV32rr_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- "div.full.ftz.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_FULL, doF32FTZ]>;\r
-def FDIV32ri_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- "div.full.ftz.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[do_DIVF32_FULL, doF32FTZ]>;\r
-def FDIV32rr :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- "div.full.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[do_DIVF32_FULL]>;\r
-def FDIV32ri :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- "div.full.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[do_DIVF32_FULL]>;\r
-//\r
-// F32 Accurate reciprocal\r
-//\r
-def FDIV321r_prec_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins f32imm:$a, Float32Regs:$b),\r
- "rcp.rn.ftz.f32 \t$dst, $b;",\r
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,\r
- Requires<[reqPTX20, doF32FTZ]>;\r
-def FDIV321r_prec :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins f32imm:$a, Float32Regs:$b),\r
- "rcp.rn.f32 \t$dst, $b;",\r
- [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,\r
- Requires<[reqPTX20]>;\r
-//\r
-// F32 Accurate division\r
-//\r
-def FDIV32rr_prec_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- "div.rn.ftz.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[doF32FTZ, reqPTX20]>;\r
-def FDIV32ri_prec_ftz :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- "div.rn.ftz.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[doF32FTZ, reqPTX20]>;\r
-def FDIV32rr_prec :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, Float32Regs:$b),\r
- "div.rn.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,\r
- Requires<[reqPTX20]>;\r
-def FDIV32ri_prec :\r
- NVPTXInst<(outs Float32Regs:$dst),\r
- (ins Float32Regs:$a, f32imm:$b),\r
- "div.rn.f32 \t$dst, $a, $b;",\r
- [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,\r
- Requires<[reqPTX20]>;\r
-\r
-//\r
-// FMA\r
-//\r
-\r
-multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {\r
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),\r
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),\r
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,\r
- Requires<[Pred]>;\r
- def rri : NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, RC:$b, ImmCls:$c),\r
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),\r
- [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,\r
- Requires<[Pred]>;\r
- def rir : NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, ImmCls:$b, RC:$c),\r
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),\r
- [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,\r
- Requires<[Pred]>;\r
- def rii : NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, ImmCls:$b, ImmCls:$c),\r
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),\r
- [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,\r
- Requires<[Pred]>;\r
-}\r
-\r
-multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {\r
- def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),\r
- !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),\r
- [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,\r
- Requires<[useFP16Math, Pred]>;\r
-}\r
-\r
-defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;\r
-defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;\r
-defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;\r
-defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;\r
-defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;\r
-defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;\r
-defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;\r
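-\r
-// Illustrative expansion (a sketch derived from the FMA template above, shown\r
-// only as a comment so nothing is redefined): the rrr member of FMA32 is\r
-// equivalent to\r
-//   def FMA32rrr :\r
-//     NVPTXInst<(outs Float32Regs:$dst),\r
-//               (ins Float32Regs:$a, Float32Regs:$b, Float32Regs:$c),\r
-//               "fma.rn.f32 \t$dst, $a, $b, $c;",\r
-//               [(set Float32Regs:$dst,\r
-//                 (fma Float32Regs:$a, Float32Regs:$b, Float32Regs:$c))]>,\r
-//     Requires<[true]>;\r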
-\r
-// sin/cos\r
-def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),\r
- "sin.approx.f32 \t$dst, $src;",\r
- [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,\r
- Requires<[allowUnsafeFPMath]>;\r
-def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),\r
- "cos.approx.f32 \t$dst, $src;",\r
- [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,\r
- Requires<[allowUnsafeFPMath]>;\r
-\r
-// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),\r
-// i.e. "poor man's fmod()"\r
-\r
-// frem - f32 FTZ\r
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),\r
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32\r
- (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),\r
- Float32Regs:$y))>,\r
- Requires<[doF32FTZ]>;\r
-def : Pat<(frem Float32Regs:$x, fpimm:$y),\r
- (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32\r
- (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),\r
- fpimm:$y))>,\r
- Requires<[doF32FTZ]>;\r
-\r
-// frem - f32\r
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),\r
- (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32\r
- (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),\r
- Float32Regs:$y))>;\r
-def : Pat<(frem Float32Regs:$x, fpimm:$y),\r
- (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32\r
- (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),\r
- fpimm:$y))>;\r
-\r
-// frem - f64\r
-def : Pat<(frem Float64Regs:$x, Float64Regs:$y),\r
- (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64\r
- (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),\r
- Float64Regs:$y))>;\r
-def : Pat<(frem Float64Regs:$x, fpimm:$y),\r
- (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64\r
- (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),\r
- fpimm:$y))>;\r
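-\r
-// Worked example (illustrative): for "%r = frem float 5.5, 2.0" the non-FTZ f32\r
-// pattern above selects, in effect,\r
-//   FSUBf32rr %x, (FMULf32rr (CVT_f32_f32 (FDIV32rr_prec %x, %y), CvtRMI), %y)\r
-// which computes 5.5 / 2.0 = 2.75, rounds toward -inf (CvtRMI) to 2.0,\r
-// multiplies back to 2.0 * 2.0 = 4.0, and subtracts to give 5.5 - 4.0 = 1.5.\r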
-\r
-//-----------------------------------\r
-// Bitwise operations\r
-//-----------------------------------\r
-\r
-// Template for three-arg bitwise operations. Takes three args and creates\r
-// .b16, .b32, .b64, and .pred (predicate registers -- i.e., i1) versions of\r
-// OpcStr.\r
-multiclass BITWISE<string OpcStr, SDNode OpNode> {\r
- def b1rr :\r
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),\r
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),\r
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;\r
- def b1ri :\r
- NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),\r
- !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),\r
- [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;\r
- def b16rr :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),\r
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),\r
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;\r
- def b16ri :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),\r
- !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),\r
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;\r
- def b32rr :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;\r
- def b32ri :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),\r
- !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;\r
- def b64rr :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),\r
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),\r
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;\r
- def b64ri :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),\r
- !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),\r
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;\r
-}\r
-\r
-defm OR : BITWISE<"or", or>;\r
-defm AND : BITWISE<"and", and>;\r
-defm XOR : BITWISE<"xor", xor>;\r
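-\r
-// Illustrative expansion (sketch, shown as a comment): with OpcStr = "or" the\r
-// b32rr member of BITWISE above becomes\r
-//   def ORb32rr :\r
-//     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
-//               "or.b32 \t$dst, $a, $b;",\r
-//               [(set Int32Regs:$dst, (or Int32Regs:$a, Int32Regs:$b))]>;\r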
-\r
-def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),\r
- "not.pred \t$dst, $src;",\r
- [(set Int1Regs:$dst, (not Int1Regs:$src))]>;\r
-def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),\r
- "not.b16 \t$dst, $src;",\r
- [(set Int16Regs:$dst, (not Int16Regs:$src))]>;\r
-def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),\r
- "not.b32 \t$dst, $src;",\r
- [(set Int32Regs:$dst, (not Int32Regs:$src))]>;\r
-def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),\r
- "not.b64 \t$dst, $src;",\r
- [(set Int64Regs:$dst, (not Int64Regs:$src))]>;\r
-\r
-// Template for left/right shifts. Takes three operands,\r
-// [dest (reg), src (reg), shift (reg or imm)].\r
-// dest and src may be int64, int32, or int16, but shift is always int32.\r
-//\r
-// This template also defines a 32-bit shift (imm, imm) instruction.\r
-multiclass SHIFT<string OpcStr, SDNode OpNode> {\r
- def i64rr :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),\r
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),\r
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;\r
- def i64ri :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),\r
- !strconcat(OpcStr, "64 \t$dst, $a, $b;"),\r
- [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;\r
- def i32rr :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),\r
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;\r
- def i32ri :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),\r
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;\r
- def i32ii :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),\r
- !strconcat(OpcStr, "32 \t$dst, $a, $b;"),\r
- [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;\r
- def i16rr :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),\r
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),\r
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;\r
- def i16ri :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),\r
- !strconcat(OpcStr, "16 \t$dst, $a, $b;"),\r
- [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;\r
-}\r
-\r
-defm SHL : SHIFT<"shl.b", shl>;\r
-defm SRA : SHIFT<"shr.s", sra>;\r
-defm SRL : SHIFT<"shr.u", srl>;\r
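-\r
-// Illustrative expansion (sketch): note that the shift amount is always an i32,\r
-// so the i16rr member of SHL above is equivalent to\r
-//   def SHLi16rr :\r
-//     NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),\r
-//               "shl.b16 \t$dst, $a, $b;",\r
-//               [(set Int16Regs:$dst, (shl Int16Regs:$a, Int32Regs:$b))]>;\r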
-\r
-// Bit-reverse\r
-def BREV32 :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),\r
- "brev.b32 \t$dst, $a;",\r
- [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;\r
-def BREV64 :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),\r
- "brev.b64 \t$dst, $a;",\r
- [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;\r
-\r
-//\r
-// Rotate: Use ptx shf instruction if available.\r
-//\r
-\r
-// 32-bit r2 = rotl r1, n\r
-// =>\r
-// r2 = shf.l r1, r1, n\r
-def ROTL32imm_hw :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),\r
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",\r
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,\r
- Requires<[hasHWROT32]>;\r
-\r
-def ROTL32reg_hw :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),\r
- "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",\r
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,\r
- Requires<[hasHWROT32]>;\r
-\r
-// 32-bit r2 = rotr r1, n\r
-// =>\r
-// r2 = shf.r r1, r1, n\r
-def ROTR32imm_hw :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),\r
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",\r
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,\r
- Requires<[hasHWROT32]>;\r
-\r
-def ROTR32reg_hw :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),\r
- "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",\r
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,\r
- Requires<[hasHWROT32]>;\r
-\r
-// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.\r
-def ROT32imm_sw :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),\r
- "{{\n\t"\r
- ".reg .b32 %lhs;\n\t"\r
- ".reg .b32 %rhs;\n\t"\r
- "shl.b32 \t%lhs, $src, $amt1;\n\t"\r
- "shr.b32 \t%rhs, $src, $amt2;\n\t"\r
- "add.u32 \t$dst, %lhs, %rhs;\n\t"\r
- "}}",\r
- []>;\r
-\r
-def SUB_FRM_32 : SDNodeXForm<imm, [{\r
- return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);\r
-}]>;\r
-\r
-def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),\r
- (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,\r
- Requires<[noHWROT32]>;\r
-def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),\r
- (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,\r
- Requires<[noHWROT32]>;\r
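-\r
-// Worked example (sketch): without hasHWROT32, "rotl i32 %x, 5" matches the\r
-// first pattern above and becomes (ROT32imm_sw %x, 5, 27), where SUB_FRM_32\r
-// supplies 27 = 32 - 5, i.e. roughly\r
-//   shl.b32 %lhs, %x, 5;  shr.b32 %rhs, %x, 27;  add.u32 %dst, %lhs, %rhs;\r
-// The add.u32 acts as an OR here because the two shifted halves occupy\r
-// disjoint bit positions.\r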
-\r
-// 32-bit software rotate left by register.\r
-def ROTL32reg_sw :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),\r
- "{{\n\t"\r
- ".reg .b32 %lhs;\n\t"\r
- ".reg .b32 %rhs;\n\t"\r
- ".reg .b32 %amt2;\n\t"\r
- "shl.b32 \t%lhs, $src, $amt;\n\t"\r
- "sub.s32 \t%amt2, 32, $amt;\n\t"\r
- "shr.b32 \t%rhs, $src, %amt2;\n\t"\r
- "add.u32 \t$dst, %lhs, %rhs;\n\t"\r
- "}}",\r
- [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,\r
- Requires<[noHWROT32]>;\r
-\r
-// 32-bit software rotate right by register.\r
-def ROTR32reg_sw :\r
- NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),\r
- "{{\n\t"\r
- ".reg .b32 %lhs;\n\t"\r
- ".reg .b32 %rhs;\n\t"\r
- ".reg .b32 %amt2;\n\t"\r
- "shr.b32 \t%lhs, $src, $amt;\n\t"\r
- "sub.s32 \t%amt2, 32, $amt;\n\t"\r
- "shl.b32 \t%rhs, $src, %amt2;\n\t"\r
- "add.u32 \t$dst, %lhs, %rhs;\n\t"\r
- "}}",\r
- [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,\r
- Requires<[noHWROT32]>;\r
-\r
-// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.\r
-def ROT64imm_sw :\r
- NVPTXInst<(outs Int64Regs:$dst),\r
- (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),\r
- "{{\n\t"\r
- ".reg .b64 %lhs;\n\t"\r
- ".reg .b64 %rhs;\n\t"\r
- "shl.b64 \t%lhs, $src, $amt1;\n\t"\r
- "shr.b64 \t%rhs, $src, $amt2;\n\t"\r
- "add.u64 \t$dst, %lhs, %rhs;\n\t"\r
- "}}",\r
- []>;\r
-\r
-def SUB_FRM_64 : SDNodeXForm<imm, [{\r
- return CurDAG->getTargetConstant(64 - N->getZExtValue(), SDLoc(N), MVT::i32);\r
-}]>;\r
-\r
-def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),\r
- (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;\r
-def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),\r
- (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;\r
-\r
-// 64-bit software rotate left by register.\r
-def ROTL64reg_sw :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),\r
- "{{\n\t"\r
- ".reg .b64 %lhs;\n\t"\r
- ".reg .b64 %rhs;\n\t"\r
- ".reg .u32 %amt2;\n\t"\r
- "shl.b64 \t%lhs, $src, $amt;\n\t"\r
- "sub.u32 \t%amt2, 64, $amt;\n\t"\r
- "shr.b64 \t%rhs, $src, %amt2;\n\t"\r
- "add.u64 \t$dst, %lhs, %rhs;\n\t"\r
- "}}",\r
- [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;\r
-\r
-def ROTR64reg_sw :\r
- NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),\r
- "{{\n\t"\r
- ".reg .b64 %lhs;\n\t"\r
- ".reg .b64 %rhs;\n\t"\r
- ".reg .u32 %amt2;\n\t"\r
- "shr.b64 \t%lhs, $src, $amt;\n\t"\r
- "sub.u32 \t%amt2, 64, $amt;\n\t"\r
- "shl.b64 \t%rhs, $src, %amt2;\n\t"\r
- "add.u64 \t$dst, %lhs, %rhs;\n\t"\r
- "}}",\r
- [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;\r
-\r
-//\r
-// Funnel shift in clamp mode\r
-//\r
-\r
-// Create SDNodes so they can be used in the DAG code, e.g. in\r
-// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts).\r
-def SDTIntShiftDOp :\r
- SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,\r
- SDTCisInt<0>, SDTCisInt<3>]>;\r
-def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;\r
-def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;\r
-\r
-def FUNSHFLCLAMP :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),\r
- "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",\r
- [(set Int32Regs:$dst,\r
- (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;\r
-\r
-def FUNSHFRCLAMP :\r
- NVPTXInst<(outs Int32Regs:$dst),\r
- (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),\r
- "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",\r
- [(set Int32Regs:$dst,\r
- (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;\r
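-\r
-// Semantics sketch (paraphrasing the PTX ISA description of shf):\r
-//   shf.l.clamp.b32 d, lo, hi, amt;  // d = upper 32 bits of ({hi,lo} << min(amt, 32))\r
-//   shf.r.clamp.b32 d, lo, hi, amt;  // d = lower 32 bits of ({hi,lo} >> min(amt, 32))\r
-// which is the building block LowerShiftLeftParts/LowerShiftRightParts use when\r
-// splitting a wide shift across a pair of 32-bit registers.\r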
-\r
-//\r
-// BFE - bit-field extract\r
-//\r
-\r
-// Template for BFE instructions. Takes four args,\r
-// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].\r
-// Start may be an imm only if end is also an imm. FIXME: Is this a\r
-// restriction in PTX?\r
-//\r
-// dest and src may be int32 or int64, but start and end are always int32.\r
-multiclass BFE<string TyStr, RegisterClass RC> {\r
- def rrr\r
- : NVPTXInst<(outs RC:$d),\r
- (ins RC:$a, Int32Regs:$b, Int32Regs:$c),\r
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;\r
- def rri\r
- : NVPTXInst<(outs RC:$d),\r
- (ins RC:$a, Int32Regs:$b, i32imm:$c),\r
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;\r
- def rii\r
- : NVPTXInst<(outs RC:$d),\r
- (ins RC:$a, i32imm:$b, i32imm:$c),\r
- !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;\r
-}\r
-\r
-let hasSideEffects = 0 in {\r
- defm BFE_S32 : BFE<"s32", Int32Regs>;\r
- defm BFE_U32 : BFE<"u32", Int32Regs>;\r
- defm BFE_S64 : BFE<"s64", Int64Regs>;\r
- defm BFE_U64 : BFE<"u64", Int64Regs>;\r
-}\r
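-\r
-// Usage sketch (paraphrasing the PTX ISA): "bfe.u32 d, a, b, c;" extracts a\r
-// c-bit field from a starting at bit position b and zero-extends it into d;\r
-// the .s32/.s64 forms sign-extend instead. For example, BFE_U32rii with\r
-// immediates b = 8 and c = 4 prints as\r
-//   bfe.u32 \t$d, $a, 8, 4;\r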
-\r
-//-----------------------------------\r
-// Comparison instructions (setp, set)\r
-//-----------------------------------\r
-\r
-// FIXME: This doesn't cover versions of set and setp that combine with a\r
-// boolean predicate, e.g. setp.eq.and.b16.\r
-\r
-let hasSideEffects = 0 in {\r
- multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {\r
- def rr :\r
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),\r
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,\r
- " \t$dst, $a, $b;"), []>;\r
- def ri :\r
- NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),\r
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,\r
- " \t$dst, $a, $b;"), []>;\r
- def ir :\r
- NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),\r
- !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,\r
- " \t$dst, $a, $b;"), []>;\r
- }\r
-}\r
-\r
-defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;\r
-defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;\r
-defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;\r
-defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;\r
-defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;\r
-defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;\r
-defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;\r
-defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;\r
-defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;\r
-defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;\r
-defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;\r
-def SETP_f16rr :\r
- NVPTXInst<(outs Int1Regs:$dst),\r
- (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),\r
- "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",\r
- []>, Requires<[useFP16Math]>;\r
-\r
-def SETP_f16x2rr :\r
- NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),\r
- (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),\r
- "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",\r
- []>,\r
- Requires<[useFP16Math]>;\r
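-\r
-// Rendering sketch (assuming the CmpMode print methods emit a leading dot,\r
-// e.g. ".lt" for the base comparison and ".ftz" when flush-to-zero is set):\r
-// SETP_f32rr with a CmpLT_FTZ mode operand would print as\r
-//   setp.lt.ftz.f32 \t$dst, $a, $b;\r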
-\r
-\r
-// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form\r
-// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination\r
-// reg, either u32, s32, or f32. Anyway these aren't used at the moment.\r
-\r
-let hasSideEffects = 0 in {\r
- multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {\r
- def rr : NVPTXInst<(outs Int32Regs:$dst),\r
- (ins RC:$a, RC:$b, CmpMode:$cmp),\r
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;\r
- def ri : NVPTXInst<(outs Int32Regs:$dst),\r
- (ins RC:$a, ImmCls:$b, CmpMode:$cmp),\r
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;\r
- def ir : NVPTXInst<(outs Int32Regs:$dst),\r
- (ins ImmCls:$a, RC:$b, CmpMode:$cmp),\r
- !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;\r
- }\r
-}\r
-\r
-defm SET_b16 : SET<"b16", Int16Regs, i16imm>;\r
-defm SET_s16 : SET<"s16", Int16Regs, i16imm>;\r
-defm SET_u16 : SET<"u16", Int16Regs, i16imm>;\r
-defm SET_b32 : SET<"b32", Int32Regs, i32imm>;\r
-defm SET_s32 : SET<"s32", Int32Regs, i32imm>;\r
-defm SET_u32 : SET<"u32", Int32Regs, i32imm>;\r
-defm SET_b64 : SET<"b64", Int64Regs, i64imm>;\r
-defm SET_s64 : SET<"s64", Int64Regs, i64imm>;\r
-defm SET_u64 : SET<"u64", Int64Regs, i64imm>;\r
-defm SET_f16 : SET<"f16", Float16Regs, f16imm>;\r
-defm SET_f32 : SET<"f32", Float32Regs, f32imm>;\r
-defm SET_f64 : SET<"f64", Float64Regs, f64imm>;\r
-\r
-//-----------------------------------\r
-// Selection instructions (selp)\r
-//-----------------------------------\r
-\r
-// FIXME: Missing slct\r
-\r
-// selp instructions that don't have any pattern matches; we explicitly use\r
-// them within this file.\r
-let hasSideEffects = 0 in {\r
- multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {\r
- def rr : NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, RC:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;\r
- def ri : NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;\r
- def ir : NVPTXInst<(outs RC:$dst),\r
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;\r
- def ii : NVPTXInst<(outs RC:$dst),\r
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;\r
- }\r
-\r
- multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,\r
- SDNode ImmNode> {\r
- def rr :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, RC:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),\r
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;\r
- def ri :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins RC:$a, ImmCls:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),\r
- [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;\r
- def ir :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins ImmCls:$a, RC:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),\r
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;\r
- def ii :\r
- NVPTXInst<(outs RC:$dst),\r
- (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),\r
- !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),\r
- [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;\r
- }\r
-}\r
-\r
-// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as\r
-// good.\r
-defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;\r
-defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;\r
-defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;\r
-defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;\r
-defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;\r
-defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;\r
-defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;\r
-defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;\r
-defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;\r
-defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;\r
-defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;\r
-defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;\r
-\r
-def SELP_f16x2rr :\r
- NVPTXInst<(outs Float16x2Regs:$dst),\r
- (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),\r
- "selp.b32 \t$dst, $a, $b, $p;",\r
- [(set Float16x2Regs:$dst,\r
- (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;\r
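-\r
-// Selection sketch: "select i1 %p, i32 %a, i32 %b" matches the rr member of\r
-// the b32 SELP_PATTERN above and is emitted as\r
-//   selp.b32 \t$dst, $a, $b, $p;\r
-// The f16x2 variant reuses selp.b32 because the packed pair is 32 bits wide.\r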
-\r
-//-----------------------------------\r
-// Data Movement (Load / Store, Move)\r
-//-----------------------------------\r
-\r
-def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],\r
- [SDNPWantRoot]>;\r
-def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],\r
- [SDNPWantRoot]>;\r
-\r
-def MEMri : Operand<i32> {\r
- let PrintMethod = "printMemOperand";\r
- let MIOperandInfo = (ops Int32Regs, i32imm);\r
-}\r
-def MEMri64 : Operand<i64> {\r
- let PrintMethod = "printMemOperand";\r
- let MIOperandInfo = (ops Int64Regs, i64imm);\r
-}\r
-\r
-def imem : Operand<iPTR> {\r
- let PrintMethod = "printOperand";\r
-}\r
-\r
-def imemAny : Operand<iPTRAny> {\r
- let PrintMethod = "printOperand";\r
-}\r
-\r
-def LdStCode : Operand<i32> {\r
- let PrintMethod = "printLdStCode";\r
-}\r
-\r
-def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;\r
-def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;\r
-\r
-// Load a memory address into a u32 or u64 register.\r
-def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),\r
- "mov.u32 \t$dst, $a;",\r
- [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;\r
-def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),\r
- "mov.u64 \t$dst, $a;",\r
- [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;\r
-\r
-// Get pointer to local stack.\r
-let hasSideEffects = 0 in {\r
- def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),\r
- "mov.u32 \t$d, __local_depot$num;", []>;\r
- def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),\r
- "mov.u64 \t$d, __local_depot$num;", []>;\r
-}\r
-\r
-\r
-// copyPhysReg is hard-coded in NVPTXInstrInfo.cpp\r
-let IsSimpleMove=1, hasSideEffects=0 in {\r
- def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),\r
- "mov.pred \t$dst, $sss;", []>;\r
- def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),\r
- "mov.u16 \t$dst, $sss;", []>;\r
- def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),\r
- "mov.u32 \t$dst, $sss;", []>;\r
- def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),\r
- "mov.u64 \t$dst, $sss;", []>;\r
-\r
- def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),\r
- // We have to use .b16 here as there's no mov.f16.\r
- "mov.b16 \t$dst, $src;", []>;\r
- def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),\r
- "mov.f32 \t$dst, $src;", []>;\r
- def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),\r
- "mov.f64 \t$dst, $src;", []>;\r
-}\r
-\r
-def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),\r
- "mov.pred \t$dst, $src;",\r
- [(set Int1Regs:$dst, imm:$src)]>;\r
-def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),\r
- "mov.u16 \t$dst, $src;",\r
- [(set Int16Regs:$dst, imm:$src)]>;\r
-def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),\r
- "mov.u32 \t$dst, $src;",\r
- [(set Int32Regs:$dst, imm:$src)]>;\r
-def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),\r
- "mov.u64 \t$dst, $src;",\r
- [(set Int64Regs:$dst, imm:$src)]>;\r
-\r
-def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),\r
- "mov.f32 \t$dst, $src;",\r
- [(set Float32Regs:$dst, fpimm:$src)]>;\r
-def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),\r
- "mov.f64 \t$dst, $src;",\r
- [(set Float64Regs:$dst, fpimm:$src)]>;\r
-\r
-def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;\r
-\r
-//---- Copy Frame Index ----\r
-def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),\r
- "add.u32 \t$dst, ${addr:add};",\r
- [(set Int32Regs:$dst, ADDRri:$addr)]>;\r
-def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),\r
- "add.u64 \t$dst, ${addr:add};",\r
- [(set Int64Regs:$dst, ADDRri64:$addr)]>;\r
-\r
-//-----------------------------------\r
-// Comparison and Selection\r
-//-----------------------------------\r
-\r
-multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,\r
- Instruction setp_16rr,\r
- Instruction setp_16ri,\r
- Instruction setp_16ir,\r
- Instruction setp_32rr,\r
- Instruction setp_32ri,\r
- Instruction setp_32ir,\r
- Instruction setp_64rr,\r
- Instruction setp_64ri,\r
- Instruction setp_64ir,\r
- Instruction set_16rr,\r
- Instruction set_16ri,\r
- Instruction set_16ir,\r
- Instruction set_32rr,\r
- Instruction set_32ri,\r
- Instruction set_32ir,\r
- Instruction set_64rr,\r
- Instruction set_64ri,\r
- Instruction set_64ir> {\r
- // i16 -> pred\r
- def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),\r
- (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;\r
- def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),\r
- (setp_16ri Int16Regs:$a, imm:$b, Mode)>;\r
- def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),\r
- (setp_16ir imm:$a, Int16Regs:$b, Mode)>;\r
- // i32 -> pred\r
- def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),\r
- (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;\r
- def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),\r
- (setp_32ri Int32Regs:$a, imm:$b, Mode)>;\r
- def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),\r
- (setp_32ir imm:$a, Int32Regs:$b, Mode)>;\r
- // i64 -> pred\r
- def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),\r
- (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;\r
- def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),\r
- (setp_64ri Int64Regs:$a, imm:$b, Mode)>;\r
- def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),\r
- (setp_64ir imm:$a, Int64Regs:$b, Mode)>;\r
-\r
- // i16 -> i32\r
- def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),\r
- (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;\r
- def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),\r
- (set_16ri Int16Regs:$a, imm:$b, Mode)>;\r
- def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),\r
- (set_16ir imm:$a, Int16Regs:$b, Mode)>;\r
- // i32 -> i32\r
- def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),\r
- (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;\r
- def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),\r
- (set_32ri Int32Regs:$a, imm:$b, Mode)>;\r
- def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),\r
- (set_32ir imm:$a, Int32Regs:$b, Mode)>;\r
- // i64 -> i32\r
- def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),\r
- (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;\r
- def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),\r
- (set_64ri Int64Regs:$a, imm:$b, Mode)>;\r
- def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),\r
- (set_64ir imm:$a, Int64Regs:$b, Mode)>;\r
-}\r
-\r
-multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>\r
- : ISET_FORMAT<OpNode, Mode,\r
- SETP_s16rr, SETP_s16ri, SETP_s16ir,\r
- SETP_s32rr, SETP_s32ri, SETP_s32ir,\r
- SETP_s64rr, SETP_s64ri, SETP_s64ir,\r
- SET_s16rr, SET_s16ri, SET_s16ir,\r
- SET_s32rr, SET_s32ri, SET_s32ir,\r
- SET_s64rr, SET_s64ri, SET_s64ir> {\r
- // TableGen doesn't like empty multiclasses.\r
- def : PatLeaf<(i32 0)>;\r
-}\r
-\r
-multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>\r
- : ISET_FORMAT<OpNode, Mode,\r
- SETP_u16rr, SETP_u16ri, SETP_u16ir,\r
- SETP_u32rr, SETP_u32ri, SETP_u32ir,\r
- SETP_u64rr, SETP_u64ri, SETP_u64ir,\r
- SET_u16rr, SET_u16ri, SET_u16ir,\r
- SET_u32rr, SET_u32ri, SET_u32ir,\r
- SET_u64rr, SET_u64ri, SET_u64ir> {\r
- // TableGen doesn't like empty multiclasses.\r
- def : PatLeaf<(i32 0)>;\r
-}\r
-\r
-defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;\r
-defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;\r
-defm : ISET_FORMAT_SIGNED<setge, CmpGE>;\r
-defm : ISET_FORMAT_SIGNED<setle, CmpLE>;\r
-defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;\r
-defm : ISET_FORMAT_SIGNED<setne, CmpNE>;\r
-defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;\r
-defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;\r
-defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;\r
-defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;\r
-defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;\r
-defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;\r
-\r
-// i1 compares\r
-def : Pat<(setne Int1Regs:$a, Int1Regs:$b),\r
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;\r
-def : Pat<(setune Int1Regs:$a, Int1Regs:$b),\r
- (XORb1rr Int1Regs:$a, Int1Regs:$b)>;\r
-\r
-def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),\r
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;\r
-def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),\r
- (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;\r
-\r
-// i1 compare -> i32\r
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),\r
- (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;\r
-def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),\r
- (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;\r
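-\r
-// Sketch of the expansion: %a ^ %b is true exactly when the two predicates\r
-// differ, so the i32 setne above becomes "xor.pred" followed by\r
-// "selp.u32 $dst, -1, 0, $p" (all-ones for true), and the seteq form selects\r
-// the two immediates the other way around.\r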
-\r
-\r
-\r
-multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {\r
- // f16 -> pred\r
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),\r
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,\r
- Requires<[useFP16Math,doF32FTZ]>;\r
- def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),\r
- (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,\r
- Requires<[useFP16Math]>;\r
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),\r
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,\r
- Requires<[useFP16Math,doF32FTZ]>;\r
- def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),\r
- (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,\r
- Requires<[useFP16Math]>;\r
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),\r
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,\r
- Requires<[useFP16Math,doF32FTZ]>;\r
- def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),\r
- (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,\r
- Requires<[useFP16Math]>;\r
-\r
- // f32 -> pred\r
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),\r
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,\r
- Requires<[doF32FTZ]>;\r
- def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),\r
- (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;\r
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),\r
- (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,\r
- Requires<[doF32FTZ]>;\r
- def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),\r
- (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;\r
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),\r
- (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,\r
- Requires<[doF32FTZ]>;\r
- def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),\r
- (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;\r
-\r
- // f64 -> pred\r
- def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),\r
- (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;\r
- def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),\r
- (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;\r
- def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),\r
- (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;\r
-\r
- // f16 -> i32\r
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),\r
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,\r
- Requires<[useFP16Math, doF32FTZ]>;\r
- def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),\r
- (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,\r
- Requires<[useFP16Math]>;\r
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),\r
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,\r
- Requires<[useFP16Math, doF32FTZ]>;\r
- def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),\r
- (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,\r
- Requires<[useFP16Math]>;\r
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),\r
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,\r
- Requires<[useFP16Math, doF32FTZ]>;\r
- def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),\r
- (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,\r
- Requires<[useFP16Math]>;\r
-\r
- // f32 -> i32\r
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),\r
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,\r
- Requires<[doF32FTZ]>;\r
- def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),\r
- (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;\r
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),\r
- (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,\r
- Requires<[doF32FTZ]>;\r
- def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),\r
- (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;\r
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),\r
- (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,\r
- Requires<[doF32FTZ]>;\r
- def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),\r
- (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;\r
-\r
- // f64 -> i32\r
- def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),\r
- (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;\r
- def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),\r
- (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;\r
- def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),\r
- (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;\r
-}\r
-\r
-defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;\r
-defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;\r
-defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;\r
-defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;\r
-defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;\r
-defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;\r
-\r
-defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;\r
-defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;\r
-defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;\r
-defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;\r
-defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;\r
-defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;\r
-\r
-defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;\r
-defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;\r
-defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;\r
-defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;\r
-defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;\r
-defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;\r
-\r
-defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;\r
-defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;\r
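-\r
-// Mapping sketch: the ordered comparisons (setogt, ...) use the plain\r
-// CmpGT/... modes, while the unordered ones (setugt, ...) use the *U modes,\r
-// which per the PTX gtu/ltu/... operators also return true when either input\r
-// is NaN; seto/setuo map to the num/nan tests. For example, (setugt f32 %a, %b)\r
-// selects SETP_f32rr with CmpGTU and prints roughly as "setp.gtu.f32".\r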
-\r
-// FIXME: What is this doing here? Can it be deleted?\r
-// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,\r
-// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;\r
-\r
-def SDTDeclareParamProfile :\r
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;\r
-def SDTDeclareScalarParamProfile :\r
- SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;\r
-def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;\r
-def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;\r
-def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;\r
-def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;\r
-def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;\r
-def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;\r
-def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;\r
-def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;\r
-def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;\r
-def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;\r
-def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;\r
-def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;\r
-def SDTCallValProfile : SDTypeProfile<1, 0, []>;\r
-def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;\r
-def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;\r
-def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;\r
-def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;\r
-def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;\r
-\r
-def DeclareParam :\r
- SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def DeclareScalarParam :\r
- SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def DeclareRetParam :\r
- SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def DeclareRet :\r
- SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def LoadParam :\r
- SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,\r
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;\r
-def LoadParamV2 :\r
- SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,\r
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;\r
-def LoadParamV4 :\r
- SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,\r
- [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;\r
-def PrintCall :\r
- SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def PrintConvergentCall :\r
- SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def PrintCallUni :\r
- SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def PrintConvergentCallUni :\r
- SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def StoreParam :\r
- SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def StoreParamV2 :\r
- SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def StoreParamV4 :\r
- SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def StoreParamU32 :\r
- SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def StoreParamS32 :\r
- SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def CallArgBegin :\r
- SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def CallArg :\r
- SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def LastCallArg :\r
- SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def CallArgEnd :\r
- SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def CallVoid :\r
- SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def Prototype :\r
- SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def CallVal :\r
- SDNode<"NVPTXISD::CallVal", SDTCallValProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def MoveParam :\r
- SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;\r
-def StoreRetval :\r
- SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,\r
- [SDNPHasChain, SDNPSideEffect]>;\r
-def StoreRetvalV2 :\r
- SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,\r
- [SDNPHasChain, SDNPSideEffect]>;\r
-def StoreRetvalV4 :\r
- SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,\r
- [SDNPHasChain, SDNPSideEffect]>;\r
-def PseudoUseParam :\r
- SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def RETURNNode :\r
- SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,\r
- [SDNPHasChain, SDNPSideEffect]>;\r
-\r
-let mayLoad = 1 in {\r
- class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),\r
- !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),\r
- []>;\r
-\r
- class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),\r
- !strconcat("ld.param.v2", opstr,\r
- " \t{{$dst, $dst2}}, [retval0+$b];"), []>;\r
-\r
- class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,\r
- regclass:$dst4),\r
- (ins i32imm:$b),\r
- !strconcat("ld.param.v4", opstr,\r
- " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),\r
- []>;\r
-}\r
-\r
-class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),\r
- !strconcat("mov", opstr, " \t$dst, retval$b;"),\r
- [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;\r
-\r
-let mayStore = 1 in {\r
- class StoreParamInst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),\r
- !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),\r
- []>;\r
-\r
- class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,\r
- i32imm:$a, i32imm:$b),\r
- !strconcat("st.param.v2", opstr,\r
- " \t[param$a+$b], {{$val, $val2}};"),\r
- []>;\r
-\r
- class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,\r
- regclass:$val4, i32imm:$a,\r
- i32imm:$b),\r
- !strconcat("st.param.v4", opstr,\r
- " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),\r
- []>;\r
-\r
- class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),\r
- !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),\r
- []>;\r
-\r
- class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),\r
- !strconcat("st.param.v2", opstr,\r
- " \t[func_retval0+$a], {{$val, $val2}};"),\r
- []>;\r
-\r
- class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :\r
- NVPTXInst<(outs),\r
- (ins regclass:$val, regclass:$val2, regclass:$val3,\r
- regclass:$val4, i32imm:$a),\r
- !strconcat("st.param.v4", opstr,\r
- " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),\r
- []>;\r
-}\r
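-\r
-// Instantiation sketch: StoreParamI32 below is StoreParamInst<Int32Regs, ".b32">,\r
-// so with $a = 0 and $b = 4 it prints as\r
-//   st.param.b32 \t[param0+4], $val;\r
-// and StoreRetvalI32 likewise prints "st.param.b32 \t[func_retval0+$a], $val;".\r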
-\r
-let isCall=1 in {\r
- multiclass CALL<string OpcStr, SDNode OpNode> {\r
- def PrintCallNoRetInst : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;\r
- def PrintCallRetInst1 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;\r
- def PrintCallRetInst2 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;\r
- def PrintCallRetInst3 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;\r
- def PrintCallRetInst4 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),\r
- [(OpNode (i32 4))]>;\r
- def PrintCallRetInst5 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),\r
- [(OpNode (i32 5))]>;\r
- def PrintCallRetInst6 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "\r
- "retval5), "),\r
- [(OpNode (i32 6))]>;\r
- def PrintCallRetInst7 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "\r
- "retval5, retval6), "),\r
- [(OpNode (i32 7))]>;\r
- def PrintCallRetInst8 : NVPTXInst<(outs), (ins),\r
- !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "\r
- "retval5, retval6, retval7), "),\r
- [(OpNode (i32 8))]>;\r
- }\r
-}\r
-\r
-defm Call : CALL<"call", PrintCall>;\r
-defm CallUni : CALL<"call.uni", PrintCallUni>;\r
-\r
-// Convergent call instructions. These are identical to regular calls, except\r
-// they have the isConvergent bit set.\r
-let isConvergent=1 in {\r
- defm ConvergentCall : CALL<"call", PrintConvergentCall>;\r
- defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;\r
-}\r
-\r
-def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;\r
-def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;\r
-def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;\r
-def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;\r
-def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;\r
-def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;\r
-def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;\r
-def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;\r
-def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;\r
-def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;\r
-def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;\r
-def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;\r
-def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;\r
-def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;\r
-def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;\r
-def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;\r
-def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;\r
-def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;\r
-def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;\r
-def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;\r
-def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;\r
-def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;\r
-\r
-def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;\r
-def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;\r
-\r
-def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;\r
-def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;\r
-def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;\r
-def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;\r
-def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;\r
-def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;\r
-\r
-def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;\r
-def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;\r
-def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;\r
-\r
-def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;\r
-def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;\r
-def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;\r
-def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;\r
-def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;\r
-def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;\r
-def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;\r
-def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;\r
-def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;\r
-def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;\r
-def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;\r
-\r
-def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;\r
-def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;\r
-def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;\r
-def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;\r
-def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;\r
-def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;\r
-def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;\r
-def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;\r
-def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;\r
-def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;\r
-def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;\r
-\r
-def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;\r
-def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;\r
-def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;\r
-def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;\r
-def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;\r
-def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;\r
-def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;\r
-def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;\r
-def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;\r
-def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;\r
-def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;\r
-\r
-def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;\r
-def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;\r
-def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;\r
-def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;\r
-\r
-class CallArgInst<NVPTXRegClass regclass> :\r
- NVPTXInst<(outs), (ins regclass:$a), "$a, ",\r
- [(CallArg (i32 0), regclass:$a)]>;\r
-\r
-class LastCallArgInst<NVPTXRegClass regclass> :\r
- NVPTXInst<(outs), (ins regclass:$a), "$a",\r
- [(LastCallArg (i32 0), regclass:$a)]>;\r
-\r
-def CallArgI64 : CallArgInst<Int64Regs>;\r
-def CallArgI32 : CallArgInst<Int32Regs>;\r
-def CallArgI16 : CallArgInst<Int16Regs>;\r
-def CallArgF64 : CallArgInst<Float64Regs>;\r
-def CallArgF32 : CallArgInst<Float32Regs>;\r
-\r
-def LastCallArgI64 : LastCallArgInst<Int64Regs>;\r
-def LastCallArgI32 : LastCallArgInst<Int32Regs>;\r
-def LastCallArgI16 : LastCallArgInst<Int16Regs>;\r
-def LastCallArgF64 : LastCallArgInst<Float64Regs>;\r
-def LastCallArgF32 : LastCallArgInst<Float32Regs>;\r
-\r
-def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",\r
- [(CallArg (i32 0), (i32 imm:$a))]>;\r
-def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",\r
- [(LastCallArg (i32 0), (i32 imm:$a))]>;\r
-\r
-def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",\r
- [(CallArg (i32 1), (i32 imm:$a))]>;\r
-def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",\r
- [(LastCallArg (i32 1), (i32 imm:$a))]>;\r
-\r
-def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",\r
- [(CallVoid (Wrapper tglobaladdr:$addr))]>;\r
-def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",\r
- [(CallVoid Int32Regs:$addr)]>;\r
-def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",\r
- [(CallVoid Int64Regs:$addr)]>;\r
-def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",\r
- [(Prototype (i32 imm:$val))]>;\r
-\r
-def DeclareRetMemInst :\r
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),\r
- ".param .align $align .b8 retval$num[$size];",\r
- [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;\r
-def DeclareRetScalarInst :\r
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),\r
- ".param .b$size retval$num;",\r
- [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;\r
-def DeclareRetRegInst :\r
- NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),\r
- ".reg .b$size retval$num;",\r
- [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;\r
-\r
-def DeclareParamInst :\r
- NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),\r
- ".param .align $align .b8 param$a[$size];",\r
- [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;\r
-def DeclareScalarParamInst :\r
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),\r
- ".param .b$size param$a;",\r
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;\r
-def DeclareScalarRegInst :\r
- NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),\r
- ".reg .b$size param$a;",\r
- [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;\r
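-\r
-// Rendering sketch: DeclareParamInst with $align = 4, $a = 0, $size = 16 prints\r
-//   .param .align 4 .b8 param0[16];\r
-// and DeclareScalarParamInst with $a = 1, $size = 32 prints ".param .b32 param1;".\r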
-\r
-class MoveParamInst<NVPTXRegClass regclass, string asmstr> :\r
- NVPTXInst<(outs regclass:$dst), (ins regclass:$src),\r
- !strconcat("mov", asmstr, " \t$dst, $src;"),\r
- [(set regclass:$dst, (MoveParam regclass:$src))]>;\r
-\r
-def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;\r
-def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;\r
-def MoveParamI16 :\r
- NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),\r
- "cvt.u16.u32 \t$dst, $src;",\r
- [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;\r
-def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;\r
-def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;\r
-def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;\r
-\r
-class PseudoUseParamInst<NVPTXRegClass regclass> :\r
- NVPTXInst<(outs), (ins regclass:$src),\r
- "// Pseudo use of $src",\r
- [(PseudoUseParam regclass:$src)]>;\r
-\r
-def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;\r
-def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;\r
-def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;\r
-def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;\r
-def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;\r
-\r
-\r
-//\r
-// Load / Store Handling\r
-//\r
-multiclass LD<NVPTXRegClass regclass> {\r
- def _avar : NVPTXInst<\r
- (outs regclass:$dst),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t$dst, [$addr];", []>;\r
- def _areg : NVPTXInst<\r
- (outs regclass:$dst),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t$dst, [$addr];", []>;\r
- def _areg_64 : NVPTXInst<\r
- (outs regclass:$dst),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t$dst, [$addr];", []>;\r
- def _ari : NVPTXInst<\r
- (outs regclass:$dst),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t$dst, [$addr+$offset];", []>;\r
- def _ari_64 : NVPTXInst<\r
- (outs regclass:$dst),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t$dst, [$addr+$offset];", []>;\r
- def _asi : NVPTXInst<\r
- (outs regclass:$dst),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t$dst, [$addr+$offset];", []>;\r
-}\r
-\r
-let mayLoad=1, hasSideEffects=0 in {\r
- defm LD_i8 : LD<Int16Regs>;\r
- defm LD_i16 : LD<Int16Regs>;\r
- defm LD_i32 : LD<Int32Regs>;\r
- defm LD_i64 : LD<Int64Regs>;\r
- defm LD_f16 : LD<Float16Regs>;\r
- defm LD_f16x2 : LD<Float16x2Regs>;\r
- defm LD_f32 : LD<Float32Regs>;\r
- defm LD_f64 : LD<Float64Regs>;\r
-}\r
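-\r
-// Rendering sketch (assuming printLdStCode expands the modifier operands to\r
-// strings such as ".volatile", ".global", ".v2"/".v4" and "u"/"s"/"f"):\r
-// LD_i32_avar for a non-volatile, unsigned, 32-bit load from the global space\r
-// would print roughly as\r
-//   ld.global.u32 \t$dst, [$addr];\r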
-\r
-multiclass ST<NVPTXRegClass regclass> {\r
- def _avar : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"\r
- " \t[$addr], $src;", []>;\r
- def _areg : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"\r
- " \t[$addr], $src;", []>;\r
- def _areg_64 : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"\r
- " \t[$addr], $src;", []>;\r
- def _ari : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"\r
- " \t[$addr+$offset], $src;", []>;\r
- def _ari_64 : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"\r
- " \t[$addr+$offset], $src;", []>;\r
- def _asi : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,\r
- LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"\r
- " \t[$addr+$offset], $src;", []>;\r
-}\r
-\r
-let mayStore=1, hasSideEffects=0 in {\r
- defm ST_i8 : ST<Int16Regs>;\r
- defm ST_i16 : ST<Int16Regs>;\r
- defm ST_i32 : ST<Int32Regs>;\r
- defm ST_i64 : ST<Int64Regs>;\r
- defm ST_f16 : ST<Float16Regs>;\r
- defm ST_f16x2 : ST<Float16x2Regs>;\r
- defm ST_f32 : ST<Float32Regs>;\r
- defm ST_f64 : ST<Float64Regs>;\r
-}\r
-\r
-// The following is used only in and after vector elementizations. Vector\r
-// elementization happens at the machine instruction level, so the following\r
-// instructions never appear in the DAG.\r
-multiclass LD_VEC<NVPTXRegClass regclass> {\r
- def _v2_avar : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2}}, [$addr];", []>;\r
- def _v2_areg : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2}}, [$addr];", []>;\r
- def _v2_areg_64 : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2}}, [$addr];", []>;\r
- def _v2_ari : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;\r
- def _v2_ari_64 : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;\r
- def _v2_asi : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;\r
- def _v4_avar : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;\r
- def _v4_areg : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;\r
- def _v4_areg_64 : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;\r
- def _v4_ari : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;\r
- def _v4_ari_64 : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;\r
- def _v4_asi : NVPTXInst<\r
- (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),\r
- (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),\r
- "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;\r
-}\r
-let mayLoad=1, hasSideEffects=0 in {\r
- defm LDV_i8 : LD_VEC<Int16Regs>;\r
- defm LDV_i16 : LD_VEC<Int16Regs>;\r
- defm LDV_i32 : LD_VEC<Int32Regs>;\r
- defm LDV_i64 : LD_VEC<Int64Regs>;\r
- defm LDV_f16 : LD_VEC<Float16Regs>;\r
- defm LDV_f16x2 : LD_VEC<Float16x2Regs>;\r
- defm LDV_f32 : LD_VEC<Float32Regs>;\r
- defm LDV_f64 : LD_VEC<Float64Regs>;\r
-}\r
-\r
-multiclass ST_VEC<NVPTXRegClass regclass> {\r
- def _v2_avar : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr], {{$src1, $src2}};", []>;\r
- def _v2_areg : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr], {{$src1, $src2}};", []>;\r
- def _v2_areg_64 : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr], {{$src1, $src2}};", []>;\r
- def _v2_ari : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,\r
- i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr+$offset], {{$src1, $src2}};", []>;\r
- def _v2_ari_64 : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,\r
- i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr+$offset], {{$src1, $src2}};", []>;\r
- def _v2_asi : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,\r
- LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,\r
- i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr+$offset], {{$src1, $src2}};", []>;\r
- def _v4_avar : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,\r
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;\r
- def _v4_areg : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,\r
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;\r
- def _v4_areg_64 : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,\r
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;\r
- def _v4_ari : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,\r
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;\r
- def _v4_ari_64 : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,\r
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "\r
- "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;\r
- def _v4_asi : NVPTXInst<\r
- (outs),\r
- (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,\r
- LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,\r
- i32imm:$fromWidth, imem:$addr, i32imm:$offset),\r
- "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"\r
- "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;\r
-}\r
-\r
-let mayStore=1, hasSideEffects=0 in {\r
- defm STV_i8 : ST_VEC<Int16Regs>;\r
- defm STV_i16 : ST_VEC<Int16Regs>;\r
- defm STV_i32 : ST_VEC<Int32Regs>;\r
- defm STV_i64 : ST_VEC<Int64Regs>;\r
- defm STV_f16 : ST_VEC<Float16Regs>;\r
- defm STV_f16x2 : ST_VEC<Float16x2Regs>;\r
- defm STV_f32 : ST_VEC<Float32Regs>;\r
- defm STV_f64 : ST_VEC<Float64Regs>;\r
-}\r
-\r
-//---- Conversion ----\r
-\r
-class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,\r
- NVPTXRegClass regclassOut> :\r
- NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),\r
- !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),\r
- [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;\r
-\r
-def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;\r
-def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;\r
-def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;\r
-def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;\r
-def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;\r
-def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;\r
-def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;\r
-def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;\r
-\r
-// NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where\r
-// we cannot specify floating-point literals in isel patterns. Therefore, we\r
-// use an integer selp to select either 1 or 0 and then cvt to floating-point.\r
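-//\r
-// For illustration (PTX register names assumed), (f32 (uint_to_fp i1:$p))\r
-// therefore lowers to something along the lines of:\r
-//   selp.u32        %r1, 1, 0, %p1;\r
-//   cvt.rn.f32.u32  %f1, %r1;\r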
-\r
-// sint -> f16\r
-def : Pat<(f16 (sint_to_fp Int1Regs:$a)),\r
- (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;\r
-def : Pat<(f16 (sint_to_fp Int16Regs:$a)),\r
- (CVT_f16_s16 Int16Regs:$a, CvtRN)>;\r
-def : Pat<(f16 (sint_to_fp Int32Regs:$a)),\r
- (CVT_f16_s32 Int32Regs:$a, CvtRN)>;\r
-def : Pat<(f16 (sint_to_fp Int64Regs:$a)),\r
- (CVT_f16_s64 Int64Regs:$a, CvtRN)>;\r
-\r
-// uint -> f16\r
-def : Pat<(f16 (uint_to_fp Int1Regs:$a)),\r
- (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;\r
-def : Pat<(f16 (uint_to_fp Int16Regs:$a)),\r
- (CVT_f16_u16 Int16Regs:$a, CvtRN)>;\r
-def : Pat<(f16 (uint_to_fp Int32Regs:$a)),\r
- (CVT_f16_u32 Int32Regs:$a, CvtRN)>;\r
-def : Pat<(f16 (uint_to_fp Int64Regs:$a)),\r
- (CVT_f16_u64 Int64Regs:$a, CvtRN)>;\r
-\r
-// sint -> f32\r
-def : Pat<(f32 (sint_to_fp Int1Regs:$a)),\r
- (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;\r
-def : Pat<(f32 (sint_to_fp Int16Regs:$a)),\r
- (CVT_f32_s16 Int16Regs:$a, CvtRN)>;\r
-def : Pat<(f32 (sint_to_fp Int32Regs:$a)),\r
- (CVT_f32_s32 Int32Regs:$a, CvtRN)>;\r
-def : Pat<(f32 (sint_to_fp Int64Regs:$a)),\r
- (CVT_f32_s64 Int64Regs:$a, CvtRN)>;\r
-\r
-// uint -> f32\r
-def : Pat<(f32 (uint_to_fp Int1Regs:$a)),\r
- (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;\r
-def : Pat<(f32 (uint_to_fp Int16Regs:$a)),\r
- (CVT_f32_u16 Int16Regs:$a, CvtRN)>;\r
-def : Pat<(f32 (uint_to_fp Int32Regs:$a)),\r
- (CVT_f32_u32 Int32Regs:$a, CvtRN)>;\r
-def : Pat<(f32 (uint_to_fp Int64Regs:$a)),\r
- (CVT_f32_u64 Int64Regs:$a, CvtRN)>;\r
-\r
-// sint -> f64\r
-def : Pat<(f64 (sint_to_fp Int1Regs:$a)),\r
- (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;\r
-def : Pat<(f64 (sint_to_fp Int16Regs:$a)),\r
- (CVT_f64_s16 Int16Regs:$a, CvtRN)>;\r
-def : Pat<(f64 (sint_to_fp Int32Regs:$a)),\r
- (CVT_f64_s32 Int32Regs:$a, CvtRN)>;\r
-def : Pat<(f64 (sint_to_fp Int64Regs:$a)),\r
- (CVT_f64_s64 Int64Regs:$a, CvtRN)>;\r
-\r
-// uint -> f64\r
-def : Pat<(f64 (uint_to_fp Int1Regs:$a)),\r
- (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;\r
-def : Pat<(f64 (uint_to_fp Int16Regs:$a)),\r
- (CVT_f64_u16 Int16Regs:$a, CvtRN)>;\r
-def : Pat<(f64 (uint_to_fp Int32Regs:$a)),\r
- (CVT_f64_u32 Int32Regs:$a, CvtRN)>;\r
-def : Pat<(f64 (uint_to_fp Int64Regs:$a)),\r
- (CVT_f64_u64 Int64Regs:$a, CvtRN)>;\r
-\r
-\r
-// f16 -> sint\r
-def : Pat<(i1 (fp_to_sint Float16Regs:$a)),\r
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;\r
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),\r
- (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i16 (fp_to_sint Float16Regs:$a)),\r
- (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;\r
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),\r
- (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i32 (fp_to_sint Float16Regs:$a)),\r
- (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;\r
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),\r
- (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i64 (fp_to_sint Float16Regs:$a)),\r
- (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;\r
-\r
-// f16 -> uint\r
-def : Pat<(i1 (fp_to_uint Float16Regs:$a)),\r
- (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;\r
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),\r
- (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i16 (fp_to_uint Float16Regs:$a)),\r
- (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;\r
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),\r
- (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i32 (fp_to_uint Float16Regs:$a)),\r
- (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;\r
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),\r
- (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i64 (fp_to_uint Float16Regs:$a)),\r
- (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;\r
-\r
-// f32 -> sint\r
-def : Pat<(i1 (fp_to_sint Float32Regs:$a)),\r
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;\r
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),\r
- (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),\r
- (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;\r
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),\r
- (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),\r
- (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;\r
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),\r
- (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),\r
- (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;\r
-\r
-// f32 -> uint\r
-def : Pat<(i1 (fp_to_uint Float32Regs:$a)),\r
- (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;\r
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),\r
- (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),\r
- (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;\r
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),\r
- (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),\r
- (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;\r
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),\r
- (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),\r
- (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;\r
-\r
-// f64 -> sint\r
-def : Pat<(i1 (fp_to_sint Float64Regs:$a)),\r
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;\r
-def : Pat<(i16 (fp_to_sint Float64Regs:$a)),\r
- (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;\r
-def : Pat<(i32 (fp_to_sint Float64Regs:$a)),\r
- (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;\r
-def : Pat<(i64 (fp_to_sint Float64Regs:$a)),\r
- (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;\r
-\r
-// f64 -> uint\r
-def : Pat<(i1 (fp_to_uint Float64Regs:$a)),\r
- (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;\r
-def : Pat<(i16 (fp_to_uint Float64Regs:$a)),\r
- (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;\r
-def : Pat<(i32 (fp_to_uint Float64Regs:$a)),\r
- (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;\r
-def : Pat<(i64 (fp_to_uint Float64Regs:$a)),\r
- (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;\r
-\r
-// sext i1\r
-def : Pat<(i16 (sext Int1Regs:$a)),\r
- (SELP_s16ii -1, 0, Int1Regs:$a)>;\r
-def : Pat<(i32 (sext Int1Regs:$a)),\r
- (SELP_s32ii -1, 0, Int1Regs:$a)>;\r
-def : Pat<(i64 (sext Int1Regs:$a)),\r
- (SELP_s64ii -1, 0, Int1Regs:$a)>;\r
-\r
-// zext i1\r
-def : Pat<(i16 (zext Int1Regs:$a)),\r
- (SELP_u16ii 1, 0, Int1Regs:$a)>;\r
-def : Pat<(i32 (zext Int1Regs:$a)),\r
- (SELP_u32ii 1, 0, Int1Regs:$a)>;\r
-def : Pat<(i64 (zext Int1Regs:$a)),\r
- (SELP_u64ii 1, 0, Int1Regs:$a)>;\r
-\r
-// anyext i1\r
-def : Pat<(i16 (anyext Int1Regs:$a)),\r
- (SELP_u16ii -1, 0, Int1Regs:$a)>;\r
-def : Pat<(i32 (anyext Int1Regs:$a)),\r
- (SELP_u32ii -1, 0, Int1Regs:$a)>;\r
-def : Pat<(i64 (anyext Int1Regs:$a)),\r
- (SELP_u64ii -1, 0, Int1Regs:$a)>;\r
-\r
-// sext i16\r
-def : Pat<(i32 (sext Int16Regs:$a)),\r
- (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;\r
-def : Pat<(i64 (sext Int16Regs:$a)),\r
- (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;\r
-\r
-// zext i16\r
-def : Pat<(i32 (zext Int16Regs:$a)),\r
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;\r
-def : Pat<(i64 (zext Int16Regs:$a)),\r
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;\r
-\r
-// anyext i16\r
-def : Pat<(i32 (anyext Int16Regs:$a)),\r
- (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;\r
-def : Pat<(i64 (anyext Int16Regs:$a)),\r
- (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;\r
-\r
-// sext i32\r
-def : Pat<(i64 (sext Int32Regs:$a)),\r
- (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;\r
-\r
-// zext i32\r
-def : Pat<(i64 (zext Int32Regs:$a)),\r
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;\r
-\r
-// anyext i32\r
-def : Pat<(i64 (anyext Int32Regs:$a)),\r
- (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;\r
-\r
-\r
-// truncate i64\r
-def : Pat<(i32 (trunc Int64Regs:$a)),\r
- (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;\r
-def : Pat<(i16 (trunc Int64Regs:$a)),\r
- (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;\r
-def : Pat<(i1 (trunc Int64Regs:$a)),\r
- (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;\r
-\r
-// truncate i32\r
-def : Pat<(i16 (trunc Int32Regs:$a)),\r
- (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;\r
-def : Pat<(i1 (trunc Int32Regs:$a)),\r
- (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;\r
-\r
-// truncate i16\r
-def : Pat<(i1 (trunc Int16Regs:$a)),\r
- (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;\r
-\r
-// sext_inreg\r
-def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;\r
-def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;\r
-def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;\r
-def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;\r
-def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;\r
-def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;\r
-\r
-\r
-// Select instructions with 32-bit predicates\r
-def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),\r
- (SELP_b16rr Int16Regs:$a, Int16Regs:$b,\r
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;\r
-def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),\r
- (SELP_b32rr Int32Regs:$a, Int32Regs:$b,\r
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;\r
-def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),\r
- (SELP_b64rr Int64Regs:$a, Int64Regs:$b,\r
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;\r
-def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),\r
- (SELP_f16rr Float16Regs:$a, Float16Regs:$b,\r
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;\r
-def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),\r
- (SELP_f32rr Float32Regs:$a, Float32Regs:$b,\r
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;\r
-def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),\r
- (SELP_f64rr Float64Regs:$a, Float64Regs:$b,\r
- (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;\r
-\r
-\r
-let hasSideEffects = 0 in {\r
- // pack a set of smaller int registers to a larger int register\r
- def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),\r
- (ins Int16Regs:$s1, Int16Regs:$s2,\r
- Int16Regs:$s3, Int16Regs:$s4),\r
- "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;\r
- def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),\r
- (ins Int16Regs:$s1, Int16Regs:$s2),\r
- "mov.b32 \t$d, {{$s1, $s2}};", []>;\r
- def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),\r
- (ins Int32Regs:$s1, Int32Regs:$s2),\r
- "mov.b64 \t$d, {{$s1, $s2}};", []>;\r
- def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),\r
- (ins Float32Regs:$s1, Float32Regs:$s2),\r
- "mov.b64 \t$d, {{$s1, $s2}};", []>;\r
-\r
- // unpack a larger int register to a set of smaller int registers\r
- def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,\r
- Int16Regs:$d3, Int16Regs:$d4),\r
- (ins Int64Regs:$s),\r
- "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;\r
- def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),\r
- (ins Int32Regs:$s),\r
- "mov.b32 \t{{$d1, $d2}}, $s;", []>;\r
- def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),\r
- (ins Int64Regs:$s),\r
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;\r
- def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),\r
- (ins Float64Regs:$s),\r
- "mov.b64 \t{{$d1, $d2}}, $s;", []>;\r
-\r
-}\r
-\r
-let hasSideEffects = 0 in {\r
- // Extract element of f16x2 register. PTX does not provide any way\r
- // to access elements of an f16x2 vector directly, so we need to\r
- // extract them through a temporary register.\r
- def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),\r
- (ins Float16x2Regs:$src),\r
- "{{ .reg .b16 \t%tmp_hi;\n\t"\r
- " mov.b32 \t{$dst, %tmp_hi}, $src; }}",\r
- [(set Float16Regs:$dst,\r
- (extractelt (v2f16 Float16x2Regs:$src), 0))]>;\r
- def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),\r
- (ins Float16x2Regs:$src),\r
- "{{ .reg .b16 \t%tmp_lo;\n\t"\r
- " mov.b32 \t{%tmp_lo, $dst}, $src; }}",\r
- [(set Float16Regs:$dst,\r
- (extractelt (v2f16 Float16x2Regs:$src), 1))]>;\r
-\r
- // Coalesce two f16 registers into f16x2\r
- def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),\r
- (ins Float16Regs:$a, Float16Regs:$b),\r
- "mov.b32 \t$dst, {{$a, $b}};",\r
- [(set Float16x2Regs:$dst,\r
- (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;\r
-\r
- // Directly initializing the underlying b32 register is one fewer SASS\r
- // instruction than a vector-packing move.\r
- def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),\r
- "mov.b32 \t$dst, $src;",\r
- []>;\r
-\r
- // Split f16x2 into two f16 registers.\r
- def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),\r
- (ins Float16x2Regs:$src),\r
- "mov.b32 \t{{$lo, $hi}}, $src;",\r
- []>;\r
- // Split an i32 into two f16\r
- def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),\r
- (ins Int32Regs:$src),\r
- "mov.b32 \t{{$lo, $hi}}, $src;",\r
- []>;\r
-}\r
-\r
-// Count leading zeros\r
-let hasSideEffects = 0 in {\r
- def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),\r
- "clz.b32 \t$d, $a;", []>;\r
- def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),\r
- "clz.b64 \t$d, $a;", []>;\r
-}\r
-\r
-// 32-bit has a direct PTX instruction\r
-def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;\r
-\r
-// The return type of the ctlz ISD node is the same as its input, but the PTX\r
-// clz instruction always returns a 32-bit value. For ctlz.i64, zero-extend the\r
-// PTX result to 64 bits to match the ISD node's semantics, unless we know\r
-// we're truncating back down to 32 bits.\r
-def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;\r
-def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;\r
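-//\r
-// For illustration (register names assumed), a plain i64 ctlz selects roughly\r
-//   clz.b64     %r1, %rd1;\r
-//   cvt.u64.u32 %rd2, %r1;\r
-// while the trunc-to-i32 form keeps only the clz.b64.\r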
-\r
-// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the\r
-// result back to 16-bits if necessary. We also need to subtract 16 because\r
-// the high-order 16 zeros were counted.\r
-//\r
-// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could\r
-// use to save one SASS instruction (on sm_35 anyway):\r
-//\r
-// mov.b32 $tmp, {0xffff, $a}\r
-// ctlz.b32 $result, $tmp\r
-//\r
-// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"\r
-// and then ctlz that value. This way we don't have to subtract 16 from the\r
-// result. Unfortunately today we don't have a way to generate\r
-// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.\r
-def : Pat<(ctlz Int16Regs:$a),\r
- (SUBi16ri (CVT_u16_u32\r
- (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;\r
-def : Pat<(i32 (zext (ctlz Int16Regs:$a))),\r
- (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;\r
-\r
-// Population count\r
-let hasSideEffects = 0 in {\r
- def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),\r
- "popc.b32 \t$d, $a;", []>;\r
- def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),\r
- "popc.b64 \t$d, $a;", []>;\r
-}\r
-\r
-// 32-bit has a direct PTX instruction\r
-def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;\r
-\r
-// For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit\r
-// to match the LLVM semantics. Just as with ctlz.i64, we provide a second\r
-// pattern that avoids the type conversion if we're truncating the result to\r
-// i32 anyway.\r
-def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;\r
-def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;\r
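-//\r
-// Likewise for illustration (register names assumed):\r
-//   popc.b64    %r1, %rd1;\r
-//   cvt.u64.u32 %rd2, %r1;\r
-// with the cvt dropped when the result is immediately truncated to i32.\r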
-\r
-// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.\r
-// If we know that we're storing into an i32, we can avoid the final trunc.\r
-def : Pat<(ctpop Int16Regs:$a),\r
- (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;\r
-def : Pat<(i32 (zext (ctpop Int16Regs:$a))),\r
- (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;\r
-\r
-// fpround f32 -> f16\r
-def : Pat<(f16 (fpround Float32Regs:$a)),\r
- (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f16 (fpround Float32Regs:$a)),\r
- (CVT_f16_f32 Float32Regs:$a, CvtRN)>;\r
-\r
-// fpround f64 -> f16\r
-def : Pat<(f16 (fpround Float64Regs:$a)),\r
- (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f16 (fpround Float64Regs:$a)),\r
- (CVT_f16_f64 Float64Regs:$a, CvtRN)>;\r
-\r
-// fpround f64 -> f32\r
-def : Pat<(f32 (fpround Float64Regs:$a)),\r
- (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f32 (fpround Float64Regs:$a)),\r
- (CVT_f32_f64 Float64Regs:$a, CvtRN)>;\r
-\r
-// fpextend f16 -> f32\r
-def : Pat<(f32 (fpextend Float16Regs:$a)),\r
- (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f32 (fpextend Float16Regs:$a)),\r
- (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;\r
-\r
-// fpextend f16 -> f64\r
-def : Pat<(f64 (fpextend Float16Regs:$a)),\r
- (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f64 (fpextend Float16Regs:$a)),\r
- (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;\r
-\r
-// fpextend f32 -> f64\r
-def : Pat<(f64 (fpextend Float32Regs:$a)),\r
- (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f64 (fpextend Float32Regs:$a)),\r
- (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;\r
-\r
-def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,\r
- [SDNPHasChain, SDNPOptInGlue]>;\r
-\r
-// fceil, ffloor, fround, ftrunc.\r
-\r
-def : Pat<(fceil Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(fceil Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(fceil Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(fceil Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(fceil Float64Regs:$a),\r
- (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;\r
-\r
-def : Pat<(ffloor Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(ffloor Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(ffloor Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(ffloor Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(ffloor Float64Regs:$a),\r
- (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;\r
-\r
-def : Pat<(fround Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f16 (fround Float16Regs:$a)),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(fround Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(f32 (fround Float32Regs:$a)),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(f64 (fround Float64Regs:$a)),\r
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;\r
-\r
-def : Pat<(ftrunc Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(ftrunc Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(ftrunc Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(ftrunc Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(ftrunc Float64Regs:$a),\r
- (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;\r
-\r
-// nearbyint and rint are implemented as rounding to nearest even. This isn't\r
-// strictly correct, because it causes us to ignore the rounding mode. But it\r
-// matches what CUDA's "libm" does.\r
-\r
-def : Pat<(fnearbyint Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(fnearbyint Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(fnearbyint Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(fnearbyint Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(fnearbyint Float64Regs:$a),\r
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;\r
-\r
-def : Pat<(frint Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(frint Float16Regs:$a),\r
- (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(frint Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;\r
-def : Pat<(frint Float32Regs:$a),\r
- (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;\r
-def : Pat<(frint Float64Regs:$a),\r
- (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;\r
-\r
-\r
-//-----------------------------------\r
-// Control-flow\r
-//-----------------------------------\r
-\r
-let isTerminator=1 in {\r
- let isReturn=1, isBarrier=1 in\r
- def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;\r
-\r
- let isBranch=1 in\r
- def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),\r
- "@$a bra \t$target;",\r
- [(brcond Int1Regs:$a, bb:$target)]>;\r
- let isBranch=1 in\r
- def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),\r
- "@!$a bra \t$target;", []>;\r
-\r
- let isBranch=1, isBarrier=1 in\r
- def GOTO : NVPTXInst<(outs), (ins brtarget:$target),\r
- "bra.uni \t$target;", [(br bb:$target)]>;\r
-}\r
-\r
-def : Pat<(brcond Int32Regs:$a, bb:$target),\r
- (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;\r
-\r
-// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a\r
-// conditional branch if the target block is the next block so that the code\r
-// can fall through to the target block. The inversion is done by 'xor\r
-// condition, 1', which will be translated to (setne condition, -1). Since PTX\r
-// supports '@!pred bra target', we should use it.\r
-def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),\r
- (CBranchOther Int1Regs:$a, bb:$target)>;\r
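-//\r
-// For example (register and label names assumed), such an inverted branch\r
-// that falls through to its taken successor can then be emitted as a single\r
-//   @!%p1 bra LBB0_2;\r
-// rather than materializing the negated predicate first.\r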
-\r
-// Call\r
-def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,\r
- SDTCisVT<1, i32>]>;\r
-def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;\r
-\r
-def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,\r
- [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;\r
-def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,\r
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,\r
- SDNPSideEffect]>;\r
-\r
-def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;\r
-def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,\r
- [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;\r
-def calltarget : Operand<i32>;\r
-let isCall=1 in {\r
- def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;\r
-}\r
-\r
-def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;\r
-def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;\r
-\r
-// Pseudo instructions.\r
-class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>\r
- : NVPTXInst<outs, ins, asmstr, pattern>;\r
-\r
-def Callseq_Start :\r
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),\r
- "\\{ // callseq $amt1, $amt2\n"\r
- "\t.reg .b32 temp_param_reg;",\r
- [(callseq_start timm:$amt1, timm:$amt2)]>;\r
-def Callseq_End :\r
- NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),\r
- "\\} // callseq $amt1",\r
- [(callseq_end timm:$amt1, timm:$amt2)]>;\r
-\r
-// trap instruction\r
-def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;\r
-\r
-// Call prototype wrapper\r
-def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;\r
-def CallPrototype :\r
- SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,\r
- [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;\r
-def ProtoIdent : Operand<i32> {\r
- let PrintMethod = "printProtoIdent";\r
-}\r
-def CALL_PROTOTYPE :\r
- NVPTXInst<(outs), (ins ProtoIdent:$ident),\r
- "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;\r
-\r
-\r
-include "NVPTXIntrinsics.td"\r
-\r
-\r
-//-----------------------------------\r
-// Notes\r
-//-----------------------------------\r
-// BSWAP is currently expanded. The following would be more efficient:\r
-// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit\r
-//   registers\r
-// - for sm_20, use prmt (use vector scalar mov to get the pack and\r
-//   unpack). sm_20 supports native 32-bit registers, but not native 16-bit\r
-//   registers.\r
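-//\r
-// For example, a 32-bit byte swap on sm_20+ could be a single byte-permute\r
-// (selector value assumed from the PTX prmt definition):\r
-//   prmt.b32  %r2, %r1, %r1, 0x0123;\r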
+//===- NVPTXInstrInfo.td - NVPTX Instruction defs -------------*- tblgen-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the PTX instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+include "NVPTXInstrFormats.td"
+
+// A NOP instruction
+let hasSideEffects = 0 in {
+ def NOP : NVPTXInst<(outs), (ins), "", []>;
+}
+
+let OperandType = "OPERAND_IMMEDIATE" in {
+ def f16imm : Operand<f16>;
+}
+
+// List of vector specific properties
+def isVecLD : VecInstTypeEnum<1>;
+def isVecST : VecInstTypeEnum<2>;
+def isVecBuild : VecInstTypeEnum<3>;
+def isVecShuffle : VecInstTypeEnum<4>;
+def isVecExtract : VecInstTypeEnum<5>;
+def isVecInsert : VecInstTypeEnum<6>;
+def isVecDest : VecInstTypeEnum<7>;
+def isVecOther : VecInstTypeEnum<15>;
+
+//===----------------------------------------------------------------------===//
+// NVPTX Operand Definitions.
+//===----------------------------------------------------------------------===//
+
+def brtarget : Operand<OtherVT>;
+
+// CVT conversion modes
+// These must match the enum in NVPTX.h
+def CvtNONE : PatLeaf<(i32 0x0)>;
+def CvtRNI : PatLeaf<(i32 0x1)>;
+def CvtRZI : PatLeaf<(i32 0x2)>;
+def CvtRMI : PatLeaf<(i32 0x3)>;
+def CvtRPI : PatLeaf<(i32 0x4)>;
+def CvtRN : PatLeaf<(i32 0x5)>;
+def CvtRZ : PatLeaf<(i32 0x6)>;
+def CvtRM : PatLeaf<(i32 0x7)>;
+def CvtRP : PatLeaf<(i32 0x8)>;
+
+def CvtNONE_FTZ : PatLeaf<(i32 0x10)>;
+def CvtRNI_FTZ : PatLeaf<(i32 0x11)>;
+def CvtRZI_FTZ : PatLeaf<(i32 0x12)>;
+def CvtRMI_FTZ : PatLeaf<(i32 0x13)>;
+def CvtRPI_FTZ : PatLeaf<(i32 0x14)>;
+def CvtRN_FTZ : PatLeaf<(i32 0x15)>;
+def CvtRZ_FTZ : PatLeaf<(i32 0x16)>;
+def CvtRM_FTZ : PatLeaf<(i32 0x17)>;
+def CvtRP_FTZ : PatLeaf<(i32 0x18)>;
+
+def CvtSAT : PatLeaf<(i32 0x20)>;
+def CvtSAT_FTZ : PatLeaf<(i32 0x30)>;
+
+def CvtMode : Operand<i32> {
+ let PrintMethod = "printCvtMode";
+}
+
+// Compare modes
+// These must match the enum in NVPTX.h
+def CmpEQ : PatLeaf<(i32 0)>;
+def CmpNE : PatLeaf<(i32 1)>;
+def CmpLT : PatLeaf<(i32 2)>;
+def CmpLE : PatLeaf<(i32 3)>;
+def CmpGT : PatLeaf<(i32 4)>;
+def CmpGE : PatLeaf<(i32 5)>;
+def CmpEQU : PatLeaf<(i32 10)>;
+def CmpNEU : PatLeaf<(i32 11)>;
+def CmpLTU : PatLeaf<(i32 12)>;
+def CmpLEU : PatLeaf<(i32 13)>;
+def CmpGTU : PatLeaf<(i32 14)>;
+def CmpGEU : PatLeaf<(i32 15)>;
+def CmpNUM : PatLeaf<(i32 16)>;
+def CmpNAN : PatLeaf<(i32 17)>;
+
+def CmpEQ_FTZ : PatLeaf<(i32 0x100)>;
+def CmpNE_FTZ : PatLeaf<(i32 0x101)>;
+def CmpLT_FTZ : PatLeaf<(i32 0x102)>;
+def CmpLE_FTZ : PatLeaf<(i32 0x103)>;
+def CmpGT_FTZ : PatLeaf<(i32 0x104)>;
+def CmpGE_FTZ : PatLeaf<(i32 0x105)>;
+def CmpEQU_FTZ : PatLeaf<(i32 0x10A)>;
+def CmpNEU_FTZ : PatLeaf<(i32 0x10B)>;
+def CmpLTU_FTZ : PatLeaf<(i32 0x10C)>;
+def CmpLEU_FTZ : PatLeaf<(i32 0x10D)>;
+def CmpGTU_FTZ : PatLeaf<(i32 0x10E)>;
+def CmpGEU_FTZ : PatLeaf<(i32 0x10F)>;
+def CmpNUM_FTZ : PatLeaf<(i32 0x110)>;
+def CmpNAN_FTZ : PatLeaf<(i32 0x111)>;
+
+def CmpMode : Operand<i32> {
+ let PrintMethod = "printCmpMode";
+}
+def VecElement : Operand<i32> {
+ let PrintMethod = "printVecElement";
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instruction Predicate Definitions
+//===----------------------------------------------------------------------===//
+
+
+def hasAtomRedG32 : Predicate<"Subtarget->hasAtomRedG32()">;
+def hasAtomRedS32 : Predicate<"Subtarget->hasAtomRedS32()">;
+def hasAtomRedGen32 : Predicate<"Subtarget->hasAtomRedGen32()">;
+def useAtomRedG32forGen32 :
+ Predicate<"!Subtarget->hasAtomRedGen32() && Subtarget->hasAtomRedG32()">;
+def hasBrkPt : Predicate<"Subtarget->hasBrkPt()">;
+def hasAtomRedG64 : Predicate<"Subtarget->hasAtomRedG64()">;
+def hasAtomRedS64 : Predicate<"Subtarget->hasAtomRedS64()">;
+def hasAtomRedGen64 : Predicate<"Subtarget->hasAtomRedGen64()">;
+def useAtomRedG64forGen64 :
+ Predicate<"!Subtarget->hasAtomRedGen64() && Subtarget->hasAtomRedG64()">;
+def hasAtomAddF32 : Predicate<"Subtarget->hasAtomAddF32()">;
+def hasAtomAddF64 : Predicate<"Subtarget->hasAtomAddF64()">;
+def hasAtomScope : Predicate<"Subtarget->hasAtomScope()">;
+def hasAtomBitwise64 : Predicate<"Subtarget->hasAtomBitwise64()">;
+def hasAtomMinMax64 : Predicate<"Subtarget->hasAtomMinMax64()">;
+def hasVote : Predicate<"Subtarget->hasVote()">;
+def hasDouble : Predicate<"Subtarget->hasDouble()">;
+def reqPTX20 : Predicate<"Subtarget->reqPTX20()">;
+def hasLDG : Predicate<"Subtarget->hasLDG()">;
+def hasLDU : Predicate<"Subtarget->hasLDU()">;
+def hasGenericLdSt : Predicate<"Subtarget->hasGenericLdSt()">;
+
+def doF32FTZ : Predicate<"useF32FTZ()">;
+def doNoF32FTZ : Predicate<"!useF32FTZ()">;
+
+def doMulWide : Predicate<"doMulWide">;
+
+def allowFMA : Predicate<"allowFMA()">;
+def noFMA : Predicate<"!allowFMA()">;
+def allowUnsafeFPMath : Predicate<"allowUnsafeFPMath()">;
+
+def do_DIVF32_APPROX : Predicate<"getDivF32Level()==0">;
+def do_DIVF32_FULL : Predicate<"getDivF32Level()==1">;
+
+def do_SQRTF32_APPROX : Predicate<"!usePrecSqrtF32()">;
+def do_SQRTF32_RN : Predicate<"usePrecSqrtF32()">;
+
+def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
+def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
+
+def true : Predicate<"true">;
+
+def hasPTX31 : Predicate<"Subtarget->getPTXVersion() >= 31">;
+
+def useFP16Math: Predicate<"Subtarget->allowFP16Math()">;
+
+//===----------------------------------------------------------------------===//
+// Some Common Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+// Template for instructions which take three int64, int32, or int16 args.
+// The instructions are named "<OpcStr><Width>" (e.g. "add.s64").
+multiclass I3<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+}
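+
+// For illustration, a later 'defm ADD : I3<"add.s", add>;' expands this
+// template into ADDi64rr/i64ri/i32rr/i32ri/i16rr/i16ri; e.g. ADDi32rr emits
+// "add.s32 \t$dst, $a, $b;" and matches
+// (set Int32Regs:$dst, (add Int32Regs:$a, Int32Regs:$b)).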
+
+// Template for instructions which take 3 int32 args. The instructions are
+// named "<OpcStr>.s32" (e.g. "addc.cc.s32").
+multiclass ADD_SUB_INT_32<string OpcStr, SDNode OpNode> {
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+}
+
+// Template for instructions which take three fp64 or fp32 args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "min.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32 functions.
+//
+// This multiclass should be used for nodes that cannot be folded into FMAs.
+// For nodes that can be folded into FMAs (i.e. adds and muls), use
+// F3_fma_component.
+multiclass F3<string OpcStr, SDNode OpNode> {
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+}
+
+// Template for instructions which take three FP args. The
+// instructions are named "<OpcStr>.f<Width>" (e.g. "add.f64").
+//
+// Also defines ftz (flush subnormal inputs and results to sign-preserving
+// zero) variants for fp32/fp16 functions.
+//
+// This multiclass should be used for nodes that can be folded to make fma ops.
+// In this case, we use the ".rn" variant when FMA is disabled, as this behaves
+// just like the non ".rn" op, but prevents ptxas from creating FMAs.
+multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
+ def f64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+ def f32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA, doF32FTZ]>;
+ def f32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[allowFMA]>;
+ def f32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[allowFMA]>;
+
+ def f16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+ def f16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA]>;
+
+ def f16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA, doF32FTZ]>;
+ def f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, allowFMA]>;
+
+ // These have strange names so we don't perturb existing mir tests.
+ def _rnf64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def _rnf32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA, doF32FTZ]>;
+ def _rnf32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+ Requires<[noFMA]>;
+ def _rnf16rr_ftz :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, noFMA, doF32FTZ]>;
+ def _rnf16rr :
+ NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
+ [(set Float16Regs:$dst, (OpNode Float16Regs:$a, Float16Regs:$b))]>,
+ Requires<[useFP16Math, noFMA]>;
+ def _rnf16x2rr_ftz :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, noFMA, doF32FTZ]>;
+ def _rnf16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b),
+ !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
+ [(set Float16x2Regs:$dst, (OpNode Float16x2Regs:$a, Float16x2Regs:$b))]>,
+ Requires<[useFP16Math, noFMA]>;
+}
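+
+// Sketch of the intended effect for the f32 case (register names assumed):
+// with FMA allowed, "add.f32 %f3, %f1, %f2;" may be contracted by ptxas into
+// an fma with an adjacent multiply; with FMA disabled we instead emit
+// "add.rn.f32 %f3, %f1, %f2;", which rounds the same way but is never fused.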
+
+// Template for operations which take two f32 or f64 operands. Provides three
+// instructions: <OpcStr>.f64, <OpcStr>.f32, and <OpcStr>.ftz.f32 (flush
+// subnormal inputs and results to zero).
+multiclass F2<string OpcStr, SDNode OpNode> {
+ def f64 : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
+ !strconcat(OpcStr, ".f64 \t$dst, $a;"),
+ [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+ def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+ Requires<[doF32FTZ]>;
+ def f32 : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
+ !strconcat(OpcStr, ".f32 \t$dst, $a;"),
+ [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+}
+
+//===----------------------------------------------------------------------===//
+// NVPTX Instructions.
+//===----------------------------------------------------------------------===//
+
+//-----------------------------------
+// Type Conversion
+//-----------------------------------
+
+let hasSideEffects = 0 in {
+ // Generate a cvt to the given type from all possible types. Each instance
+ // takes a CvtMode immediate that defines the conversion mode to use. It can
+ // be CvtNONE to omit a conversion mode.
+ multiclass CVT_FROM_ALL<string FromName, RegisterClass RC> {
+ def _s8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s8 \t$dst, $src;"), []>;
+ def _u8 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u8 \t$dst, $src;"), []>;
+ def _s16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s16 \t$dst, $src;"), []>;
+ def _u16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u16 \t$dst, $src;"), []>;
+ def _s32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s32 \t$dst, $src;"), []>;
+ def _u32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u32 \t$dst, $src;"), []>;
+ def _s64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".s64 \t$dst, $src;"), []>;
+ def _u64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Int64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".u64 \t$dst, $src;"), []>;
+ def _f16 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float16Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f16 \t$dst, $src;"), []>;
+ def _f32 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float32Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f32 \t$dst, $src;"), []>;
+ def _f64 :
+ NVPTXInst<(outs RC:$dst),
+ (ins Float64Regs:$src, CvtMode:$mode),
+ !strconcat("cvt${mode:base}${mode:ftz}${mode:sat}.",
+ FromName, ".f64 \t$dst, $src;"), []>;
+ }
+
+ // Generate cvts from all types to all types.
+ defm CVT_s8 : CVT_FROM_ALL<"s8", Int16Regs>;
+ defm CVT_u8 : CVT_FROM_ALL<"u8", Int16Regs>;
+ defm CVT_s16 : CVT_FROM_ALL<"s16", Int16Regs>;
+ defm CVT_u16 : CVT_FROM_ALL<"u16", Int16Regs>;
+ defm CVT_s32 : CVT_FROM_ALL<"s32", Int32Regs>;
+ defm CVT_u32 : CVT_FROM_ALL<"u32", Int32Regs>;
+ defm CVT_s64 : CVT_FROM_ALL<"s64", Int64Regs>;
+ defm CVT_u64 : CVT_FROM_ALL<"u64", Int64Regs>;
+ defm CVT_f16 : CVT_FROM_ALL<"f16", Float16Regs>;
+ defm CVT_f32 : CVT_FROM_ALL<"f32", Float32Regs>;
+ defm CVT_f64 : CVT_FROM_ALL<"f64", Float64Regs>;
+
+ // These cvts are different from those above: The source and dest registers
+ // are of the same type.
+ def CVT_INREG_s16_s8 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.s16.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s8 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s32_s16 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "cvt.s32.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s8 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s8 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s16 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s16 \t$dst, $src;", []>;
+ def CVT_INREG_s64_s32 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "cvt.s64.s32 \t$dst, $src;", []>;
+}
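+
+// For example, CVT_s32_f32 defined above, given the CvtRZI mode immediate,
+// prints as "cvt.rzi.s32.f32 \t$dst, $src;"; the fp<->int patterns elsewhere
+// in this file (e.g. for fp_to_sint) supply the appropriate mode operand.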
+
+//-----------------------------------
+// Integer Arithmetic
+//-----------------------------------
+
+// Template for xor masquerading as int1 arithmetic.
+multiclass ADD_SUB_i1<SDNode OpNode> {
+ def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ "xor.pred \t$dst, $a, $b;",
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+}
+
+// int1 addition and subtraction are both just xor.
+defm ADD_i1 : ADD_SUB_i1<add>;
+defm SUB_i1 : ADD_SUB_i1<sub>;
+
+// int16, int32, and int64 signed addition. Since NVPTX arithmetic is two's
+// complement, we also use these instructions for unsigned arithmetic.
+defm ADD : I3<"add.s", add>;
+defm SUB : I3<"sub.s", sub>;
+
+// int32 addition and subtraction with carry-out.
+// FIXME: PTX 4.3 adds a 64-bit add.cc (and maybe also 64-bit addc.cc?).
+defm ADDCC : ADD_SUB_INT_32<"add.cc", addc>;
+defm SUBCC : ADD_SUB_INT_32<"sub.cc", subc>;
+
+// int32 addition and subtraction with carry-in and carry-out.
+defm ADDCCC : ADD_SUB_INT_32<"addc.cc", adde>;
+defm SUBCCC : ADD_SUB_INT_32<"subc.cc", sube>;
+
+defm MULT : I3<"mul.lo.s", mul>;
+
+defm MULTHS : I3<"mul.hi.s", mulhs>;
+defm MULTHU : I3<"mul.hi.u", mulhu>;
+
+defm SDIV : I3<"div.s", sdiv>;
+defm UDIV : I3<"div.u", udiv>;
+
+// The ri versions of rem.s and rem.u won't be selected; DAGCombiner's
+// visitSREM/visitUREM will lower them instead.
+defm SREM : I3<"rem.s", srem>;
+defm UREM : I3<"rem.u", urem>;
+
+// Integer absolute value, lowered directly to PTX's native "abs.s<Width>"
+// instruction, so no multi-instruction bit-hack expansion is needed.
+multiclass ABS<RegisterClass RC, string SizeName> {
+ def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
+ !strconcat("abs", SizeName, " \t$dst, $a;"),
+ [(set RC:$dst, (abs RC:$a))]>;
+}
+defm ABS_16 : ABS<Int16Regs, ".s16">;
+defm ABS_32 : ABS<Int32Regs, ".s32">;
+defm ABS_64 : ABS<Int64Regs, ".s64">;
+
+// Integer min/max.
+defm SMAX : I3<"max.s", smax>;
+defm UMAX : I3<"max.u", umax>;
+defm SMIN : I3<"min.s", smin>;
+defm UMIN : I3<"min.u", umin>;
+
+//
+// Wide multiplication
+//
+def MULWIDES64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+def MULWIDES64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.s32 \t$dst, $a, $b;", []>;
+
+def MULWIDEU64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+def MULWIDEU64Imm64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int32Regs:$a, i64imm:$b),
+ "mul.wide.u32 \t$dst, $a, $b;", []>;
+
+def MULWIDES32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+def MULWIDES32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.s16 \t$dst, $a, $b;", []>;
+
+def MULWIDEU32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+def MULWIDEU32Imm32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ "mul.wide.u16 \t$dst, $a, $b;", []>;
+
+def SDTMulWide : SDTypeProfile<1, 2, [SDTCisSameAs<1, 2>]>;
+def mul_wide_signed : SDNode<"NVPTXISD::MUL_WIDE_SIGNED", SDTMulWide>;
+def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
+
+// Matchers for signed, unsigned mul.wide ISD nodes.
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+ (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+ (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
+ Requires<[doMulWide]>;
+
+// Predicates used for converting some patterns to mul.wide.
+def SInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isSignedIntN(32);
+}]>;
+
+def UInt32Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isIntN(32);
+}]>;
+
+def SInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isSignedIntN(16);
+}]>;
+
+def UInt16Const : PatLeaf<(imm), [{
+ const APInt &v = N->getAPIntValue();
+ return v.isIntN(16);
+}]>;
+
+def Int5Const : PatLeaf<(imm), [{
+ // Accept only 0 <= v < 32; only then can (x << v) be rewritten as a
+ // mul.wide by (1 << v).
+ const APInt &v = N->getAPIntValue();
+ return v.sge(0) && v.slt(32);
+}]>;
+
+def Int4Const : PatLeaf<(imm), [{
+ // Accept only 0 <= v < 16; only then can (x << v) be rewritten as a
+ // mul.wide by (1 << v).
+ const APInt &v = N->getAPIntValue();
+ return v.sge(0) && v.slt(16);
+}]>;
+
+def SHL2MUL32 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(32, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i32);
+}]>;
+
+def SHL2MUL16 : SDNodeXForm<imm, [{
+ const APInt &v = N->getAPIntValue();
+ APInt temp(16, 1);
+ return CurDAG->getTargetConstant(temp.shl(v), SDLoc(N), MVT::i16);
+}]>;
+
+// Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
+def : Pat<(shl (sext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int32Regs:$a), (i32 Int5Const:$b)),
+ (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(shl (sext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
+def : Pat<(shl (zext Int16Regs:$a), (i16 Int4Const:$b)),
+ (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
+ Requires<[doMulWide]>;
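+// Worked example: for (shl (sext i32 %a to i64), 3), Int5Const accepts the
+// shift amount and SHL2MUL32 turns it into the multiplier 1 << 3 = 8, so the
+// pattern above emits roughly "mul.wide.s32 %rd, %a, 8;" (register names
+// illustrative).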
+
+// Convert "sign/zero-extend then multiply" to mul.wide.
+def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
+ (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
+ (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
+ (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
+ (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
+ (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
+ (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
+ Requires<[doMulWide]>;
+
+def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
+ (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
+ Requires<[doMulWide]>;
+def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
+ (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
+ Requires<[doMulWide]>;
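+// Similarly, IR of the form (mul (sext i32 %a to i64), (sext i32 %b to i64))
+// matches the sext/sext pattern above and becomes a single mul.wide.s32
+// instead of two separate extensions followed by a 64-bit multiply.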
+
+//
+// Integer multiply-add
+//
+def SDTIMAD :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>,
+ SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>]>;
+def imad : SDNode<"NVPTXISD::IMAD", SDTIMAD>;
+
+def MAD16rrr :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
+def MAD16rri :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
+def MAD16rir :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
+def MAD16rii :
+ NVPTXInst<(outs Int16Regs:$dst),
+ (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
+ "mad.lo.s16 \t$dst, $a, $b, $c;",
+ [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD32rrr :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
+def MAD32rri :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
+def MAD32rir :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
+def MAD32rii :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
+ "mad.lo.s32 \t$dst, $a, $b, $c;",
+ [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
+
+def MAD64rrr :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
+def MAD64rri :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
+def MAD64rir :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
+def MAD64rii :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
+ "mad.lo.s64 \t$dst, $a, $b, $c;",
+ [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
+
+def INEG16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "neg.s16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+def INEG32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "neg.s32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+def INEG64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "neg.s64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+
+//-----------------------------------
+// Floating Point Arithmetic
+//-----------------------------------
+
+// Constant 1.0f
+def FloatConst1 : PatLeaf<(fpimm), [{
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEsingle() &&
+ N->getValueAPF().convertToFloat() == 1.0f;
+}]>;
+// Constant 1.0 (double)
+def DoubleConst1 : PatLeaf<(fpimm), [{
+ return &N->getValueAPF().getSemantics() == &llvm::APFloat::IEEEdouble() &&
+ N->getValueAPF().convertToDouble() == 1.0;
+}]>;
+
+// Loads FP16 constant into a register.
+//
+// ptxas does not have hex representation for fp16, so we can't use
+// fp16 immediate values in .f16 instructions. Instead we have to load
+// the constant into a register using mov.b16.
+def LOAD_CONST_F16 :
+ NVPTXInst<(outs Float16Regs:$dst), (ins f16imm:$a),
+ "mov.b16 \t$dst, $a;", []>;
+
+defm FADD : F3_fma_component<"add", fadd>;
+defm FSUB : F3_fma_component<"sub", fsub>;
+defm FMUL : F3_fma_component<"mul", fmul>;
+
+defm FMIN : F3<"min", fminnum>;
+defm FMAX : F3<"max", fmaxnum>;
+
+defm FABS : F2<"abs", fabs>;
+defm FNEG : F2<"neg", fneg>;
+defm FSQRT : F2<"sqrt.rn", fsqrt>;
+
+//
+// F64 division
+//
+def FDIV641r :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins f64imm:$a, Float64Regs:$b),
+ "rcp.rn.f64 \t$dst, $b;",
+ [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+def FDIV64rr :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, Float64Regs:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+def FDIV64ri :
+ NVPTXInst<(outs Float64Regs:$dst),
+ (ins Float64Regs:$a, f64imm:$b),
+ "div.rn.f64 \t$dst, $a, $b;",
+ [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
+
+//
+// F32 Approximate reciprocal
+//
+def FDIV321r_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV321r :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Approximate division
+//
+def FDIV32approxrr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX, doF32FTZ]>;
+def FDIV32approxrr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+def FDIV32approxri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.approx.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_APPROX]>;
+//
+// F32 Semi-accurate reciprocal
+//
+// rcp.approx gives the same result as div.full(1.0f, a) and is faster.
+//
+def FDIV321r_approx_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV321r_approx :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.approx.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Semi-accurate division
+//
+def FDIV32rr_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32ri_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL, doF32FTZ]>;
+def FDIV32rr :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+def FDIV32ri :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.full.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[do_DIVF32_FULL]>;
+//
+// F32 Accurate reciprocal
+//
+def FDIV321r_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.ftz.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20, doF32FTZ]>;
+def FDIV321r_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins f32imm:$a, Float32Regs:$b),
+ "rcp.rn.f32 \t$dst, $b;",
+ [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+//
+// F32 Accurate division
+//
+def FDIV32rr_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32ri_prec_ftz :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.ftz.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[doF32FTZ, reqPTX20]>;
+def FDIV32rr_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, Float32Regs:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+ Requires<[reqPTX20]>;
+def FDIV32ri_prec :
+ NVPTXInst<(outs Float32Regs:$dst),
+ (ins Float32Regs:$a, f32imm:$b),
+ "div.rn.f32 \t$dst, $a, $b;",
+ [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+ Requires<[reqPTX20]>;
+
+//
+// FMA
+//
+
+multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+ def rir : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, RC:$c))]>,
+ Requires<[Pred]>;
+ def rii : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, ImmCls:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, fpimm:$b, fpimm:$c))]>,
+ Requires<[Pred]>;
+}
+
+multiclass FMA_F16<string OpcStr, RegisterClass RC, Predicate Pred> {
+ def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
+ !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
+ [(set RC:$dst, (fma RC:$a, RC:$b, RC:$c))]>,
+ Requires<[useFP16Math, Pred]>;
+}
+
+defm FMA16_ftz : FMA_F16<"fma.rn.ftz.f16", Float16Regs, doF32FTZ>;
+defm FMA16 : FMA_F16<"fma.rn.f16", Float16Regs, true>;
+defm FMA16x2_ftz : FMA_F16<"fma.rn.ftz.f16x2", Float16x2Regs, doF32FTZ>;
+defm FMA16x2 : FMA_F16<"fma.rn.f16x2", Float16x2Regs, true>;
+defm FMA32_ftz : FMA<"fma.rn.ftz.f32", Float32Regs, f32imm, doF32FTZ>;
+defm FMA32 : FMA<"fma.rn.f32", Float32Regs, f32imm, true>;
+defm FMA64 : FMA<"fma.rn.f64", Float64Regs, f64imm, true>;
+
+// sin/cos
+def SINF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "sin.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+def COSF: NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "cos.approx.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
+ Requires<[allowUnsafeFPMath]>;
+
+// Lower (frem x, y) into (sub x, (mul (floor (div x, y)) y)),
+// i.e. "poor man's fmod()"
+
+// frem - f32 FTZ
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
+ (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRMI_FTZ),
+ Float32Regs:$y))>,
+ Requires<[doF32FTZ]>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
+ (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRMI_FTZ),
+ fpimm:$y))>,
+ Requires<[doF32FTZ]>;
+
+// frem - f32
+def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
+ (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRMI),
+ Float32Regs:$y))>;
+def : Pat<(frem Float32Regs:$x, fpimm:$y),
+ (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
+ (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+// frem - f64
+def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
+ (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRMI),
+ Float64Regs:$y))>;
+def : Pat<(frem Float64Regs:$x, fpimm:$y),
+ (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
+ (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRMI),
+ fpimm:$y))>;
+
+//-----------------------------------
+// Bitwise operations
+//-----------------------------------
+
+// Template for three-operand bitwise operations (dest, src, src). Creates
+// .b16, .b32, .b64, and .pred (predicate register, i.e. i1) versions of OpcStr.
+multiclass BITWISE<string OpcStr, SDNode OpNode> {
+ def b1rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+ def b1ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
+ !strconcat(OpcStr, ".pred \t$dst, $a, $b;"),
+ [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+ def b16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+ def b16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
+ !strconcat(OpcStr, ".b16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+ def b32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def b32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, ".b32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+ def b64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+ def b64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
+ !strconcat(OpcStr, ".b64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+}
+
+defm OR : BITWISE<"or", or>;
+defm AND : BITWISE<"and", and>;
+defm XOR : BITWISE<"xor", xor>;
+
+def NOT1 : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
+ "not.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
+def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "not.b16 \t$dst, $src;",
+ [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
+def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
+ "not.b32 \t$dst, $src;",
+ [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
+def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
+ "not.b64 \t$dst, $src;",
+ [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
+
+// Template for left/right shifts. Takes three operands,
+// [dest (reg), src (reg), shift (reg or imm)].
+// dest and src may be int64, int32, or int16, but shift is always int32.
+//
+// This template also defines a 32-bit shift (imm, imm) instruction.
+multiclass SHIFT<string OpcStr, SDNode OpNode> {
+ def i64rr :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
+ def i64ri :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
+ [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
+ def i32rr :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+ def i32ri :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
+ def i32ii :
+ NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
+ !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
+ [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
+ def i16rr :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
+ def i16ri :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
+ !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
+ [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
+}
+
+defm SHL : SHIFT<"shl.b", shl>;
+defm SRA : SHIFT<"shr.s", sra>;
+defm SRL : SHIFT<"shr.u", srl>;
+
+// Bit-reverse
+def BREV32 :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
+ "brev.b32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
+def BREV64 :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
+ "brev.b64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
+
+//
+// Rotate: Use the PTX shf instruction if available.
+//
+
+// 32 bit r2 = rotl r1, n
+// =>
+// r2 = shf.l r1, r1, n
+def ROTL32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTL32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32 bit r2 = rotr r1, n
+// =>
+// r2 = shf.r r1, r1, n
+def ROTR32imm_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+ Requires<[hasHWROT32]>;
+
+def ROTR32reg_hw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[hasHWROT32]>;
+
+// 32-bit software rotate by immediate. $amt2 should equal 32 - $amt1.
+def ROT32imm_sw :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ "shl.b32 \t%lhs, $src, $amt1;\n\t"
+ "shr.b32 \t%rhs, $src, $amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+def SUB_FRM_32 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
+ Requires<[noHWROT32]>;
+def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
+ (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
+ Requires<[noHWROT32]>;
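+// Worked example (register names illustrative): on a target without shf,
+// (rotl %r, 5) expands through ROT32imm_sw to
+//   shl.b32 %lhs, %r, 5;
+//   shr.b32 %rhs, %r, 27;   // SUB_FRM_32 supplies 32 - 5 = 27
+//   add.u32 %dst, %lhs, %rhs;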
+
+// 32-bit software rotate left by register.
+def ROTL32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shl.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shr.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 32-bit software rotate right by register.
+def ROTR32reg_sw :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b32 %lhs;\n\t"
+ ".reg .b32 %rhs;\n\t"
+ ".reg .b32 %amt2;\n\t"
+ "shr.b32 \t%lhs, $src, $amt;\n\t"
+ "sub.s32 \t%amt2, 32, $amt;\n\t"
+ "shl.b32 \t%rhs, $src, %amt2;\n\t"
+ "add.u32 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+ Requires<[noHWROT32]>;
+
+// 64-bit software rotate by immediate. $amt2 should equal 64 - $amt1.
+def ROT64imm_sw :
+ NVPTXInst<(outs Int64Regs:$dst),
+ (ins Int64Regs:$src, i32imm:$amt1, i32imm:$amt2),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ "shl.b64 \t%lhs, $src, $amt1;\n\t"
+ "shr.b64 \t%rhs, $src, $amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ []>;
+
+def SUB_FRM_64 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(64-N->getZExtValue(), SDLoc(N), MVT::i32);
+}]>;
+
+def : Pat<(rotl Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, imm:$amt, (SUB_FRM_64 node:$amt))>;
+def : Pat<(rotr Int64Regs:$src, (i32 imm:$amt)),
+ (ROT64imm_sw Int64Regs:$src, (SUB_FRM_64 node:$amt), imm:$amt)>;
+
+// 64-bit software rotate left by register.
+def ROTL64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shl.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shr.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+
+def ROTR64reg_sw :
+ NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
+ "{{\n\t"
+ ".reg .b64 %lhs;\n\t"
+ ".reg .b64 %rhs;\n\t"
+ ".reg .u32 %amt2;\n\t"
+ "shr.b64 \t%lhs, $src, $amt;\n\t"
+ "sub.u32 \t%amt2, 64, $amt;\n\t"
+ "shl.b64 \t%rhs, $src, %amt2;\n\t"
+ "add.u64 \t$dst, %lhs, %rhs;\n\t"
+ "}}",
+ [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+
+//
+// Funnel shift in clamp mode
+//
+
+// Create SDNodes so they can be used in the DAG code, e.g.
+// NVPTXISelLowering (LowerShiftLeftParts and LowerShiftRightParts)
+def SDTIntShiftDOp :
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
+ SDTCisInt<0>, SDTCisInt<3>]>;
+def FUN_SHFL_CLAMP : SDNode<"NVPTXISD::FUN_SHFL_CLAMP", SDTIntShiftDOp, []>;
+def FUN_SHFR_CLAMP : SDNode<"NVPTXISD::FUN_SHFR_CLAMP", SDTIntShiftDOp, []>;
+
+def FUNSHFLCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+
+def FUNSHFRCLAMP :
+ NVPTXInst<(outs Int32Regs:$dst),
+ (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
+ "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
+ [(set Int32Regs:$dst,
+ (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
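+// For reference: shf.l.clamp.b32 d, lo, hi, n treats {hi, lo} as one 64-bit
+// value, shifts it left by n (clamped to 32), and writes the upper 32 bits to
+// d; shf.r.clamp shifts right and writes the lower 32 bits. This is the
+// behavior LowerShiftLeftParts/LowerShiftRightParts rely on when splitting
+// 64-bit shifts into 32-bit pieces.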
+
+//
+// BFE - bit-field extract
+//
+
+// Template for BFE instructions. Takes four args,
+// [dest (reg), src (reg), start (reg or imm), end (reg or imm)].
+// Start may be an imm only if end is also an imm. FIXME: Is this a
+// restriction in PTX?
+//
+// dest and src may be int32 or int64, but start and end are always int32.
+multiclass BFE<string TyStr, RegisterClass RC> {
+ def rrr
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rri
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, Int32Regs:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+ def rii
+ : NVPTXInst<(outs RC:$d),
+ (ins RC:$a, i32imm:$b, i32imm:$c),
+ !strconcat("bfe.", TyStr, " \t$d, $a, $b, $c;"), []>;
+}
+
+let hasSideEffects = 0 in {
+ defm BFE_S32 : BFE<"s32", Int32Regs>;
+ defm BFE_U32 : BFE<"u32", Int32Regs>;
+ defm BFE_S64 : BFE<"s64", Int64Regs>;
+ defm BFE_U64 : BFE<"u64", Int64Regs>;
+}
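+// For reference: bfe.{s,u}{32,64} d, a, b, c extracts a field of c bits
+// starting at bit position b of a; the .u forms zero-extend the field and the
+// .s forms sign-extend it from the field's top bit.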
+
+//-----------------------------------
+// Comparison instructions (setp, set)
+//-----------------------------------
+
+// FIXME: This doesn't cover versions of set and setp that combine with a
+// boolean predicate, e.g. setp.eq.and.b16.
+
+let hasSideEffects = 0 in {
+ multiclass SETP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ri :
+ NVPTXInst<(outs Int1Regs:$dst), (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ def ir :
+ NVPTXInst<(outs Int1Regs:$dst), (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("setp${cmp:base}${cmp:ftz}.", TypeStr,
+ " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SETP_b16 : SETP<"b16", Int16Regs, i16imm>;
+defm SETP_s16 : SETP<"s16", Int16Regs, i16imm>;
+defm SETP_u16 : SETP<"u16", Int16Regs, i16imm>;
+defm SETP_b32 : SETP<"b32", Int32Regs, i32imm>;
+defm SETP_s32 : SETP<"s32", Int32Regs, i32imm>;
+defm SETP_u32 : SETP<"u32", Int32Regs, i32imm>;
+defm SETP_b64 : SETP<"b64", Int64Regs, i64imm>;
+defm SETP_s64 : SETP<"s64", Int64Regs, i64imm>;
+defm SETP_u64 : SETP<"u64", Int64Regs, i64imm>;
+defm SETP_f32 : SETP<"f32", Float32Regs, f32imm>;
+defm SETP_f64 : SETP<"f64", Float64Regs, f64imm>;
+def SETP_f16rr :
+ NVPTXInst<(outs Int1Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16 \t$dst, $a, $b;",
+ []>, Requires<[useFP16Math]>;
+
+def SETP_f16x2rr :
+ NVPTXInst<(outs Int1Regs:$p, Int1Regs:$q),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, CmpMode:$cmp),
+ "setp${cmp:base}${cmp:ftz}.f16x2 \t$p|$q, $a, $b;",
+ []>,
+ Requires<[useFP16Math]>;
+
+
+// FIXME: This doesn't appear to be correct. The "set" mnemonic has the form
+// "set.CmpOp{.ftz}.dtype.stype", where dtype is the type of the destination
+// reg, either u32, s32, or f32. Anyway these aren't used at the moment.
+
+let hasSideEffects = 0 in {
+ multiclass SET<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ri : NVPTXInst<(outs Int32Regs:$dst),
+ (ins RC:$a, ImmCls:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ def ir : NVPTXInst<(outs Int32Regs:$dst),
+ (ins ImmCls:$a, RC:$b, CmpMode:$cmp),
+ !strconcat("set$cmp.", TypeStr, " \t$dst, $a, $b;"), []>;
+ }
+}
+
+defm SET_b16 : SET<"b16", Int16Regs, i16imm>;
+defm SET_s16 : SET<"s16", Int16Regs, i16imm>;
+defm SET_u16 : SET<"u16", Int16Regs, i16imm>;
+defm SET_b32 : SET<"b32", Int32Regs, i32imm>;
+defm SET_s32 : SET<"s32", Int32Regs, i32imm>;
+defm SET_u32 : SET<"u32", Int32Regs, i32imm>;
+defm SET_b64 : SET<"b64", Int64Regs, i64imm>;
+defm SET_s64 : SET<"s64", Int64Regs, i64imm>;
+defm SET_u64 : SET<"u64", Int64Regs, i64imm>;
+defm SET_f16 : SET<"f16", Float16Regs, f16imm>;
+defm SET_f32 : SET<"f32", Float32Regs, f32imm>;
+defm SET_f64 : SET<"f64", Float64Regs, f64imm>;
+
+//-----------------------------------
+// Selection instructions (selp)
+//-----------------------------------
+
+// FIXME: Missing slct
+
+// selp instructions that don't have any pattern matches; we explicitly use
+// them within this file.
+let hasSideEffects = 0 in {
+ multiclass SELP<string TypeStr, RegisterClass RC, Operand ImmCls> {
+ def rr : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ri : NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ir : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ def ii : NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"), []>;
+ }
+
+ multiclass SELP_PATTERN<string TypeStr, RegisterClass RC, Operand ImmCls,
+ SDNode ImmNode> {
+ def rr :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, RC:$b))]>;
+ def ri :
+ NVPTXInst<(outs RC:$dst),
+ (ins RC:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, RC:$a, ImmNode:$b))]>;
+ def ir :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, RC:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, RC:$b))]>;
+ def ii :
+ NVPTXInst<(outs RC:$dst),
+ (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
+ !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
+ [(set RC:$dst, (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+ }
+}
+
+// Don't pattern match on selp.{s,u}{16,32,64} -- selp.b{16,32,64} is just as
+// good.
+defm SELP_b16 : SELP_PATTERN<"b16", Int16Regs, i16imm, imm>;
+defm SELP_s16 : SELP<"s16", Int16Regs, i16imm>;
+defm SELP_u16 : SELP<"u16", Int16Regs, i16imm>;
+defm SELP_b32 : SELP_PATTERN<"b32", Int32Regs, i32imm, imm>;
+defm SELP_s32 : SELP<"s32", Int32Regs, i32imm>;
+defm SELP_u32 : SELP<"u32", Int32Regs, i32imm>;
+defm SELP_b64 : SELP_PATTERN<"b64", Int64Regs, i64imm, imm>;
+defm SELP_s64 : SELP<"s64", Int64Regs, i64imm>;
+defm SELP_u64 : SELP<"u64", Int64Regs, i64imm>;
+defm SELP_f16 : SELP_PATTERN<"b16", Float16Regs, f16imm, fpimm>;
+defm SELP_f32 : SELP_PATTERN<"f32", Float32Regs, f32imm, fpimm>;
+defm SELP_f64 : SELP_PATTERN<"f64", Float64Regs, f64imm, fpimm>;
+
+def SELP_f16x2rr :
+ NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16x2Regs:$a, Float16x2Regs:$b, Int1Regs:$p),
+ "selp.b32 \t$dst, $a, $b, $p;",
+ [(set Float16x2Regs:$dst,
+ (select Int1Regs:$p, Float16x2Regs:$a, Float16x2Regs:$b))]>;
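+// An f16x2 pair occupies a single 32-bit register, so a plain selp.b32
+// selects the whole two-element vector in one instruction.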
+
+//-----------------------------------
+// Data Movement (Load / Store, Move)
+//-----------------------------------
+
+def ADDRri : ComplexPattern<i32, 2, "SelectADDRri", [frameindex],
+ [SDNPWantRoot]>;
+def ADDRri64 : ComplexPattern<i64, 2, "SelectADDRri64", [frameindex],
+ [SDNPWantRoot]>;
+
+def MEMri : Operand<i32> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int32Regs, i32imm);
+}
+def MEMri64 : Operand<i64> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops Int64Regs, i64imm);
+}
+
+def imem : Operand<iPTR> {
+ let PrintMethod = "printOperand";
+}
+
+def imemAny : Operand<iPTRAny> {
+ let PrintMethod = "printOperand";
+}
+
+def LdStCode : Operand<i32> {
+ let PrintMethod = "printLdStCode";
+}
+
+def SDTWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;
+def Wrapper : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
+
+// Load a memory address into a u32 or u64 register.
+def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
+ "mov.u32 \t$dst, $a;",
+ [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
+ "mov.u64 \t$dst, $a;",
+ [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+
+// Get pointer to local stack.
+let hasSideEffects = 0 in {
+ def MOV_DEPOT_ADDR : NVPTXInst<(outs Int32Regs:$d), (ins i32imm:$num),
+ "mov.u32 \t$d, __local_depot$num;", []>;
+ def MOV_DEPOT_ADDR_64 : NVPTXInst<(outs Int64Regs:$d), (ins i32imm:$num),
+ "mov.u64 \t$d, __local_depot$num;", []>;
+}
+
+
+// copyPhysReg is hard-coded in NVPTXInstrInfo.cpp
+let IsSimpleMove=1, hasSideEffects=0 in {
+ def IMOV1rr : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$sss),
+ "mov.pred \t$dst, $sss;", []>;
+ def IMOV16rr : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$sss),
+ "mov.u16 \t$dst, $sss;", []>;
+ def IMOV32rr : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$sss),
+ "mov.u32 \t$dst, $sss;", []>;
+ def IMOV64rr : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$sss),
+ "mov.u64 \t$dst, $sss;", []>;
+
+ def FMOV16rr : NVPTXInst<(outs Float16Regs:$dst), (ins Float16Regs:$src),
+ // We have to use .b16 here as there's no mov.f16.
+ "mov.b16 \t$dst, $src;", []>;
+ def FMOV32rr : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
+ "mov.f32 \t$dst, $src;", []>;
+ def FMOV64rr : NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src),
+ "mov.f64 \t$dst, $src;", []>;
+}
+
+def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
+ "mov.pred \t$dst, $src;",
+ [(set Int1Regs:$dst, imm:$src)]>;
+def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
+ "mov.u16 \t$dst, $src;",
+ [(set Int16Regs:$dst, imm:$src)]>;
+def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
+ "mov.u32 \t$dst, $src;",
+ [(set Int32Regs:$dst, imm:$src)]>;
+def IMOV64i : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
+ "mov.u64 \t$dst, $src;",
+ [(set Int64Regs:$dst, imm:$src)]>;
+
+def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
+ "mov.f32 \t$dst, $src;",
+ [(set Float32Regs:$dst, fpimm:$src)]>;
+def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
+ "mov.f64 \t$dst, $src;",
+ [(set Float64Regs:$dst, fpimm:$src)]>;
+
+def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
+
+//---- Copy Frame Index ----
+def LEA_ADDRi : NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
+ "add.u32 \t$dst, ${addr:add};",
+ [(set Int32Regs:$dst, ADDRri:$addr)]>;
+def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
+ "add.u64 \t$dst, ${addr:add};",
+ [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+
+//-----------------------------------
+// Comparison and Selection
+//-----------------------------------
+
+multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
+ Instruction setp_16rr,
+ Instruction setp_16ri,
+ Instruction setp_16ir,
+ Instruction setp_32rr,
+ Instruction setp_32ri,
+ Instruction setp_32ir,
+ Instruction setp_64rr,
+ Instruction setp_64ri,
+ Instruction setp_64ir,
+ Instruction set_16rr,
+ Instruction set_16ri,
+ Instruction set_16ir,
+ Instruction set_32rr,
+ Instruction set_32ri,
+ Instruction set_32ir,
+ Instruction set_64rr,
+ Instruction set_64ri,
+ Instruction set_64ir> {
+ // i16 -> pred
+ def : Pat<(i1 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
+ (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
+ (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> pred
+ def : Pat<(i1 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
+ (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
+ (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> pred
+ def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
+ (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
+ (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
+
+ // i16 -> i32
+ def : Pat<(i32 (OpNode Int16Regs:$a, Int16Regs:$b)),
+ (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
+ (set_16ri Int16Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
+ (set_16ir imm:$a, Int16Regs:$b, Mode)>;
+ // i32 -> i32
+ def : Pat<(i32 (OpNode Int32Regs:$a, Int32Regs:$b)),
+ (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
+ (set_32ri Int32Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
+ (set_32ir imm:$a, Int32Regs:$b, Mode)>;
+ // i64 -> i32
+ def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
+ (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
+ (set_64ri Int64Regs:$a, imm:$b, Mode)>;
+ def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
+ (set_64ir imm:$a, Int64Regs:$b, Mode)>;
+}
+
+multiclass ISET_FORMAT_SIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_s16rr, SETP_s16ri, SETP_s16ir,
+ SETP_s32rr, SETP_s32ri, SETP_s32ir,
+ SETP_s64rr, SETP_s64ri, SETP_s64ir,
+ SET_s16rr, SET_s16ri, SET_s16ir,
+ SET_s32rr, SET_s32ri, SET_s32ir,
+ SET_s64rr, SET_s64ri, SET_s64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+multiclass ISET_FORMAT_UNSIGNED<PatFrag OpNode, PatLeaf Mode>
+ : ISET_FORMAT<OpNode, Mode,
+ SETP_u16rr, SETP_u16ri, SETP_u16ir,
+ SETP_u32rr, SETP_u32ri, SETP_u32ir,
+ SETP_u64rr, SETP_u64ri, SETP_u64ir,
+ SET_u16rr, SET_u16ri, SET_u16ir,
+ SET_u32rr, SET_u32ri, SET_u32ir,
+ SET_u64rr, SET_u64ri, SET_u64ir> {
+ // TableGen doesn't like empty multiclasses.
+ def : PatLeaf<(i32 0)>;
+}
+
+defm : ISET_FORMAT_SIGNED<setgt, CmpGT>;
+defm : ISET_FORMAT_SIGNED<setlt, CmpLT>;
+defm : ISET_FORMAT_SIGNED<setge, CmpGE>;
+defm : ISET_FORMAT_SIGNED<setle, CmpLE>;
+defm : ISET_FORMAT_SIGNED<seteq, CmpEQ>;
+defm : ISET_FORMAT_SIGNED<setne, CmpNE>;
+defm : ISET_FORMAT_UNSIGNED<setugt, CmpGT>;
+defm : ISET_FORMAT_UNSIGNED<setult, CmpLT>;
+defm : ISET_FORMAT_UNSIGNED<setuge, CmpGE>;
+defm : ISET_FORMAT_UNSIGNED<setule, CmpLE>;
+defm : ISET_FORMAT_UNSIGNED<setueq, CmpEQ>;
+defm : ISET_FORMAT_UNSIGNED<setune, CmpNE>;
+
+// i1 compares
+def : Pat<(setne Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(setune Int1Regs:$a, Int1Regs:$b),
+ (XORb1rr Int1Regs:$a, Int1Regs:$b)>;
+
+def : Pat<(seteq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(setueq Int1Regs:$a, Int1Regs:$b),
+ (NOT1 (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+
+// i1 compare -> i32
+def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
+def : Pat<(i32 (seteq Int1Regs:$a, Int1Regs:$b)),
+ (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
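+// For reference: selp.b32 d, a, b, p computes d = p ? a : b. The xor above is
+// true exactly when the inputs differ, so the order of the two immediates
+// decides whether -1 (all ones) is produced for the "differ" case or for the
+// "equal" case.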
+
+
+
+multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
+ // f16 -> pred
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SETP_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SETP_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math,doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> pred
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> pred
+ def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+
+ // f16 -> i32
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, Float16Regs:$b)),
+ (SET_f16rr Float16Regs:$a, Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float16Regs:$a, fpimm:$b)),
+ (SET_f16rr Float16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
+ Requires<[useFP16Math]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, ModeFTZ)>,
+ Requires<[useFP16Math, doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float16Regs:$b)),
+ (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Float16Regs:$b, Mode)>,
+ Requires<[useFP16Math]>;
+
+ // f32 -> i32
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+ (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+ (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
+ Requires<[doF32FTZ]>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+ (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
+
+ // f64 -> i32
+ def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
+ (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
+ def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
+ (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
+ def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
+ (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
+}
+
+defm FSetOGT : FSET_FORMAT<setogt, CmpGT, CmpGT_FTZ>;
+defm FSetOLT : FSET_FORMAT<setolt, CmpLT, CmpLT_FTZ>;
+defm FSetOGE : FSET_FORMAT<setoge, CmpGE, CmpGE_FTZ>;
+defm FSetOLE : FSET_FORMAT<setole, CmpLE, CmpLE_FTZ>;
+defm FSetOEQ : FSET_FORMAT<setoeq, CmpEQ, CmpEQ_FTZ>;
+defm FSetONE : FSET_FORMAT<setone, CmpNE, CmpNE_FTZ>;
+
+defm FSetUGT : FSET_FORMAT<setugt, CmpGTU, CmpGTU_FTZ>;
+defm FSetULT : FSET_FORMAT<setult, CmpLTU, CmpLTU_FTZ>;
+defm FSetUGE : FSET_FORMAT<setuge, CmpGEU, CmpGEU_FTZ>;
+defm FSetULE : FSET_FORMAT<setule, CmpLEU, CmpLEU_FTZ>;
+defm FSetUEQ : FSET_FORMAT<setueq, CmpEQU, CmpEQU_FTZ>;
+defm FSetUNE : FSET_FORMAT<setune, CmpNEU, CmpNEU_FTZ>;
+
+defm FSetGT : FSET_FORMAT<setgt, CmpGT, CmpGT_FTZ>;
+defm FSetLT : FSET_FORMAT<setlt, CmpLT, CmpLT_FTZ>;
+defm FSetGE : FSET_FORMAT<setge, CmpGE, CmpGE_FTZ>;
+defm FSetLE : FSET_FORMAT<setle, CmpLE, CmpLE_FTZ>;
+defm FSetEQ : FSET_FORMAT<seteq, CmpEQ, CmpEQ_FTZ>;
+defm FSetNE : FSET_FORMAT<setne, CmpNE, CmpNE_FTZ>;
+
+defm FSetNUM : FSET_FORMAT<seto, CmpNUM, CmpNUM_FTZ>;
+defm FSetNAN : FSET_FORMAT<setuo, CmpNAN, CmpNAN_FTZ>;
+
+// FIXME: What is this doing here? Can it be deleted?
+// def ld_param : SDNode<"NVPTXISD::LOAD_PARAM", SDTLoad,
+// [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+def SDTDeclareParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTDeclareScalarParamProfile :
+ SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamProfile : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
+def SDTLoadParamV2Profile : SDTypeProfile<2, 2, [SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisInt<3>]>;
+def SDTLoadParamV4Profile : SDTypeProfile<4, 2, [SDTCisInt<4>, SDTCisInt<5>]>;
+def SDTPrintCallProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTPrintCallUniProfile : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def SDTStoreParamProfile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV2Profile : SDTypeProfile<0, 4, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParamV4Profile : SDTypeProfile<0, 6, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTStoreParam32Profile : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>]>;
+def SDTCallArgProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTCallArgMarkProfile : SDTypeProfile<0, 0, []>;
+def SDTCallVoidProfile : SDTypeProfile<0, 1, []>;
+def SDTCallValProfile : SDTypeProfile<1, 0, []>;
+def SDTMoveParamProfile : SDTypeProfile<1, 1, []>;
+def SDTStoreRetvalProfile : SDTypeProfile<0, 2, [SDTCisInt<0>]>;
+def SDTStoreRetvalV2Profile : SDTypeProfile<0, 3, [SDTCisInt<0>]>;
+def SDTStoreRetvalV4Profile : SDTypeProfile<0, 5, [SDTCisInt<0>]>;
+def SDTPseudoUseParamProfile : SDTypeProfile<0, 1, []>;
+
+def DeclareParam :
+ SDNode<"NVPTXISD::DeclareParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareScalarParam :
+ SDNode<"NVPTXISD::DeclareScalarParam", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRetParam :
+ SDNode<"NVPTXISD::DeclareRetParam", SDTDeclareParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def DeclareRet :
+ SDNode<"NVPTXISD::DeclareRet", SDTDeclareScalarParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LoadParam :
+ SDNode<"NVPTXISD::LoadParam", SDTLoadParamProfile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV2 :
+ SDNode<"NVPTXISD::LoadParamV2", SDTLoadParamV2Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def LoadParamV4 :
+ SDNode<"NVPTXISD::LoadParamV4", SDTLoadParamV4Profile,
+ [SDNPHasChain, SDNPMayLoad, SDNPOutGlue, SDNPInGlue]>;
+def PrintCall :
+ SDNode<"NVPTXISD::PrintCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCall :
+ SDNode<"NVPTXISD::PrintConvergentCall", SDTPrintCallProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintCallUni :
+ SDNode<"NVPTXISD::PrintCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def PrintConvergentCallUni :
+ SDNode<"NVPTXISD::PrintConvergentCallUni", SDTPrintCallUniProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParam :
+ SDNode<"NVPTXISD::StoreParam", SDTStoreParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV2 :
+ SDNode<"NVPTXISD::StoreParamV2", SDTStoreParamV2Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamV4 :
+ SDNode<"NVPTXISD::StoreParamV4", SDTStoreParamV4Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamU32 :
+ SDNode<"NVPTXISD::StoreParamU32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def StoreParamS32 :
+ SDNode<"NVPTXISD::StoreParamS32", SDTStoreParam32Profile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgBegin :
+ SDNode<"NVPTXISD::CallArgBegin", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArg :
+ SDNode<"NVPTXISD::CallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def LastCallArg :
+ SDNode<"NVPTXISD::LastCallArg", SDTCallArgProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallArgEnd :
+ SDNode<"NVPTXISD::CallArgEnd", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVoid :
+ SDNode<"NVPTXISD::CallVoid", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def Prototype :
+ SDNode<"NVPTXISD::Prototype", SDTCallVoidProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def CallVal :
+ SDNode<"NVPTXISD::CallVal", SDTCallValProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def MoveParam :
+ SDNode<"NVPTXISD::MoveParam", SDTMoveParamProfile, []>;
+def StoreRetval :
+ SDNode<"NVPTXISD::StoreRetval", SDTStoreRetvalProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV2 :
+ SDNode<"NVPTXISD::StoreRetvalV2", SDTStoreRetvalV2Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def StoreRetvalV4 :
+ SDNode<"NVPTXISD::StoreRetvalV4", SDTStoreRetvalV4Profile,
+ [SDNPHasChain, SDNPSideEffect]>;
+def PseudoUseParam :
+ SDNode<"NVPTXISD::PseudoUseParam", SDTPseudoUseParamProfile,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def RETURNNode :
+ SDNode<"NVPTXISD::RETURN", SDTCallArgMarkProfile,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+let mayLoad = 1 in {
+ class LoadParamMemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat("ld.param", opstr, " \t$dst, [retval0+$b];"),
+ []>;
+
+ class LoadParamV2MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2), (ins i32imm:$b),
+ !strconcat("ld.param.v2", opstr,
+ " \t{{$dst, $dst2}}, [retval0+$b];"), []>;
+
+ class LoadParamV4MemInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst, regclass:$dst2, regclass:$dst3,
+ regclass:$dst4),
+ (ins i32imm:$b),
+ !strconcat("ld.param.v4", opstr,
+ " \t{{$dst, $dst2, $dst3, $dst4}}, [retval0+$b];"),
+ []>;
+}
+
+class LoadParamRegInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs regclass:$dst), (ins i32imm:$b),
+ !strconcat("mov", opstr, " \t$dst, retval$b;"),
+ [(set regclass:$dst, (LoadParam (i32 0), (i32 imm:$b)))]>;
+
+let mayStore = 1 in {
+ class StoreParamInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a, i32imm:$b),
+ !strconcat("st.param", opstr, " \t[param$a+$b], $val;"),
+ []>;
+
+ class StoreParamV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2,
+ i32imm:$a, i32imm:$b),
+ !strconcat("st.param.v2", opstr,
+ " \t[param$a+$b], {{$val, $val2}};"),
+ []>;
+
+ class StoreParamV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a,
+ i32imm:$b),
+ !strconcat("st.param.v4", opstr,
+ " \t[param$a+$b], {{$val, $val2, $val3, $val4}};"),
+ []>;
+
+ class StoreRetvalInst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, i32imm:$a),
+ !strconcat("st.param", opstr, " \t[func_retval0+$a], $val;"),
+ []>;
+
+ class StoreRetvalV2Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs), (ins regclass:$val, regclass:$val2, i32imm:$a),
+ !strconcat("st.param.v2", opstr,
+ " \t[func_retval0+$a], {{$val, $val2}};"),
+ []>;
+
+ class StoreRetvalV4Inst<NVPTXRegClass regclass, string opstr> :
+ NVPTXInst<(outs),
+ (ins regclass:$val, regclass:$val2, regclass:$val3,
+ regclass:$val4, i32imm:$a),
+ !strconcat("st.param.v4", opstr,
+ " \t[func_retval0+$a], {{$val, $val2, $val3, $val4}};"),
+ []>;
+}
+
+let isCall=1 in {
+ multiclass CALL<string OpcStr, SDNode OpNode> {
+ def PrintCallNoRetInst : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " "), [(OpNode (i32 0))]>;
+ def PrintCallRetInst1 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0), "), [(OpNode (i32 1))]>;
+ def PrintCallRetInst2 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1), "), [(OpNode (i32 2))]>;
+ def PrintCallRetInst3 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2), "), [(OpNode (i32 3))]>;
+ def PrintCallRetInst4 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3), "),
+ [(OpNode (i32 4))]>;
+ def PrintCallRetInst5 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4), "),
+ [(OpNode (i32 5))]>;
+ def PrintCallRetInst6 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5), "),
+ [(OpNode (i32 6))]>;
+ def PrintCallRetInst7 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6), "),
+ [(OpNode (i32 7))]>;
+ def PrintCallRetInst8 : NVPTXInst<(outs), (ins),
+ !strconcat(OpcStr, " (retval0, retval1, retval2, retval3, retval4, "
+ "retval5, retval6, retval7), "),
+ [(OpNode (i32 8))]>;
+ }
+}
+
+defm Call : CALL<"call", PrintCall>;
+defm CallUni : CALL<"call.uni", PrintCallUni>;
+
+// Convergent call instructions. These are identical to regular calls, except
+// they have the isConvergent bit set.
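+// (Convergent operations, e.g. calls that may perform bar.sync or warp-level
+// shuffles/votes, must not be made control-dependent on any additional values.)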
+let isConvergent=1 in {
+ defm ConvergentCall : CALL<"call", PrintConvergentCall>;
+ defm ConvergentCallUni : CALL<"call.uni", PrintConvergentCallUni>;
+}
+
+def LoadParamMemI64 : LoadParamMemInst<Int64Regs, ".b64">;
+def LoadParamMemI32 : LoadParamMemInst<Int32Regs, ".b32">;
+def LoadParamMemI16 : LoadParamMemInst<Int16Regs, ".b16">;
+def LoadParamMemI8 : LoadParamMemInst<Int16Regs, ".b8">;
+def LoadParamMemV2I64 : LoadParamV2MemInst<Int64Regs, ".b64">;
+def LoadParamMemV2I32 : LoadParamV2MemInst<Int32Regs, ".b32">;
+def LoadParamMemV2I16 : LoadParamV2MemInst<Int16Regs, ".b16">;
+def LoadParamMemV2I8 : LoadParamV2MemInst<Int16Regs, ".b8">;
+def LoadParamMemV4I32 : LoadParamV4MemInst<Int32Regs, ".b32">;
+def LoadParamMemV4I16 : LoadParamV4MemInst<Int16Regs, ".b16">;
+def LoadParamMemV4I8 : LoadParamV4MemInst<Int16Regs, ".b8">;
+def LoadParamMemF16 : LoadParamMemInst<Float16Regs, ".b16">;
+def LoadParamMemF16x2 : LoadParamMemInst<Float16x2Regs, ".b32">;
+def LoadParamMemF32 : LoadParamMemInst<Float32Regs, ".f32">;
+def LoadParamMemF64 : LoadParamMemInst<Float64Regs, ".f64">;
+def LoadParamMemV2F16 : LoadParamV2MemInst<Float16Regs, ".b16">;
+def LoadParamMemV2F16x2: LoadParamV2MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV2F32 : LoadParamV2MemInst<Float32Regs, ".f32">;
+def LoadParamMemV2F64 : LoadParamV2MemInst<Float64Regs, ".f64">;
+def LoadParamMemV4F16 : LoadParamV4MemInst<Float16Regs, ".b16">;
+def LoadParamMemV4F16x2: LoadParamV4MemInst<Float16x2Regs, ".b32">;
+def LoadParamMemV4F32 : LoadParamV4MemInst<Float32Regs, ".f32">;
+
+def StoreParamI64 : StoreParamInst<Int64Regs, ".b64">;
+def StoreParamI32 : StoreParamInst<Int32Regs, ".b32">;
+
+def StoreParamI16 : StoreParamInst<Int16Regs, ".b16">;
+def StoreParamI8 : StoreParamInst<Int16Regs, ".b8">;
+def StoreParamV2I64 : StoreParamV2Inst<Int64Regs, ".b64">;
+def StoreParamV2I32 : StoreParamV2Inst<Int32Regs, ".b32">;
+def StoreParamV2I16 : StoreParamV2Inst<Int16Regs, ".b16">;
+def StoreParamV2I8 : StoreParamV2Inst<Int16Regs, ".b8">;
+
+def StoreParamV4I32 : StoreParamV4Inst<Int32Regs, ".b32">;
+def StoreParamV4I16 : StoreParamV4Inst<Int16Regs, ".b16">;
+def StoreParamV4I8 : StoreParamV4Inst<Int16Regs, ".b8">;
+
+def StoreParamF16 : StoreParamInst<Float16Regs, ".b16">;
+def StoreParamF16x2 : StoreParamInst<Float16x2Regs, ".b32">;
+def StoreParamF32 : StoreParamInst<Float32Regs, ".f32">;
+def StoreParamF64 : StoreParamInst<Float64Regs, ".f64">;
+def StoreParamV2F16 : StoreParamV2Inst<Float16Regs, ".b16">;
+def StoreParamV2F16x2 : StoreParamV2Inst<Float16x2Regs, ".b32">;
+def StoreParamV2F32 : StoreParamV2Inst<Float32Regs, ".f32">;
+def StoreParamV2F64 : StoreParamV2Inst<Float64Regs, ".f64">;
+def StoreParamV4F16 : StoreParamV4Inst<Float16Regs, ".b16">;
+def StoreParamV4F16x2 : StoreParamV4Inst<Float16x2Regs, ".b32">;
+def StoreParamV4F32 : StoreParamV4Inst<Float32Regs, ".f32">;
+
+def StoreRetvalI64 : StoreRetvalInst<Int64Regs, ".b64">;
+def StoreRetvalI32 : StoreRetvalInst<Int32Regs, ".b32">;
+def StoreRetvalI16 : StoreRetvalInst<Int16Regs, ".b16">;
+def StoreRetvalI8 : StoreRetvalInst<Int16Regs, ".b8">;
+def StoreRetvalV2I64 : StoreRetvalV2Inst<Int64Regs, ".b64">;
+def StoreRetvalV2I32 : StoreRetvalV2Inst<Int32Regs, ".b32">;
+def StoreRetvalV2I16 : StoreRetvalV2Inst<Int16Regs, ".b16">;
+def StoreRetvalV2I8 : StoreRetvalV2Inst<Int16Regs, ".b8">;
+def StoreRetvalV4I32 : StoreRetvalV4Inst<Int32Regs, ".b32">;
+def StoreRetvalV4I16 : StoreRetvalV4Inst<Int16Regs, ".b16">;
+def StoreRetvalV4I8 : StoreRetvalV4Inst<Int16Regs, ".b8">;
+
+def StoreRetvalF64 : StoreRetvalInst<Float64Regs, ".f64">;
+def StoreRetvalF32 : StoreRetvalInst<Float32Regs, ".f32">;
+def StoreRetvalF16 : StoreRetvalInst<Float16Regs, ".b16">;
+def StoreRetvalF16x2 : StoreRetvalInst<Float16x2Regs, ".b32">;
+def StoreRetvalV2F64 : StoreRetvalV2Inst<Float64Regs, ".f64">;
+def StoreRetvalV2F32 : StoreRetvalV2Inst<Float32Regs, ".f32">;
+def StoreRetvalV2F16 : StoreRetvalV2Inst<Float16Regs, ".b16">;
+def StoreRetvalV2F16x2: StoreRetvalV2Inst<Float16x2Regs, ".b32">;
+def StoreRetvalV4F32 : StoreRetvalV4Inst<Float32Regs, ".f32">;
+def StoreRetvalV4F16 : StoreRetvalV4Inst<Float16Regs, ".b16">;
+def StoreRetvalV4F16x2: StoreRetvalV4Inst<Float16x2Regs, ".b32">;
+
+def CallArgBeginInst : NVPTXInst<(outs), (ins), "(", [(CallArgBegin)]>;
+def CallArgEndInst1 : NVPTXInst<(outs), (ins), ");", [(CallArgEnd (i32 1))]>;
+def CallArgEndInst0 : NVPTXInst<(outs), (ins), ")", [(CallArgEnd (i32 0))]>;
+def RETURNInst : NVPTXInst<(outs), (ins), "ret;", [(RETURNNode)]>;
+
+class CallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a, ",
+ [(CallArg (i32 0), regclass:$a)]>;
+
+class LastCallArgInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$a), "$a",
+ [(LastCallArg (i32 0), regclass:$a)]>;
+
+def CallArgI64 : CallArgInst<Int64Regs>;
+def CallArgI32 : CallArgInst<Int32Regs>;
+def CallArgI16 : CallArgInst<Int16Regs>;
+def CallArgF64 : CallArgInst<Float64Regs>;
+def CallArgF32 : CallArgInst<Float32Regs>;
+
+def LastCallArgI64 : LastCallArgInst<Int64Regs>;
+def LastCallArgI32 : LastCallArgInst<Int32Regs>;
+def LastCallArgI16 : LastCallArgInst<Int16Regs>;
+def LastCallArgF64 : LastCallArgInst<Float64Regs>;
+def LastCallArgF32 : LastCallArgInst<Float32Regs>;
+
+def CallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a, ",
+ [(CallArg (i32 0), (i32 imm:$a))]>;
+def LastCallArgI32imm : NVPTXInst<(outs), (ins i32imm:$a), "$a",
+ [(LastCallArg (i32 0), (i32 imm:$a))]>;
+
+def CallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a, ",
+ [(CallArg (i32 1), (i32 imm:$a))]>;
+def LastCallArgParam : NVPTXInst<(outs), (ins i32imm:$a), "param$a",
+ [(LastCallArg (i32 1), (i32 imm:$a))]>;
+
+def CallVoidInst : NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
+ [(CallVoid (Wrapper tglobaladdr:$addr))]>;
+def CallVoidInstReg : NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
+ [(CallVoid Int32Regs:$addr)]>;
+def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
+ [(CallVoid Int64Regs:$addr)]>;
+def PrototypeInst : NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
+ [(Prototype (i32 imm:$val))]>;
+
+def DeclareRetMemInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$size, i32imm:$num),
+ ".param .align $align .b8 retval$num[$size];",
+ [(DeclareRetParam (i32 imm:$align), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetScalarInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".param .b$size retval$num;",
+ [(DeclareRet (i32 1), (i32 imm:$size), (i32 imm:$num))]>;
+def DeclareRetRegInst :
+ NVPTXInst<(outs), (ins i32imm:$size, i32imm:$num),
+ ".reg .b$size retval$num;",
+ [(DeclareRet (i32 2), (i32 imm:$size), (i32 imm:$num))]>;
+
+def DeclareParamInst :
+ NVPTXInst<(outs), (ins i32imm:$align, i32imm:$a, i32imm:$size),
+ ".param .align $align .b8 param$a[$size];",
+ [(DeclareParam (i32 imm:$align), (i32 imm:$a), (i32 imm:$size))]>;
+def DeclareScalarParamInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".param .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 0))]>;
+def DeclareScalarRegInst :
+ NVPTXInst<(outs), (ins i32imm:$a, i32imm:$size),
+ ".reg .b$size param$a;",
+ [(DeclareScalarParam (i32 imm:$a), (i32 imm:$size), (i32 1))]>;
+
+class MoveParamInst<NVPTXRegClass regclass, string asmstr> :
+ NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
+ !strconcat("mov", asmstr, " \t$dst, $src;"),
+ [(set regclass:$dst, (MoveParam regclass:$src))]>;
+
+def MoveParamI64 : MoveParamInst<Int64Regs, ".b64">;
+def MoveParamI32 : MoveParamInst<Int32Regs, ".b32">;
+def MoveParamI16 :
+ NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
+ "cvt.u16.u32 \t$dst, $src;",
+ [(set Int16Regs:$dst, (MoveParam Int16Regs:$src))]>;
+def MoveParamF64 : MoveParamInst<Float64Regs, ".f64">;
+def MoveParamF32 : MoveParamInst<Float32Regs, ".f32">;
+def MoveParamF16 : MoveParamInst<Float16Regs, ".f16">;
+
+class PseudoUseParamInst<NVPTXRegClass regclass> :
+ NVPTXInst<(outs), (ins regclass:$src),
+ "// Pseudo use of $src",
+ [(PseudoUseParam regclass:$src)]>;
+
+def PseudoUseParamI64 : PseudoUseParamInst<Int64Regs>;
+def PseudoUseParamI32 : PseudoUseParamInst<Int32Regs>;
+def PseudoUseParamI16 : PseudoUseParamInst<Int16Regs>;
+def PseudoUseParamF64 : PseudoUseParamInst<Float64Regs>;
+def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs>;
+
+
+//
+// Load / Store Handling
+//
+multiclass LD<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _areg_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr];", []>;
+ def _ari : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _ari_64 : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+ def _asi : NVPTXInst<
+ (outs regclass:$dst),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t$dst, [$addr+$offset];", []>;
+}
+
+let mayLoad=1, hasSideEffects=0 in {
+ defm LD_i8 : LD<Int16Regs>;
+ defm LD_i16 : LD<Int16Regs>;
+ defm LD_i32 : LD<Int32Regs>;
+ defm LD_i64 : LD<Int64Regs>;
+ defm LD_f16 : LD<Float16Regs>;
+ defm LD_f16x2 : LD<Float16x2Regs>;
+ defm LD_f32 : LD<Float32Regs>;
+ defm LD_f64 : LD<Float64Regs>;
+}
+
+multiclass ST<NVPTXRegClass regclass> {
+ def _avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr], $src;", []>;
+ def _ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+ def _asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src, LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec,
+ LdStCode:$Sign, i32imm:$toWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$toWidth"
+ " \t[$addr+$offset], $src;", []>;
+}
+
+let mayStore=1, hasSideEffects=0 in {
+ defm ST_i8 : ST<Int16Regs>;
+ defm ST_i16 : ST<Int16Regs>;
+ defm ST_i32 : ST<Int32Regs>;
+ defm ST_i64 : ST<Int64Regs>;
+ defm ST_f16 : ST<Float16Regs>;
+ defm ST_f16x2 : ST<Float16x2Regs>;
+ defm ST_f32 : ST<Float32Regs>;
+ defm ST_f64 : ST<Float64Regs>;
+}
+
+// The following is used only during and after vector elementization. Vector
+// elementization happens at the machine instruction level, so the following
+// instructions never appear in the DAG.
+multiclass LD_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr];", []>;
+ def _v2_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v2_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2}}, [$addr+$offset];", []>;
+ def _v4_avar : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr];", []>;
+ def _v4_ari : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+ def _v4_asi : NVPTXInst<
+ (outs regclass:$dst1, regclass:$dst2, regclass:$dst3, regclass:$dst4),
+ (ins LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "ld${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t{{$dst1, $dst2, $dst3, $dst4}}, [$addr+$offset];", []>;
+}
+let mayLoad=1, hasSideEffects=0 in {
+ defm LDV_i8 : LD_VEC<Int16Regs>;
+ defm LDV_i16 : LD_VEC<Int16Regs>;
+ defm LDV_i32 : LD_VEC<Int32Regs>;
+ defm LDV_i64 : LD_VEC<Int64Regs>;
+ defm LDV_f16 : LD_VEC<Float16Regs>;
+ defm LDV_f16x2 : LD_VEC<Float16x2Regs>;
+ defm LDV_f32 : LD_VEC<Float32Regs>;
+ defm LDV_f64 : LD_VEC<Float64Regs>;
+}
+
+multiclass ST_VEC<NVPTXRegClass regclass> {
+ def _v2_avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2}};", []>;
+ def _v2_ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int32Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, Int64Regs:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v2_asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, LdStCode:$isVol, LdStCode:$addsp,
+ LdStCode:$Vec, LdStCode:$Sign, i32imm:$fromWidth, imem:$addr,
+ i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2}};", []>;
+ def _v4_avar : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_areg_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int32Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_ari_64 : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, Int64Regs:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}$fromWidth "
+ "\t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+ def _v4_asi : NVPTXInst<
+ (outs),
+ (ins regclass:$src1, regclass:$src2, regclass:$src3, regclass:$src4,
+ LdStCode:$isVol, LdStCode:$addsp, LdStCode:$Vec, LdStCode:$Sign,
+ i32imm:$fromWidth, imem:$addr, i32imm:$offset),
+ "st${isVol:volatile}${addsp:addsp}${Vec:vec}.${Sign:sign}"
+ "$fromWidth \t[$addr+$offset], {{$src1, $src2, $src3, $src4}};", []>;
+}
+
+let mayStore=1, hasSideEffects=0 in {
+ defm STV_i8 : ST_VEC<Int16Regs>;
+ defm STV_i16 : ST_VEC<Int16Regs>;
+ defm STV_i32 : ST_VEC<Int32Regs>;
+ defm STV_i64 : ST_VEC<Int64Regs>;
+ defm STV_f16 : ST_VEC<Float16Regs>;
+ defm STV_f16x2 : ST_VEC<Float16x2Regs>;
+ defm STV_f32 : ST_VEC<Float32Regs>;
+ defm STV_f64 : ST_VEC<Float64Regs>;
+}
+
+//---- Conversion ----
+
+class F_BITCONVERT<string SzStr, NVPTXRegClass regclassIn,
+ NVPTXRegClass regclassOut> :
+ NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
+ !strconcat("mov.b", !strconcat(SzStr, " \t$d, $a;")),
+ [(set regclassOut:$d, (bitconvert regclassIn:$a))]>;
+
+def BITCONVERT_16_I2F : F_BITCONVERT<"16", Int16Regs, Float16Regs>;
+def BITCONVERT_16_F2I : F_BITCONVERT<"16", Float16Regs, Int16Regs>;
+def BITCONVERT_32_I2F : F_BITCONVERT<"32", Int32Regs, Float32Regs>;
+def BITCONVERT_32_F2I : F_BITCONVERT<"32", Float32Regs, Int32Regs>;
+def BITCONVERT_64_I2F : F_BITCONVERT<"64", Int64Regs, Float64Regs>;
+def BITCONVERT_64_F2I : F_BITCONVERT<"64", Float64Regs, Int64Regs>;
+def BITCONVERT_32_I2F16x2 : F_BITCONVERT<"32", Int32Regs, Float16x2Regs>;
+def BITCONVERT_32_F16x22I : F_BITCONVERT<"32", Float16x2Regs, Int32Regs>;
+
+// NOTE: pred->fp conversions are currently sub-optimal due to an issue in
+// TableGen where we cannot specify floating-point literals in isel patterns.
+// Therefore, we use an integer selp to select either 1 or 0 and then cvt to
+// floating-point.
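+//
+// For illustration, (f32 (sint_to_fp Int1Regs:$a)) is expected to lower to
+// something like (register names illustrative):
+//   selp.u32       %r, 1, 0, %p;
+//   cvt.rn.f32.s32 %f, %r;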
+
+// sint -> f16
+def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
+ (CVT_f16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
+ (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
+ (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
+ (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f16
+def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
+ (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
+ (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
+ (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
+ (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f32
+def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
+ (CVT_f32_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
+ (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
+ (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
+ (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f32
+def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
+ (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
+ (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
+ (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
+ (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+
+// sint -> f64
+def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
+ (CVT_f64_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
+ (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
+ (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
+ (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+
+// uint -> f64
+def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
+ (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
+ (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
+ (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
+ (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+
+
+// f16 -> sint
+def : Pat<(i1 (fp_to_sint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float16Regs:$a)),
+ (CVT_s16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float16Regs:$a)),
+ (CVT_s32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float16Regs:$a)),
+ (CVT_s64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f16 -> uint
+def : Pat<(i1 (fp_to_uint Float16Regs:$a)),
+ (SETP_b16ri (BITCONVERT_16_F2I Float16Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float16Regs:$a)),
+ (CVT_u16_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float16Regs:$a)),
+ (CVT_u32_f16 Float16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float16Regs:$a)),
+ (CVT_u64_f16 Float16Regs:$a, CvtRZI)>;
+
+// f32 -> sint
+def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+ (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+ (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+ (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f32 -> uint
+def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
+ (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+ (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+ (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+ (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
+
+// f64 -> sint
+def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
+ (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
+ (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
+ (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
+
+// f64 -> uint
+def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
+ (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
+def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
+ (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
+ (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
+ (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
+
+// sext i1
+def : Pat<(i16 (sext Int1Regs:$a)),
+ (SELP_s16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (sext Int1Regs:$a)),
+ (SELP_s32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (sext Int1Regs:$a)),
+ (SELP_s64ii -1, 0, Int1Regs:$a)>;
+
+// zext i1
+def : Pat<(i16 (zext Int1Regs:$a)),
+ (SELP_u16ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (zext Int1Regs:$a)),
+ (SELP_u32ii 1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (zext Int1Regs:$a)),
+ (SELP_u64ii 1, 0, Int1Regs:$a)>;
+
+// anyext i1
+def : Pat<(i16 (anyext Int1Regs:$a)),
+ (SELP_u16ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i32 (anyext Int1Regs:$a)),
+ (SELP_u32ii -1, 0, Int1Regs:$a)>;
+def : Pat<(i64 (anyext Int1Regs:$a)),
+ (SELP_u64ii -1, 0, Int1Regs:$a)>;
+
+// sext i16
+def : Pat<(i32 (sext Int16Regs:$a)),
+ (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (sext Int16Regs:$a)),
+ (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
+
+// zext i16
+def : Pat<(i32 (zext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (zext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// anyext i16
+def : Pat<(i32 (anyext Int16Regs:$a)),
+ (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
+def : Pat<(i64 (anyext Int16Regs:$a)),
+ (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
+
+// sext i32
+def : Pat<(i64 (sext Int32Regs:$a)),
+ (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
+
+// zext i32
+def : Pat<(i64 (zext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+// anyext i32
+def : Pat<(i64 (anyext Int32Regs:$a)),
+ (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
+
+
+// truncate i64
+def : Pat<(i32 (trunc Int64Regs:$a)),
+ (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i16 (trunc Int64Regs:$a)),
+ (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int64Regs:$a)),
+ (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i32
+def : Pat<(i16 (trunc Int32Regs:$a)),
+ (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
+def : Pat<(i1 (trunc Int32Regs:$a)),
+ (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
+
+// truncate i16
+def : Pat<(i1 (trunc Int16Regs:$a)),
+ (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
+
+// sext_inreg
+def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
+def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
+
+
+// Select instructions with 32-bit predicates
+def : Pat<(select Int32Regs:$pred, Int16Regs:$a, Int16Regs:$b),
+ (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int32Regs:$a, Int32Regs:$b),
+ (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+ (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float16Regs:$a, Float16Regs:$b),
+ (SELP_f16rr Float16Regs:$a, Float16Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+ (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+ (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
+ (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
+
+
+let hasSideEffects = 0 in {
+ // pack a set of smaller int registers to a larger int register
+ def V4I16toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2,
+ Int16Regs:$s3, Int16Regs:$s4),
+ "mov.b64 \t$d, {{$s1, $s2, $s3, $s4}};", []>;
+ def V2I16toI32 : NVPTXInst<(outs Int32Regs:$d),
+ (ins Int16Regs:$s1, Int16Regs:$s2),
+ "mov.b32 \t$d, {{$s1, $s2}};", []>;
+ def V2I32toI64 : NVPTXInst<(outs Int64Regs:$d),
+ (ins Int32Regs:$s1, Int32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+ def V2F32toF64 : NVPTXInst<(outs Float64Regs:$d),
+ (ins Float32Regs:$s1, Float32Regs:$s2),
+ "mov.b64 \t$d, {{$s1, $s2}};", []>;
+
+ // unpack a larger int register to a set of smaller int registers
+ def I64toV4I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2,
+ Int16Regs:$d3, Int16Regs:$d4),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2, $d3, $d4}}, $s;", []>;
+ def I32toV2I16 : NVPTXInst<(outs Int16Regs:$d1, Int16Regs:$d2),
+ (ins Int32Regs:$s),
+ "mov.b32 \t{{$d1, $d2}}, $s;", []>;
+ def I64toV2I32 : NVPTXInst<(outs Int32Regs:$d1, Int32Regs:$d2),
+ (ins Int64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+ def F64toV2F32 : NVPTXInst<(outs Float32Regs:$d1, Float32Regs:$d2),
+ (ins Float64Regs:$s),
+ "mov.b64 \t{{$d1, $d2}}, $s;", []>;
+
+}
+
+let hasSideEffects = 0 in {
+  // Extract an element of an f16x2 register. PTX does not provide any way
+  // to access the elements of an f16x2 vector directly, so we need to
+  // extract them via a temporary register.
+ def F16x2toF16_0 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_hi;\n\t"
+ " mov.b32 \t{$dst, %tmp_hi}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 0))]>;
+ def F16x2toF16_1 : NVPTXInst<(outs Float16Regs:$dst),
+ (ins Float16x2Regs:$src),
+ "{{ .reg .b16 \t%tmp_lo;\n\t"
+ " mov.b32 \t{%tmp_lo, $dst}, $src; }}",
+ [(set Float16Regs:$dst,
+ (extractelt (v2f16 Float16x2Regs:$src), 1))]>;
+
+ // Coalesce two f16 registers into f16x2
+ def BuildF16x2 : NVPTXInst<(outs Float16x2Regs:$dst),
+ (ins Float16Regs:$a, Float16Regs:$b),
+ "mov.b32 \t$dst, {{$a, $b}};",
+ [(set Float16x2Regs:$dst,
+ (build_vector (f16 Float16Regs:$a), (f16 Float16Regs:$b)))]>;
+
+  // Directly initializing the underlying b32 register is one less SASS
+  // instruction than a vector-packing move.
+ def BuildF16x2i : NVPTXInst<(outs Float16x2Regs:$dst), (ins i32imm:$src),
+ "mov.b32 \t$dst, $src;",
+ []>;
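+  // For example, a constant <1.0, 1.0> f16x2 value could be materialized with
+  // a single "mov.b32 %hh, 0x3C003C00;" (0x3C00 is fp16 1.0) instead of
+  // packing two f16 registers.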
+
+ // Split f16x2 into two f16 registers.
+ def SplitF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Float16x2Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+ // Split an i32 into two f16
+ def SplitI32toF16x2 : NVPTXInst<(outs Float16Regs:$lo, Float16Regs:$hi),
+ (ins Int32Regs:$src),
+ "mov.b32 \t{{$lo, $hi}}, $src;",
+ []>;
+}
+
+// Count leading zeros
+let hasSideEffects = 0 in {
+ def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "clz.b32 \t$d, $a;", []>;
+ def CLZr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "clz.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
+
+// The return type of the ctlz ISD node is the same as its input, but the PTX
+// clz instruction always returns a 32-bit value. For ctlz.i64, convert the
+// PTX value to 64 bits to match the ISD node's semantics, unless we know we're
+// truncating back down to 32 bits.
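+//
+// For illustration, ctlz on i64 is expected to emit roughly:
+//   clz.b64     %r, %rd;    // 32-bit result
+//   cvt.u64.u32 %rd2, %r;   // widen back to i64 (elided if we truncate to i32)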
+def : Pat<(ctlz Int64Regs:$a), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctlz Int64Regs:$a))), (CLZr64 Int64Regs:$a)>;
+
+// For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
+// result back to 16-bits if necessary. We also need to subtract 16 because
+// the high-order 16 zeros were counted.
+//
+// TODO: NVPTX has a mov.b32 b32reg, {imm, b16reg} instruction, which we could
+// use to save one SASS instruction (on sm_35 anyway):
+//
+// mov.b32 $tmp, {0xffff, $a}
+// ctlz.b32 $result, $tmp
+//
+// That is, instead of zero-extending the input to 32 bits, we'd "one-extend"
+// and then ctlz that value. This way we don't have to subtract 16 from the
+// result. Unfortunately today we don't have a way to generate
+// "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
+def : Pat<(ctlz Int16Regs:$a),
+ (SUBi16ri (CVT_u16_u32
+ (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
+def : Pat<(i32 (zext (ctlz Int16Regs:$a))),
+ (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
+
+// Population count
+let hasSideEffects = 0 in {
+ def POPCr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
+ "popc.b32 \t$d, $a;", []>;
+ def POPCr64 : NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
+ "popc.b64 \t$d, $a;", []>;
+}
+
+// 32-bit has a direct PTX instruction
+def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
+
+// For 64-bit, the result in PTX is actually 32-bit, so we zero-extend to 64-bit
+// to match the LLVM semantics. Just as with ctlz.i64, we provide a second
+// pattern that avoids the type conversion if we're truncating the result to
+// i32 anyway.
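+// (Roughly: popc.b64 %r, %rd; cvt.u64.u32 %rd2, %r;)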
+def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (ctpop Int64Regs:$a))), (POPCr64 Int64Regs:$a)>;
+
+// For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
+// If we know that we're storing into an i32, we can avoid the final trunc.
+def : Pat<(ctpop Int16Regs:$a),
+ (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
+def : Pat<(i32 (zext (ctpop Int16Regs:$a))),
+ (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
+
+// fpround f32 -> f16
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float32Regs:$a)),
+ (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
+
+// fpround f64 -> f16
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fpround Float64Regs:$a)),
+ (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
+
+// fpround f64 -> f32
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpround Float64Regs:$a)),
+ (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
+
+// fpextend f16 -> f32
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fpextend Float16Regs:$a)),
+ (CVT_f32_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f16 -> f64
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float16Regs:$a)),
+ (CVT_f64_f16 Float16Regs:$a, CvtNONE)>;
+
+// fpextend f32 -> f64
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f64 (fpextend Float32Regs:$a)),
+ (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
+
+def retflag : SDNode<"NVPTXISD::RET_FLAG", SDTNone,
+ [SDNPHasChain, SDNPOptInGlue]>;
+
+// fceil, ffloor, fround, ftrunc.
+
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fceil Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRPI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fceil Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
+
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ffloor Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRMI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ffloor Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
+
+def : Pat<(fround Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f16 (fround Float16Regs:$a)),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fround Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(f32 (fround Float32Regs:$a)),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(f64 (fround Float64Regs:$a)),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(ftrunc Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRZI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(ftrunc Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
+
+// nearbyint and rint are implemented as rounding to nearest even. This isn't
+// strictly correct, because it causes us to ignore the rounding mode. But it
+// matches what CUDA's "libm" does.
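+//
+// For example, frint on f32 is expected to lower to a single
+// "cvt.rni.f32.f32" (or "cvt.rni.ftz.f32.f32" when subnormals are flushed).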
+
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(fnearbyint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(fnearbyint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float16Regs:$a),
+ (CVT_f16_f16 Float16Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>, Requires<[doF32FTZ]>;
+def : Pat<(frint Float32Regs:$a),
+ (CVT_f32_f32 Float32Regs:$a, CvtRNI)>, Requires<[doNoF32FTZ]>;
+def : Pat<(frint Float64Regs:$a),
+ (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
+
+
+//-----------------------------------
+// Control-flow
+//-----------------------------------
+
+let isTerminator=1 in {
+ let isReturn=1, isBarrier=1 in
+ def Return : NVPTXInst<(outs), (ins), "ret;", [(retflag)]>;
+
+ let isBranch=1 in
+ def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@$a bra \t$target;",
+ [(brcond Int1Regs:$a, bb:$target)]>;
+ let isBranch=1 in
+ def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
+ "@!$a bra \t$target;", []>;
+
+ let isBranch=1, isBarrier=1 in
+ def GOTO : NVPTXInst<(outs), (ins brtarget:$target),
+ "bra.uni \t$target;", [(br bb:$target)]>;
+}
+
+def : Pat<(brcond Int32Regs:$a, bb:$target),
+ (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
+
+// SelectionDAGBuilder::visitSwitchCase() will invert the condition of a
+// conditional branch if the target block is the next block so that the code
+// can fall through to the target block. The inversion is done by 'xor
+// condition, 1', which will be translated to (setne condition, -1). Since PTX
+// supports '@!pred bra target', we should use it.
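+//
+// For illustration, rather than materializing the inverted predicate and
+// branching on it, the pattern below is expected to emit just
+// "@!%p bra LABEL;".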
+def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
+ (CBranchOther Int1Regs:$a, bb:$target)>;
+
+// Call
+def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
+ SDTCisVT<1, i32>]>;
+def SDT_NVPTXCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_NVPTXCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue, SDNPSideEffect]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_NVPTXCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPSideEffect]>;
+
+def SDT_NVPTXCall : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def call : SDNode<"NVPTXISD::CALL", SDT_NVPTXCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+def calltarget : Operand<i32>;
+let isCall=1 in {
+ def CALL : NVPTXInst<(outs), (ins calltarget:$dst), "call \t$dst, (1);", []>;
+}
+
+def : Pat<(call tglobaladdr:$dst), (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst), (CALL texternalsym:$dst)>;
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : NVPTXInst<outs, ins, asmstr, pattern>;
+
+def Callseq_Start :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\{ // callseq $amt1, $amt2\n"
+ "\t.reg .b32 temp_param_reg;",
+ [(callseq_start timm:$amt1, timm:$amt2)]>;
+def Callseq_End :
+ NVPTXInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "\\} // callseq $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+
+// trap instruction
+def trapinst : NVPTXInst<(outs), (ins), "trap;", [(trap)]>;
+
+// Call prototype wrapper
+def SDTCallPrototype : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
+def CallPrototype :
+ SDNode<"NVPTXISD::CallPrototype", SDTCallPrototype,
+ [SDNPHasChain, SDNPOutGlue, SDNPInGlue, SDNPSideEffect]>;
+def ProtoIdent : Operand<i32> {
+ let PrintMethod = "printProtoIdent";
+}
+def CALL_PROTOTYPE :
+ NVPTXInst<(outs), (ins ProtoIdent:$ident),
+ "$ident", [(CallPrototype (i32 texternalsym:$ident))]>;
+
+
+include "NVPTXIntrinsics.td"
+
+
+//-----------------------------------
+// Notes
+//-----------------------------------
+// BSWAP is currently expanded. The following would be a more efficient
+// lowering:
+// - for < sm_20, use vector scalar mov, as Tesla supports native 16-bit
+//   registers
+// - for sm_20, use prmt (use vector scalar mov to get the pack and
+//   unpack). sm_20 supports native 32-bit registers, but not native 16-bit
+//   registers.
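+//
+// For example, on sm_20+ a 32-bit byte swap could plausibly be a single
+// "prmt.b32 %r1, %r0, 0, 0x0123;" (byte-permute selecting bytes 3,2,1,0)
+// instead of the shift/and/or sequence produced by expansion.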