#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
MCContext *Ctx;
+ typedef std::unique_ptr<AMDGPUOperand> Ptr;
+
enum ImmTy {
ImmTyNone,
ImmTyGDS,
}
}
- static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
- enum ImmTy Type = ImmTyNone,
- bool IsFPImm = false) {
+ static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
+ enum ImmTy Type = ImmTyNone,
+ bool IsFPImm = false) {
auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
Op->Imm.Val = Val;
Op->Imm.IsFPImm = IsFPImm;
return Op;
}
- static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
- bool HasExplicitEncodingSize = true) {
+ static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
+ bool HasExplicitEncodingSize = true) {
auto Res = llvm::make_unique<AMDGPUOperand>(Token);
Res->Tok.Data = Str.data();
Res->Tok.Length = Str.size();
return Res;
}
- static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
- SMLoc E,
- const MCRegisterInfo *TRI,
- const MCSubtargetInfo *STI,
- bool ForceVOP3) {
+ static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
+ SMLoc E,
+ const MCRegisterInfo *TRI,
+ const MCSubtargetInfo *STI,
+ bool ForceVOP3) {
auto Op = llvm::make_unique<AMDGPUOperand>(Register);
Op->Reg.RegNo = RegNo;
Op->Reg.TRI = TRI;
return Op;
}
- static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
+ static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
Op->Expr = Expr;
Op->StartLoc = S;
bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
OperandMatchResultTy parseHwreg(OperandVector &Operands);
OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
+ AMDGPUOperand::Ptr defaultHwreg() const;
- void cvtFlat(MCInst &Inst, const OperandVector &Operands);
- void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
+ AMDGPUOperand::Ptr defaultMubufOffset() const;
+ AMDGPUOperand::Ptr defaultGLC() const;
+ AMDGPUOperand::Ptr defaultSLC() const;
+ AMDGPUOperand::Ptr defaultTFE() const;
+
OperandMatchResultTy parseOModSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "omod"); }
OperandMatchResultTy parseClampSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "clamp"); }
OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); }
OperandMatchResultTy parseDA(OperandVector &Operands);
OperandMatchResultTy parseR128(OperandVector &Operands);
OperandMatchResultTy parseLWE(OperandVector &Operands);
-
+ AMDGPUOperand::Ptr defaultDMask() const;
+ AMDGPUOperand::Ptr defaultUNorm() const;
+ AMDGPUOperand::Ptr defaultDA() const;
+ AMDGPUOperand::Ptr defaultR128() const;
+ AMDGPUOperand::Ptr defaultLWE() const;
+ AMDGPUOperand::Ptr defaultSMRDOffset() const;
+ AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
+
+ AMDGPUOperand::Ptr defaultClampSI() const;
+ AMDGPUOperand::Ptr defaultOModSI() const;
+
OperandMatchResultTy parseOModOperand(OperandVector &Operands);
void cvtId(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
- void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
- void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault);
- void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
- void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
- void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);
+ AMDGPUOperand::Ptr defaultRowMask() const;
+ AMDGPUOperand::Ptr defaultBankMask() const;
+ AMDGPUOperand::Ptr defaultBoundCtrl() const;
+ void cvtDPP(MCInst &Inst, const OperandVector &Operands);
OperandMatchResultTy parseSDWASel(OperandVector &Operands);
OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
+ AMDGPUOperand::Ptr defaultSDWASel() const;
+ AMDGPUOperand::Ptr defaultSDWADstUnused() const;
};
struct OptionalOperand {
return true;
}
-static bool operandsHaveModifiers(const OperandVector &Operands) {
-
- for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
- const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
- if (Op.isRegKind() && Op.hasModifiers())
- return true;
- if (Op.isImm() && Op.hasModifiers())
- return true;
- if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOModSI ||
- Op.getImmTy() == AMDGPUOperand::ImmTyClampSI))
- return true;
- }
- return false;
-}
-
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
return isImmTy(ImmTyHwreg);
}
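+// The default*() factories below create the operand that the generated
+// matcher renders when the corresponding optional operand is omitted from
+// the assembly text.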
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
+}
+
//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//
-void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
- const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
-
- for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
- // Add the register arguments
- if (Op.isReg()) {
- Op.addRegOperands(Inst, 1);
- continue;
- }
+//===----------------------------------------------------------------------===//
+// mubuf
+//===----------------------------------------------------------------------===//
- OptionalIdx[Op.getImmTy()] = i;
- }
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+bool AMDGPUOperand::isMubufOffset() const {
+ return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
+}
-void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
- const OperandVector &Operands) {
- OptionalImmIndexMap OptionalIdx;
-
- for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
- AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
-
- // Add the register arguments
- if (Op.isReg()) {
- Op.addRegOperands(Inst, 1);
- continue;
- }
-
- // Handle 'glc' token for flat atomics.
- if (Op.isToken()) {
- continue;
- }
-
- // Handle optional arguments
- OptionalIdx[Op.getImmTy()] = i;
- }
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
- addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
-//===----------------------------------------------------------------------===//
-// mubuf
-//===----------------------------------------------------------------------===//
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
+}
-bool AMDGPUOperand::isMubufOffset() const {
- return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
+}
+
//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//
return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
+}
+
//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//
}
}
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
+ return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
+}
+
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
}
}
-void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
- if (operandsHaveModifiers(Operands)) {
- cvtVOP3(Inst, Operands);
- } else {
- cvtId(Inst, Operands);
- }
-}
-
-void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
- cvtVOP3(Inst, Operands);
-}
-
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptionalIdx;
unsigned I = 1;
return MatchOperand_Success;
}
-void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
- cvtDPP(Inst, Operands, true);
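+// Row and bank masks default to 0xf, i.e. all rows and banks enabled.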
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
+ return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
+ return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}
-void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
- cvtDPP(Inst, Operands, false);
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}
-void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
- bool HasMods) {
+void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptionalIdx;
unsigned I = 1;
for (unsigned E = Operands.size(); I != E; ++I) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
// Add the register arguments
- if (!HasMods && Op.isReg()) {
- Op.addRegOperands(Inst, 1);
- } else if (HasMods && Op.isRegOrImmWithInputMods()) {
+ if (Op.isRegOrImmWithInputMods()) {
+      // Only instructions with input modifiers are converted here.
Op.addRegOrImmWithInputModsOperands(Inst, 2);
} else if (Op.isDPPCtrl()) {
Op.addImmOperands(Inst, 1);
return MatchOperand_Success;
}
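+// The default SDWA selector is 6 (DWORD), i.e. the full dword is selected.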
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
+ return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
+}
+
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
+}
+
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
/// Is this operand optional and not always required.
bool IsOptional;
+  /// DefaultMethod - The name of the method that returns the default operand
+  /// for an optional operand.
+ std::string DefaultMethod;
+
public:
/// isRegisterClass() - Check if this is a register class.
bool isRegisterClass() const {
RecordKeeper &getRecords() const {
return Records;
}
+
+  bool hasOptionalOperands() const {
+    return std::any_of(Classes.begin(), Classes.end(),
+                       [](const ClassInfo &Class) { return Class.IsOptional; });
+  }
};
} // end anonymous namespace
Entry->ParserMethod = "";
Entry->DiagnosticType = "";
Entry->IsOptional = false;
+ Entry->DefaultMethod = "<invalid>";
}
return Entry;
// FIXME: diagnostic type.
CI->DiagnosticType = "";
CI->IsOptional = false;
+ CI->DefaultMethod = ""; // unused
RegisterSetClasses.insert(std::make_pair(RS, CI));
++Index;
}
if (BitInit *BI = dyn_cast<BitInit>(IsOptional))
CI->IsOptional = BI->getValue();
+ // Get or construct the default method name.
+ Init *DMName = Rec->getValueInit("DefaultMethod");
+ if (StringInit *SI = dyn_cast<StringInit>(DMName)) {
+ CI->DefaultMethod = SI->getValue();
+ } else {
+ assert(isa<UnsetInit>(DMName) && "Unexpected DefaultMethod field!");
+ CI->DefaultMethod = "default" + CI->ClassName + "Operands";
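+    // e.g. an operand class named "Foo" with the field unset gets
+    // "defaultFooOperands".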
+ }
+
++Index;
}
}
static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName,
std::vector<std::unique_ptr<MatchableInfo>> &Infos,
- bool HasMnemonicFirst, raw_ostream &OS) {
+ bool HasMnemonicFirst, bool HasOptionalOperands,
+ raw_ostream &OS) {
SmallSetVector<std::string, 16> OperandConversionKinds;
SmallSetVector<std::string, 16> InstructionConversionKinds;
std::vector<std::vector<uint8_t> > ConversionTable;
std::string ConvertFnBody;
raw_string_ostream CvtOS(ConvertFnBody);
// Start the unified conversion function.
- CvtOS << "void " << Target.getName() << ClassName << "::\n"
- << "convertToMCInst(unsigned Kind, MCInst &Inst, "
- << "unsigned Opcode,\n"
- << " const OperandVector"
- << " &Operands) {\n"
- << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"
- << " const uint8_t *Converter = ConversionTable[Kind];\n"
- << " Inst.setOpcode(Opcode);\n"
- << " for (const uint8_t *p = Converter; *p; p+= 2) {\n"
- << " switch (*p) {\n"
- << " default: llvm_unreachable(\"invalid conversion entry!\");\n"
- << " case CVT_Reg:\n"
- << " static_cast<" << TargetOperandClass
- << "&>(*Operands[*(p + 1)]).addRegOperands(Inst, 1);\n"
- << " break;\n"
- << " case CVT_Tied:\n"
- << " Inst.addOperand(Inst.getOperand(*(p + 1)));\n"
- << " break;\n";
+ if (HasOptionalOperands) {
+ CvtOS << "void " << Target.getName() << ClassName << "::\n"
+ << "convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const OperandVector &Operands,\n"
+ << " const SmallBitVector &OptionalOperandsMask) {\n";
+ } else {
+ CvtOS << "void " << Target.getName() << ClassName << "::\n"
+ << "convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const OperandVector &Operands) {\n";
+ }
+ CvtOS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n";
+ CvtOS << " const uint8_t *Converter = ConversionTable[Kind];\n";
+ if (HasOptionalOperands) {
+ CvtOS << " unsigned NumDefaults = 0;\n";
+ }
+ CvtOS << " unsigned OpIdx;\n";
+ CvtOS << " Inst.setOpcode(Opcode);\n";
+ CvtOS << " for (const uint8_t *p = Converter; *p; p+= 2) {\n";
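+  // Indices in the conversion table refer to the formal operand list, but
+  // operands synthesized from defaults are absent from the parsed Operands
+  // vector, so shift the index by the number of defaults emitted so far.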
+ if (HasOptionalOperands) {
+ CvtOS << " OpIdx = *(p + 1) - NumDefaults;\n";
+ } else {
+ CvtOS << " OpIdx = *(p + 1);\n";
+ }
+ CvtOS << " switch (*p) {\n";
+ CvtOS << " default: llvm_unreachable(\"invalid conversion entry!\");\n";
+ CvtOS << " case CVT_Reg:\n";
+ CvtOS << " static_cast<" << TargetOperandClass
+ << "&>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n";
+ CvtOS << " break;\n";
+ CvtOS << " case CVT_Tied:\n";
+ CvtOS << " Inst.addOperand(Inst.getOperand(OpIdx));\n";
+ CvtOS << " break;\n";
std::string OperandFnBody;
raw_string_ostream OpOS(OperandFnBody);
// the index of its entry in the vector).
std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" :
Op.Class->RenderMethod);
+ if (Op.Class->IsOptional) {
+      // Optional operands that share a RenderMethod but have different
+      // DefaultMethods still need distinct conversion kinds, so append the
+      // DefaultMethod to the name.
+ assert(HasOptionalOperands);
+ Name += "_" + Op.Class->DefaultMethod;
+ }
Name = getEnumNameForToken(Name);
bool IsNewConverter = false;
// This is a new operand kind. Add a handler for it to the
// converter driver.
- CvtOS << " case " << Name << ":\n"
- << " static_cast<" << TargetOperandClass
- << "&>(*Operands[*(p + 1)])." << Op.Class->RenderMethod
- << "(Inst, " << OpInfo.MINumOperands << ");\n"
- << " break;\n";
+ CvtOS << " case " << Name << ":\n";
+ if (Op.Class->IsOptional) {
+        // If the optional operand was not supplied in the source, construct
+        // the default operand via its DefaultMethod and render that instead.
+ assert(HasOptionalOperands);
+ CvtOS << " if (OptionalOperandsMask[*(p + 1) - 1]) {\n"
+ << " " << Op.Class->DefaultMethod << "()"
+ << "->" << Op.Class->RenderMethod << "(Inst, "
+ << OpInfo.MINumOperands << ");\n"
+ << " ++NumDefaults;\n"
+ << " } else {\n"
+ << " static_cast<" << TargetOperandClass
+ << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod
+ << "(Inst, " << OpInfo.MINumOperands << ");\n"
+ << " }\n";
+ } else {
+ CvtOS << " static_cast<" << TargetOperandClass
+ << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod
+ << "(Inst, " << OpInfo.MINumOperands << ");\n";
+ }
+ CvtOS << " break;\n";
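+      // For AMDGPU's optional "glc" bit, for instance, the emitted case comes
+      // out roughly as (a sketch, assuming RenderMethod "addImmOperands" and
+      // DefaultMethod "defaultGLC"):
+      //   case CVT_addImmOperands_defaultGLC:
+      //     if (OptionalOperandsMask[*(p + 1) - 1]) {
+      //       defaultGLC()->addImmOperands(Inst, 1);
+      //       ++NumDefaults;
+      //     } else {
+      //       static_cast<AMDGPUOperand&>(*Operands[OpIdx])
+      //           .addImmOperands(Inst, 1);
+      //     }
+      //     break;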
// Add a handler for the operand number lookup.
OpOS << " case " << Name << ":\n"
Info.buildOperandMatchInfo();
bool HasMnemonicFirst = AsmParser->getValueAsBit("HasMnemonicFirst");
+ bool HasOptionalOperands = Info.hasOptionalOperands();
// Write the output.
OS << " // This should be included into the middle of the declaration of\n";
OS << " // your subclasses implementation of MCTargetAsmParser.\n";
OS << " uint64_t ComputeAvailableFeatures(const FeatureBitset& FB) const;\n";
- OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
- << "unsigned Opcode,\n"
- << " const OperandVector "
- << "&Operands);\n";
+ if (HasOptionalOperands) {
+ OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const OperandVector &Operands,\n"
+ << " const SmallBitVector &OptionalOperandsMask);\n";
+ } else {
+ OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, "
+ << "unsigned Opcode,\n"
+ << " const OperandVector &Operands);\n";
+ }
OS << " void convertToMapAndConstraints(unsigned Kind,\n ";
OS << " const OperandVector &Operands) override;\n";
if (HasMnemonicFirst)
// Generate the convertToMCInst function to convert operands into an MCInst.
// Also, generate the convertToMapAndConstraints function for MS-style inline
// assembly. The latter doesn't actually generate a MCInst.
- emitConvertFuncs(Target, ClassName, Info.Matchables, HasMnemonicFirst, OS);
+ emitConvertFuncs(Target, ClassName, Info.Matchables, HasMnemonicFirst,
+ HasOptionalOperands, OS);
// Emit the enumeration for classes which participate in matching.
emitMatchClassEnumeration(Target, Info.Classes, OS);
OS << " bool HadMatchOtherThanPredicate = false;\n";
OS << " unsigned RetCode = Match_InvalidOperand;\n";
OS << " uint64_t MissingFeatures = ~0ULL;\n";
+ if (HasOptionalOperands) {
+ OS << " SmallBitVector OptionalOperandsMask(" << MaxNumOperands << ");\n";
+ }
OS << " // Set ErrorInfo to the operand that mismatches if it is\n";
OS << " // wrong for all instances of the instruction.\n";
OS << " ErrorInfo = ~0ULL;\n";
// Emit check that the subclasses match.
OS << " bool OperandsValid = true;\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.reset(0, " << MaxNumOperands << ");\n";
+ }
OS << " for (unsigned FormalIdx = " << (HasMnemonicFirst ? "0" : "SIndex")
<< ", ActualIdx = " << (HasMnemonicFirst ? "1" : "SIndex")
<< "; FormalIdx != " << MaxNumOperands << "; ++FormalIdx) {\n";
OS << " OperandsValid = (Formal == " <<"InvalidMatchClass) || "
"isSubclass(Formal, OptionalMatchClass);\n";
OS << " if (!OperandsValid) ErrorInfo = ActualIdx;\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.set(FormalIdx, " << MaxNumOperands
+ << ");\n";
+ }
OS << " break;\n";
OS << " }\n";
OS << " MCParsedAsmOperand &Actual = *Operands[ActualIdx];\n";
OS << " // If current formal operand wasn't matched and it is optional\n"
<< " // then try to match next formal operand\n";
OS << " if (Diag == Match_InvalidOperand "
- << "&& isSubclass(Formal, OptionalMatchClass))\n";
+ << "&& isSubclass(Formal, OptionalMatchClass)) {\n";
+ if (HasOptionalOperands) {
+ OS << " OptionalOperandsMask.set(FormalIdx);\n";
+ }
OS << " continue;\n";
+ OS << " }\n";
OS << " // If this operand is broken for all of the instances of this\n";
OS << " // mnemonic, keep track of it so we can report loc info.\n";
OS << " // If we already had a match that only failed due to a\n";
OS << " }\n\n";
OS << " // We have selected a definite instruction, convert the parsed\n"
<< " // operands into the appropriate MCInst.\n";
- OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
+ if (HasOptionalOperands) {
+ OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n"
+ << " OptionalOperandsMask);\n";
+ } else {
+ OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n";
+ }
OS << "\n";
// Verify the instruction with the target-specific match predicate function.