CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg,
unsigned IndexReg, unsigned Scale, SMLoc Start,
SMLoc End, unsigned Size, StringRef Identifier,
- InlineAsmIdentifierInfo &Info);
+ InlineAsmIdentifierInfo &Info,
+ bool AllowBetterSizeMatch = false);
bool parseDirectiveEven(SMLoc L);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseZ(std::unique_ptr<X86Operand> &Z, const SMLoc &StartLoc);
+ /// MS-compatibility:
+ /// Infer an appropriate size qualifier for an AVX512 vector or broadcast
+ /// memory operand when the source does not provide one.
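+ /// e.g. "vaddps zmm1, zmm2, [var]" or a broadcast reference such as
+ /// "vaddps zmm1, zmm2, [var]{1to16}" (illustrative examples).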
+ unsigned AdjustAVX512Mem(unsigned Size, X86Operand* UnsizedMemOpNext);
+
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
return getSTI().getFeatureBits()[X86::Mode64Bit];
std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg,
unsigned Scale, SMLoc Start, SMLoc End, unsigned Size, StringRef Identifier,
- InlineAsmIdentifierInfo &Info) {
+ InlineAsmIdentifierInfo &Info, bool AllowBetterSizeMatch) {
// If we found a decl other than a VarDecl, then assume it is a FuncDecl or
// some other label reference.
if (isa<MCSymbolRefExpr>(Disp) && Info.OpDecl && !Info.IsVarDecl) {
if (Size)
InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
/*Len=*/0, Size);
+ if (AllowBetterSizeMatch)
+ // Handle the case where the size qualifier is absent on an indirect
+ // symbol reference, e.g. "vaddps zmm1, zmm2, [var]".
+ // Set Size to zero so the matching mechanism can look for a better
+ // size qualifier than our initial guess, based on the available
+ // variants of the given instruction.
+ Size = 0;
}
}
InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
return CreateMemForInlineAsm(SegReg, Disp, BaseReg, IndexReg, Scale, Start,
- End, Size, SM.getSymName(), Info);
+ End, Size, SM.getSymName(), Info,
+ isParsingInlineAsm());
}
// Inline assembly may use variable names with namespace alias qualifiers.
return true;
}
+unsigned X86AsmParser::AdjustAVX512Mem(unsigned Size,
+ X86Operand* UnsizedMemOpNext) {
+ // Only adjust when the target supports AVX512.
+ if (!getSTI().getFeatureBits()[X86::FeatureAVX512])
+ return 0;
+ // Allow the adjustment for a full (x|y|z)mm-sized memory operand.
+ if (Size == 512 || Size == 256 || Size == 128)
+ return Size;
+ // A 32- or 64-bit size suggests a broadcast memory operand;
+ // inspect the following operand to validate this before adjusting.
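+ // e.g. "vaddps zmm1, zmm2, [var]{1to16}": the element size is 32 bits and
+ // the "{1to16}" decorator is parsed as the token following the memory
+ // operand (illustrative example).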
+ if (Size == 64 || Size == 32)
+ return UnsizedMemOpNext && UnsizedMemOpNext->isToken() &&
+ UnsizedMemOpNext->getToken().substr(0, 4).equals("{1to") ? Size : 0;
+ // Do not allow any other kind of adjustment.
+ return 0;
+}
+
bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
// Find one unsized memory operand, if present.
X86Operand *UnsizedMemOp = nullptr;
+ // If an unsized memory operand was found, also record the operand that
+ // follows it, for use in AdjustAVX512Mem.
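+ // (That operand may carry an AVX512 broadcast decorator such as "{1to8}",
+ // which AdjustAVX512Mem checks for.)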
+ X86Operand *UnsizedMemOpNext = nullptr;
for (const auto &Op : Operands) {
X86Operand *X86Op = static_cast<X86Operand *>(Op.get());
+ if (UnsizedMemOp) {
+ UnsizedMemOpNext = X86Op;
+ // An unsized memory operand has already been found; this is the operand
+ // that follows it. Break here, as only one memory operand is allowed per
+ // instruction.
+ break;
+ }
if (X86Op->isMemUnsized())
UnsizedMemOp = X86Op;
}
// If an unsized memory operand is present, try to match with each memory
// operand size. In Intel assembly, the size is not part of the instruction
// mnemonic.
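+ // Size (in bits) selected for an unsized AVX512 memory operand by
+ // AdjustAVX512Mem below, or zero when no adjustment applies.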
+ unsigned MatchedSize = 0;
if (UnsizedMemOp && UnsizedMemOp->isMemUnsized()) {
static const unsigned MopSizes[] = {8, 16, 32, 64, 80, 128, 256, 512};
for (unsigned Size : MopSizes) {
// If this returned as a missing feature failure, remember that.
if (Match.back() == Match_MissingFeature)
ErrorInfoMissingFeature = ErrorInfoIgnore;
+ if (M == Match_Success)
+ // MS-compatibility:
+ // Adjust the size of an AVX512 vector/broadcast memory operand when the
+ // size qualifier is absent, matching GCC's behavior in these cases.
+ MatchedSize = AdjustAVX512Mem(Size, UnsizedMemOpNext);
}
// Restore the size of the unsized memory operand if we modified it.
unsigned NumSuccessfulMatches =
std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
+ if (MatchedSize && isParsingInlineAsm() && isParsingIntelSyntax())
+ // MS compatibility:
+ // Fix up the size-directive rewrite according to the matched memory size.
+ // This applies to MS inline assembly only.
+ for (AsmRewrite &AR : *InstInfo->AsmRewrites)
+ if ((AR.Loc.getPointer() == UnsizedMemOp->StartLoc.getPointer()) &&
+ (AR.Kind == AOK_SizeDirective))
+ AR.Val = MatchedSize;
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.