// the token this macro expanded to.
Loc = SourceMgr.getLogicalLoc(Loc);
const char *StrData = SourceMgr.getCharacterData(Loc);
+ const char *BufEnd = SourceMgr.getBufferData(Loc.getFileID()).second;
// TODO: This could be special-cased for common tokens like identifiers, ')',
// etc. to make this faster, if it mattered. This could use
// Lexer::isObviouslySimpleCharacter, for example.
+ // Create a LangOptions struct and enable trigraphs. This is sufficient for
+ // measuring tokens.
+ LangOptions LangOpts;
+ LangOpts.Trigraphs = true;
+
// Create a lexer starting at the beginning of this token.
- Lexer TheLexer(Loc, *ThePreprocessor, StrData);
+ Lexer TheLexer(Loc, LangOpts, StrData, BufEnd);
Token TheTok;
TheLexer.LexRawToken(TheTok);
return TheTok.getLength();
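Taken together, this hunk measures a token's length by re-lexing it with a raw, Preprocessor-free Lexer. Below is a minimal sketch of that approach, assuming the Clang declarations used above are in scope; the MeasureTokenLength helper name is illustrative, not part of this patch:

static unsigned MeasureTokenLength(SourceLocation Loc, SourceManager &SourceMgr) {
  // Resolve the logical location and get pointers into the underlying buffer.
  Loc = SourceMgr.getLogicalLoc(Loc);
  const char *StrData = SourceMgr.getCharacterData(Loc);
  const char *BufEnd = SourceMgr.getBufferData(Loc.getFileID()).second;

  // Per the comment above, a default LangOptions with trigraphs enabled is
  // sufficient for measuring token length.
  LangOptions LangOpts;
  LangOpts.Trigraphs = true;

  // Lex a single raw token starting at the beginning of this token.
  Lexer TheLexer(Loc, LangOpts, StrData, BufEnd);
  Token TheTok;
  TheLexer.LexRawToken(TheTok);
  return TheTok.getLength();
}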
HeaderSearch *TheHeaderSearch;
protected:
SourceManager &SourceMgr;
- Preprocessor *ThePreprocessor;
std::string FormatDiagnostic(Diagnostic::Level Level,
diag::kind ID,
virtual ~TextDiagnostics();
void setHeaderSearch(HeaderSearch &HS) { TheHeaderSearch = &HS; }
- void setPreprocessor(Preprocessor &P) { ThePreprocessor = &P; }
virtual bool IgnoreDiagnostic(Diagnostic::Level Level,
SourceLocation Pos);
for (unsigned i = 0, e = InputFilenames.size(); i != e; ++i) {
// Set up the preprocessor with these options.
Preprocessor PP(Diags, LangInfo, *Target, SourceMgr, HeaderInfo);
- DiagClient->setPreprocessor(PP);
const std::string &InFile = InputFilenames[i];
std::vector<char> PredefineBuffer;
unsigned MainFileID = InitializePreprocessor(PP, InFile, SourceMgr,