// Enter the cave.
PP.EnterSourceFile(MainFileID, 0, true);
- LexerToken Tok;
+ Token Tok;
do {
PP.Lex(Tok);
virtual void Ident(SourceLocation Loc, const std::string &str);
- void HandleFirstTokOnLine(LexerToken &Tok);
+ void HandleFirstTokOnLine(Token &Tok);
void MoveToLine(SourceLocation Loc);
- bool AvoidConcat(const LexerToken &PrevTok, const LexerToken &Tok);
+ bool AvoidConcat(const Token &PrevTok, const Token &Tok);
};
}
/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
/// is called for the first token on each new line.
-void PrintPPOutputPPCallbacks::HandleFirstTokOnLine(LexerToken &Tok) {
+void PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
MoveToLine(Tok.getLocation());
UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
: PragmaHandler(0), Prefix(prefix), Callbacks(callbacks) {}
- virtual void HandlePragma(Preprocessor &PP, LexerToken &PragmaTok) {
+ virtual void HandlePragma(Preprocessor &PP, Token &PragmaTok) {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
Callbacks->MoveToLine(PragmaTok.getLocation());
/// the resulting output won't have incorrect concatenations going on. Examples
/// include "..", which we print with a space between, because we don't want to
/// track enough to tell "x.." from "...".
-bool PrintPPOutputPPCallbacks::AvoidConcat(const LexerToken &PrevTok,
- const LexerToken &Tok) {
+bool PrintPPOutputPPCallbacks::AvoidConcat(const Token &PrevTok,
+ const Token &Tok) {
char Buffer[256];
// If we haven't emitted a token on this line yet, PrevTok isn't useful to
InitOutputBuffer();
- LexerToken Tok, PrevTok;
+ Token Tok, PrevTok;
char Buffer[256];
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(PP);
PP.setPPCallbacks(Callbacks);
// Create a lexer starting at the beginning of this token.
Lexer TheLexer(Loc, *ThePreprocessor, StrData);
- LexerToken TheTok;
+ Token TheTok;
TheLexer.LexRawToken(TheTok);
return TheTok.getLength();
}
PP.EnterSourceFile(FileID, 0);
// Lex the file, which will read all the macros.
- LexerToken Tok;
+ Token Tok;
PP.Lex(Tok);
assert(Tok.getKind() == tok::eof && "Didn't read entire file!");
fprintf(stderr, "Unexpected program action!\n");
return;
case DumpTokens: { // Token dump mode.
- LexerToken Tok;
+ Token Tok;
// Start parsing the specified input file.
PP.EnterSourceFile(MainFileID, 0, true);
do {
break;
}
case RunPreprocessorOnly: { // Just lex as fast as we can, no output.
- LexerToken Tok;
+ Token Tok;
// Start parsing the specified input file.
PP.EnterSourceFile(MainFileID, 0, true);
do {
//
//===----------------------------------------------------------------------===//
//
-// This file implements the Lexer and LexerToken interfaces.
+// This file implements the Lexer and Token interfaces.
//
//===----------------------------------------------------------------------===//
//
/// be updated to match.
///
char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
- LexerToken *Tok) {
+ Token *Tok) {
// If we have a slash, look for an escaped newline.
if (Ptr[0] == '\\') {
++Size;
++SizeTmp;
if (Ptr[SizeTmp-1] == '\n' || Ptr[SizeTmp-1] == '\r') {
// Remember that this token needs to be cleaned.
- if (Tok) Tok->setFlag(LexerToken::NeedsCleaning);
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
// Warn if there was whitespace between the backslash and newline.
if (SizeTmp != 1 && Tok)
// a trigraph warning. If so, and if trigraphs are enabled, return it.
if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
// Remember that this token needs to be cleaned.
- if (Tok) Tok->setFlag(LexerToken::NeedsCleaning);
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
Ptr += 3;
Size += 3;
// Helper methods for lexing.
//===----------------------------------------------------------------------===//
-void Lexer::LexIdentifier(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
// Match [_A-Za-z0-9]*, we have already matched [_A-Za-z$]
unsigned Size;
unsigned char C = *CurPtr++;
/// LexNumericConstant - Lex the remainder of an integer or floating point
/// constant. From[-1] is the first character lexed. Return the end of the
/// constant.
-void Lexer::LexNumericConstant(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
unsigned Size;
char C = getCharAndSize(CurPtr, Size);
char PrevCh = 0;
/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
/// either " or L".
-void Lexer::LexStringLiteral(LexerToken &Result, const char *CurPtr, bool Wide){
+void Lexer::LexStringLiteral(Token &Result, const char *CurPtr, bool Wide){
const char *NulCharacter = 0; // Does this string contain the \0 character?
char C = getAndAdvanceChar(CurPtr, Result);
/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
/// after having lexed the '<' character. This is used for #include filenames.
-void Lexer::LexAngledStringLiteral(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
const char *NulCharacter = 0; // Does this string contain the \0 character?
char C = getAndAdvanceChar(CurPtr, Result);
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L'.
-void Lexer::LexCharConstant(LexerToken &Result, const char *CurPtr) {
+void Lexer::LexCharConstant(Token &Result, const char *CurPtr) {
const char *NulCharacter = 0; // Does this character contain the \0 character?
// Handle the common case of 'x' and '\y' efficiently.
/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
/// Update BufferPtr to point to the next non-whitespace character and return.
-void Lexer::SkipWhitespace(LexerToken &Result, const char *CurPtr) {
+void Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
// Whitespace - Skip it, then return the token after the whitespace.
unsigned char Char = *CurPtr; // Skip consequtive spaces efficiently.
while (1) {
// ok, but handle newline.
// The returned token is at the start of the line.
- Result.setFlag(LexerToken::StartOfLine);
+ Result.setFlag(Token::StartOfLine);
// No leading whitespace seen so far.
- Result.clearFlag(LexerToken::LeadingSpace);
+ Result.clearFlag(Token::LeadingSpace);
Char = *++CurPtr;
}
// If this isn't immediately after a newline, there is leading space.
char PrevChar = CurPtr[-1];
if (PrevChar != '\n' && PrevChar != '\r')
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
// If the next token is obviously a // or /* */ comment, skip it efficiently
// too (without going through the big switch stmt).
// SkipBCPLComment - We have just read the // characters from input. Skip until
// we find the newline character that terminates the comment. Then update
/// BufferPtr and return.
-bool Lexer::SkipBCPLComment(LexerToken &Result, const char *CurPtr) {
+bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
// If BCPL comments aren't explicitly enabled for this language, emit an
// extension warning.
if (!Features.BCPLComment) {
++CurPtr;
// The next returned token is at the start of the line.
- Result.setFlag(LexerToken::StartOfLine);
+ Result.setFlag(Token::StartOfLine);
// No leading whitespace seen so far.
- Result.clearFlag(LexerToken::LeadingSpace);
+ Result.clearFlag(Token::LeadingSpace);
// It is common for the tokens immediately after a // comment to be
// whitespace (indentation for the next line). Instead of going through the
// big switch, handle it efficiently now.
if (isWhitespace(*CurPtr)) {
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr+1);
return true;
}
/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
/// an appropriate way and return it.
-bool Lexer::SaveBCPLComment(LexerToken &Result, const char *CurPtr) {
+bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
Result.setKind(tok::comment);
FormTokenWithChars(Result, CurPtr);
/// because they cannot cause the comment to end. The only thing that can
/// happen is the comment could end with an escaped newline between the */ end
/// of comment.
-bool Lexer::SkipBlockComment(LexerToken &Result, const char *CurPtr) {
+bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
// Scan one character past where we should, looking for a '/' character. Once
// we find it, check to see if it was preceded by a *. This common
// optimization helps people who like to put a lot of * characters in their
// whitespace. Instead of going through the big switch, handle it
// efficiently now.
if (isHorizontalWhitespace(*CurPtr)) {
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr+1);
return true;
}
// Otherwise, just return so that the next character will be lexed as a token.
BufferPtr = CurPtr;
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
return true;
}
/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
/// (potentially) macro expand the filename.
-void Lexer::LexIncludeFilename(LexerToken &FilenameTok) {
+void Lexer::LexIncludeFilename(Token &FilenameTok) {
assert(ParsingPreprocessorDirective &&
ParsingFilename == false &&
"Must be in a preprocessing directive!");
assert(ParsingPreprocessorDirective && ParsingFilename == false &&
"Must be in a preprocessing directive!");
std::string Result;
- LexerToken Tmp;
+ Token Tmp;
// CurPtr - Cache BufferPtr in an automatic variable.
const char *CurPtr = BufferPtr;
/// condition, reporting diagnostics and handling other edge cases as required.
/// This returns true if Result contains a token, false if PP.Lex should be
/// called again.
-bool Lexer::LexEndOfFile(LexerToken &Result, const char *CurPtr) {
+bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
// If we hit the end of the file while parsing a preprocessor directive,
// end the preprocessor directive first. The next token returned will
// then be the end of file.
// Save state that can be changed while lexing so that we can restore it.
const char *TmpBufferPtr = BufferPtr;
- LexerToken Tok;
+ Token Tok;
Tok.startToken();
LexTokenInternal(Tok);
/// preprocessing token, not a normal token, as such, it is an internal
/// interface. It assumes that the Flags of result have been cleared before
/// calling this.
-void Lexer::LexTokenInternal(LexerToken &Result) {
+void Lexer::LexTokenInternal(Token &Result) {
LexNextToken:
// New token, can't need cleaning yet.
- Result.clearFlag(LexerToken::NeedsCleaning);
+ Result.clearFlag(Token::NeedsCleaning);
Result.setIdentifierInfo(0);
// CurPtr - Cache BufferPtr in an automatic variable.
while ((*CurPtr == ' ') || (*CurPtr == '\t'))
++CurPtr;
BufferPtr = CurPtr;
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
}
unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
}
Diag(CurPtr-1, diag::null_in_file);
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr);
goto LexNextToken; // GCC isn't tail call eliminating.
case '\n':
break;
}
// The returned token is at the start of the line.
- Result.setFlag(LexerToken::StartOfLine);
+ Result.setFlag(Token::StartOfLine);
// No leading whitespace seen so far.
- Result.clearFlag(LexerToken::LeadingSpace);
+ Result.clearFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr);
goto LexNextToken; // GCC isn't tail call eliminating.
case ' ':
case '\t':
case '\f':
case '\v':
- Result.setFlag(LexerToken::LeadingSpace);
+ Result.setFlag(Token::LeadingSpace);
SkipWhitespace(Result, CurPtr);
goto LexNextToken; // GCC isn't tail call eliminating.
// want us starting at the beginning of the line again. If so, set
// the StartOfLine flag.
if (IsAtStartOfLine) {
- Result.setFlag(LexerToken::StartOfLine);
+ Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
goto LexNextToken; // GCC isn't tail call eliminating.
// want us starting at the beginning of the line again. If so, set
// the StartOfLine flag.
if (IsAtStartOfLine) {
- Result.setFlag(LexerToken::StartOfLine);
+ Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
goto LexNextToken; // GCC isn't tail call eliminating.
/// hex-digit hex-digit hex-digit hex-digit
///
StringLiteralParser::
-StringLiteralParser(const LexerToken *StringToks, unsigned NumStringToks,
+StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &pp, TargetInfo &t)
: PP(pp), Target(t) {
// Scan all of the string portions, remember the max individual token length,
/// MacroArgs ctor function - This destroys the vector passed in.
MacroArgs *MacroArgs::create(const MacroInfo *MI,
- const LexerToken *UnexpArgTokens,
+ const Token *UnexpArgTokens,
unsigned NumToks, bool VarargsElided) {
assert(MI->isFunctionLike() &&
"Can't have args for an object-like macro!");
// Allocate memory for the MacroArgs object with the lexer tokens at the end.
MacroArgs *Result = (MacroArgs*)malloc(sizeof(MacroArgs) +
- NumToks*sizeof(LexerToken));
+ NumToks*sizeof(Token));
// Construct the macroargs object.
new (Result) MacroArgs(NumToks, VarargsElided);
// Copy the actual unexpanded tokens to immediately after the result ptr.
if (NumToks)
- memcpy(const_cast<LexerToken*>(Result->getUnexpArgument(0)),
- UnexpArgTokens, NumToks*sizeof(LexerToken));
+ memcpy(const_cast<Token*>(Result->getUnexpArgument(0)),
+ UnexpArgTokens, NumToks*sizeof(Token));
return Result;
}
/// getArgLength - Given a pointer to an expanded or unexpanded argument,
/// return the number of tokens, not counting the EOF, that make up the
/// argument.
-unsigned MacroArgs::getArgLength(const LexerToken *ArgPtr) {
+unsigned MacroArgs::getArgLength(const Token *ArgPtr) {
unsigned NumArgTokens = 0;
for (; ArgPtr->getKind() != tok::eof; ++ArgPtr)
++NumArgTokens;
/// getUnexpArgument - Return the unexpanded tokens for the specified formal.
///
-const LexerToken *MacroArgs::getUnexpArgument(unsigned Arg) const {
+const Token *MacroArgs::getUnexpArgument(unsigned Arg) const {
// The unexpanded argument tokens start immediately after the MacroArgs object
// in memory.
- const LexerToken *Start = (const LexerToken *)(this+1);
- const LexerToken *Result = Start;
+ const Token *Start = (const Token *)(this+1);
+ const Token *Result = Start;
// Scan to find Arg.
for (; Arg; ++Result) {
assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
/// by pre-expansion, return false. Otherwise, conservatively return true.
-bool MacroArgs::ArgNeedsPreexpansion(const LexerToken *ArgTok) const {
+bool MacroArgs::ArgNeedsPreexpansion(const Token *ArgTok) const {
// If there are no identifiers in the argument list, or if the identifiers are
// known to not be macros, pre-expansion won't modify it.
for (; ArgTok->getKind() != tok::eof; ++ArgTok)
/// getPreExpArgument - Return the pre-expanded form of the specified
/// argument.
-const std::vector<LexerToken> &
+const std::vector<Token> &
MacroArgs::getPreExpArgument(unsigned Arg, Preprocessor &PP) {
assert(Arg < NumUnexpArgTokens && "Invalid argument number!");
if (PreExpArgTokens.empty())
PreExpArgTokens.resize(NumUnexpArgTokens);
- std::vector<LexerToken> &Result = PreExpArgTokens[Arg];
+ std::vector<Token> &Result = PreExpArgTokens[Arg];
if (!Result.empty()) return Result;
- const LexerToken *AT = getUnexpArgument(Arg);
+ const Token *AT = getUnexpArgument(Arg);
unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
// Otherwise, we have to pre-expand this argument, populating Result. To do
// Lex all of the macro-expanded tokens into Result.
do {
- Result.push_back(LexerToken());
+ Result.push_back(Token());
PP.Lex(Result.back());
} while (Result.back().getKind() != tok::eof);
/// tokens into the literal string token that should be produced by the C #
/// preprocessor operator.
///
-static LexerToken StringifyArgument(const LexerToken *ArgToks,
+static Token StringifyArgument(const Token *ArgToks,
Preprocessor &PP, bool Charify = false) {
- LexerToken Tok;
+ Token Tok;
Tok.startToken();
Tok.setKind(tok::string_literal);
- const LexerToken *ArgTokStart = ArgToks;
+ const Token *ArgTokStart = ArgToks;
// Stringify all the tokens.
std::string Result = "\"";
// FIXME: Optimize this loop to not use std::strings.
bool isFirst = true;
for (; ArgToks->getKind() != tok::eof; ++ArgToks) {
- const LexerToken &Tok = *ArgToks;
+ const Token &Tok = *ArgToks;
if (!isFirst && (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()))
Result += ' ';
isFirst = false;
/// getStringifiedArgument - Compute, cache, and return the specified argument
/// that has been 'stringified' as required by the # operator.
-const LexerToken &MacroArgs::getStringifiedArgument(unsigned ArgNo,
+const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
Preprocessor &PP) {
assert(ArgNo < NumUnexpArgTokens && "Invalid argument number!");
if (StringifiedArgs.empty()) {
/// Create a macro expander for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
-void MacroExpander::Init(LexerToken &Tok, MacroArgs *Actuals) {
+void MacroExpander::Init(Token &Tok, MacroArgs *Actuals) {
// If the client is reusing a macro expander, make sure to free any memory
// associated with it.
destroy();
/// Create a macro expander for the specified token stream. This does not
/// take ownership of the specified token vector.
-void MacroExpander::Init(const LexerToken *TokArray, unsigned NumToks) {
+void MacroExpander::Init(const Token *TokArray, unsigned NumToks) {
// If the client is reusing a macro expander, make sure to free any memory
// associated with it.
destroy();
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from MacroTokens.
void MacroExpander::ExpandFunctionArguments() {
- llvm::SmallVector<LexerToken, 128> ResultToks;
+ llvm::SmallVector<Token, 128> ResultToks;
// Loop through the MacroTokens tokens, expanding them into ResultToks. Keep
// track of whether we change anything. If not, no need to keep them. If so,
// If we found the stringify operator, get the argument stringified. The
// preprocessor already verified that the following token is a macro name
// when the #define was parsed.
- const LexerToken &CurTok = MacroTokens[i];
+ const Token &CurTok = MacroTokens[i];
if (CurTok.getKind() == tok::hash || CurTok.getKind() == tok::hashat) {
int ArgNo = Macro->getArgumentNum(MacroTokens[i+1].getIdentifierInfo());
assert(ArgNo != -1 && "Token following # is not an argument?");
- LexerToken Res;
+ Token Res;
if (CurTok.getKind() == tok::hash) // Stringify
Res = ActualArgs->getStringifiedArgument(ArgNo, PP);
else {
// The stringified/charified string leading space flag gets set to match
// the #/#@ operator.
if (CurTok.hasLeadingSpace() || NextTokGetsSpace)
- Res.setFlag(LexerToken::LeadingSpace);
+ Res.setFlag(Token::LeadingSpace);
ResultToks.push_back(Res);
MadeChange = true;
ResultToks.push_back(CurTok);
if (NextTokGetsSpace) {
- ResultToks.back().setFlag(LexerToken::LeadingSpace);
+ ResultToks.back().setFlag(Token::LeadingSpace);
NextTokGetsSpace = false;
}
continue;
// argument and substitute the expanded tokens into the result. This is
// C99 6.10.3.1p1.
if (!PasteBefore && !PasteAfter) {
- const LexerToken *ResultArgToks;
+ const Token *ResultArgToks;
// Only preexpand the argument if it could possibly need it. This
// avoids some work in common cases.
- const LexerToken *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
+ const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
if (ActualArgs->ArgNeedsPreexpansion(ArgTok))
ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, PP)[0];
else
// If any tokens were substituted from the argument, the whitespace
// before the first token should match the whitespace of the arg
// identifier.
- ResultToks[FirstResult].setFlagValue(LexerToken::LeadingSpace,
+ ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
CurTok.hasLeadingSpace() ||
NextTokGetsSpace);
NextTokGetsSpace = false;
// Okay, we have a token that is either the LHS or RHS of a paste (##)
// argument. It gets substituted as its non-pre-expanded tokens.
- const LexerToken *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
+ const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
unsigned NumToks = MacroArgs::getArgLength(ArgToks);
if (NumToks) { // Not an empty argument?
ResultToks.append(ArgToks, ArgToks+NumToks);
// If the next token was supposed to get leading whitespace, ensure it has
// it now.
if (NextTokGetsSpace) {
- ResultToks[ResultToks.size()-NumToks].setFlag(LexerToken::LeadingSpace);
+ ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
NextTokGetsSpace = false;
}
continue;
if (MadeChange) {
// This is deleted in the dtor.
NumMacroTokens = ResultToks.size();
- LexerToken *Res = new LexerToken[ResultToks.size()];
+ Token *Res = new Token[ResultToks.size()];
if (NumMacroTokens)
- memcpy(Res, &ResultToks[0], NumMacroTokens*sizeof(LexerToken));
+ memcpy(Res, &ResultToks[0], NumMacroTokens*sizeof(Token));
MacroTokens = Res;
}
}
/// Lex - Lex and return a token from this macro stream.
///
-void MacroExpander::Lex(LexerToken &Tok) {
+void MacroExpander::Lex(Token &Tok) {
// Lexing off the end of the macro, pop this macro off the expansion stack.
if (isAtEnd()) {
// If this is a macro (not a token stream), mark the macro enabled now
// If this is the first token, set the lexical properties of the token to
// match the lexical properties of the macro identifier.
if (isFirstToken) {
- Tok.setFlagValue(LexerToken::StartOfLine , AtStartOfLine);
- Tok.setFlagValue(LexerToken::LeadingSpace, HasLeadingSpace);
+ Tok.setFlagValue(Token::StartOfLine , AtStartOfLine);
+ Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
}
// Handle recursive expansion!
/// PasteTokens - Tok is the LHS of a ## operator, and CurToken is the ##
/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
/// is another ## after it, chomp it iteratively. Return the result as Tok.
-void MacroExpander::PasteTokens(LexerToken &Tok) {
+void MacroExpander::PasteTokens(Token &Tok) {
llvm::SmallVector<char, 128> Buffer;
do {
// Consume the ## operator.
assert(!isAtEnd() && "No token on the RHS of a paste operator!");
// Get the RHS token.
- const LexerToken &RHS = MacroTokens[CurToken];
+ const Token &RHS = MacroTokens[CurToken];
bool isInvalid = false;
SourceLocation ResultTokLoc = PP.CreateString(&Buffer[0], Buffer.size());
// Lex the resultant pasted token into Result.
- LexerToken Result;
+ Token Result;
// Avoid testing /*, as the lexer would think it is the start of a comment
// and emit an error that it is unterminated.
// FIXME: Turn __VA_ARGS__ into "not a token"?
// Transfer properties of the LHS over to the Result.
- Result.setFlagValue(LexerToken::StartOfLine , Tok.isAtStartOfLine());
- Result.setFlagValue(LexerToken::LeadingSpace, Tok.hasLeadingSpace());
+ Result.setFlagValue(Token::StartOfLine , Tok.isAtStartOfLine());
+ Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
// Finally, replace LHS with the result, consume the RHS, and iterate.
++CurToken;
// Check all the tokens.
for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
- const LexerToken &A = ReplacementTokens[i];
- const LexerToken &B = Other.ReplacementTokens[i];
+ const Token &A = ReplacementTokens[i];
+ const Token &B = Other.ReplacementTokens[i];
if (A.getKind() != B.getKind() ||
A.isAtStartOfLine() != B.isAtStartOfLine() ||
A.hasLeadingSpace() != B.hasLeadingSpace())
using namespace clang;
static bool EvaluateDirectiveSubExpr(llvm::APSInt &LHS, unsigned MinPrec,
- LexerToken &PeekTok, bool ValueLive,
+ Token &PeekTok, bool ValueLive,
Preprocessor &PP);
/// DefinedTracker - This struct is used while parsing expressions to keep track
/// If ValueLive is false, then this value is being evaluated in a context where
/// the result is not used. As such, avoid diagnostics that relate to
/// evaluation.
-static bool EvaluateValue(llvm::APSInt &Result, LexerToken &PeekTok,
+static bool EvaluateValue(llvm::APSInt &Result, Token &PeekTok,
DefinedTracker &DT, bool ValueLive,
Preprocessor &PP) {
Result = 0;
/// the result is not used. As such, avoid diagnostics that relate to
/// evaluation.
static bool EvaluateDirectiveSubExpr(llvm::APSInt &LHS, unsigned MinPrec,
- LexerToken &PeekTok, bool ValueLive,
+ Token &PeekTok, bool ValueLive,
Preprocessor &PP) {
unsigned PeekPrec = getPrecedence(PeekTok.getKind());
// If this token isn't valid, report the error.
RHSIsLive = ValueLive;
// Consume the operator, saving the operator token for error reporting.
- LexerToken OpToken = PeekTok;
+ Token OpToken = PeekTok;
PP.LexNonComment(PeekTok);
llvm::APSInt RHS(LHS.getBitWidth());
bool Preprocessor::
EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
// Peek ahead one token.
- LexerToken Tok;
+ Token Tok;
Lex(Tok);
// C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
return IgnoreNull ? 0 : NullHandler;
}
-void PragmaNamespace::HandlePragma(Preprocessor &PP, LexerToken &Tok) {
+void PragmaNamespace::HandlePragma(Preprocessor &PP, Token &Tok) {
// Read the 'namespace' that the directive is in, e.g. STDC. Do not macro
// expand it, the user can have a STDC #define, that should not affect this.
PP.LexUnexpandedToken(Tok);
++NumPragma;
// Invoke the first level of pragma handlers which reads the namespace id.
- LexerToken Tok;
+ Token Tok;
PragmaHandlers->HandlePragma(*this, Tok);
// If the pragma handler didn't read the rest of the line, consume it now.
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
/// been read into 'Tok'.
-void Preprocessor::Handle_Pragma(LexerToken &Tok) {
+void Preprocessor::Handle_Pragma(Token &Tok) {
// Remember the pragma token location.
SourceLocation PragmaLoc = Tok.getLocation();
/// HandlePragmaOnce - Handle #pragma once. OnceTok is the 'once'.
///
-void Preprocessor::HandlePragmaOnce(LexerToken &OnceTok) {
+void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
if (isInPrimaryFile()) {
Diag(OnceTok, diag::pp_pragma_once_in_main_file);
return;
/// HandlePragmaPoison - Handle #pragma GCC poison. PoisonTok is the 'poison'.
///
-void Preprocessor::HandlePragmaPoison(LexerToken &PoisonTok) {
- LexerToken Tok;
+void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
+ Token Tok;
while (1) {
// Read the next token to poison. While doing this, pretend that we are
/// HandlePragmaSystemHeader - Implement #pragma GCC system_header. We know
/// that the whole directive has been parsed.
-void Preprocessor::HandlePragmaSystemHeader(LexerToken &SysHeaderTok) {
+void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
if (isInPrimaryFile()) {
Diag(SysHeaderTok, diag::pp_pragma_sysheader_in_main_file);
return;
/// HandlePragmaDependency - Handle #pragma GCC dependency "foo" blah.
///
-void Preprocessor::HandlePragmaDependency(LexerToken &DependencyTok) {
- LexerToken FilenameTok;
+void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
+ Token FilenameTok;
CurLexer->LexIncludeFilename(FilenameTok);
// If the token kind is EOM, the error has already been diagnosed.
namespace {
struct PragmaOnceHandler : public PragmaHandler {
PragmaOnceHandler(const IdentifierInfo *OnceID) : PragmaHandler(OnceID) {}
- virtual void HandlePragma(Preprocessor &PP, LexerToken &OnceTok) {
+ virtual void HandlePragma(Preprocessor &PP, Token &OnceTok) {
PP.CheckEndOfDirective("#pragma once");
PP.HandlePragmaOnce(OnceTok);
}
struct PragmaPoisonHandler : public PragmaHandler {
PragmaPoisonHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
- virtual void HandlePragma(Preprocessor &PP, LexerToken &PoisonTok) {
+ virtual void HandlePragma(Preprocessor &PP, Token &PoisonTok) {
PP.HandlePragmaPoison(PoisonTok);
}
};
struct PragmaSystemHeaderHandler : public PragmaHandler {
PragmaSystemHeaderHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
- virtual void HandlePragma(Preprocessor &PP, LexerToken &SHToken) {
+ virtual void HandlePragma(Preprocessor &PP, Token &SHToken) {
PP.HandlePragmaSystemHeader(SHToken);
PP.CheckEndOfDirective("#pragma");
}
};
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler(const IdentifierInfo *ID) : PragmaHandler(ID) {}
- virtual void HandlePragma(Preprocessor &PP, LexerToken &DepToken) {
+ virtual void HandlePragma(Preprocessor &PP, Token &DepToken) {
PP.HandlePragmaDependency(DepToken);
}
};
}
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
-/// the specified LexerToken's location, translating the token's start
+/// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
void Preprocessor::Diag(SourceLocation Loc, unsigned DiagID) {
Diags.Report(Loc, DiagID);
Diags.Report(Loc, DiagID, &Msg, 1);
}
-void Preprocessor::DumpToken(const LexerToken &Tok, bool DumpFlags) const {
+void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
std::cerr << tok::getTokenName(Tok.getKind()) << " '"
<< getSpelling(Tok) << "'";
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
-std::string Preprocessor::getSpelling(const LexerToken &Tok) const {
+std::string Preprocessor::getSpelling(const Token &Tok) const {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
// If this token contains nothing interesting, return it directly.
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
-unsigned Preprocessor::getSpelling(const LexerToken &Tok,
+unsigned Preprocessor::getSpelling(const Token &Tok,
const char *&Buffer) const {
assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
if (CharNo != 0) {
// Create a lexer starting at this token position.
Lexer TheLexer(TokStart, *this, TokPtr);
- LexerToken Tok;
+ Token Tok;
// Skip over characters the remaining characters.
const char *TokStartPtr = TokPtr;
for (; CharNo; --CharNo)
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer.
-void Preprocessor::EnterMacro(LexerToken &Tok, MacroArgs *Args) {
+void Preprocessor::EnterMacro(Token &Tok, MacroArgs *Args) {
IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
CurMacroExpander));
CurLexer = 0;
/// that these tokens will be re-macro-expanded when/if expansion is enabled.
/// This method assumes that the specified stream of tokens has a permanent
/// owner somewhere, so they do not need to be copied.
-void Preprocessor::EnterTokenStream(const LexerToken *Toks, unsigned NumToks) {
+void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks) {
// Save our current state.
IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
CurMacroExpander));
if (Val != 1)
return false;
- LexerToken Tok;
+ Token Tok;
LexUnexpandedToken(Tok);
assert(Tok.getKind() == tok::l_paren && "Error computing l-paren-ness?");
return true;
/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
/// expanded as a macro, handle it and return the next token as 'Identifier'.
-bool Preprocessor::HandleMacroExpandedIdentifier(LexerToken &Identifier,
+bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
MacroInfo *MI) {
// If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
// stuff like "! XX," -> "! ," and " XX," -> " ,", when XX is
// empty.
if (!Identifier.isAtStartOfLine()) {
- if (IsAtStartOfLine) Identifier.setFlag(LexerToken::StartOfLine);
- if (HadLeadingSpace) Identifier.setFlag(LexerToken::LeadingSpace);
+ if (IsAtStartOfLine) Identifier.setFlag(Token::StartOfLine);
+ if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
}
++NumFastMacroExpanded;
return false;
Identifier = MI->getReplacementToken(0);
// Restore the StartOfLine/LeadingSpace markers.
- Identifier.setFlagValue(LexerToken::StartOfLine , isAtStartOfLine);
- Identifier.setFlagValue(LexerToken::LeadingSpace, hasLeadingSpace);
+ Identifier.setFlagValue(Token::StartOfLine , isAtStartOfLine);
+ Identifier.setFlagValue(Token::LeadingSpace, hasLeadingSpace);
// Update the tokens location to include both its logical and physical
// locations.
// If this is #define X X, we must mark the result as unexpandible.
if (IdentifierInfo *NewII = Identifier.getIdentifierInfo())
if (NewII->getMacroInfo() == MI)
- Identifier.setFlag(LexerToken::DisableExpand);
+ Identifier.setFlag(Token::DisableExpand);
// Since this is not an identifier token, it can't be macro expanded, so
// we're done.
/// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is
/// invoked to read all of the actual arguments specified for the macro
/// invocation. This returns null on error.
-MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(LexerToken &MacroName,
+MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
MacroInfo *MI) {
// The number of fixed arguments to parse.
unsigned NumFixedArgsLeft = MI->getNumArgs();
bool isVariadic = MI->isVariadic();
// Outer loop, while there are more arguments, keep reading them.
- LexerToken Tok;
+ Token Tok;
Tok.setKind(tok::comma);
--NumFixedArgsLeft; // Start reading the first arg.
// ArgTokens - Build up a list of tokens that make up each argument. Each
// argument is separated by an EOF token. Use a SmallVector so we can avoid
// heap allocations in the common case.
- llvm::SmallVector<LexerToken, 64> ArgTokens;
+ llvm::SmallVector<Token, 64> ArgTokens;
unsigned NumActuals = 0;
while (Tok.getKind() == tok::comma) {
Diag(Tok, diag::ext_empty_fnmacro_arg);
// Add a marker EOF token to the end of the token list for this argument.
- LexerToken EOFTok;
+ Token EOFTok;
EOFTok.startToken();
EOFTok.setKind(tok::eof);
EOFTok.setLocation(Tok.getLocation());
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
-void Preprocessor::ExpandBuiltinMacro(LexerToken &Tok) {
+void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Figure out which token this is.
IdentifierInfo *II = Tok.getIdentifierInfo();
assert(II && "Can't be a macro without id info!");
// Set up the return result.
Tok.setIdentifierInfo(0);
- Tok.clearFlag(LexerToken::NeedsCleaning);
+ Tok.clearFlag(Token::NeedsCleaning);
if (II == Ident__LINE__) {
// __LINE__ expands to a simple numeric value.
/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
/// identifier information for the token and install it into the token.
-IdentifierInfo *Preprocessor::LookUpIdentifierInfo(LexerToken &Identifier,
+IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
const char *BufPtr) {
assert(Identifier.getKind() == tok::identifier && "Not an identifier!");
assert(Identifier.getIdentifierInfo() == 0 && "Identinfo already exists!");
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier. This callback looks up the identifier in the map and/or
/// potentially macro expands it or turns it into a named token (like 'for').
-void Preprocessor::HandleIdentifier(LexerToken &Identifier) {
+void Preprocessor::HandleIdentifier(Token &Identifier) {
assert(Identifier.getIdentifierInfo() &&
"Can't handle identifiers without identifier info!");
// C99 6.10.3.4p2 says that a disabled macro may never again be
// expanded, even if it's in a context where it could be expanded in the
// future.
- Identifier.setFlag(LexerToken::DisableExpand);
+ Identifier.setFlag(Token::DisableExpand);
}
}
} else if (II.isOtherTargetMacro() && !DisableMacroExpansion) {
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token or pops a level off
/// the include stack and keeps going.
-bool Preprocessor::HandleEndOfFile(LexerToken &Result, bool isEndOfMacro) {
+bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
assert(!CurMacroExpander &&
"Ending a file when currently in a macro!");
/// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
/// the current macro expansion or token stream expansion.
-bool Preprocessor::HandleEndOfMacro(LexerToken &Result) {
+bool Preprocessor::HandleEndOfMacro(Token &Result) {
assert(CurMacroExpander && !CurLexer &&
"Ending a macro when currently in a #include file!");
/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
/// current line until the tok::eom token is found.
void Preprocessor::DiscardUntilEndOfDirective() {
- LexerToken Tmp;
+ Token Tmp;
do {
LexUnexpandedToken(Tmp);
} while (Tmp.getKind() != tok::eom);
/// of the macro line if the macro name is invalid. isDefineUndef is 1 if
/// this is due to a a #define, 2 if #undef directive, 0 if it is something
/// else (e.g. #ifdef).
-void Preprocessor::ReadMacroName(LexerToken &MacroNameTok, char isDefineUndef) {
+void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
// Read the token, don't allow macro expansion on it.
LexUnexpandedToken(MacroNameTok);
/// CheckEndOfDirective - Ensure that the next token is a tok::eom token. If
/// not, emit a diagnostic and consume up until the eom.
void Preprocessor::CheckEndOfDirective(const char *DirType) {
- LexerToken Tmp;
+ Token Tmp;
Lex(Tmp);
// There should be no tokens after the directive, but we allow them as an
// extension.
// Enter raw mode to disable identifier lookup (and thus macro expansion),
// disabling warnings, etc.
CurLexer->LexingRawMode = true;
- LexerToken Tok;
+ Token Tok;
while (1) {
CurLexer->Lex(Tok);
/// at the start of a line. This consumes the directive, modifies the
/// lexer/preprocessor state, and advances the lexer(s) so that the next token
/// read is the correct one.
-void Preprocessor::HandleDirective(LexerToken &Result) {
+void Preprocessor::HandleDirective(Token &Result) {
// FIXME: Traditional: # with whitespace before it not recognized by K&R?
// We just parsed a # character at the start of a line, so we're in directive
// Okay, we're done parsing the directive.
}
-void Preprocessor::HandleUserDiagnosticDirective(LexerToken &Tok,
+void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
bool isWarning) {
// Read the rest of the line raw. We do this because we don't want macros
// to be expanded and we don't require that the tokens be valid preprocessing
/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
///
-void Preprocessor::HandleIdentSCCSDirective(LexerToken &Tok) {
+void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
// Yes, this directive is an extension.
Diag(Tok, diag::ext_pp_ident_directive);
// Read the string argument.
- LexerToken StrTok;
+ Token StrTok;
Lex(StrTok);
// If the token kind isn't a string, it's a malformed directive.
/// caller is expected to provide a buffer that is large enough to hold the
/// spelling of the filename, but is also expected to handle the case when
/// this method decides to use a different buffer.
-bool Preprocessor::GetIncludeFilenameSpelling(const LexerToken &FilenameTok,
+bool Preprocessor::GetIncludeFilenameSpelling(const Token &FilenameTok,
const char *&BufStart,
const char *&BufEnd) {
// Get the text form of the filename.
/// file to be included from the lexer, then include it! This is a common
/// routine with functionality shared between #include, #include_next and
/// #import.
-void Preprocessor::HandleIncludeDirective(LexerToken &IncludeTok,
+void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
const DirectoryLookup *LookupFrom,
bool isImport) {
- LexerToken FilenameTok;
+ Token FilenameTok;
CurLexer->LexIncludeFilename(FilenameTok);
// If the token kind is EOM, the error has already been diagnosed.
/// HandleIncludeNextDirective - Implements #include_next.
///
-void Preprocessor::HandleIncludeNextDirective(LexerToken &IncludeNextTok) {
+void Preprocessor::HandleIncludeNextDirective(Token &IncludeNextTok) {
Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
// #include_next is like #include, except that we start searching after
/// HandleImportDirective - Implements #import.
///
-void Preprocessor::HandleImportDirective(LexerToken &ImportTok) {
+void Preprocessor::HandleImportDirective(Token &ImportTok) {
Diag(ImportTok, diag::ext_pp_import_directive);
return HandleIncludeDirective(ImportTok, 0, true);
bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI) {
llvm::SmallVector<IdentifierInfo*, 32> Arguments;
- LexerToken Tok;
+ Token Tok;
while (1) {
LexUnexpandedToken(Tok);
switch (Tok.getKind()) {
/// line then lets the caller lex the next real token. If 'isTargetSpecific' is
/// true, then this is a "#define_target", otherwise this is a "#define".
///
-void Preprocessor::HandleDefineDirective(LexerToken &DefineTok,
+void Preprocessor::HandleDefineDirective(Token &DefineTok,
bool isTargetSpecific) {
++NumDefined;
- LexerToken MacroNameTok;
+ Token MacroNameTok;
ReadMacroName(MacroNameTok, 1);
// Error reading macro name? If so, diagnostic already issued.
MacroNameTok.getIdentifierInfo()->setIsOtherTargetMacro(false);
- LexerToken Tok;
+ Token Tok;
LexUnexpandedToken(Tok);
// If this is a function-like macro definition, parse the argument list,
} else {
// This is a normal token with leading space. Clear the leading space
// marker on the first token to get proper expansion.
- Tok.clearFlag(LexerToken::LeadingSpace);
+ Tok.clearFlag(Token::LeadingSpace);
}
// If this is a definition of a variadic C99 function-like macro, not using
}
/// HandleDefineOtherTargetDirective - Implements #define_other_target.
-void Preprocessor::HandleDefineOtherTargetDirective(LexerToken &Tok) {
- LexerToken MacroNameTok;
+void Preprocessor::HandleDefineOtherTargetDirective(Token &Tok) {
+ Token MacroNameTok;
ReadMacroName(MacroNameTok, 1);
// Error reading macro name? If so, diagnostic already issued.
/// HandleUndefDirective - Implements #undef.
///
-void Preprocessor::HandleUndefDirective(LexerToken &UndefTok) {
+void Preprocessor::HandleUndefDirective(Token &UndefTok) {
++NumUndefined;
- LexerToken MacroNameTok;
+ Token MacroNameTok;
ReadMacroName(MacroNameTok, 2);
// Error reading macro name? If so, diagnostic already issued.
/// if any tokens have been returned or pp-directives activated before this
/// #ifndef has been lexed.
///
-void Preprocessor::HandleIfdefDirective(LexerToken &Result, bool isIfndef,
+void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
bool ReadAnyTokensBeforeDirective) {
++NumIf;
- LexerToken DirectiveTok = Result;
+ Token DirectiveTok = Result;
- LexerToken MacroNameTok;
+ Token MacroNameTok;
ReadMacroName(MacroNameTok);
// Error reading macro name? If so, diagnostic already issued.
/// HandleIfDirective - Implements the #if directive.
///
-void Preprocessor::HandleIfDirective(LexerToken &IfToken,
+void Preprocessor::HandleIfDirective(Token &IfToken,
bool ReadAnyTokensBeforeDirective) {
++NumIf;
/// HandleEndifDirective - Implements the #endif directive.
///
-void Preprocessor::HandleEndifDirective(LexerToken &EndifToken) {
+void Preprocessor::HandleEndifDirective(Token &EndifToken) {
++NumEndif;
// Check that this is the whole directive.
}
-void Preprocessor::HandleElseDirective(LexerToken &Result) {
+void Preprocessor::HandleElseDirective(Token &Result) {
++NumElse;
// #else directive in a non-skipping conditional... start skipping.
/*FoundElse*/true);
}
-void Preprocessor::HandleElifDirective(LexerToken &ElifToken) {
+void Preprocessor::HandleElifDirective(Token &ElifToken) {
++NumElse;
// #elif directive in a non-skipping conditional... start skipping.
ExprResult NumElements(false);
if (Tok.getKind() == tok::star) {
// Remember the '*' token, in case we have to un-get it.
- LexerToken StarTok = Tok;
+ Token StarTok = Tok;
ConsumeToken();
// Check that the ']' token is present to avoid incorrectly parsing
/// of part of an expression. For example, in "A[1]+B", we consumed "A" (which
/// is now in 'IdTok') and the current token is "[".
Parser::ExprResult Parser::
-ParseExpressionWithLeadingIdentifier(const LexerToken &IdTok) {
+ParseExpressionWithLeadingIdentifier(const Token &IdTok) {
// We know that 'IdTok' must correspond to this production:
// primary-expression: identifier
/// of part of an assignment-expression. For example, in "A[1]+B", we consumed
/// "A" (which is now in 'IdTok') and the current token is "[".
Parser::ExprResult Parser::
-ParseAssignmentExprWithLeadingIdentifier(const LexerToken &IdTok) {
+ParseAssignmentExprWithLeadingIdentifier(const Token &IdTok) {
// We know that 'IdTok' must correspond to this production:
// primary-expression: identifier
/// expression. For example, in "*(int*)P+B", we consumed "*" (which is
/// now in 'StarTok') and the current token is "(".
Parser::ExprResult Parser::
-ParseAssignmentExpressionWithLeadingStar(const LexerToken &StarTok) {
+ParseAssignmentExpressionWithLeadingStar(const Token &StarTok) {
// We know that 'StarTok' must correspond to this production:
// unary-expression: unary-operator cast-expression
// where 'unary-operator' is '*'.
return LHS;
// Consume the operator, saving the operator token for error reporting.
- LexerToken OpToken = Tok;
+ Token OpToken = Tok;
ConsumeToken();
// Special case handling for the ternary operator.
assert((Tok.getKind() == tok::kw_sizeof ||
Tok.getKind() == tok::kw___alignof) &&
"Not a sizeof/alignof expression!");
- LexerToken OpTok = Tok;
+ Token OpTok = Tok;
ConsumeToken();
// If the operand doesn't start with an '(', it must be an expression.
// String concat. Note that keywords like __func__ and __FUNCTION__ are not
// considered to be strings for concatenation purposes.
- llvm::SmallVector<LexerToken, 4> StringToks;
+ llvm::SmallVector<Token, 4> StringToks;
do {
StringToks.push_back(Tok);
// assignment-expression or if it is an old-style structure field
// designator.
// TODO: Check that this is the first designator.
- LexerToken Ident = Tok;
+ Token Ident = Tok;
ConsumeToken();
// If this is the gross GNU extension, handle it now.
assert(Tok.getKind() == tok::identifier && Tok.getIdentifierInfo() &&
"Not an identifier!");
- LexerToken IdentTok = Tok; // Save the whole token.
+ Token IdentTok = Tok; // Save the whole token.
ConsumeToken(); // eat the identifier.
// identifier ':' statement
class QualType;
class LangOptions;
class DeclaratorChunk;
- class LexerToken;
+ class Token;
class IntegerLiteral;
class ArrayType;
class LabelStmt;
bool HasTrailingLParen);
virtual ExprResult ParseSimplePrimaryExpr(SourceLocation Loc,
tok::TokenKind Kind);
- virtual ExprResult ParseNumericConstant(const LexerToken &);
- virtual ExprResult ParseCharacterConstant(const LexerToken &);
+ virtual ExprResult ParseNumericConstant(const Token &);
+ virtual ExprResult ParseCharacterConstant(const Token &);
virtual ExprResult ParseParenExpr(SourceLocation L, SourceLocation R,
ExprTy *Val);
/// ParseStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
- virtual ExprResult ParseStringLiteral(const LexerToken *Toks, unsigned NumToks);
+ virtual ExprResult ParseStringLiteral(const Token *Toks, unsigned NumToks);
// Binary/Unary Operators. 'Tok' is the token for the operator.
virtual ExprResult ParseUnaryOp(SourceLocation OpLoc, tok::TokenKind Op,
/// string.
///
Action::ExprResult
-Sema::ParseStringLiteral(const LexerToken *StringToks, unsigned NumStringToks) {
+Sema::ParseStringLiteral(const Token *StringToks, unsigned NumStringToks) {
assert(NumStringToks && "Must have at least one string!");
StringLiteralParser Literal(StringToks, NumStringToks, PP, Context.Target);
}
}
-Sema::ExprResult Sema::ParseCharacterConstant(const LexerToken &Tok) {
+Sema::ExprResult Sema::ParseCharacterConstant(const Token &Tok) {
llvm::SmallString<16> CharBuffer;
CharBuffer.resize(Tok.getLength());
const char *ThisTokBegin = &CharBuffer[0];
Tok.getLocation());
}
-Action::ExprResult Sema::ParseNumericConstant(const LexerToken &Tok) {
+Action::ExprResult Sema::ParseNumericConstant(const Token &Tok) {
// fast path for a single digit (which is quite common). A single digit
// cannot have a trigraph, escaped newline, radix prefix, or type suffix.
if (Tok.getLength() == 1) {
#ifndef LLVM_CLANG_LEXER_H
#define LLVM_CLANG_LEXER_H
-#include "clang/Lex/LexerToken.h"
+#include "clang/Lex/Token.h"
#include "clang/Lex/MultipleIncludeOpt.h"
#include "clang/Basic/LangOptions.h"
#include <string>
/// return the tok::eof token. Return true if an error occurred and
/// compilation should terminate, false if normal. This implicitly involves
/// the preprocessor.
- void Lex(LexerToken &Result) {
+ void Lex(Token &Result) {
// Start a new token.
Result.startToken();
// NOTE, any changes here should also change code after calls to
// Preprocessor::HandleDirective
if (IsAtStartOfLine) {
- Result.setFlag(LexerToken::StartOfLine);
+ Result.setFlag(Token::StartOfLine);
IsAtStartOfLine = false;
}
/// LexRawToken - Switch the lexer to raw mode, lex a token into Result and
/// switch it back. Return true if the 'next character to read' pointer
/// points and the end of the lexer buffer, false otherwise.
- bool LexRawToken(LexerToken &Result) {
+ bool LexRawToken(Token &Result) {
assert(!LexingRawMode && "Already in raw mode!");
LexingRawMode = true;
Lex(Result);
/// LexTokenInternal - Internal interface to lex a preprocessing token. Called
/// by Lex.
///
- void LexTokenInternal(LexerToken &Result);
+ void LexTokenInternal(Token &Result);
/// FormTokenWithChars - When we lex a token, we have identified a span
/// starting at BufferPtr, going to TokEnd that forms the token. This method
/// takes that range and assigns it to the token as its location and size. In
/// addition, since tokens cannot overlap, this also updates BufferPtr to be
/// TokEnd.
- void FormTokenWithChars(LexerToken &Result, const char *TokEnd) {
+ void FormTokenWithChars(Token &Result, const char *TokEnd) {
Result.setLocation(getSourceLocation(BufferPtr));
Result.setLength(TokEnd-BufferPtr);
BufferPtr = TokEnd;
/// advance over it, and return it. This is tricky in several cases. Here we
/// just handle the trivial case and fall-back to the non-inlined
/// getCharAndSizeSlow method to handle the hard case.
- inline char getAndAdvanceChar(const char *&Ptr, LexerToken &Tok) {
+ inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
// If this is not a trigraph and not a UCN or escaped newline, return
// quickly.
if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;
/// and added to a given token, check to see if there are diagnostics that
/// need to be emitted or flags that need to be set on the token. If so, do
/// it.
- const char *ConsumeChar(const char *Ptr, unsigned Size, LexerToken &Tok) {
+ const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
// Normal case, we consumed exactly one token. Just return it.
if (Size == 1)
return Ptr+Size;
/// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
/// method.
- char getCharAndSizeSlow(const char *Ptr, unsigned &Size, LexerToken *Tok = 0);
+ char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);
/// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
/// emit a warning.
// Other lexer functions.
// Helper functions to lex the remainder of a token of the specific type.
- void LexIdentifier (LexerToken &Result, const char *CurPtr);
- void LexNumericConstant (LexerToken &Result, const char *CurPtr);
- void LexStringLiteral (LexerToken &Result, const char *CurPtr,bool Wide);
- void LexAngledStringLiteral(LexerToken &Result, const char *CurPtr);
- void LexCharConstant (LexerToken &Result, const char *CurPtr);
- bool LexEndOfFile (LexerToken &Result, const char *CurPtr);
-
- void SkipWhitespace (LexerToken &Result, const char *CurPtr);
- bool SkipBCPLComment (LexerToken &Result, const char *CurPtr);
- bool SkipBlockComment (LexerToken &Result, const char *CurPtr);
- bool SaveBCPLComment (LexerToken &Result, const char *CurPtr);
+ void LexIdentifier (Token &Result, const char *CurPtr);
+ void LexNumericConstant (Token &Result, const char *CurPtr);
+ void LexStringLiteral (Token &Result, const char *CurPtr,bool Wide);
+ void LexAngledStringLiteral(Token &Result, const char *CurPtr);
+ void LexCharConstant (Token &Result, const char *CurPtr);
+ bool LexEndOfFile (Token &Result, const char *CurPtr);
+
+ void SkipWhitespace (Token &Result, const char *CurPtr);
+ bool SkipBCPLComment (Token &Result, const char *CurPtr);
+ bool SkipBlockComment (Token &Result, const char *CurPtr);
+ bool SaveBCPLComment (Token &Result, const char *CurPtr);
/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
/// (potentially) macro expand the filename. If the sequence parsed is not
/// lexically legal, emit a diagnostic and return a result EOM token.
- void LexIncludeFilename(LexerToken &Result);
+ void LexIncludeFilename(Token &Result);
};
class Diagnostic;
class Preprocessor;
-class LexerToken;
+class Token;
class SourceLocation;
class TargetInfo;
llvm::SmallString<512> ResultBuf;
char *ResultPtr; // cursor
public:
- StringLiteralParser(const LexerToken *StringToks, unsigned NumStringToks,
+ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
Preprocessor &PP, TargetInfo &T);
bool hadError;
bool AnyWide;
namespace clang {
class MacroInfo;
class Preprocessor;
- class LexerToken;
+ class Token;
/// MacroArgs - An instance of this class captures information about
/// the formal arguments specified to a function-like macro invocation.
/// PreExpArgTokens - Pre-expanded tokens for arguments that need them. Empty
/// if not yet computed. This includes the EOF marker at the end of the
/// stream.
- std::vector<std::vector<LexerToken> > PreExpArgTokens;
+ std::vector<std::vector<Token> > PreExpArgTokens;
/// StringifiedArgs - This contains arguments in 'stringified' form. If the
/// stringified form of an argument has not yet been computed, this is empty.
- std::vector<LexerToken> StringifiedArgs;
+ std::vector<Token> StringifiedArgs;
/// VarargsElided - True if this is a C99 style varargs macro invocation and
/// there was no argument specified for the "..." argument. If the argument
/// MacroArgs ctor function - Create a new MacroArgs object with the specified
/// macro and argument info.
static MacroArgs *create(const MacroInfo *MI,
- const LexerToken *UnexpArgTokens,
+ const Token *UnexpArgTokens,
unsigned NumArgTokens, bool VarargsElided);
/// destroy - Destroy and deallocate the memory for this object.
/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
/// by pre-expansion, return false. Otherwise, conservatively return true.
- bool ArgNeedsPreexpansion(const LexerToken *ArgTok) const;
+ bool ArgNeedsPreexpansion(const Token *ArgTok) const;
/// getUnexpArgument - Return a pointer to the first token of the unexpanded
/// token list for the specified formal.
///
- const LexerToken *getUnexpArgument(unsigned Arg) const;
+ const Token *getUnexpArgument(unsigned Arg) const;
/// getArgLength - Given a pointer to an expanded or unexpanded argument,
/// return the number of tokens, not counting the EOF, that make up the
/// argument.
- static unsigned getArgLength(const LexerToken *ArgPtr);
+ static unsigned getArgLength(const Token *ArgPtr);
/// getPreExpArgument - Return the pre-expanded form of the specified
/// argument.
- const std::vector<LexerToken> &
+ const std::vector<Token> &
getPreExpArgument(unsigned Arg, Preprocessor &PP);
/// getStringifiedArgument - Compute, cache, and return the specified argument
/// that has been 'stringified' as required by the # operator.
- const LexerToken &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP);
+ const Token &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP);
/// getNumArguments - Return the number of arguments passed into this macro
/// invocation.
/// MacroTokens - This is the pointer to an array of tokens that the macro is
/// defined to, with arguments expanded for function-like macros. If this is
/// a token stream, these are the tokens we are returning.
- const LexerToken *MacroTokens;
+ const Token *MacroTokens;
/// NumMacroTokens - This is the length of the MacroTokens array.
///
public:
/// Create a macro expander for the specified macro with the specified actual
/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
- MacroExpander(LexerToken &Tok, MacroArgs *ActualArgs, Preprocessor &pp)
+ MacroExpander(Token &Tok, MacroArgs *ActualArgs, Preprocessor &pp)
: Macro(0), ActualArgs(0), PP(pp) {
Init(Tok, ActualArgs);
}
/// Init - Initialize this macro expander to expand from the specified macro
/// with the specified argument information. Note that this ctor takes
/// ownership of the ActualArgs pointer.
- void Init(LexerToken &Tok, MacroArgs *ActualArgs);
+ void Init(Token &Tok, MacroArgs *ActualArgs);
/// Create a macro expander for the specified token stream. This does not
/// take ownership of the specified token vector.
- MacroExpander(const LexerToken *TokArray, unsigned NumToks, Preprocessor &pp)
+ MacroExpander(const Token *TokArray, unsigned NumToks, Preprocessor &pp)
: Macro(0), ActualArgs(0), PP(pp) {
Init(TokArray, NumToks);
}
/// Init - Initialize this macro expander with the specified token stream.
/// This does not take ownership of the specified token vector.
- void Init(const LexerToken *TokArray, unsigned NumToks);
+ void Init(const Token *TokArray, unsigned NumToks);
~MacroExpander() { destroy(); }
unsigned isNextTokenLParen() const;
/// Lex - Lex and return a token from this macro stream.
- void Lex(LexerToken &Tok);
+ void Lex(Token &Tok);
private:
void destroy();
/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
/// are is another ## after it, chomp it iteratively. Return the result as
/// Tok.
- void PasteTokens(LexerToken &Tok);
+ void PasteTokens(Token &Tok);
/// Expand the arguments of a function-like macro so that we can quickly
/// return preexpanded tokens from MacroTokens.
#ifndef LLVM_CLANG_MACROINFO_H
#define LLVM_CLANG_MACROINFO_H
-#include "clang/Lex/LexerToken.h"
+#include "clang/Lex/Token.h"
#include "llvm/ADT/SmallVector.h"
#include <vector>
#include <cassert>
/// ReplacementTokens - This is the list of tokens that the macro is defined
/// to.
- llvm::SmallVector<LexerToken, 8> ReplacementTokens;
+ llvm::SmallVector<Token, 8> ReplacementTokens;
/// IsFunctionLike - True if this macro is a function-like macro, false if it
/// is an object-like macro.
return ReplacementTokens.size();
}
- const LexerToken &getReplacementToken(unsigned Tok) const {
+ const Token &getReplacementToken(unsigned Tok) const {
assert(Tok < ReplacementTokens.size() && "Invalid token #");
return ReplacementTokens[Tok];
}
- typedef llvm::SmallVector<LexerToken, 8>::const_iterator tokens_iterator;
+ typedef llvm::SmallVector<Token, 8>::const_iterator tokens_iterator;
tokens_iterator tokens_begin() const { return ReplacementTokens.begin(); }
tokens_iterator tokens_end() const { return ReplacementTokens.end(); }
/// AddTokenToBody - Add the specified token to the replacement text for the
/// macro.
- void AddTokenToBody(const LexerToken &Tok) {
+ void AddTokenToBody(const Token &Tok) {
ReplacementTokens.push_back(Tok);
}
namespace clang {
class Preprocessor;
- class LexerToken;
+ class Token;
class IdentifierInfo;
class PragmaNamespace;
virtual ~PragmaHandler();
const IdentifierInfo *getName() const { return Name; }
- virtual void HandlePragma(Preprocessor &PP, LexerToken &FirstToken) = 0;
+ virtual void HandlePragma(Preprocessor &PP, Token &FirstToken) = 0;
/// getIfNamespace - If this is a namespace, return it. This is equivalent to
/// using a dynamic_cast, but doesn't require RTTI.
Handlers.push_back(Handler);
}
- virtual void HandlePragma(Preprocessor &PP, LexerToken &FirstToken);
+ virtual void HandlePragma(Preprocessor &PP, Token &FirstToken);
virtual PragmaNamespace *getIfNamespace() { return this; }
};
/// EnterMacro - Add a Macro to the top of the include stack and start lexing
/// tokens from it instead of the current buffer. Args specifies the
/// tokens input to a function-like macro.
- void EnterMacro(LexerToken &Identifier, MacroArgs *Args);
+ void EnterMacro(Token &Identifier, MacroArgs *Args);
/// EnterTokenStream - Add a "macro" context to the top of the include stack,
/// which will cause the lexer to start returning the specified tokens. Note
/// that these tokens will be re-macro-expanded when/if expansion is enabled.
/// This method assumes that the specified stream of tokens has a permanent
/// owner somewhere, so they do not need to be copied.
- void EnterTokenStream(const LexerToken *Toks, unsigned NumToks);
+ void EnterTokenStream(const Token *Toks, unsigned NumToks);
/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
/// lexer stack. This should only be used in situations where the current
/// Lex - To lex a token from the preprocessor, just pull a token from the
/// current lexer or macro object.
- void Lex(LexerToken &Result) {
+ void Lex(Token &Result) {
if (CurLexer)
CurLexer->Lex(Result);
else
/// LexNonComment - Lex a token. If it's a comment, keep lexing until we get
/// something not a comment. This is useful in -E -C mode where comments
/// would foul up preprocessor directive handling.
- void LexNonComment(LexerToken &Result) {
+ void LexNonComment(Token &Result) {
do
Lex(Result);
while (Result.getKind() == tok::comment);
/// LexUnexpandedToken - This is just like Lex, but this disables macro
/// expansion of identifier tokens.
- void LexUnexpandedToken(LexerToken &Result) {
+ void LexUnexpandedToken(Token &Result) {
// Disable macro expansion.
bool OldVal = DisableMacroExpansion;
DisableMacroExpansion = true;
}
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
- /// the specified LexerToken's location, translating the token's start
+ /// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
void Diag(SourceLocation Loc, unsigned DiagID);
void Diag(SourceLocation Loc, unsigned DiagID, const std::string &Msg);
- void Diag(const LexerToken &Tok, unsigned DiagID) {
+ void Diag(const Token &Tok, unsigned DiagID) {
Diag(Tok.getLocation(), DiagID);
}
- void Diag(const LexerToken &Tok, unsigned DiagID, const std::string &Msg) {
+ void Diag(const Token &Tok, unsigned DiagID, const std::string &Msg) {
Diag(Tok.getLocation(), DiagID, Msg);
}
/// after trigraph expansion and escaped-newline folding. In particular, this
/// wants to get the true, uncanonicalized, spelling of things like digraphs
/// UCNs, etc.
- std::string getSpelling(const LexerToken &Tok) const;
+ std::string getSpelling(const Token &Tok) const;
/// getSpelling - This method is used to get the spelling of a token into a
/// preallocated buffer, instead of as an std::string. The caller is required
/// to point to a constant buffer with the data already in it (avoiding a
/// copy). The caller is not allowed to modify the returned buffer pointer
/// if an internal buffer is returned.
- unsigned getSpelling(const LexerToken &Tok, const char *&Buffer) const;
+ unsigned getSpelling(const Token &Tok, const char *&Buffer) const;
/// CreateString - Plop the specified string into a scratch buffer and return
/// DumpToken - Print the token to stderr, used for debugging.
///
- void DumpToken(const LexerToken &Tok, bool DumpFlags = false) const;
+ void DumpToken(const Token &Tok, bool DumpFlags = false) const;
void DumpMacro(const MacroInfo &MI) const;
/// AdvanceToTokenCharacter - Given a location that specifies the start of a
/// LookUpIdentifierInfo - Given a tok::identifier token, look up the
/// identifier information for the token and install it into the token.
- IdentifierInfo *LookUpIdentifierInfo(LexerToken &Identifier,
+ IdentifierInfo *LookUpIdentifierInfo(Token &Identifier,
const char *BufPtr = 0);
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier and has filled in the tokens IdentifierInfo member. This
/// callback potentially macro expands it or turns it into a named token (like
/// 'for').
- void HandleIdentifier(LexerToken &Identifier);
+ void HandleIdentifier(Token &Identifier);
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token and returns true, or
/// pops a level off the include stack and returns false, at which point the
/// client should call lex again.
- bool HandleEndOfFile(LexerToken &Result, bool isEndOfMacro = false);
+ bool HandleEndOfFile(Token &Result, bool isEndOfMacro = false);
/// HandleEndOfMacro - This callback is invoked when the lexer hits the end of
/// the current macro line. It returns true if Result is filled in with a
/// token, or false if Lex should be called again.
- bool HandleEndOfMacro(LexerToken &Result);
+ bool HandleEndOfMacro(Token &Result);
/// HandleDirective - This callback is invoked when the lexer sees a # token
/// at the start of a line. This consumes the directive, modifies the
/// lexer/preprocessor state, and advances the lexer(s) so that the next token
/// read is the correct one.
- void HandleDirective(LexerToken &Result);
+ void HandleDirective(Token &Result);
/// CheckEndOfDirective - Ensure that the next token is a tok::eom token. If
/// not, emit a diagnostic and consume up until the eom.
/// ReadMacroName - Lex and validate a macro name, which occurs after a
/// #define or #undef. This emits a diagnostic, sets the token kind to eom,
/// and discards the rest of the macro line if the macro name is invalid.
- void ReadMacroName(LexerToken &MacroNameTok, char isDefineUndef = 0);
+ void ReadMacroName(Token &MacroNameTok, char isDefineUndef = 0);
/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
/// definition has just been read. Lex the rest of the arguments and the
/// HandleMacroExpandedIdentifier - If an identifier token is read that is to
/// be expanded as a macro, handle it and return the next token as 'Tok'. If
/// the macro should not be expanded return true, otherwise return false.
- bool HandleMacroExpandedIdentifier(LexerToken &Tok, MacroInfo *MI);
+ bool HandleMacroExpandedIdentifier(Token &Tok, MacroInfo *MI);
/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
/// lexed is a '('. If so, consume the token and return true, if not, this
/// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is
/// invoked to read all of the formal arguments specified for the macro
/// invocation. This returns null on error.
- MacroArgs *ReadFunctionLikeMacroArgs(LexerToken &MacroName, MacroInfo *MI);
+ MacroArgs *ReadFunctionLikeMacroArgs(Token &MacroName, MacroInfo *MI);
/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
/// as a builtin macro, handle it and return the next token as 'Tok'.
- void ExpandBuiltinMacro(LexerToken &Tok);
+ void ExpandBuiltinMacro(Token &Tok);
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
/// been read into 'Tok'.
- void Handle_Pragma(LexerToken &Tok);
+ void Handle_Pragma(Token &Tok);
/// EnterSourceFileWithLexer - Add a lexer to the top of the include stack and
/// caller is expected to provide a buffer that is large enough to hold the
/// spelling of the filename, but is also expected to handle the case when
/// this method decides to use a different buffer.
- bool GetIncludeFilenameSpelling(const LexerToken &FNTok,
+ bool GetIncludeFilenameSpelling(const Token &FNTok,
const char *&BufStart, const char *&BufEnd);
/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
/// should side-effect the current preprocessor object so that the next call
/// to Lex() will return the appropriate token next.
- void HandleUserDiagnosticDirective(LexerToken &Tok, bool isWarning);
- void HandleIdentSCCSDirective(LexerToken &Tok);
+ void HandleUserDiagnosticDirective(Token &Tok, bool isWarning);
+ void HandleIdentSCCSDirective(Token &Tok);
// File inclusion.
- void HandleIncludeDirective(LexerToken &Tok,
+ void HandleIncludeDirective(Token &Tok,
const DirectoryLookup *LookupFrom = 0,
bool isImport = false);
- void HandleIncludeNextDirective(LexerToken &Tok);
- void HandleImportDirective(LexerToken &Tok);
+ void HandleIncludeNextDirective(Token &Tok);
+ void HandleImportDirective(Token &Tok);
// Macro handling.
- void HandleDefineDirective(LexerToken &Tok, bool isTargetSpecific);
- void HandleUndefDirective(LexerToken &Tok);
- void HandleDefineOtherTargetDirective(LexerToken &Tok);
- // HandleAssertDirective(LexerToken &Tok);
- // HandleUnassertDirective(LexerToken &Tok);
+ void HandleDefineDirective(Token &Tok, bool isTargetSpecific);
+ void HandleUndefDirective(Token &Tok);
+ void HandleDefineOtherTargetDirective(Token &Tok);
+ // HandleAssertDirective(Token &Tok);
+ // HandleUnassertDirective(Token &Tok);
// Conditional Inclusion.
- void HandleIfdefDirective(LexerToken &Tok, bool isIfndef,
+ void HandleIfdefDirective(Token &Tok, bool isIfndef,
bool ReadAnyTokensBeforeDirective);
- void HandleIfDirective(LexerToken &Tok, bool ReadAnyTokensBeforeDirective);
- void HandleEndifDirective(LexerToken &Tok);
- void HandleElseDirective(LexerToken &Tok);
- void HandleElifDirective(LexerToken &Tok);
+ void HandleIfDirective(Token &Tok, bool ReadAnyTokensBeforeDirective);
+ void HandleEndifDirective(Token &Tok);
+ void HandleElseDirective(Token &Tok);
+ void HandleElifDirective(Token &Tok);
// Pragmas.
void HandlePragmaDirective();
public:
- void HandlePragmaOnce(LexerToken &OnceTok);
- void HandlePragmaPoison(LexerToken &PoisonTok);
- void HandlePragmaSystemHeader(LexerToken &SysHeaderTok);
- void HandlePragmaDependency(LexerToken &DependencyTok);
+ void HandlePragmaOnce(Token &OnceTok);
+ void HandlePragmaPoison(Token &PoisonTok);
+ void HandlePragmaSystemHeader(Token &SysHeaderTok);
+ void HandlePragmaDependency(Token &DependencyTok);
};
} // end namespace clang
-//===--- LexerToken.h - Token interface -------------------------*- C++ -*-===//
+//===--- Token.h - Token interface ------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
//
//===----------------------------------------------------------------------===//
//
-// This file defines the LexerToken interface.
+// This file defines the Token interface.
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LEXERTOKEN_H
-#define LLVM_CLANG_LEXERTOKEN_H
+#ifndef LLVM_CLANG_TOKEN_H
+#define LLVM_CLANG_TOKEN_H
#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/SourceLocation.h"
class IdentifierInfo;
-/// LexerToken - This structure provides full information about a lexed token.
+/// Token - This structure provides full information about a lexed token.
/// It is not intended to be space efficient, it is intended to return as much
/// information as possible about each returned token. This is expected to be
/// compressed into a smaller form if memory footprint is important.
-class LexerToken {
+class Token {
/// The location and length of the token text itself.
SourceLocation Loc;
unsigned Length;
class Action;
// Lex.
class IdentifierInfo;
- class LexerToken;
+ class Token;
/// Action - As the parser reads the input file and recognizes the productions
/// of the grammar, it invokes methods on this class to turn the parsed input
tok::TokenKind Kind) {
return 0;
}
- virtual ExprResult ParseCharacterConstant(const LexerToken &) { return 0; }
- virtual ExprResult ParseNumericConstant(const LexerToken &) { return 0; }
+ virtual ExprResult ParseCharacterConstant(const Token &) { return 0; }
+ virtual ExprResult ParseNumericConstant(const Token &) { return 0; }
/// ParseStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
- virtual ExprResult ParseStringLiteral(const LexerToken *Toks, unsigned NumToks) {
+ virtual ExprResult ParseStringLiteral(const Token *Toks, unsigned NumToks) {
return 0;
}
/// Tok - The current token we are peeking head. All parsing methods assume
/// that this is valid.
- LexerToken Tok;
+ Token Tok;
unsigned short ParenCount, BracketCount, BraceCount;
void Diag(SourceLocation Loc, unsigned DiagID,
const std::string &Msg = std::string());
- void Diag(const LexerToken &Tok, unsigned DiagID,
+ void Diag(const Token &Tok, unsigned DiagID,
const std::string &M = std::string()) {
Diag(Tok.getLocation(), DiagID, M);
}
ExprResult ParseConstantExpression();
ExprResult ParseAssignmentExpression(); // Expr that doesn't include commas.
- ExprResult ParseExpressionWithLeadingIdentifier(const LexerToken &Tok);
- ExprResult ParseAssignmentExprWithLeadingIdentifier(const LexerToken &Tok);
- ExprResult ParseAssignmentExpressionWithLeadingStar(const LexerToken &Tok);
+ ExprResult ParseExpressionWithLeadingIdentifier(const Token &Tok);
+ ExprResult ParseAssignmentExprWithLeadingIdentifier(const Token &Tok);
+ ExprResult ParseAssignmentExpressionWithLeadingStar(const Token &Tok);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, unsigned MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression);