// this, we set up a fake TokenLexer to lex from the unexpanded argument
// list. With this installed, we lex expanded tokens until we hit the EOF
// token at the end of the unexp list.
- PP.EnterTokenStream(AT, NumToks);
+ PP.EnterTokenStream(AT, NumToks, false /*DisableMacroExpansion*/,
+ false /*OwnsTokens*/);
// Lex all of the macro-expanded tokens into Result.
do {
//===----------------------------------------------------------------------===//
-// Source File Location Methods.
+// Miscellaneous Methods.
//===----------------------------------------------------------------------===//
/// isInPrimaryFile - Return true if we're in the top-level file, not in a
return 0;
}
+/// LookAhead - This peeks ahead N tokens and returns that token without
+/// consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) returns
+/// the token after Tok, etc.
+///
+/// NOTE: is a relatively expensive method, so it should not be used in common
+/// code paths if possible!
+///
+Token Preprocessor::LookAhead(unsigned N) {
+ // The loop below stores tokens at indices 0 through N (N+1 of them), so the
+ // buffer must hold N+1 Tokens; 'new Token[N]' would overflow by one element.
+ Token *LookaheadTokens = new Token[N+1];
+
+ // Read N+1 tokens into LookaheadTokens. After this loop, Tok is the token
+ // to return.
+ Token Tok;
+ unsigned NumTokens = 0;
+ for (; N != ~0U; --N, ++NumTokens) {
+ Lex(Tok);
+ LookaheadTokens[NumTokens] = Tok;
+
+ // If we got to EOF, don't lex past it. This will cause LookAhead to return
+ // the EOF token.
+ if (Tok.is(tok::eof))
+ break;
+ }
+
+ // Okay, at this point, we have the token we want to return in Tok. However,
+ // we read it and a bunch of other stuff (in LookaheadTokens) that we must
+ // allow subsequent calls to 'Lex' to return. To do this, we push a new token
+ // lexer onto the lexer stack with the tokens we read here. This passes
+ // ownership of LookaheadTokens to EnterTokenStream.
+ //
+ // Note that we disable macro expansion of the tokens from this buffer, since
+ // any macros have already been expanded, and the internal preprocessor state
+ // may already read past new macros. Consider something like LookAhead(1) on
+ // X
+ // #define X 14
+ // Y
+ // The lookahead call should return 'Y', and the next Lex call should return
+ // 'X' even though X -> 14 has already been entered as a macro.
+ //
+ EnterTokenStream(LookaheadTokens, NumTokens, true /*DisableExpansion*/,
+ true /*OwnsTokens*/);
+ return Tok;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods for Entering and Callbacks for leaving various contexts
+//===----------------------------------------------------------------------===//
/// EnterSourceFile - Add a source file to the top of the include stack and
/// start lexing tokens from it instead of the current buffer. Return true
}
/// EnterTokenStream - Add a "macro" context to the top of the include stack,
-/// which will cause the lexer to start returning the specified tokens. Note
-/// that these tokens will be re-macro-expanded when/if expansion is enabled.
-/// This method assumes that the specified stream of tokens has a permanent
-/// owner somewhere, so they do not need to be copied.
-void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks) {
+/// which will cause the lexer to start returning the specified tokens.
+///
+/// If DisableMacroExpansion is true, tokens lexed from the token stream will
+/// not be subject to further macro expansion. Otherwise, these tokens will
+/// be re-macro-expanded when/if expansion is enabled.
+///
+/// If OwnsTokens is false, this method assumes that the specified stream of
+/// tokens has a permanent owner somewhere, so they do not need to be copied.
+/// If it is true, it assumes the array of tokens is allocated with new[] and
+/// must be freed.
+///
+void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
+ bool DisableMacroExpansion,
+ bool OwnsTokens) {
// Save our current state.
IncludeMacroStack.push_back(IncludeStackInfo(CurLexer, CurDirLookup,
CurTokenLexer));
// Create a macro expander to expand from the specified token stream.
if (NumCachedTokenLexers == 0) {
+ // No free TokenLexer in the cache; allocate a fresh one.
- CurTokenLexer = new TokenLexer(Toks, NumToks, *this);
+ CurTokenLexer = new TokenLexer(Toks, NumToks, DisableMacroExpansion,
+ OwnsTokens, *this);
} else {
+ // Reinitialize a cached TokenLexer, avoiding a heap allocation.
CurTokenLexer = TokenLexerCache[--NumCachedTokenLexers];
- CurTokenLexer->Init(Toks, NumToks);
+ CurTokenLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
}
}
HasLeadingSpace = Tok.hasLeadingSpace();
Tokens = &*Macro->tokens_begin();
OwnsTokens = false;
+ DisableMacroExpansion = false;
NumTokens = Macro->tokens_end()-Macro->tokens_begin();
// If this is a function-like macro, expand the arguments and change
/// Create a TokenLexer for the specified token stream. This does not
/// take ownership of the specified token vector.
-void TokenLexer::Init(const Token *TokArray, unsigned NumToks) {
+void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
+ bool disableMacroExpansion, bool ownsTokens) {
// If the client is reusing a TokenLexer, make sure to free any memory
// associated with it.
destroy();
+ // A raw token stream has no macro definition or argument info behind it.
Macro = 0;
ActualArgs = 0;
Tokens = TokArray;
- OwnsTokens = false;
+ OwnsTokens = ownsTokens;
+ DisableMacroExpansion = disableMacroExpansion;
NumTokens = NumToks;
CurToken = 0;
InstantiateLoc = SourceLocation();
}
// Handle recursive expansion!
- if (Tok.getIdentifierInfo())
+ // Identifiers from a stream with DisableMacroExpansion set (e.g. a buffer
+ // of already-expanded tokens from LookAhead) must not be expanded again.
+ if (Tok.getIdentifierInfo() && !DisableMacroExpansion)
return PP.HandleIdentifier(Tok);
// Otherwise, return a normal token.
/// parameter-declaration
/// '...' [OBJC2]
///
-Parser::StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc,
- bool &processAtKeyword) {
+Parser::StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
bool catch_or_finally_seen = false;
- processAtKeyword = false;
ConsumeToken(); // consume try
if (Tok.isNot(tok::l_brace)) {
StmtResult TryBody = ParseCompoundStatementBody();
if (TryBody.isInvalid)
TryBody = Actions.ActOnNullStmt(Tok.getLocation());
+
+ // Each iteration consumes one @catch clause; a @finally ends the loop.
while (Tok.is(tok::at)) {
+ // At this point, we need to lookahead to determine if this @ is the start
+ // of an @catch or @finally. We don't want to consume the @ token if this
+ // is an @try or @encode or something else.
+ Token AfterAt = GetLookAheadToken(1);
+ if (!AfterAt.isObjCAtKeyword(tok::objc_catch) &&
+ !AfterAt.isObjCAtKeyword(tok::objc_finally))
+ break;
+
SourceLocation AtCatchFinallyLoc = ConsumeToken();
if (Tok.isObjCAtKeyword(tok::objc_catch)) {
StmtTy *FirstPart = 0;
return true;
}
catch_or_finally_seen = true;
- } else if (Tok.isObjCAtKeyword(tok::objc_finally)) {
+ } else {
+ assert(Tok.isObjCAtKeyword(tok::objc_finally) && "Lookahead confused?");
ConsumeToken(); // consume finally
StmtResult FinallyBody(true);
FinallyBody.Val);
catch_or_finally_seen = true;
break;
- } else {
- processAtKeyword = true;
- break;
}
}
if (!catch_or_finally_seen) {
if (!catch_or_finally_seen) {
Parser::StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
if (Tok.isObjCAtKeyword(tok::objc_try)) {
+ // ParseObjCTryStmt now looks ahead before consuming an '@', so it never
+ // leaves a half-consumed '@' that must be re-dispatched from here.
- bool parsedAtSign;
-
- StmtResult Res = ParseObjCTryStmt(AtLoc, parsedAtSign);
- // FIXME: This hack results in a dropped AST node. To correctly implement
- // the hack, parseAtSign would need to bubble up to
- // ParseCompoundStatement(). This would involve adding an argument to this
- // routine and ParseStatementOrDeclaration(). Changing the parser in this
- // fashion to solve such a conceptually simple problem is undesirable.
- // Rework this clause once 2-token lookahead is implemented.
- if (!Res.isInvalid && parsedAtSign)
- return ParseObjCAtStatement(AtLoc);
- return Res;
+ return ParseObjCTryStmt(AtLoc);
} else if (Tok.isObjCAtKeyword(tok::objc_throw))
return ParseObjCThrowStmt(AtLoc);
else if (Tok.isObjCAtKeyword(tok::objc_synchronized))
void EnterMacro(Token &Identifier, MacroArgs *Args);
/// EnterTokenStream - Add a "macro" context to the top of the include stack,
- /// which will cause the lexer to start returning the specified tokens. Note
- /// that these tokens will be re-macro-expanded when/if expansion is enabled.
- /// This method assumes that the specified stream of tokens has a permanent
- /// owner somewhere, so they do not need to be copied.
- void EnterTokenStream(const Token *Toks, unsigned NumToks);
+ /// which will cause the lexer to start returning the specified tokens.
+ ///
+ /// If DisableMacroExpansion is true, tokens lexed from the token stream will
+ /// not be subject to further macro expansion. Otherwise, these tokens will
+ /// be re-macro-expanded when/if expansion is enabled.
+ ///
+ /// If OwnsTokens is false, this method assumes that the specified stream of
+ /// tokens has a permanent owner somewhere, so they do not need to be copied.
+ /// If it is true, it assumes the array of tokens is allocated with new[] and
+ /// must be freed.
+ ///
+ void EnterTokenStream(const Token *Toks, unsigned NumToks,
+ bool DisableMacroExpansion, bool OwnsTokens);
/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
/// lexer stack. This should only be used in situations where the current
DisableMacroExpansion = OldVal;
}
+ /// LookAhead - This peeks ahead N tokens and returns that token without
+ /// consuming any tokens. LookAhead(0) returns the next token that would be
+ /// returned by Lex(), LookAhead(1) returns the token after it, etc. This
+ /// returns normal tokens after phase 5. As such, it is equivalent to using
+ /// 'Lex', not 'LexUnexpandedToken'.
+ ///
+ /// NOTE: is a relatively expensive method, so it should not be used in common
+ /// code paths if possible!
+ ///
+ Token LookAhead(unsigned N);
+
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
/// the specified Token's location, translating the token's start
/// position in the current buffer into a SourcePosition object for rendering.
/// definition, we don't make a copy of it.
bool OwnsTokens : 1;
+ /// DisableMacroExpansion - This is true when tokens lexed from the TokenLexer
+ /// should not be subject to further macro expansion.
+ bool DisableMacroExpansion : 1;
+
TokenLexer(const TokenLexer&); // DO NOT IMPLEMENT
void operator=(const TokenLexer&); // DO NOT IMPLEMENT
public:
/// Create a TokenLexer for the specified token stream. This does not
/// take ownership of the specified token vector.
- TokenLexer(const Token *TokArray, unsigned NumToks, Preprocessor &pp)
+ TokenLexer(const Token *TokArray, unsigned NumToks, bool DisableExpansion,
+ bool OwnsTokens, Preprocessor &pp)
: Macro(0), ActualArgs(0), PP(pp), OwnsTokens(false) {
- Init(TokArray, NumToks);
+ Init(TokArray, NumToks, DisableExpansion, OwnsTokens);
}
/// Init - Initialize this TokenLexer with the specified token stream.
/// This does not take ownership of the specified token vector.
- void Init(const Token *TokArray, unsigned NumToks);
+ ///
+ /// DisableExpansion is true when macro expansion of tokens lexed from this
+ /// stream should be disabled.
+ void Init(const Token *TokArray, unsigned NumToks,
+ bool DisableMacroExpansion, bool OwnsTokens);
~TokenLexer() { destroy(); }
return L;
}
+ /// GetLookAheadToken - This peeks ahead N tokens and returns that token
+ /// without consuming any tokens. GetLookAheadToken(0) returns 'Tok',
+ /// GetLookAheadToken(1) returns the token after Tok, etc.
+ ///
+ /// Note that this differs from the Preprocessor's LookAhead method, because
+ /// the Parser always has one token lexed that the preprocessor doesn't.
+ ///
+ /// NOTE: is a relatively expensive method, so it should not be used in common
+ /// code paths if possible!
+ ///
+ Token GetLookAheadToken(unsigned N) {
+ if (N == 0 || Tok.is(tok::eof)) return Tok;
+ return PP.LookAhead(N-1);
+ }
+
+
/// MatchRHSPunctuation - For punctuation with a LHS and RHS (e.g. '['/']'),
/// this helper function matches and consumes the specified RHS token if
/// present. If not present, it emits the specified diagnostic indicating
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult FuzzyParseMicrosoftAsmStatement();
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
- StmtResult ParseObjCTryStmt(SourceLocation atLoc, bool &processAtKeyword);
+ StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
bool ParseAsmOperandsOpt(llvm::SmallVectorImpl<std::string> &Names,
void * proc();
+// NOTE(review): NSConstantString is presumably the class used to type the
+// @"s" literal exercised below -- verify against the string-literal sema.
+@interface NSConstantString
+@end
+
@interface Frob
@end
void bar()
{
@try {}// expected-error {{@try statment without a @catch and @finally clause}}
- @"s" {} // expected-warning {{result unused}} expected-error {{expected ';'}}
+ @"s"; // expected-warning {{result unused}}
}
void baz()
{
@try {}// expected-error {{@try statment without a @catch and @finally clause}}
- @try {}// expected-error {{undeclared identifier}}
+ @try {}
@finally {}
}