else
return PeekAhead(N+1);
}
+
+ /// EnterToken - Enters a token in the token stream to be lexed next. If
+ /// BackTrack() is called afterwards, the token will remain at the insertion
+ /// point.
+ /// The token is inserted into the token cache at the current lex position,
+ /// after switching the Preprocessor into caching lex mode.
+ void EnterToken(const Token &Tok) {
+ EnterCachingLexMode();
+ CachedTokens.insert(CachedTokens.begin()+CachedLexPos, Tok);
+ }
+
+ /// AnnotateCachedTokens - We notify the Preprocessor that if it is caching
+ /// tokens (because backtrack is enabled) it should replace the most recent
+ /// cached tokens with the given annotation token. This function has no effect
+ /// if backtracking is not enabled.
+ ///
+ /// Note that the use of this function is just for optimization; so that the
+ /// cached tokens don't get re-parsed and re-resolved after a backtrack is
+ /// invoked.
+ void AnnotateCachedTokens(const Token &Tok) {
+ assert(Tok.isAnnotationToken() && "Expected annotation token");
+ if (CachedLexPos != 0 && InCachingLexMode())
+ AnnotatePreviousCachedTokens(Tok);
+ }
/// Diag - Forwarding function for diagnostics. This emits a diagnostic at
/// the specified Token's location, translating the token's start
RemoveTopOfLexerStack();
}
const Token &PeekAhead(unsigned N);
+ void AnnotatePreviousCachedTokens(const Token &Tok);
//===--------------------------------------------------------------------===//
/// Handle*Directive - implement the various preprocessor directives. These
/// It is not intended to be space efficient, it is intended to return as much
/// information as possible about each returned token. This is expected to be
/// compressed into a smaller form if memory footprint is important.
+///
+/// The parser can create a special "annotation token" representing a stream of
+/// tokens that were parsed and semantically resolved, e.g.: "foo::MyClass<int>"
+/// can be represented by a single typename annotation token that carries
+/// information about the SourceRange of the tokens and the type object.
class Token {
- /// The location and length of the token text itself.
+ /// The location of the token.
SourceLocation Loc;
- unsigned Length;
+
+ union {
+ /// The end of the SourceRange of an annotation token.
+ unsigned AnnotEndLocID;
+
+ /// The length of the token text itself.
+ unsigned Length;
+ };
- /// IdentifierInfo - If this was an identifier, this points to the uniqued
- /// information about this identifier.
- IdentifierInfo *IdentInfo;
+ union {
+ /// IdentifierInfo - If this was an identifier, this points to the uniqued
+ /// information about this identifier.
+ IdentifierInfo *IdentInfo;
+
+ /// AnnotVal - Information specific to an annotation token.
+ void *AnnotVal;
+ };
/// Kind - The actual flavor of token this is.
///
bool is(tok::TokenKind K) const { return Kind == (unsigned) K; }
bool isNot(tok::TokenKind K) const { return Kind != (unsigned) K; }
+ /// isAnnotationToken - Return true if this is one of the annotation token
+ /// kinds (qualified-typename or C++ scope annotation).
+ bool isAnnotationToken() const {
+ return is(tok::annot_qualtypename) || is(tok::annot_cxxscope);
+ }
+
/// getLocation - Return a source location identifier for the specified
/// offset in the current file.
SourceLocation getLocation() const { return Loc; }
- unsigned getLength() const { return Length; }
+ unsigned getLength() const {
+ assert(!isAnnotationToken() && "Used Length on annotation token");
+ return Length;
+ }
void setLocation(SourceLocation L) { Loc = L; }
void setLength(unsigned Len) { Length = Len; }
+
+ /// getAnnotationEndLoc/setAnnotationEndLoc - Access the end of the
+ /// SourceRange of an annotation token. Only valid on annotation tokens,
+ /// since the storage is a union shared with the token Length.
+ SourceLocation getAnnotationEndLoc() const {
+ assert(isAnnotationToken() && "Used AnnotEndLocID on non-annotation token");
+ return SourceLocation::getFromRawEncoding(AnnotEndLocID);
+ }
+ void setAnnotationEndLoc(SourceLocation L) {
+ assert(isAnnotationToken() && "Used AnnotEndLocID on non-annotation token");
+ AnnotEndLocID = L.getRawEncoding();
+ }
+
+ /// getAnnotationRange - SourceRange of the group of tokens that this
+ /// annotation token represents.
+ SourceRange getAnnotationRange() const {
+ return SourceRange(getLocation(), getAnnotationEndLoc());
+ }
+ void setAnnotationRange(SourceRange R) {
+ setLocation(R.getBegin());
+ setAnnotationEndLoc(R.getEnd());
+ }
const char *getName() const {
return tok::getTokenName( (tok::TokenKind) Kind);
Loc = SourceLocation();
}
- IdentifierInfo *getIdentifierInfo() const { return IdentInfo; }
+ IdentifierInfo *getIdentifierInfo() const {
+ assert(!isAnnotationToken() && "Used IdentInfo on annotation token");
+ return IdentInfo;
+ }
void setIdentifierInfo(IdentifierInfo *II) {
IdentInfo = II;
}
+
+ /// getAnnotationValue/setAnnotationValue - Access the opaque payload of an
+ /// annotation token (e.g. the type object for a typename annotation). Only
+ /// valid on annotation tokens, since the storage is a union shared with
+ /// IdentInfo.
+ void *getAnnotationValue() const {
+ assert(isAnnotationToken() && "Used AnnotVal on non-annotation token");
+ return AnnotVal;
+ }
+ void setAnnotationValue(void *val) {
+ assert(isAnnotationToken() && "Used AnnotVal on non-annotation token");
+ AnnotVal = val;
+ }
/// setFlag - Set the specified flag.
void setFlag(TokenFlags Flag) {
EnterCachingLexMode();
return CachedTokens.back();
}
+
+/// AnnotatePreviousCachedTokens - Replace the run of cached tokens that ends
+/// just before CachedLexPos, and whose source range matches Tok's annotation
+/// range, with the single annotation token Tok.
+void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
+ assert(Tok.isAnnotationToken() && "Expected annotation token");
+ assert(CachedLexPos != 0 && "Expected to have some cached tokens");
+ assert(CachedTokens[CachedLexPos-1].getLocation() == Tok.getAnnotationEndLoc()
+ && "The annotation should be until the most recent cached token");
+
+ // Start from the end of the cached tokens list and look for the token
+ // that is the beginning of the annotation token.
+ for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
+ CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
+ if (AnnotBegin->getLocation() == Tok.getLocation()) {
+ assert((BacktrackPositions.empty() || BacktrackPositions.back() < i) &&
+ "The backtrack pos points inside the annotated tokens!");
+ // Replace the cached tokens with the single annotation token.
+ CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
+ *AnnotBegin = Tok;
+ // The annotation token now sits at index i-1; leave CachedLexPos just
+ // past it, matching the fact that the replaced tokens had already been
+ // lexed, so a later Backtrack() re-lexes the annotation instead.
+ CachedLexPos = i;
+ return;
+ }
+ }
+
+ assert(0&&"Didn't find the first token represented by the annotation token!");
+}