/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
- std::string ReadToEndOfLine();
+ void ReadToEndOfLine(SmallVectorImpl<char> *Result = 0);
/// Diag - Forwarding function for diagnostics. This translates a source
/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
/// uninterpreted string. This switches the lexer out of directive mode.
-std::string Lexer::ReadToEndOfLine() {
+void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
assert(ParsingPreprocessorDirective && ParsingFilename == false &&
"Must be in a preprocessing directive!");
- std::string Result;
Token Tmp;
// CurPtr - Cache BufferPtr in an automatic variable.
char Char = getAndAdvanceChar(CurPtr, Tmp);
switch (Char) {
default:
- Result += Char;
+ if (Result)
+   Result->push_back(Char);
break;
case 0: // Null.
// Found end of file?
if (isCodeCompletionPoint(CurPtr-1)) {
PP->CodeCompleteNaturalLanguage();
cutOffLexing();
- return Result;
+ return;
}
// Nope, normal character, continue.
- Result += Char;
+ if (Result)
+   Result->push_back(Char);
break;
}
// FALL THROUGH.
}
assert(Tmp.is(tok::eod) && "Unexpected token!");
- // Finally, we're done, return the string we found.
- return Result;
+ // Finally, we're done.
+ return;
}
}
}
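The net effect of the signature change is the usual LLVM optional out-parameter pattern: callers that want the text pass any SmallVectorImpl<char> (typically a stack-allocated SmallString), while callers that only need the lexer advanced past the directive line rely on the defaulted null pointer, so no string is built at all. A minimal sketch of the two call shapes, assuming a clang::Lexer instance L that is already lexing a preprocessor directive (the function asserts this); the helper name demoReadToEndOfLine is invented for illustration:

#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"

// Sketch only: L must already be inside a preprocessor directive, or the
// assert at the top of ReadToEndOfLine fires.
void demoReadToEndOfLine(clang::Lexer &L) {
  // Caller that wants the raw text: any SmallVectorImpl<char> works, so a
  // stack-allocated SmallString avoids the heap allocation the old
  // std::string return value always paid for on short lines.
  llvm::SmallString<128> Text;
  L.ReadToEndOfLine(&Text);
  llvm::StringRef Line = Text.str();
  (void)Line;

  // Caller that only needs to skip to the end of the directive line: the
  // defaulted null Result means no characters are accumulated.
  L.ReadToEndOfLine();
}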
// tokens. For example, this is allowed: "#warning ` 'foo". GCC does
// collapse multiple consecutive white space between tokens, but this isn't
// specified by the standard.
- std::string Message = CurLexer->ReadToEndOfLine();
+ SmallString<128> Message;
+ CurLexer->ReadToEndOfLine(&Message);
// Find the first non-whitespace character, so that we can make the
// diagnostic more succinct.
- StringRef Msg(Message);
- size_t i = Msg.find_first_not_of(' ');
- if (i < Msg.size())
-   Msg = Msg.substr(i);
-
+ StringRef Msg = Message.str().ltrim(" ");
+
if (isWarning)
Diag(Tok, diag::pp_hash_warning) << Msg;
else
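For the #warning / #error text itself, the hand-rolled find_first_not_of / substr trimming is folded into StringRef::ltrim. A small standalone sketch of the two forms side by side; the sample message string is invented for illustration, and the comment notes the one corner case where they differ:

#include "llvm/ADT/StringRef.h"
#include <cassert>

int main() {
  llvm::StringRef Message("   unknown pragma ignored");

  // Old form: find the first non-space character by hand, then slice.
  llvm::StringRef Old = Message;
  size_t i = Old.find_first_not_of(' ');
  if (i < Old.size())
    Old = Old.substr(i);

  // New form: ltrim(" ") strips the same leading spaces in one call.
  // (For an all-blank message the old code kept the spaces while ltrim
  // yields an empty string; that degenerate case is the only difference.)
  llvm::StringRef New = Message.ltrim(" ");

  assert(Old == New && New == "unknown pragma ignored");
  return 0;
}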