PP, nonfunctional: Properly encapsulate a TokenStream.

John Kessenich 2017-02-10 18:03:01 -07:00
parent b49bb2ca5c
commit 9a2733a978
3 changed files with 91 additions and 95 deletions
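The change is a mechanical encapsulation: free functions on TPpContext that took a TokenStream& argument (RecordToken, ReadToken, RewindTokenStream, and the subtoken helpers) become members of TokenStream itself (putToken, getToken, reset, putSubtoken, getSubtoken, ungetSubtoken), and the stream's data/current fields move out of public reach. Below is a minimal standalone sketch of that before/after shape; it uses std::vector directly and a -1 end marker rather than glslang's TVector and EndOfInput, so it illustrates the pattern rather than reproducing the project's code.

// Minimal sketch of the refactor shape, not glslang's actual sources: a free
// function that pokes at a stream's fields becomes a member function, and the
// storage and cursor become non-public. A -1 return stands in for EndOfInput.
#include <cstddef>
#include <cstdio>
#include <vector>

// Before: a plain data bag plus free functions that reach into it.
struct TokenStreamBefore {
    std::vector<unsigned char> data;
    std::size_t current = 0;
};
void putSubtoken(TokenStreamBefore& s, int subtoken) { s.data.push_back((unsigned char)subtoken); }
int  getSubtoken(TokenStreamBefore& s) { return s.current < s.data.size() ? (int)s.data[s.current++] : -1; }

// After: the same state and behavior, owned by the class.
class TokenStreamAfter {
public:
    void putSubtoken(int subtoken) { data.push_back((unsigned char)subtoken); }
    int  getSubtoken()             { return current < data.size() ? (int)data[current++] : -1; }
    void reset()                   { current = 0; }
    bool atEnd() const             { return current >= data.size(); }
protected:
    std::vector<unsigned char> data;
    std::size_t current = 0;
};

int main() {
    TokenStreamAfter ts;
    ts.putSubtoken('#');
    ts.putSubtoken('#');
    printf("%c%c\n", ts.getSubtoken(), ts.getSubtoken());   // prints ##
    return 0;
}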

View File

@@ -148,10 +148,10 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
// record the definition of the macro
TSourceLoc defineLoc = ppToken->loc; // because ppToken is going to go to the next line before we report errors
while (token != '\n' && token != EndOfInput) {
RecordToken(mac.body, token, ppToken);
mac.body.putToken(token, ppToken);
token = scanToken(ppToken);
if (token != '\n' && ppToken->space)
RecordToken(mac.body, ' ', ppToken);
mac.body.putToken(' ', ppToken);
}
// check for duplicate definition
@@ -166,15 +166,15 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
else {
if (existing->args != mac.args)
parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", atomStrings.getString(defAtom));
RewindTokenStream(existing->body);
RewindTokenStream(mac.body);
existing->body.reset();
mac.body.reset();
int newToken;
do {
int oldToken;
TPpToken oldPpToken;
TPpToken newPpToken;
oldToken = ReadToken(existing->body, &oldPpToken);
newToken = ReadToken(mac.body, &newPpToken);
oldToken = existing->body.getToken(parseContext, &oldPpToken);
newToken = mac.body.getToken(parseContext, &newPpToken);
if (oldToken != newToken || oldPpToken != newPpToken) {
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", atomStrings.getString(defAtom));
break;
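The redefinition check above resets both the existing and the new macro body and reads them back in lockstep, flagging "different substitutions" on the first mismatch. The sketch below shows the same lockstep idea over plain std::vector<int> stand-ins for tokenized bodies; the streamsMatch helper is invented for the example, and the real loop also compares each token's payload (oldPpToken != newPpToken), not just its code.

// Sketch of the lockstep comparison behind the redefinition check: rewind both
// bodies, read them token by token, and stop at the first mismatch. The
// streamsMatch helper and int-vector bodies are invented for this example; the
// real loop also compares token payloads (oldPpToken != newPpToken).
#include <cstddef>
#include <vector>

bool streamsMatch(const std::vector<int>& oldBody, const std::vector<int>& newBody)
{
    const int endOfInput = -1;
    std::size_t oldPos = 0, newPos = 0;        // both streams start rewound (reset())
    do {
        int oldToken = oldPos < oldBody.size() ? oldBody[oldPos++] : endOfInput;
        int newToken = newPos < newBody.size() ? newBody[newPos++] : endOfInput;
        if (oldToken != newToken)
            return false;                      // "different substitutions"
        if (oldToken == endOfInput)
            return true;                       // both ended together: same body
    } while (true);
}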
@@ -988,7 +988,7 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
token = tokenPaste(token, *ppToken);
if (token == PpAtomIdentifier && MacroExpand(ppToken, false, newLineOkay) != 0)
continue;
RecordToken(*expandedArg, token, ppToken);
expandedArg->putToken(token, ppToken);
}
if (token == EndOfInput) {
@@ -1011,7 +1011,7 @@ int TPpContext::tMacroInput::scan(TPpToken* ppToken)
{
int token;
do {
token = pp->ReadToken(mac->body, ppToken);
token = mac->body.getToken(pp->parseContext, ppToken);
} while (token == ' '); // handle white space in macro
// Hash operators basically turn off a round of macro substitution
@@ -1042,7 +1042,7 @@ int TPpContext::tMacroInput::scan(TPpToken* ppToken)
}
// see if we are preceding a ##
if (peekMacPasting()) {
if (mac->body.peekUntokenizedPasting()) {
prepaste = true;
pasting = true;
}
@@ -1069,31 +1069,6 @@ int TPpContext::tMacroInput::scan(TPpToken* ppToken)
return token;
}
// See if the next non-white-space token in the macro is ##
bool TPpContext::tMacroInput::peekMacPasting()
{
// don't return early, have to restore this
size_t savePos = mac->body.current;
// skip white-space
int subtoken;
do {
subtoken = pp->getSubtoken(mac->body);
} while (subtoken == ' ');
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = pp->getSubtoken(mac->body);
if (subtoken == '#')
pasting = true;
}
mac->body.current = savePos;
return pasting;
}
// return a textual zero, for scanning a macro that was never defined
int TPpContext::tZeroInput::scan(TPpToken* ppToken)
{
@@ -1218,7 +1193,7 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
depth++;
if (token == ')')
depth--;
RecordToken(*in->args[arg], token, ppToken);
in->args[arg]->putToken(token, ppToken);
tokenRecorded = true;
}
if (token == ')') {
@@ -1258,7 +1233,7 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
pushInput(in);
macro->busy = 1;
RewindTokenStream(macro->body);
macro->body.reset();
return 1;
}
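In the MacroExpand hunks above, call-site argument tokens are recorded into a per-argument stream (in->args[arg]->putToken(...)) while a parenthesis depth counter (depth++ / depth-- in the context shown) decides when collection ends, so parentheses inside nested calls stay part of the argument. The sketch below shows one common form of that depth-tracking rule on plain characters; splitArgs, its comma handling, and the string input are invented for the illustration, not taken from the diff.

// Sketch of a depth-tracking rule for splitting a macro call's arguments:
// a ',' separates arguments only at depth 0, and the ')' that would close the
// call ends collection. splitArgs and its character-based input are invented
// for this illustration; glslang records real tokens into per-argument
// TokenStreams with putToken.
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> splitArgs(const std::string& afterOpenParen)
{
    std::vector<std::string> args(1);
    int depth = 0;
    for (std::size_t i = 0; i < afterOpenParen.size(); ++i) {
        char c = afterOpenParen[i];
        if (c == '(')
            depth++;
        if (c == ')') {
            if (depth == 0)
                break;                      // this ')' closes the macro call itself
            depth--;
        }
        if (c == ',' && depth == 0) {
            args.push_back("");             // top-level comma: start the next argument
            continue;
        }
        args.back() += c;                   // everything else belongs to the current argument
    }
    return args;
}

int main() {
    std::vector<std::string> args = splitArgs("f(a,b), c)");
    printf("%zu |%s|%s|\n", args.size(), args[0].c_str(), args[1].c_str());  // 2 |f(a,b)| c|
    return 0;
}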

View File

@@ -220,8 +220,26 @@ public:
inputStack.pop_back();
}
struct TokenStream {
//
// From PpTokens.cpp
//
class TokenStream {
public:
TokenStream() : current(0) { }
void putToken(int token, TPpToken* ppToken);
int getToken(TParseContextBase&, TPpToken*);
bool atEnd() { return current >= data.size(); }
bool peekTokenizedPasting(bool lastTokenPastes);
bool peekUntokenizedPasting();
void reset() { current = 0; }
protected:
void putSubtoken(int);
int getSubtoken();
void ungetSubtoken();
TVector<unsigned char> data;
size_t current;
};
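The new class keeps the original storage: a flat TVector<unsigned char> of subtokens plus a read cursor, where putToken writes a one-byte token code followed, for tokens that carry text, by a NUL-terminated spelling, and getToken replays it. Below is a self-contained model of that round trip; MiniTokenStream, its token codes, and the std::string payload are invented for the sketch and stand in for glslang's atoms and TPpToken.

// Self-contained model of the byte-level round trip behind putToken/getToken:
// a one-byte token code, then (for tokens that carry text) a NUL-terminated
// spelling. MiniTokenStream, its token codes, and the std::string payload are
// invented for this sketch; glslang uses its own atoms and TPpToken.
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

class MiniTokenStream {
public:
    enum { Identifier = 128, EndOfInput = -1 };

    void putToken(int token, const std::string& name) {
        putSubtoken(token);
        if (token == Identifier) {                 // named tokens carry their spelling
            for (std::size_t i = 0; i < name.size(); ++i)
                putSubtoken((unsigned char)name[i]);
            putSubtoken(0);                        // NUL terminator
        }
    }
    int getToken(std::string& name) {
        int token = getSubtoken();
        if (token == Identifier) {                 // read the spelling back
            name.clear();
            for (int c = getSubtoken(); c != 0 && c != EndOfInput; c = getSubtoken())
                name += (char)c;
        }
        return token;
    }
    void reset() { current = 0; }
    bool atEnd() const { return current >= data.size(); }

protected:
    void putSubtoken(int v) { data.push_back((unsigned char)v); }
    int getSubtoken() { return current < data.size() ? (int)data[current++] : (int)EndOfInput; }

    std::vector<unsigned char> data;
    std::size_t current = 0;
};

int main() {
    MiniTokenStream body;
    body.putToken(MiniTokenStream::Identifier, "foo");
    body.putToken('+', "");
    body.reset();                                  // rewind before playback
    std::string name;
    int first = body.getToken(name);               // Identifier, name == "foo"
    int second = body.getToken(name);              // '+'
    printf("%d %s %c\n", first, name.c_str(), second);
    return 0;
}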
@@ -306,14 +324,13 @@ protected:
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; }
bool endOfReplacementList() override { return mac->body.current >= mac->body.data.size(); }
bool endOfReplacementList() override { return mac->body.atEnd(); }
MacroSymbol *mac;
TVector<TokenStream*> args;
TVector<TokenStream*> expandedArgs;
protected:
bool peekMacPasting();
bool prepaste; // true if we are just before ##
bool postpaste; // true if we are right after ##
};
@@ -375,22 +392,16 @@ protected:
//
// From PpTokens.cpp
//
void putSubtoken(TokenStream&, int fVal);
int getSubtoken(TokenStream&);
void ungetSubtoken(TokenStream&);
void RecordToken(TokenStream&, int token, TPpToken* ppToken);
void RewindTokenStream(TokenStream&);
int ReadToken(TokenStream&, TPpToken*);
void pushTokenStreamInput(TokenStream&, bool pasting = false);
void UngetToken(int token, TPpToken*);
class tTokenInput : public tInput {
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *) override;
virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override;
virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
protected:
TokenStream* tokens;
bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token

View File

@@ -96,45 +96,44 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace glslang {
// push onto back of stream
void TPpContext::putSubtoken(TokenStream& stream, int subtoken)
void TPpContext::TokenStream::putSubtoken(int subtoken)
{
assert((subtoken & ~0xff) == 0);
stream.data.push_back(static_cast<unsigned char>(subtoken));
data.push_back(static_cast<unsigned char>(subtoken));
}
// get the next token in stream
int TPpContext::getSubtoken(TokenStream& stream)
int TPpContext::TokenStream::getSubtoken()
{
if (stream.current < stream.data.size())
return stream.data[stream.current++];
if (current < data.size())
return data[current++];
else
return EndOfInput;
}
// back up one position in the stream
void TPpContext::ungetSubtoken(TokenStream& stream)
void TPpContext::TokenStream::ungetSubtoken()
{
if (stream.current > 0)
--stream.current;
if (current > 0)
--current;
}
/*
* Add a token to the end of a list for later playback.
*/
void TPpContext::RecordToken(TokenStream& pTok, int token, TPpToken* ppToken)
// Add a complete token (including backing string) to the end of a list
// for later playback.
void TPpContext::TokenStream::putToken(int token, TPpToken* ppToken)
{
const char* s;
char* str = NULL;
putSubtoken(pTok, token);
putSubtoken(token);
switch (token) {
case PpAtomIdentifier:
case PpAtomConstString:
s = ppToken->name;
while (*s)
putSubtoken(pTok, *s++);
putSubtoken(pTok, 0);
putSubtoken(*s++);
putSubtoken(0);
break;
case PpAtomConstInt:
case PpAtomConstUint:
@@ -147,44 +146,35 @@ void TPpContext::RecordToken(TokenStream& pTok, int token, TPpToken* ppToken)
#endif
str = ppToken->name;
while (*str) {
putSubtoken(pTok, *str);
putSubtoken(*str);
str++;
}
putSubtoken(pTok, 0);
putSubtoken(0);
break;
default:
break;
}
}
/*
* Reset a token stream in preparation for reading.
*/
void TPpContext::RewindTokenStream(TokenStream& pTok)
{
pTok.current = 0;
}
/*
* Read the next token from a token stream (not the source stream, but stream used to hold a tokenized macro).
*/
int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
// Read the next token from a token stream.
// (Not the source stream, but a stream used to hold a tokenized macro).
int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
{
int len;
int ch;
int subtoken = getSubtoken(pTok);
int subtoken = getSubtoken();
ppToken->loc = parseContext.getCurrentLoc();
switch (subtoken) {
case '#':
// Check for ##, unless the current # is the last character
if (pTok.current < pTok.data.size()) {
if (getSubtoken(pTok) == '#') {
if (current < data.size()) {
if (getSubtoken() == '#') {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
subtoken = PpAtomPaste;
} else
ungetSubtoken(pTok);
ungetSubtoken();
}
break;
case PpAtomConstString:
@@ -199,12 +189,12 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
case PpAtomConstInt64:
case PpAtomConstUint64:
len = 0;
ch = getSubtoken(pTok);
ch = getSubtoken();
while (ch != 0 && ch != EndOfInput) {
if (len < MaxTokenLength) {
ppToken->name[len] = (char)ch;
len++;
ch = getSubtoken(pTok);
ch = getSubtoken();
} else {
parseContext.error(ppToken->loc, "token too long", "", "");
break;
@@ -266,27 +256,22 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
return subtoken;
}
int TPpContext::tTokenInput::scan(TPpToken* ppToken)
{
return pp->ReadToken(*tokens, ppToken);
}
// We are pasting if
// 1. we are preceding a pasting operator within this stream
// or
// 2. the entire macro is preceding a pasting operator (lastTokenPastes)
// and we are also on the last token
bool TPpContext::tTokenInput::peekPasting()
bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
{
// 1. preceding ##?
size_t savePos = tokens->current;
size_t savePos = current;
int subtoken;
// skip white space
do {
subtoken = pp->getSubtoken(*tokens);
subtoken = getSubtoken();
} while (subtoken == ' ');
tokens->current = savePos;
current = savePos;
if (subtoken == PpAtomPaste)
return true;
@@ -297,10 +282,10 @@ bool TPpContext::tTokenInput::peekPasting()
// Getting here means the last token will be pasted, after this
// Are we at the last non-whitespace token?
savePos = tokens->current;
savePos = current;
bool moreTokens = false;
do {
subtoken = pp->getSubtoken(*tokens);
subtoken = getSubtoken();
if (subtoken == EndOfInput)
break;
if (subtoken != ' ') {
@@ -308,15 +293,40 @@ bool TPpContext::tTokenInput::peekPasting()
break;
}
} while (true);
tokens->current = savePos;
current = savePos;
return !moreTokens;
}
// See if the next non-white-space tokens are two consecutive #
bool TPpContext::TokenStream::peekUntokenizedPasting()
{
// don't return early, have to restore this
size_t savePos = current;
// skip white-space
int subtoken;
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = getSubtoken();
if (subtoken == '#')
pasting = true;
}
current = savePos;
return pasting;
}
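Both peek helpers above (peekTokenizedPasting and peekUntokenizedPasting) rely on the same idiom: save the cursor, scan ahead past spaces, inspect what follows, then restore the cursor so the lookahead has no side effects. The sketch below isolates that idiom in a simplified standalone class; PeekableStream and its text-based input are invented for the example.

// Sketch of the save/restore lookahead idiom shared by the peek helpers:
// remember the cursor, scan ahead past spaces, check what follows, then put
// the cursor back so the peek has no side effects. Simplified standalone
// model, not glslang's TokenStream.
#include <cstddef>
#include <cstdio>
#include <vector>

class PeekableStream {
public:
    explicit PeekableStream(const char* text) {
        while (*text)
            data.push_back((unsigned char)*text++);
    }

    int getSubtoken() { return current < data.size() ? (int)data[current++] : -1; }

    // true if the next non-space bytes are "##"
    bool peekUntokenizedPasting() {
        std::size_t savePos = current;       // don't return early; this must be restored
        int sub;
        do {
            sub = getSubtoken();             // skip white space
        } while (sub == ' ');
        bool pasting = (sub == '#' && getSubtoken() == '#');
        current = savePos;                   // undo the lookahead
        return pasting;
    }

private:
    std::vector<unsigned char> data;
    std::size_t current = 0;
};

int main() {
    PeekableStream s("  ##x");
    printf("%d %d\n", (int)s.peekUntokenizedPasting(), s.getSubtoken());  // 1 32: cursor untouched
    return 0;
}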
void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting)
{
pushInput(new tTokenInput(this, &ts, prepasting));
RewindTokenStream(ts);
ts.reset();
}
int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken)