PP, nonfunctional: Properly encapsulate a TokenStream.

This commit is contained in:
John Kessenich 2017-02-10 18:03:01 -07:00
parent b49bb2ca5c
commit 9a2733a978
3 changed files with 91 additions and 95 deletions

View File

@ -148,10 +148,10 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
// record the definition of the macro // record the definition of the macro
TSourceLoc defineLoc = ppToken->loc; // because ppToken is going to go to the next line before we report errors TSourceLoc defineLoc = ppToken->loc; // because ppToken is going to go to the next line before we report errors
while (token != '\n' && token != EndOfInput) { while (token != '\n' && token != EndOfInput) {
RecordToken(mac.body, token, ppToken); mac.body.putToken(token, ppToken);
token = scanToken(ppToken); token = scanToken(ppToken);
if (token != '\n' && ppToken->space) if (token != '\n' && ppToken->space)
RecordToken(mac.body, ' ', ppToken); mac.body.putToken(' ', ppToken);
} }
// check for duplicate definition // check for duplicate definition
@ -166,15 +166,15 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
else { else {
if (existing->args != mac.args) if (existing->args != mac.args)
parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", atomStrings.getString(defAtom)); parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", atomStrings.getString(defAtom));
RewindTokenStream(existing->body); existing->body.reset();
RewindTokenStream(mac.body); mac.body.reset();
int newToken; int newToken;
do { do {
int oldToken; int oldToken;
TPpToken oldPpToken; TPpToken oldPpToken;
TPpToken newPpToken; TPpToken newPpToken;
oldToken = ReadToken(existing->body, &oldPpToken); oldToken = existing->body.getToken(parseContext, &oldPpToken);
newToken = ReadToken(mac.body, &newPpToken); newToken = mac.body.getToken(parseContext, &newPpToken);
if (oldToken != newToken || oldPpToken != newPpToken) { if (oldToken != newToken || oldPpToken != newPpToken) {
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", atomStrings.getString(defAtom)); parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", atomStrings.getString(defAtom));
break; break;
@ -988,7 +988,7 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
token = tokenPaste(token, *ppToken); token = tokenPaste(token, *ppToken);
if (token == PpAtomIdentifier && MacroExpand(ppToken, false, newLineOkay) != 0) if (token == PpAtomIdentifier && MacroExpand(ppToken, false, newLineOkay) != 0)
continue; continue;
RecordToken(*expandedArg, token, ppToken); expandedArg->putToken(token, ppToken);
} }
if (token == EndOfInput) { if (token == EndOfInput) {
@ -1011,7 +1011,7 @@ int TPpContext::tMacroInput::scan(TPpToken* ppToken)
{ {
int token; int token;
do { do {
token = pp->ReadToken(mac->body, ppToken); token = mac->body.getToken(pp->parseContext, ppToken);
} while (token == ' '); // handle white space in macro } while (token == ' '); // handle white space in macro
// Hash operators basically turn off a round of macro substitution // Hash operators basically turn off a round of macro substitution
@ -1042,7 +1042,7 @@ int TPpContext::tMacroInput::scan(TPpToken* ppToken)
} }
// see if we are preceding a ## // see if we are preceding a ##
if (peekMacPasting()) { if (mac->body.peekUntokenizedPasting()) {
prepaste = true; prepaste = true;
pasting = true; pasting = true;
} }
@ -1069,31 +1069,6 @@ int TPpContext::tMacroInput::scan(TPpToken* ppToken)
return token; return token;
} }
// See if the next non-white-space token in the macro is ##
bool TPpContext::tMacroInput::peekMacPasting()
{
// don't return early, have to restore this
size_t savePos = mac->body.current;
// skip white-space
int subtoken;
do {
subtoken = pp->getSubtoken(mac->body);
} while (subtoken == ' ');
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = pp->getSubtoken(mac->body);
if (subtoken == '#')
pasting = true;
}
mac->body.current = savePos;
return pasting;
}
// return a textual zero, for scanning a macro that was never defined // return a textual zero, for scanning a macro that was never defined
int TPpContext::tZeroInput::scan(TPpToken* ppToken) int TPpContext::tZeroInput::scan(TPpToken* ppToken)
{ {
@ -1218,7 +1193,7 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
depth++; depth++;
if (token == ')') if (token == ')')
depth--; depth--;
RecordToken(*in->args[arg], token, ppToken); in->args[arg]->putToken(token, ppToken);
tokenRecorded = true; tokenRecorded = true;
} }
if (token == ')') { if (token == ')') {
@ -1258,7 +1233,7 @@ int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOka
pushInput(in); pushInput(in);
macro->busy = 1; macro->busy = 1;
RewindTokenStream(macro->body); macro->body.reset();
return 1; return 1;
} }

View File

@ -220,8 +220,26 @@ public:
inputStack.pop_back(); inputStack.pop_back();
} }
struct TokenStream { //
// From PpTokens.cpp
//
class TokenStream {
public:
TokenStream() : current(0) { } TokenStream() : current(0) { }
void putToken(int token, TPpToken* ppToken);
int getToken(TParseContextBase&, TPpToken*);
bool atEnd() { return current >= data.size(); }
bool peekTokenizedPasting(bool lastTokenPastes);
bool peekUntokenizedPasting();
void reset() { current = 0; }
protected:
void putSubtoken(int);
int getSubtoken();
void ungetSubtoken();
TVector<unsigned char> data; TVector<unsigned char> data;
size_t current; size_t current;
}; };
@ -306,14 +324,13 @@ protected:
virtual int getch() override { assert(0); return EndOfInput; } virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); } virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; } bool peekPasting() override { return prepaste; }
bool endOfReplacementList() override { return mac->body.current >= mac->body.data.size(); } bool endOfReplacementList() override { return mac->body.atEnd(); }
MacroSymbol *mac; MacroSymbol *mac;
TVector<TokenStream*> args; TVector<TokenStream*> args;
TVector<TokenStream*> expandedArgs; TVector<TokenStream*> expandedArgs;
protected: protected:
bool peekMacPasting();
bool prepaste; // true if we are just before ## bool prepaste; // true if we are just before ##
bool postpaste; // true if we are right after ## bool postpaste; // true if we are right after ##
}; };
@ -375,22 +392,16 @@ protected:
// //
// From PpTokens.cpp // From PpTokens.cpp
// //
void putSubtoken(TokenStream&, int fVal);
int getSubtoken(TokenStream&);
void ungetSubtoken(TokenStream&);
void RecordToken(TokenStream&, int token, TPpToken* ppToken);
void RewindTokenStream(TokenStream&);
int ReadToken(TokenStream&, TPpToken*);
void pushTokenStreamInput(TokenStream&, bool pasting = false); void pushTokenStreamInput(TokenStream&, bool pasting = false);
void UngetToken(int token, TPpToken*); void UngetToken(int token, TPpToken*);
class tTokenInput : public tInput { class tTokenInput : public tInput {
public: public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { } tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *) override; virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
virtual int getch() override { assert(0); return EndOfInput; } virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); } virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override; virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
protected: protected:
TokenStream* tokens; TokenStream* tokens;
bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token

View File

@ -96,45 +96,44 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace glslang { namespace glslang {
// push onto back of stream // push onto back of stream
void TPpContext::putSubtoken(TokenStream& stream, int subtoken) void TPpContext::TokenStream::putSubtoken(int subtoken)
{ {
assert((subtoken & ~0xff) == 0); assert((subtoken & ~0xff) == 0);
stream.data.push_back(static_cast<unsigned char>(subtoken)); data.push_back(static_cast<unsigned char>(subtoken));
} }
// get the next token in stream // get the next token in stream
int TPpContext::getSubtoken(TokenStream& stream) int TPpContext::TokenStream::getSubtoken()
{ {
if (stream.current < stream.data.size()) if (current < data.size())
return stream.data[stream.current++]; return data[current++];
else else
return EndOfInput; return EndOfInput;
} }
// back up one position in the stream // back up one position in the stream
void TPpContext::ungetSubtoken(TokenStream& stream) void TPpContext::TokenStream::ungetSubtoken()
{ {
if (stream.current > 0) if (current > 0)
--stream.current; --current;
} }
/* // Add a complete token (including backing string) to the end of a list
* Add a token to the end of a list for later playback. // for later playback.
*/ void TPpContext::TokenStream::putToken(int token, TPpToken* ppToken)
void TPpContext::RecordToken(TokenStream& pTok, int token, TPpToken* ppToken)
{ {
const char* s; const char* s;
char* str = NULL; char* str = NULL;
putSubtoken(pTok, token); putSubtoken(token);
switch (token) { switch (token) {
case PpAtomIdentifier: case PpAtomIdentifier:
case PpAtomConstString: case PpAtomConstString:
s = ppToken->name; s = ppToken->name;
while (*s) while (*s)
putSubtoken(pTok, *s++); putSubtoken(*s++);
putSubtoken(pTok, 0); putSubtoken(0);
break; break;
case PpAtomConstInt: case PpAtomConstInt:
case PpAtomConstUint: case PpAtomConstUint:
@ -147,44 +146,35 @@ void TPpContext::RecordToken(TokenStream& pTok, int token, TPpToken* ppToken)
#endif #endif
str = ppToken->name; str = ppToken->name;
while (*str) { while (*str) {
putSubtoken(pTok, *str); putSubtoken(*str);
str++; str++;
} }
putSubtoken(pTok, 0); putSubtoken(0);
break; break;
default: default:
break; break;
} }
} }
/* // Read the next token from a token stream.
* Reset a token stream in preparation for reading. // (Not the source stream, but a stream used to hold a tokenized macro).
*/ int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
void TPpContext::RewindTokenStream(TokenStream& pTok)
{
pTok.current = 0;
}
/*
* Read the next token from a token stream (not the source stream, but stream used to hold a tokenized macro).
*/
int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
{ {
int len; int len;
int ch; int ch;
int subtoken = getSubtoken(pTok); int subtoken = getSubtoken();
ppToken->loc = parseContext.getCurrentLoc(); ppToken->loc = parseContext.getCurrentLoc();
switch (subtoken) { switch (subtoken) {
case '#': case '#':
// Check for ##, unless the current # is the last character // Check for ##, unless the current # is the last character
if (pTok.current < pTok.data.size()) { if (current < data.size()) {
if (getSubtoken(pTok) == '#') { if (getSubtoken() == '#') {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)"); parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)"); parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
subtoken = PpAtomPaste; subtoken = PpAtomPaste;
} else } else
ungetSubtoken(pTok); ungetSubtoken();
} }
break; break;
case PpAtomConstString: case PpAtomConstString:
@ -199,12 +189,12 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
case PpAtomConstInt64: case PpAtomConstInt64:
case PpAtomConstUint64: case PpAtomConstUint64:
len = 0; len = 0;
ch = getSubtoken(pTok); ch = getSubtoken();
while (ch != 0 && ch != EndOfInput) { while (ch != 0 && ch != EndOfInput) {
if (len < MaxTokenLength) { if (len < MaxTokenLength) {
ppToken->name[len] = (char)ch; ppToken->name[len] = (char)ch;
len++; len++;
ch = getSubtoken(pTok); ch = getSubtoken();
} else { } else {
parseContext.error(ppToken->loc, "token too long", "", ""); parseContext.error(ppToken->loc, "token too long", "", "");
break; break;
@ -266,27 +256,22 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
return subtoken; return subtoken;
} }
int TPpContext::tTokenInput::scan(TPpToken* ppToken)
{
return pp->ReadToken(*tokens, ppToken);
}
// We are pasting if // We are pasting if
// 1. we are preceding a pasting operator within this stream // 1. we are preceding a pasting operator within this stream
// or // or
// 2. the entire macro is preceding a pasting operator (lastTokenPastes) // 2. the entire macro is preceding a pasting operator (lastTokenPastes)
// and we are also on the last token // and we are also on the last token
bool TPpContext::tTokenInput::peekPasting() bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
{ {
// 1. preceding ##? // 1. preceding ##?
size_t savePos = tokens->current; size_t savePos = current;
int subtoken; int subtoken;
// skip white space // skip white space
do { do {
subtoken = pp->getSubtoken(*tokens); subtoken = getSubtoken();
} while (subtoken == ' '); } while (subtoken == ' ');
tokens->current = savePos; current = savePos;
if (subtoken == PpAtomPaste) if (subtoken == PpAtomPaste)
return true; return true;
@ -297,10 +282,10 @@ bool TPpContext::tTokenInput::peekPasting()
// Getting here means the last token will be pasted, after this // Getting here means the last token will be pasted, after this
// Are we at the last non-whitespace token? // Are we at the last non-whitespace token?
savePos = tokens->current; savePos = current;
bool moreTokens = false; bool moreTokens = false;
do { do {
subtoken = pp->getSubtoken(*tokens); subtoken = getSubtoken();
if (subtoken == EndOfInput) if (subtoken == EndOfInput)
break; break;
if (subtoken != ' ') { if (subtoken != ' ') {
@ -308,15 +293,40 @@ bool TPpContext::tTokenInput::peekPasting()
break; break;
} }
} while (true); } while (true);
tokens->current = savePos; current = savePos;
return !moreTokens; return !moreTokens;
} }
// See if the next non-white-space tokens are two consecutive #
bool TPpContext::TokenStream::peekUntokenizedPasting()
{
// don't return early, have to restore this
size_t savePos = current;
// skip white-space
int subtoken;
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = getSubtoken();
if (subtoken == '#')
pasting = true;
}
current = savePos;
return pasting;
}
void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting) void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting)
{ {
pushInput(new tTokenInput(this, &ts, prepasting)); pushInput(new tTokenInput(this, &ts, prepasting));
RewindTokenStream(ts); ts.reset();
} }
int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken) int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken)