PP: Non-functional: rationalize TPpToken.

Always keep the token code outside TPpToken: tokenize() now returns it as an int (EndOfInput when done) instead of storing it in a 'token' member.
Always return the token's text to upper levels inside TPpToken, via ppToken.name, rather than as a char* return value.
This commit is contained in:
John Kessenich
2016-12-20 19:42:53 -07:00
parent 1fbb9c1430
commit 54af2de761
6 changed files with 38 additions and 36 deletions

View File

@@ -3,8 +3,9 @@ ERROR: 0:2: 'preprocessor evaluation' : bad expression
ERROR: 0:2: '#if' : unexpected tokens following directive ERROR: 0:2: '#if' : unexpected tokens following directive
ERROR: 0:5: 'string' : End of line in string ERROR: 0:5: 'string' : End of line in string
ERROR: 0:5: 'macro expansion' : expected '(' following n ERROR: 0:5: 'macro expansion' : expected '(' following n
ERROR: 0:5: '""' : string literals not supported
ERROR: 0:5: '' : syntax error ERROR: 0:5: '' : syntax error
ERROR: 5 compilation errors. No code generated. ERROR: 6 compilation errors. No code generated.
Shader version: 100 Shader version: 100

View File

@@ -638,13 +638,14 @@ int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
do { do {
parserToken = &token; parserToken = &token;
TPpToken ppToken; TPpToken ppToken;
tokenText = pp->tokenize(ppToken); int token = pp->tokenize(ppToken);
if (tokenText == nullptr || tokenText[0] == 0) if (token == EndOfInput)
return 0; return 0;
tokenText = ppToken.name;
loc = ppToken.loc; loc = ppToken.loc;
parserToken->sType.lex.loc = loc; parserToken->sType.lex.loc = loc;
switch (ppToken.token) { switch (token) {
case ';': afterType = false; return SEMICOLON; case ';': afterType = false; return SEMICOLON;
case ',': afterType = false; return COMMA; case ',': afterType = false; return COMMA;
case ':': return COLON; case ':': return COLON;
@@ -720,7 +721,7 @@ int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
default: default:
char buf[2]; char buf[2];
buf[0] = (char)ppToken.token; buf[0] = token;
buf[1] = 0; buf[1] = 0;
parseContext.error(loc, "unexpected token", buf, ""); parseContext.error(loc, "unexpected token", buf, "");
break; break;

View File

@@ -931,7 +931,11 @@ struct DoPreprocessing {
}); });
int lastToken = EndOfInput; // lastToken records the last token processed. int lastToken = EndOfInput; // lastToken records the last token processed.
while (const char* tok = ppContext.tokenize(ppToken)) { do {
int token = ppContext.tokenize(ppToken);
if (token == EndOfInput)
break;
bool isNewString = lineSync.syncToMostRecentString(); bool isNewString = lineSync.syncToMostRecentString();
bool isNewLine = lineSync.syncToLine(ppToken.loc.line); bool isNewLine = lineSync.syncToLine(ppToken.loc.line);
@@ -946,14 +950,14 @@ struct DoPreprocessing {
// and also not around special tokens. This helps with readability // and also not around special tokens. This helps with readability
// and consistency. // and consistency.
if (!isNewString && !isNewLine && lastToken != EndOfInput && if (!isNewString && !isNewLine && lastToken != EndOfInput &&
(unNeededSpaceTokens.find((char)ppToken.token) == std::string::npos) && (unNeededSpaceTokens.find((char)token) == std::string::npos) &&
(unNeededSpaceTokens.find((char)lastToken) == std::string::npos) && (unNeededSpaceTokens.find((char)lastToken) == std::string::npos) &&
(noSpaceBeforeTokens.find((char)ppToken.token) == std::string::npos)) { (noSpaceBeforeTokens.find((char)token) == std::string::npos)) {
outputStream << " "; outputStream << " ";
} }
lastToken = ppToken.token; lastToken = token;
outputStream << tok; outputStream << ppToken.name;
} } while (true);
outputStream << std::endl; outputStream << std::endl;
*outputString = outputStream.str(); *outputString = outputStream.str();

View File

@@ -92,7 +92,7 @@ namespace glslang {
class TPpToken { class TPpToken {
public: public:
TPpToken() : token(0), space(false), ival(0), dval(0.0), i64val(0), atom(0) TPpToken() : space(false), ival(0), dval(0.0), i64val(0), atom(0)
{ {
loc.init(); loc.init();
name[0] = 0; name[0] = 0;
@@ -101,14 +101,13 @@ public:
// This is used for comparing macro definitions, so checks what is relevant for that. // This is used for comparing macro definitions, so checks what is relevant for that.
bool operator==(const TPpToken& right) bool operator==(const TPpToken& right)
{ {
return token == right.token && space == right.space && return space == right.space &&
ival == right.ival && dval == right.dval && i64val == right.i64val && ival == right.ival && dval == right.dval && i64val == right.i64val &&
strncmp(name, right.name, MaxTokenLength) == 0; strncmp(name, right.name, MaxTokenLength) == 0;
} }
bool operator!=(const TPpToken& right) { return ! operator==(right); } bool operator!=(const TPpToken& right) { return ! operator==(right); }
TSourceLoc loc; TSourceLoc loc;
int token;
bool space; // true if a space (for white space or a removed comment) should also be recognized, in front of the token returned bool space; // true if a space (for white space or a removed comment) should also be recognized, in front of the token returned
int ival; int ival;
double dval; double dval;
@@ -129,7 +128,7 @@ public:
void setPreamble(const char* preamble, size_t length); void setPreamble(const char* preamble, size_t length);
const char* tokenize(TPpToken& ppToken); int tokenize(TPpToken& ppToken);
int tokenPaste(int token, TPpToken&); int tokenPaste(int token, TPpToken&);
class tInput { class tInput {

View File

@@ -715,33 +715,31 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
// The main functional entry point into the preprocessor, which will // The main functional entry point into the preprocessor, which will
// scan the source strings to figure out and return the next processing token. // scan the source strings to figure out and return the next processing token.
// //
// Return string pointer to next token. // Return the token, or EndOfInput when no more tokens.
// Return 0 when no more tokens.
// //
const char* TPpContext::tokenize(TPpToken& ppToken) int TPpContext::tokenize(TPpToken& ppToken)
{ {
for(;;) { for(;;) {
int token = scanToken(&ppToken); int token = scanToken(&ppToken);
// Handle token-pasting logic // Handle token-pasting logic
token = tokenPaste(token, ppToken); token = tokenPaste(token, ppToken);
ppToken.token = token;
if (token == EndOfInput) { if (token == EndOfInput) {
missingEndifCheck(); missingEndifCheck();
return nullptr; return EndOfInput;
} }
if (token == '#') { if (token == '#') {
if (previous_token == '\n') { if (previous_token == '\n') {
token = readCPPline(&ppToken); token = readCPPline(&ppToken);
if (token == EndOfInput) { if (token == EndOfInput) {
missingEndifCheck(); missingEndifCheck();
return nullptr; return EndOfInput;
} }
continue; continue;
} else { } else {
parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", ""); parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", "");
return nullptr; return EndOfInput;
} }
} }
previous_token = token; previous_token = token;
@@ -753,7 +751,6 @@ const char* TPpContext::tokenize(TPpToken& ppToken)
if (token == PpAtomIdentifier && MacroExpand(&ppToken, false, true) != 0) if (token == PpAtomIdentifier && MacroExpand(&ppToken, false, true) != 0)
continue; continue;
const char* tokenString = nullptr;
switch (token) { switch (token) {
case PpAtomIdentifier: case PpAtomIdentifier:
case PpAtomConstInt: case PpAtomConstInt:
@@ -765,26 +762,25 @@ const char* TPpContext::tokenize(TPpToken& ppToken)
#ifdef AMD_EXTENSIONS #ifdef AMD_EXTENSIONS
case PpAtomConstFloat16: case PpAtomConstFloat16:
#endif #endif
tokenString = ppToken.name; if (ppToken.name[0] == '\0')
continue;
break; break;
case PpAtomConstString: case PpAtomConstString:
if (parseContext.intermediate.getSource() == EShSourceHlsl) { if (parseContext.intermediate.getSource() != EShSourceHlsl) {
// HLSL allows string literals. // HLSL allows string literals.
tokenString = ppToken.name;
} else {
parseContext.ppError(ppToken.loc, "string literals not supported", "\"\"", ""); parseContext.ppError(ppToken.loc, "string literals not supported", "\"\"", "");
continue;
} }
break; break;
case '\'': case '\'':
parseContext.ppError(ppToken.loc, "character literals not supported", "\'", ""); parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
break; continue;
default: default:
tokenString = GetAtomString(token); strcpy(ppToken.name, GetAtomString(token));
break; break;
} }
if (tokenString) return token;
return tokenString;
} }
} }

View File

@@ -394,13 +394,14 @@ EHlslTokenClass HlslScanContext::tokenizeClass(HlslToken& token)
do { do {
parserToken = &token; parserToken = &token;
TPpToken ppToken; TPpToken ppToken;
tokenText = ppContext.tokenize(ppToken); int token = ppContext.tokenize(ppToken);
if (tokenText == nullptr) if (token == EndOfInput)
return EHTokNone; return EHTokNone;
tokenText = ppToken.name;
loc = ppToken.loc; loc = ppToken.loc;
parserToken->loc = loc; parserToken->loc = loc;
switch (ppToken.token) { switch (token) {
case ';': return EHTokSemicolon; case ';': return EHTokSemicolon;
case ',': return EHTokComma; case ',': return EHTokComma;
case ':': return EHTokColon; case ':': return EHTokColon;
@@ -467,7 +468,7 @@ EHlslTokenClass HlslScanContext::tokenizeClass(HlslToken& token)
} }
case PpAtomConstString: { case PpAtomConstString: {
parserToken->string = NewPoolTString(ppToken.name); parserToken->string = NewPoolTString(tokenText);
return EHTokStringConstant; return EHTokStringConstant;
} }
@@ -475,7 +476,7 @@ EHlslTokenClass HlslScanContext::tokenizeClass(HlslToken& token)
default: default:
char buf[2]; char buf[2];
buf[0] = (char)ppToken.token; buf[0] = (char)token;
buf[1] = 0; buf[1] = 0;
parseContext.error(loc, "unexpected token", buf, ""); parseContext.error(loc, "unexpected token", buf, "");
break; break;