PP: Non-functional: clean up, simplify, completely identical operation.

John Kessenich 2016-12-20 11:10:09 -07:00
parent 3c264ce8f3
commit 1fbb9c1430
10 changed files with 63 additions and 90 deletions

View File

@ -2,5 +2,5 @@
// For the version, it uses the latest git tag followed by the number of commits.
// For the date, it uses the current date (when the script is run).
#define GLSLANG_REVISION "Overload400-PrecQual.1706"
#define GLSLANG_DATE "19-Dec-2016"
#define GLSLANG_REVISION "Overload400-PrecQual.1713"
#define GLSLANG_DATE "20-Dec-2016"

View File

@ -638,7 +638,7 @@ int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
do {
parserToken = &token;
TPpToken ppToken;
tokenText = pp->tokenize(&ppToken);
tokenText = pp->tokenize(ppToken);
if (tokenText == nullptr || tokenText[0] == 0)
return 0;
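This hunk switches the call site from passing a TPpToken pointer to passing a reference. A minimal sketch of the resulting caller pattern, assuming an already configured context and a hypothetical handleToken() consumer that is not part of this commit:

#include "PpContext.h"

// Hypothetical consumer, declared only so the sketch is self-contained.
void handleToken(const char* text, const glslang::TPpToken& ppToken);

// Sketch: drain the preprocessor using the new by-reference tokenize() signature.
void drainTokens(glslang::TPpContext& ppContext)
{
    glslang::TPpToken ppToken;
    while (const char* tokenText = ppContext.tokenize(ppToken)) {
        if (tokenText[0] == 0)
            break;                        // empty token text also ends the stream, as above
        handleToken(tokenText, ppToken);  // loc, atom, name, etc. travel in ppToken
    }
}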

View File

@ -868,7 +868,7 @@ struct DoPreprocessing {
// This is a list of tokens that do not require a space before or after.
static const std::string unNeededSpaceTokens = ";()[]";
static const std::string noSpaceBeforeTokens = ",";
glslang::TPpToken token;
glslang::TPpToken ppToken;
parseContext.setScanner(&input);
ppContext.setInput(input, versionWillBeError);
@ -931,27 +931,27 @@ struct DoPreprocessing {
});
int lastToken = EndOfInput; // lastToken records the last token processed.
while (const char* tok = ppContext.tokenize(&token)) {
while (const char* tok = ppContext.tokenize(ppToken)) {
bool isNewString = lineSync.syncToMostRecentString();
bool isNewLine = lineSync.syncToLine(token.loc.line);
bool isNewLine = lineSync.syncToLine(ppToken.loc.line);
if (isNewLine) {
// Don't emit whitespace onto empty lines.
// Copy any whitespace characters at the start of a line
// from the input to the output.
outputStream << std::string(token.loc.column - 1, ' ');
outputStream << std::string(ppToken.loc.column - 1, ' ');
}
// Output a space in between tokens, but not at the start of a line,
// and also not around special tokens. This helps with readability
// and consistency.
if (!isNewString && !isNewLine && lastToken != EndOfInput &&
(unNeededSpaceTokens.find((char)token.token) == std::string::npos) &&
(unNeededSpaceTokens.find((char)ppToken.token) == std::string::npos) &&
(unNeededSpaceTokens.find((char)lastToken) == std::string::npos) &&
(noSpaceBeforeTokens.find((char)token.token) == std::string::npos)) {
(noSpaceBeforeTokens.find((char)ppToken.token) == std::string::npos)) {
outputStream << " ";
}
lastToken = token.token;
lastToken = ppToken.token;
outputStream << tok;
}
outputStream << std::endl;
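The loop above only inserts spaces between echoed tokens where readability needs them. A self-contained sketch of just that spacing rule, using the same token sets but a hypothetical helper name:

#include <string>

// Illustration only, not the glslang code: should a space separate two echoed tokens?
static bool needsSpaceBetween(char lastToken, char token)
{
    static const std::string unNeededSpaceTokens = ";()[]"; // no space before or after these
    static const std::string noSpaceBeforeTokens = ",";     // no space before, but one after
    return unNeededSpaceTokens.find(token)     == std::string::npos &&
           unNeededSpaceTokens.find(lastToken) == std::string::npos &&
           noSpaceBeforeTokens.find(token)     == std::string::npos;
}

// e.g. "f ( a , b )" echoes as "f(a, b)": no space around '(' or ')', and none before ','.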

View File

@ -75,9 +75,6 @@ NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
//
// cpp.c
//
#define _CRT_SECURE_NO_WARNINGS
@ -107,7 +104,7 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#define");
}
// save the original atom
// save the original macro name
const int defAtom = ppToken->atom;
// gather parameters to the macro, between (...)
@ -219,7 +216,6 @@ int TPpContext::CPPundef(TPpToken* ppToken)
*/
int TPpContext::CPPelse(int matchelse, TPpToken* ppToken)
{
int atom;
int depth = 0;
int token = scanToken(ppToken);
@ -238,7 +234,7 @@ int TPpContext::CPPelse(int matchelse, TPpToken* ppToken)
if ((token = scanToken(ppToken)) != PpAtomIdentifier)
continue;
atom = ppToken->atom;
int atom = ppToken->atom;
if (atom == PpAtomIf || atom == PpAtomIfdef || atom == PpAtomIfndef) {
depth++;
ifdepth++;
@ -501,7 +497,7 @@ int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, boo
int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
{
while (token == PpAtomIdentifier && ppToken->atom != PpAtomDefined) {
int macroReturn = MacroExpand(ppToken->atom, ppToken, true, false);
int macroReturn = MacroExpand(ppToken, true, false);
if (macroReturn == 0) {
parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", "");
err = true;
@ -932,7 +928,7 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
pushInput(new tMarkerInput(this));
pushTokenStreamInput(arg);
while ((token = scanToken(ppToken)) != tMarkerInput::marker) {
if (token == PpAtomIdentifier && MacroExpand(ppToken->atom, ppToken, false, newLineOkay) != 0)
if (token == PpAtomIdentifier && MacroExpand(ppToken, false, newLineOkay) != 0)
continue;
RecordToken(*expandedArg, token, ppToken);
}
@ -1047,17 +1043,17 @@ int TPpContext::tZeroInput::scan(TPpToken* ppToken)
}
//
// Check an identifier (atom) to see if it is a macro that should be expanded.
// Check a token to see if it is a macro that should be expanded.
// If it is, and defined, push a tInput that will produce the appropriate expansion
// and return 1.
// If it is, but undefined, and expandUndef is requested, push a tInput that will
// expand to 0 and return -1.
// Otherwise, return 0 to indicate no expansion, which is not necessarily an error.
//
int TPpContext::MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool newLineOkay)
int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay)
{
ppToken->space = false;
switch (atom) {
switch (ppToken->atom) {
case PpAtomLineMacro:
ppToken->ival = parseContext.getCurrentLoc().line;
snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
@ -1083,7 +1079,7 @@ int TPpContext::MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool
break;
}
MacroSymbol* macro = lookupMacroDef(atom);
MacroSymbol* macro = lookupMacroDef(ppToken->atom);
int token;
int depth = 0;
@ -1105,6 +1101,7 @@ int TPpContext::MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool
TSourceLoc loc = ppToken->loc; // in case we go to the next line before discovering the error
in->mac = macro;
int atom = ppToken->atom;
if (macro->args.size() > 0 || macro->emptyArgs) {
token = scanToken(ppToken);
if (newLineOkay) {
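The rewritten comment block above spells out MacroExpand's return contract: 1 when a defined macro's expansion is pushed, -1 when the identifier is undefined but expandUndef asked for a "0" expansion, and 0 when no expansion happens. The evalToToken() call site in this same file, annotated against that contract (a sketch of existing behavior, not new code):

int macroReturn = MacroExpand(ppToken, true /* expandUndef */, false /* newLineOkay */);
if (macroReturn == 0) {
    // 0: the identifier is not a macro at all; inside #if evaluation that is an error
    parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", "");
} else if (macroReturn == -1) {
    // -1: undefined macro; a tInput producing "0" has been pushed
} else {
    // 1: defined macro; its expansion has been pushed and the next scanToken() reads from it
}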

View File

@ -76,10 +76,6 @@ TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
//
// atom.c
//
#define _CRT_SECURE_NO_WARNINGS
#include <cassert>
@ -167,7 +163,7 @@ int TPpContext::LookUpAddString(const char* s)
}
//
// Map an already created atom to its string.
// Look up the mapping of atom -> string.
//
const char* TPpContext::GetAtomString(int atom)
{
@ -180,7 +176,9 @@ const char* TPpContext::GetAtomString(int atom)
}
//
// Add forced mapping of string to atom.
// Add mappings:
// - string -> atom
// - atom -> string
//
void TPpContext::AddAtomFixed(const char* s, int atom)
{
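The updated comments describe the atom table as a pair of mappings, string -> atom and atom -> string. A self-contained illustration of that shape, independent of glslang's actual TAtomMap/TStringMap types:

#include <map>
#include <string>

// Minimal bidirectional atom table (illustration only, not glslang's implementation).
class AtomTable {
public:
    int lookUpAddString(const std::string& s)        // string -> atom, adding if missing
    {
        auto it = atomMap.find(s);
        if (it != atomMap.end())
            return it->second;
        int atom = nextAtom++;
        atomMap[s] = atom;                            // string -> atom
        stringMap[atom] = s;                          // atom -> string
        return atom;
    }
    const char* getAtomString(int atom) const         // atom -> string
    {
        auto it = stringMap.find(atom);
        return it == stringMap.end() ? "<bad token>" : it->second.c_str();
    }
private:
    std::map<std::string, int> atomMap;
    std::map<int, std::string> stringMap;
    int nextAtom = 1;
};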

View File

@ -129,8 +129,8 @@ public:
void setPreamble(const char* preamble, size_t length);
const char* tokenize(TPpToken* ppToken);
int tokenPaste(TPpToken&);
const char* tokenize(TPpToken& ppToken);
int tokenPaste(int token, TPpToken&);
class tInput {
public:
@ -314,7 +314,7 @@ protected:
int CPPextension(TPpToken * ppToken);
int readCPPline(TPpToken * ppToken);
TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
int MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool newLineOkay);
int MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
//
// From PpTokens.cpp
@ -537,6 +537,9 @@ protected:
}
bool inComment;
std::string rootFileName;
std::stack<TShader::Includer::IncludeResult*> includeStack;
std::string currentSourceFile;
//
// From PpAtom.cpp
@ -546,9 +549,6 @@ protected:
TAtomMap atomMap;
TStringMap stringMap;
std::stack<TShader::Includer::IncludeResult*> includeStack;
std::string currentSourceFile;
std::string rootFileName;
int nextAtom;
void InitAtomTable();
void AddAtomFixed(const char* s, int atom);

View File

@ -75,9 +75,6 @@ NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
//
// scanner.c
//
#define _CRT_SECURE_NO_WARNINGS
@ -721,17 +718,14 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
// Return string pointer to next token.
// Return 0 when no more tokens.
//
const char* TPpContext::tokenize(TPpToken* ppToken)
const char* TPpContext::tokenize(TPpToken& ppToken)
{
int token = '\n';
for(;;) {
token = scanToken(ppToken);
ppToken->token = token;
int token = scanToken(&ppToken);
// Handle token-pasting logic
token = tokenPaste(*ppToken);
ppToken->token = token;
token = tokenPaste(token, ppToken);
ppToken.token = token;
if (token == EndOfInput) {
missingEndifCheck();
@ -739,14 +733,14 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
}
if (token == '#') {
if (previous_token == '\n') {
token = readCPPline(ppToken);
token = readCPPline(&ppToken);
if (token == EndOfInput) {
missingEndifCheck();
return nullptr;
}
continue;
} else {
parseContext.ppError(ppToken->loc, "preprocessor directive cannot be preceded by another token", "#", "");
parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", "");
return nullptr;
}
}
@ -756,7 +750,7 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
continue;
// expand macros
if (token == PpAtomIdentifier && MacroExpand(ppToken->atom, ppToken, false, true) != 0)
if (token == PpAtomIdentifier && MacroExpand(&ppToken, false, true) != 0)
continue;
const char* tokenString = nullptr;
@ -771,18 +765,18 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
#ifdef AMD_EXTENSIONS
case PpAtomConstFloat16:
#endif
tokenString = ppToken->name;
tokenString = ppToken.name;
break;
case PpAtomConstString:
if (parseContext.intermediate.getSource() == EShSourceHlsl) {
// HLSL allows string literals.
tokenString = ppToken->name;
tokenString = ppToken.name;
} else {
parseContext.ppError(ppToken->loc, "string literals not supported", "\"\"", "");
parseContext.ppError(ppToken.loc, "string literals not supported", "\"\"", "");
}
break;
case '\'':
parseContext.ppError(ppToken->loc, "character literals not supported", "\'", "");
parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
break;
default:
tokenString = GetAtomString(token);
@ -799,21 +793,23 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
// stream of tokens from a replacement list. Degenerates to no processing if a
// replacement list is not the source of the token stream.
//
int TPpContext::tokenPaste(TPpToken& ppToken)
int TPpContext::tokenPaste(int token, TPpToken& ppToken)
{
// starting with ## is illegal, skip to next token
if (ppToken.token == PpAtomPaste) {
if (token == PpAtomPaste) {
parseContext.ppError(ppToken.loc, "unexpected location", "##", "");
ppToken.token = scanToken(&ppToken);
return scanToken(&ppToken);
}
int resultToken = token; // "foo" pasted with "35" is an identifier, not a number
// ## can be chained, process all in the chain at once
while (peekPasting()) {
TPpToken pastedPpToken;
// next token has to be ##
pastedPpToken.token = scanToken(&pastedPpToken);
assert(pastedPpToken.token == PpAtomPaste);
token = scanToken(&pastedPpToken);
assert(token == PpAtomPaste);
if (endOfReplacementList()) {
parseContext.ppError(ppToken.loc, "unexpected location; end of replacement list", "##", "");
@ -821,18 +817,18 @@ int TPpContext::tokenPaste(TPpToken& ppToken)
}
// get the token after the ##
scanToken(&pastedPpToken);
token = scanToken(&pastedPpToken);
// combine the tokens
if (resultToken != PpAtomIdentifier)
parseContext.ppError(ppToken.loc, "only supported for preprocessing identifiers", "##", "");
if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength)
parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
strncat(ppToken.name, pastedPpToken.name, MaxTokenLength - strlen(ppToken.name));
ppToken.atom = LookUpAddString(ppToken.name);
if (ppToken.token != PpAtomIdentifier)
parseContext.ppError(ppToken.loc, "only supported for preprocessing identifiers", "##", "");
}
return ppToken.token;
return resultToken;
}
// Checks if we've seen balanced #if...#endif
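tokenPaste() now receives the current token explicitly and tracks the pasted result in resultToken, so that, as the new comment notes, "foo" pasted with "35" stays an identifier. A small shader-source illustration of that rule (a hedged example, not taken from this commit's tests):

#define PASTE(a, b) a##b
int PASTE(foo, 35) = 1;   // expands to: int foo35 = 1;  -- foo35 is a single identifier token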

View File

@ -75,20 +75,3 @@ NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
//
// symbols.c
//
#include <cassert>
#include <cstdlib>
#include <cstring>
#include "PpContext.h"
///////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////// Symbol Table Variables: ///////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
namespace glslang {
} // end namespace glslang

View File

@ -172,7 +172,6 @@ void TPpContext::RewindTokenStream(TokenStream& pTok)
*/
int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
{
char* tokenText = ppToken->name;
int ltoken, len;
int ch;
@ -207,7 +206,7 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
ch = lReadByte(pTok);
while (ch != 0 && ch != EndOfInput) {
if (len < MaxTokenLength) {
tokenText[len] = (char)ch;
ppToken->name[len] = (char)ch;
len++;
ch = lReadByte(pTok);
} else {
@ -215,11 +214,11 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
break;
}
}
tokenText[len] = 0;
ppToken->name[len] = 0;
switch (ltoken) {
case PpAtomIdentifier:
ppToken->atom = LookUpAddString(tokenText);
ppToken->atom = LookUpAddString(ppToken->name);
break;
case PpAtomConstString:
break;
@ -231,8 +230,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
ppToken->dval = atof(ppToken->name);
break;
case PpAtomConstInt:
if (len > 0 && tokenText[0] == '0') {
if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
if (len > 0 && ppToken->name[0] == '0') {
if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
ppToken->ival = (int)strtol(ppToken->name, 0, 16);
else
ppToken->ival = (int)strtol(ppToken->name, 0, 8);
@ -240,8 +239,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
ppToken->ival = atoi(ppToken->name);
break;
case PpAtomConstUint:
if (len > 0 && tokenText[0] == '0') {
if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
if (len > 0 && ppToken->name[0] == '0') {
if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
ppToken->ival = (int)strtoul(ppToken->name, 0, 16);
else
ppToken->ival = (int)strtoul(ppToken->name, 0, 8);
@ -249,8 +248,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
ppToken->ival = (int)strtoul(ppToken->name, 0, 10);
break;
case PpAtomConstInt64:
if (len > 0 && tokenText[0] == '0') {
if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
if (len > 0 && ppToken->name[0] == '0') {
if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
ppToken->i64val = strtoll(ppToken->name, nullptr, 16);
else
ppToken->i64val = strtoll(ppToken->name, nullptr, 8);
@ -258,8 +257,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
ppToken->i64val = atoll(ppToken->name);
break;
case PpAtomConstUint64:
if (len > 0 && tokenText[0] == '0') {
if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
if (len > 0 && ppToken->name[0] == '0') {
if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
ppToken->i64val = (long long)strtoull(ppToken->name, nullptr, 16);
else
ppToken->i64val = (long long)strtoull(ppToken->name, nullptr, 8);
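Every integer case in ReadToken() above picks the radix the same way: a leading "0x"/"0X" means hexadecimal, any other leading '0' means octal, otherwise decimal. A self-contained sketch of just that selection (illustration, not the glslang function itself):

#include <cstdlib>
#include <cstring>

// Radix choice as used above: "0x2A" -> 42 (hex), "052" -> 42 (octal), "42" -> 42 (decimal).
long long parsePpIntegerLiteral(const char* name)
{
    const size_t len = strlen(name);
    if (len > 0 && name[0] == '0') {
        if (len > 1 && (name[1] == 'x' || name[1] == 'X'))
            return strtoll(name, nullptr, 16);   // hexadecimal
        return strtoll(name, nullptr, 8);        // octal (a bare "0" also lands here)
    }
    return strtoll(name, nullptr, 10);           // decimal
}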

View File

@ -394,7 +394,7 @@ EHlslTokenClass HlslScanContext::tokenizeClass(HlslToken& token)
do {
parserToken = &token;
TPpToken ppToken;
tokenText = ppContext.tokenize(&ppToken);
tokenText = ppContext.tokenize(ppToken);
if (tokenText == nullptr)
return EHTokNone;