PP: Non-functional: clean up, simplify, completely identical operation.
This commit is contained in:
parent 3c264ce8f3
commit 1fbb9c1430
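The change below is a mechanical refactor of the preprocessor plumbing: tokenize() takes the TPpToken by reference instead of by pointer, tokenPaste() receives the current token explicitly, and MacroExpand() reads the atom out of the TPpToken rather than taking it as a separate argument. A minimal standalone sketch of the before/after call shape, using hypothetical stand-in types rather than the real glslang classes:

#include <iostream>
#include <string>

// Hypothetical stand-ins for glslang's TPpToken/TPpContext; only the call shape matters here.
struct PpToken {
    int token = 0;      // token class
    int atom  = 0;      // interned identifier id, carried inside the token
    std::string name;
};

struct PpContext {
    // Old shape (sketch): the atom was passed alongside the token it was taken from.
    // The body is a placeholder; it only exercises both parameters.
    bool macroExpandOld(int atom, PpToken* ppToken) { return atom != 0 && ppToken != nullptr; }

    // New shape (sketch): the atom is read from the token itself, one argument fewer.
    bool macroExpandNew(PpToken* ppToken) { return macroExpandOld(ppToken->atom, ppToken); }

    // tokenize() now takes the token by reference instead of by pointer.
    const char* tokenize(PpToken& ppToken) {
        ppToken.token = 1;          // pretend we scanned an identifier
        ppToken.atom  = 7;
        ppToken.name  = "foo";
        return ppToken.name.c_str();
    }
};

int main() {
    PpContext pp;
    PpToken tok;
    const char* text = pp.tokenize(tok);                                  // was: pp.tokenize(&tok)
    std::cout << text << " expands: " << pp.macroExpandNew(&tok) << "\n"; // was: macroExpand(tok.atom, &tok)
    return 0;
}

Presumably the point of the shape change is that the atom can no longer drift out of sync with the token it was scanned from, since it now lives in exactly one place.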
@@ -2,5 +2,5 @@
 // For the version, it uses the latest git tag followed by the number of commits.
 // For the date, it uses the current date (when then script is run).
 
-#define GLSLANG_REVISION "Overload400-PrecQual.1706"
-#define GLSLANG_DATE "19-Dec-2016"
+#define GLSLANG_REVISION "Overload400-PrecQual.1713"
+#define GLSLANG_DATE "20-Dec-2016"
@@ -638,7 +638,7 @@ int TScanContext::tokenize(TPpContext* pp, TParserToken& token)
     do {
         parserToken = &token;
         TPpToken ppToken;
-        tokenText = pp->tokenize(&ppToken);
+        tokenText = pp->tokenize(ppToken);
         if (tokenText == nullptr || tokenText[0] == 0)
             return 0;
 
@@ -868,7 +868,7 @@ struct DoPreprocessing {
         // This is a list of tokens that do not require a space before or after.
         static const std::string unNeededSpaceTokens = ";()[]";
         static const std::string noSpaceBeforeTokens = ",";
-        glslang::TPpToken token;
+        glslang::TPpToken ppToken;
 
         parseContext.setScanner(&input);
         ppContext.setInput(input, versionWillBeError);
@@ -931,27 +931,27 @@
         });
 
         int lastToken = EndOfInput; // lastToken records the last token processed.
-        while (const char* tok = ppContext.tokenize(&token)) {
+        while (const char* tok = ppContext.tokenize(ppToken)) {
             bool isNewString = lineSync.syncToMostRecentString();
-            bool isNewLine = lineSync.syncToLine(token.loc.line);
+            bool isNewLine = lineSync.syncToLine(ppToken.loc.line);
 
             if (isNewLine) {
                 // Don't emit whitespace onto empty lines.
                 // Copy any whitespace characters at the start of a line
                 // from the input to the output.
-                outputStream << std::string(token.loc.column - 1, ' ');
+                outputStream << std::string(ppToken.loc.column - 1, ' ');
             }
 
             // Output a space in between tokens, but not at the start of a line,
             // and also not around special tokens. This helps with readability
             // and consistency.
             if (!isNewString && !isNewLine && lastToken != EndOfInput &&
-                (unNeededSpaceTokens.find((char)token.token) == std::string::npos) &&
+                (unNeededSpaceTokens.find((char)ppToken.token) == std::string::npos) &&
                 (unNeededSpaceTokens.find((char)lastToken) == std::string::npos) &&
-                (noSpaceBeforeTokens.find((char)token.token) == std::string::npos)) {
+                (noSpaceBeforeTokens.find((char)ppToken.token) == std::string::npos)) {
                 outputStream << " ";
             }
-            lastToken = token.token;
+            lastToken = ppToken.token;
             outputStream << tok;
         }
         outputStream << std::endl;
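The loop in the hunk above is the whitespace-preservation pass of the standalone preprocessing path: leading spaces of each line are rebuilt from the token's column, and a single space is emitted between tokens except around ";()[]" and before ",". A self-contained sketch of just that spacing rule over a mock token list (the single-character token classification here is a simplification, not the glslang types):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Standalone sketch of the spacing rule shown above: insert one space between
// tokens, but not around ";()[]" and not before ",".
int main() {
    static const std::string unNeededSpaceTokens = ";()[]";
    static const std::string noSpaceBeforeTokens = ",";

    std::vector<std::string> tokens = {"vec3", "v", "=", "f", "(", "a", ",", "b", ")", ";"};
    std::ostringstream out;
    char lastToken = 0;   // 0 plays the role of EndOfInput: nothing emitted yet

    for (const std::string& tok : tokens) {
        char t = tok[0];  // single-char classification is enough for this sketch
        bool space = lastToken != 0 &&
                     unNeededSpaceTokens.find(t) == std::string::npos &&
                     unNeededSpaceTokens.find(lastToken) == std::string::npos &&
                     noSpaceBeforeTokens.find(t) == std::string::npos;
        if (space)
            out << " ";
        out << tok;
        lastToken = t;
    }
    std::cout << out.str() << "\n";   // prints: vec3 v = f(a, b);
    return 0;
}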
@@ -75,9 +75,6 @@ NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
 TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
 NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 \****************************************************************************/
-//
-// cpp.c
-//
 
 #define _CRT_SECURE_NO_WARNINGS
 
@@ -107,7 +104,7 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
         parseContext.reservedPpErrorCheck(ppToken->loc, ppToken->name, "#define");
     }
 
-    // save the original atom
+    // save the original macro name
     const int defAtom = ppToken->atom;
 
     // gather parameters to the macro, between (...)
@@ -219,7 +216,6 @@ int TPpContext::CPPundef(TPpToken* ppToken)
 */
 int TPpContext::CPPelse(int matchelse, TPpToken* ppToken)
 {
-    int atom;
     int depth = 0;
     int token = scanToken(ppToken);
 
@@ -238,7 +234,7 @@ int TPpContext::CPPelse(int matchelse, TPpToken* ppToken)
         if ((token = scanToken(ppToken)) != PpAtomIdentifier)
             continue;
 
-        atom = ppToken->atom;
+        int atom = ppToken->atom;
         if (atom == PpAtomIf || atom == PpAtomIfdef || atom == PpAtomIfndef) {
             depth++;
             ifdepth++;
@@ -501,7 +497,7 @@ int TPpContext::eval(int token, int precedence, bool shortCircuit, int& res, boo
 int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken* ppToken)
 {
     while (token == PpAtomIdentifier && ppToken->atom != PpAtomDefined) {
-        int macroReturn = MacroExpand(ppToken->atom, ppToken, true, false);
+        int macroReturn = MacroExpand(ppToken, true, false);
         if (macroReturn == 0) {
             parseContext.ppError(ppToken->loc, "can't evaluate expression", "preprocessor evaluation", "");
             err = true;
@@ -932,7 +928,7 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
     pushInput(new tMarkerInput(this));
     pushTokenStreamInput(arg);
    while ((token = scanToken(ppToken)) != tMarkerInput::marker) {
-        if (token == PpAtomIdentifier && MacroExpand(ppToken->atom, ppToken, false, newLineOkay) != 0)
+        if (token == PpAtomIdentifier && MacroExpand(ppToken, false, newLineOkay) != 0)
            continue;
        RecordToken(*expandedArg, token, ppToken);
    }
@@ -1047,17 +1043,17 @@ int TPpContext::tZeroInput::scan(TPpToken* ppToken)
 }
 
 //
-// Check an identifier (atom) to see if it is a macro that should be expanded.
+// Check a token to see if it is a macro that should be expanded.
 // If it is, and defined, push a tInput that will produce the appropriate expansion
 // and return 1.
 // If it is, but undefined, and expandUndef is requested, push a tInput that will
 // expand to 0 and return -1.
 // Otherwise, return 0 to indicate no expansion, which is not necessarily an error.
 //
-int TPpContext::MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool newLineOkay)
+int TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay)
 {
     ppToken->space = false;
-    switch (atom) {
+    switch (ppToken->atom) {
     case PpAtomLineMacro:
         ppToken->ival = parseContext.getCurrentLoc().line;
         snprintf(ppToken->name, sizeof(ppToken->name), "%d", ppToken->ival);
@@ -1083,7 +1079,7 @@ int TPpContext::MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool
         break;
     }
 
-    MacroSymbol* macro = lookupMacroDef(atom);
+    MacroSymbol* macro = lookupMacroDef(ppToken->atom);
     int token;
     int depth = 0;
 
@@ -1105,6 +1101,7 @@ int TPpContext::MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool
 
     TSourceLoc loc = ppToken->loc; // in case we go to the next line before discovering the error
     in->mac = macro;
+    int atom = ppToken->atom;
     if (macro->args.size() > 0 || macro->emptyArgs) {
         token = scanToken(ppToken);
         if (newLineOkay) {
@@ -76,10 +76,6 @@ TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
 NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 \****************************************************************************/
 
-//
-// atom.c
-//
-
 #define _CRT_SECURE_NO_WARNINGS
 
 #include <cassert>
@@ -167,7 +163,7 @@ int TPpContext::LookUpAddString(const char* s)
 }
 
 //
-// Map an already created atom to its string.
+// Lookup up mapping of atom -> string.
 //
 const char* TPpContext::GetAtomString(int atom)
 {
@@ -180,7 +176,9 @@ const char* TPpContext::GetAtomString(int atom)
 }
 
 //
-// Add forced mapping of string to atom.
+// Add mappings:
+// - string -> atom
+// - atom -> string
 //
 void TPpContext::AddAtomFixed(const char* s, int atom)
 {
@@ -129,8 +129,8 @@ public:
 
     void setPreamble(const char* preamble, size_t length);
 
-    const char* tokenize(TPpToken* ppToken);
-    int tokenPaste(TPpToken&);
+    const char* tokenize(TPpToken& ppToken);
+    int tokenPaste(int token, TPpToken&);
 
     class tInput {
     public:
@@ -314,7 +314,7 @@ protected:
     int CPPextension(TPpToken * ppToken);
     int readCPPline(TPpToken * ppToken);
     TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
-    int MacroExpand(int atom, TPpToken* ppToken, bool expandUndef, bool newLineOkay);
+    int MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
 
     //
     // From PpTokens.cpp
@@ -537,6 +537,9 @@ protected:
     }
 
     bool inComment;
+    std::string rootFileName;
+    std::stack<TShader::Includer::IncludeResult*> includeStack;
+    std::string currentSourceFile;
 
     //
     // From PpAtom.cpp
@@ -546,9 +549,6 @@ protected:
 
     TAtomMap atomMap;
     TStringMap stringMap;
-    std::stack<TShader::Includer::IncludeResult*> includeStack;
-    std::string currentSourceFile;
-    std::string rootFileName;
     int nextAtom;
     void InitAtomTable();
     void AddAtomFixed(const char* s, int atom);
@@ -75,9 +75,6 @@ NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
 TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
 NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 \****************************************************************************/
-//
-// scanner.c
-//
 
 #define _CRT_SECURE_NO_WARNINGS
 
@@ -721,17 +718,14 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
 // Return string pointer to next token.
 // Return 0 when no more tokens.
 //
-const char* TPpContext::tokenize(TPpToken* ppToken)
+const char* TPpContext::tokenize(TPpToken& ppToken)
 {
-    int token = '\n';
-
     for(;;) {
-        token = scanToken(ppToken);
-        ppToken->token = token;
+        int token = scanToken(&ppToken);
 
         // Handle token-pasting logic
-        token = tokenPaste(*ppToken);
-        ppToken->token = token;
+        token = tokenPaste(token, ppToken);
+        ppToken.token = token;
 
         if (token == EndOfInput) {
             missingEndifCheck();
@@ -739,14 +733,14 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
         }
         if (token == '#') {
             if (previous_token == '\n') {
-                token = readCPPline(ppToken);
+                token = readCPPline(&ppToken);
                 if (token == EndOfInput) {
                     missingEndifCheck();
                     return nullptr;
                 }
                 continue;
             } else {
-                parseContext.ppError(ppToken->loc, "preprocessor directive cannot be preceded by another token", "#", "");
+                parseContext.ppError(ppToken.loc, "preprocessor directive cannot be preceded by another token", "#", "");
                 return nullptr;
             }
         }
@@ -756,7 +750,7 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
             continue;
 
         // expand macros
-        if (token == PpAtomIdentifier && MacroExpand(ppToken->atom, ppToken, false, true) != 0)
+        if (token == PpAtomIdentifier && MacroExpand(&ppToken, false, true) != 0)
            continue;
 
         const char* tokenString = nullptr;
@@ -771,18 +765,18 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
 #ifdef AMD_EXTENSIONS
         case PpAtomConstFloat16:
 #endif
-            tokenString = ppToken->name;
+            tokenString = ppToken.name;
             break;
         case PpAtomConstString:
             if (parseContext.intermediate.getSource() == EShSourceHlsl) {
                 // HLSL allows string literals.
-                tokenString = ppToken->name;
+                tokenString = ppToken.name;
             } else {
-                parseContext.ppError(ppToken->loc, "string literals not supported", "\"\"", "");
+                parseContext.ppError(ppToken.loc, "string literals not supported", "\"\"", "");
             }
             break;
         case '\'':
-            parseContext.ppError(ppToken->loc, "character literals not supported", "\'", "");
+            parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
             break;
         default:
             tokenString = GetAtomString(token);
@@ -799,21 +793,23 @@ const char* TPpContext::tokenize(TPpToken* ppToken)
 // stream of tokens from a replacement list. Degenerates to no processing if a
 // replacement list is not the source of the token stream.
 //
-int TPpContext::tokenPaste(TPpToken& ppToken)
+int TPpContext::tokenPaste(int token, TPpToken& ppToken)
 {
     // starting with ## is illegal, skip to next token
-    if (ppToken.token == PpAtomPaste) {
+    if (token == PpAtomPaste) {
         parseContext.ppError(ppToken.loc, "unexpected location", "##", "");
-        ppToken.token = scanToken(&ppToken);
+        return scanToken(&ppToken);
     }
 
+    int resultToken = token; // "foo" pasted with "35" is an identifier, not a number
+
     // ## can be chained, process all in the chain at once
     while (peekPasting()) {
         TPpToken pastedPpToken;
 
         // next token has to be ##
-        pastedPpToken.token = scanToken(&pastedPpToken);
-        assert(pastedPpToken.token == PpAtomPaste);
+        token = scanToken(&pastedPpToken);
+        assert(token == PpAtomPaste);
 
         if (endOfReplacementList()) {
             parseContext.ppError(ppToken.loc, "unexpected location; end of replacement list", "##", "");
@@ -821,18 +817,18 @@ int TPpContext::tokenPaste(TPpToken& ppToken)
         }
 
         // get the token after the ##
-        scanToken(&pastedPpToken);
+        token = scanToken(&pastedPpToken);
 
         // combine the tokens
+        if (resultToken != PpAtomIdentifier)
+            parseContext.ppError(ppToken.loc, "only supported for preprocessing identifiers", "##", "");
         if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength)
             parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
         strncat(ppToken.name, pastedPpToken.name, MaxTokenLength - strlen(ppToken.name));
         ppToken.atom = LookUpAddString(ppToken.name);
-        if (ppToken.token != PpAtomIdentifier)
-            parseContext.ppError(ppToken.loc, "only supported for preprocessing identifiers", "##", "");
     }
 
-    return ppToken.token;
+    return resultToken;
 }
 
 // Checks if we've seen balanced #if...#endif
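The rewritten tokenPaste() above threads the current token class through resultToken, so a chain like foo ## 3 ## 5 is consumed in one loop and the result keeps the class of the first operand (an identifier, not a number). A standalone sketch of that chaining behavior over plain strings, with invented helper types rather than the glslang API:

#include <cassert>
#include <iostream>
#include <string>
#include <vector>

// Standalone sketch of chained "##" pasting: the pieces of "foo ## 3 ## 5" are
// concatenated in one pass, and the result keeps the token class of the first
// operand, mirroring resultToken in the hunk above.
enum TokenClass { Identifier, Number, Paste };

struct Tok {
    TokenClass cls;
    std::string text;
};

Tok pasteChain(const std::vector<Tok>& replacementList) {
    assert(!replacementList.empty());
    Tok result = replacementList[0];                 // resultToken analogue
    for (size_t i = 1; i + 1 < replacementList.size(); i += 2) {
        assert(replacementList[i].cls == Paste);     // every other token must be ##
        result.text += replacementList[i + 1].text;  // combine the spellings
    }
    return result;                                   // class of the first operand wins
}

int main() {
    std::vector<Tok> list = { {Identifier, "foo"}, {Paste, "##"}, {Number, "3"},
                              {Paste, "##"}, {Number, "5"} };
    Tok pasted = pasteChain(list);
    std::cout << pasted.text << " is "
              << (pasted.cls == Identifier ? "an identifier" : "not an identifier") << "\n";
    // prints: foo35 is an identifier
    return 0;
}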
@@ -75,20 +75,3 @@ NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
 TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
 NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 \****************************************************************************/
-//
-// symbols.c
-//
-
-#include <cassert>
-#include <cstdlib>
-#include <cstring>
-
-#include "PpContext.h"
-
-///////////////////////////////////////////////////////////////////////////////////////////////
-/////////////////////////////////// Symbol Table Variables: ///////////////////////////////////
-///////////////////////////////////////////////////////////////////////////////////////////////
-
-namespace glslang {
-
-} // end namespace glslang
@@ -172,7 +172,6 @@ void TPpContext::RewindTokenStream(TokenStream& pTok)
 */
 int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
 {
-    char* tokenText = ppToken->name;
     int ltoken, len;
     int ch;
 
@@ -207,7 +206,7 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
         ch = lReadByte(pTok);
         while (ch != 0 && ch != EndOfInput) {
             if (len < MaxTokenLength) {
-                tokenText[len] = (char)ch;
+                ppToken->name[len] = (char)ch;
                 len++;
                 ch = lReadByte(pTok);
             } else {
@@ -215,11 +214,11 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
                 break;
             }
         }
-        tokenText[len] = 0;
+        ppToken->name[len] = 0;
 
         switch (ltoken) {
         case PpAtomIdentifier:
-            ppToken->atom = LookUpAddString(tokenText);
+            ppToken->atom = LookUpAddString(ppToken->name);
             break;
         case PpAtomConstString:
             break;
@@ -231,8 +230,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
             ppToken->dval = atof(ppToken->name);
             break;
         case PpAtomConstInt:
-            if (len > 0 && tokenText[0] == '0') {
-                if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
+            if (len > 0 && ppToken->name[0] == '0') {
+                if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
                     ppToken->ival = (int)strtol(ppToken->name, 0, 16);
                 else
                     ppToken->ival = (int)strtol(ppToken->name, 0, 8);
@@ -240,8 +239,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
             ppToken->ival = atoi(ppToken->name);
             break;
         case PpAtomConstUint:
-            if (len > 0 && tokenText[0] == '0') {
-                if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
+            if (len > 0 && ppToken->name[0] == '0') {
+                if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
                     ppToken->ival = (int)strtoul(ppToken->name, 0, 16);
                 else
                     ppToken->ival = (int)strtoul(ppToken->name, 0, 8);
@@ -249,8 +248,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
             ppToken->ival = (int)strtoul(ppToken->name, 0, 10);
             break;
         case PpAtomConstInt64:
-            if (len > 0 && tokenText[0] == '0') {
-                if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
+            if (len > 0 && ppToken->name[0] == '0') {
+                if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
                     ppToken->i64val = strtoll(ppToken->name, nullptr, 16);
                 else
                     ppToken->i64val = strtoll(ppToken->name, nullptr, 8);
@@ -258,8 +257,8 @@ int TPpContext::ReadToken(TokenStream& pTok, TPpToken *ppToken)
             ppToken->i64val = atoll(ppToken->name);
             break;
         case PpAtomConstUint64:
-            if (len > 0 && tokenText[0] == '0') {
-                if (len > 1 && (tokenText[1] == 'x' || tokenText[1] == 'X'))
+            if (len > 0 && ppToken->name[0] == '0') {
+                if (len > 1 && (ppToken->name[1] == 'x' || ppToken->name[1] == 'X'))
                     ppToken->i64val = (long long)strtoull(ppToken->name, nullptr, 16);
                 else
                     ppToken->i64val = (long long)strtoull(ppToken->name, nullptr, 8);
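The integer cases above all follow the same base selection: a leading 0x/0X means hexadecimal, any other leading 0 means octal, otherwise decimal, with the strto* family doing the conversion. A minimal standalone version of that rule, using only the C standard library:

#include <cstdlib>
#include <cstring>
#include <iostream>

// Minimal version of the base selection used in the hunks above:
// "0x"/"0X" prefix -> hex, other leading '0' -> octal, otherwise decimal.
long long parsePpInt(const char* text) {
    size_t len = std::strlen(text);
    if (len > 0 && text[0] == '0') {
        if (len > 1 && (text[1] == 'x' || text[1] == 'X'))
            return std::strtoll(text, nullptr, 16);
        return std::strtoll(text, nullptr, 8);
    }
    return std::strtoll(text, nullptr, 10);
}

int main() {
    std::cout << parsePpInt("0x1F") << " "   // 31
              << parsePpInt("017")  << " "   // 15
              << parsePpInt("42")   << "\n"; // 42
    return 0;
}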
@@ -394,7 +394,7 @@ EHlslTokenClass HlslScanContext::tokenizeClass(HlslToken& token)
     do {
         parserToken = &token;
         TPpToken ppToken;
-        tokenText = ppContext.tokenize(&ppToken);
+        tokenText = ppContext.tokenize(ppToken);
         if (tokenText == nullptr)
             return EHTokNone;
 