HLSL: Non-functional: Move partial flattened access into symbol node.

Lays the groundwork for fixing issue #954.

Partial flattenings were previously tracked through a stack of active subsets
in the parse context. Full functionality, however, needs AST nodes that represent
this state across time, which also removes the need for parse-context tracking.
John Kessenich 2017-06-29 17:43:31 -06:00
parent 02a14e7c99
commit d1be7545c6
4 changed files with 26 additions and 36 deletions
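
To make the intent concrete, here is a minimal, self-contained sketch (toy types only, not glslang code) contrasting the two schemes: the removed one keeps a stack of active flattening offsets in the parse context and relies on paired init/finalize calls, while the new one stores the partial-dereference position on the symbol node as a flattenSubset, where -1 means the full object.

#include <cassert>
#include <vector>

// Old scheme (simplified): the parse context keeps a stack of active
// flattening offsets, so every postfix-expression parse must bracket its
// work with paired init/finalize calls.
struct OldStyleContext {
    std::vector<int> flattenOffset;
    void initFlattening()     { flattenOffset.push_back(0); }
    void finalizeFlattening() { flattenOffset.pop_back(); }
};

// New scheme (simplified): the partially dereferenced symbol node itself
// records how far it points into the flattened object.
struct NewStyleSymbol {
    int id;
    int flattenSubset;   // -1 means the full object
};

int main() {
    OldStyleContext ctx;
    ctx.initFlattening();            // easy to forget to pair with finalize
    ctx.flattenOffset.back() = 4;    // the state lives outside the AST
    ctx.finalizeFlattening();

    NewStyleSymbol shadow{ 42, 4 };  // the state travels with the node
    assert(shadow.flattenSubset == 4);
    return 0;
}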

glslang/Include/intermediate.h

@@ -945,7 +945,11 @@ public:
     // per process threadPoolAllocator, then it causes increased memory usage per compile
     // it is essential to use "symbol = sym" to assign to symbol
     TIntermSymbol(int i, const TString& n, const TType& t)
-        : TIntermTyped(t), id(i), constSubtree(nullptr)
+        : TIntermTyped(t), id(i),
+#ifdef ENABLE_HLSL
+          flattenSubset(-1),
+#endif
+          constSubtree(nullptr)
           { name = n; }
     virtual int getId() const { return id; }
     virtual const TString& getName() const { return name; }
@@ -956,9 +960,16 @@ public:
     const TConstUnionArray& getConstArray() const { return constArray; }
     void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
     TIntermTyped* getConstSubtree() const { return constSubtree; }
+#ifdef ENABLE_HLSL
+    void setFlattenSubset(int subset) { flattenSubset = subset; }
+    int getFlattenSubset() const { return flattenSubset; } // -1 means full object
+#endif
 
 protected:
     int id;                      // the unique id of the symbol this node represents
+#ifdef ENABLE_HLSL
+    int flattenSubset;           // how deeply the flattened object rooted at id has been dereferenced
+#endif
     TString name;                // the name of the symbol this node represents
     TConstUnionArray constArray; // if the symbol is a front-end compile-time constant, this is its value
     TIntermTyped* constSubtree;

hlsl/hlslGrammar.cpp

@@ -2870,23 +2870,6 @@ bool HlslGrammar::acceptPostfixExpression(TIntermTyped*& node)
         return false;
     }
 
-    // This is to guarantee we do this no matter how we get out of the stack frame.
-    // This way there's no bug if an early return forgets to do it.
-    struct tFinalize {
-        tFinalize(HlslParseContext& p) : parseContext(p) { }
-        ~tFinalize() { parseContext.finalizeFlattening(); }
-        HlslParseContext& parseContext;
-    private:
-        const tFinalize& operator=(const tFinalize&) { return *this; }
-        tFinalize(const tFinalize& f) : parseContext(f.parseContext) { }
-    } finalize(parseContext);
-
-    // Initialize the flattening accumulation data, so we can track data across multiple bracket or
-    // dot operators.  This can also be nested, e.g, for [], so we have to track each nesting
-    // level: hence the init and finalize.  Even though in practice these must be
-    // constants, they are parsed no matter what.
-    parseContext.initFlattening();
-
     // Something was found, chain as many postfix operations as exist.
     do {
         TSourceLoc loc = token.loc;

hlsl/hlslParseContext.cpp

@@ -1348,11 +1348,11 @@ TIntermTyped* HlslParseContext::flattenAccess(TIntermTyped* base, int member)
     const TType dereferencedType(base->getType(), member);  // dereferenced type
     const TIntermSymbol& symbolNode = *base->getAsSymbolNode();
 
-    TIntermTyped* flattened = flattenAccess(symbolNode.getId(), member, dereferencedType);
+    TIntermTyped* flattened = flattenAccess(symbolNode.getId(), member, dereferencedType, symbolNode.getFlattenSubset());
 
     return flattened ? flattened : base;
 }
 
-TIntermTyped* HlslParseContext::flattenAccess(int uniqueId, int member, const TType& dereferencedType)
+TIntermTyped* HlslParseContext::flattenAccess(int uniqueId, int member, const TType& dereferencedType, int subset)
 {
     const auto flattenData = flattenMap.find(uniqueId);
@@ -1360,18 +1360,24 @@ TIntermTyped* HlslParseContext::flattenAccess(int uniqueId, int member, const TT
         return nullptr;
 
     // Calculate new cumulative offset from the packed tree
-    flattenOffset.back() = flattenData->second.offsets[flattenOffset.back() + member];
+    int newSubset = flattenData->second.offsets[subset >= 0 ? subset + member : member];
 
+    TIntermSymbol* subsetSymbol;
     if (isFinalFlattening(dereferencedType)) {
         // Finished flattening: create symbol for variable
-        member = flattenData->second.offsets[flattenOffset.back()];
+        member = flattenData->second.offsets[newSubset];
         const TVariable* memberVariable = flattenData->second.members[member];
-        return intermediate.addSymbol(*memberVariable);
+        subsetSymbol = intermediate.addSymbol(*memberVariable);
+        subsetSymbol->setFlattenSubset(-1);
     } else {
         // If this is not the final flattening, accumulate the position and return
         // an object of the partially dereferenced type.
-        return new TIntermSymbol(uniqueId, "flattenShadow", dereferencedType);
+        subsetSymbol = new TIntermSymbol(uniqueId, "flattenShadow", dereferencedType);
+        subsetSymbol->setFlattenSubset(newSubset);
     }
+
+    return subsetSymbol;
 }
 
 // Find and return the split IO TVariable for id, or nullptr if none.
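
For readers unfamiliar with the flattening data, here is a rough, runnable sketch of the rewritten lookup above. The types, member names, and the offsets table are invented for illustration and make no claim about the real TFlattenData layout; what it mirrors is the control flow: the subset carried by the incoming node plus the requested member indexes the packed offsets table, a non-final dereference returns a "flattenShadow" placeholder carrying the new subset, and a final dereference resolves to a concrete member and resets the subset to -1.

#include <iostream>
#include <string>
#include <vector>

// Toy stand-ins; the contents of members and offsets are invented purely to
// make the control flow runnable.
struct ToyFlattenData {
    std::vector<std::string> members;  // leaf variables produced by flattening
    std::vector<int>         offsets;  // packed tree of cumulative positions
};

struct ToyNode {
    std::string name;
    int flattenSubset;                 // -1 means the full, un-dereferenced object
};

ToyNode toyFlattenAccess(const ToyFlattenData& data, const ToyNode& base,
                         int member, bool finalFlattening) {
    // Same shape as the new code: resume from the subset stored on the node.
    int newSubset = data.offsets[base.flattenSubset >= 0 ? base.flattenSubset + member
                                                         : member];
    if (finalFlattening)                           // resolve to a real member variable
        return ToyNode{ data.members[data.offsets[newSubset]], -1 };
    return ToyNode{ "flattenShadow", newSubset };  // partial dereference
}

int main() {
    // Models: struct S { struct { float x, y; } inner; float z; } s;
    ToyFlattenData data{ { "s.inner.x", "s.inner.y", "s.z" },
                         { 2, 7, 5, 6, 0, 0, 1, 2 } };
    ToyNode s{ "s", -1 };                                     // whole object
    ToyNode inner = toyFlattenAccess(data, s, 0, false);      // s.inner
    ToyNode y     = toyFlattenAccess(data, inner, 1, true);   // s.inner.y
    ToyNode z     = toyFlattenAccess(data, s, 1, true);       // s.z
    std::cout << inner.name << "/" << inner.flattenSubset << " "
              << y.name << " " << z.name << "\n";             // flattenShadow/2 s.inner.y s.z
}
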
@@ -1753,11 +1759,9 @@ TIntermAggregate* HlslParseContext::handleFunctionDefinition(const TSourceLoc& l
             flatten(loc, *variable);
             const TTypeList* structure = variable->getType().getStruct();
             for (int mem = 0; mem < (int)structure->size(); ++mem) {
-                initFlattening();
                 paramNodes = intermediate.growAggregate(paramNodes,
                                                         flattenAccess(variable->getUniqueId(), mem, *(*structure)[mem].type),
                                                         loc);
-                finalizeFlattening();
             }
         } else {
             // Add the parameter to the AST
@@ -4908,11 +4912,8 @@ void HlslParseContext::expandArguments(const TSourceLoc& loc, const TFunction& f
         if (wasFlattened(arg) && shouldFlatten(*function[param].type)) {
             // Need to pass the structure members instead of the structure.
             TVector<TIntermTyped*> memberArgs;
-            for (int memb = 0; memb < (int)arg->getType().getStruct()->size(); ++memb) {
-                initFlattening();
+            for (int memb = 0; memb < (int)arg->getType().getStruct()->size(); ++memb)
                 memberArgs.push_back(flattenAccess(arg, memb));
-                finalizeFlattening();
-            }
 
             setArgList(param + functionParamNumberOffset, memberArgs);
         }
     }
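
For context, expandArguments() is the call-site side of flattening: an argument whose type was flattened is replaced by one argument per member. A tiny sketch with invented names, not glslang code:

#include <iostream>
#include <string>
#include <vector>

int main() {
    // HLSL-side picture: given "struct S { float a; float b; };  f(x, s, y);",
    // the flattened call effectively becomes "f(x, s.a, s.b, y)".
    std::vector<std::string> callArgs = { "x", "s", "y" };
    const std::vector<std::string> flattenedMembers = { "s.a", "s.b" };

    std::vector<std::string> expanded;
    for (const std::string& arg : callArgs) {
        if (arg == "s")   // the argument that wasFlattened()
            expanded.insert(expanded.end(), flattenedMembers.begin(), flattenedMembers.end());
        else
            expanded.push_back(arg);
    }

    for (const std::string& arg : expanded)
        std::cout << arg << "\n";   // x, s.a, s.b, y
}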

hlsl/hlslParseContext.h

@@ -204,10 +204,6 @@ public:
     // Potentially rename shader entry point function
     void renameShaderFunction(const TString*& name) const;
 
-    // Reset data for incrementally built referencing of flattened composite structures
-    void initFlattening() { flattenLevel.push_back(0); flattenOffset.push_back(0); }
-    void finalizeFlattening() { flattenLevel.pop_back(); flattenOffset.pop_back(); }
-
     // Share struct buffer deep types
     void shareStructBufferType(TType&);
@@ -242,7 +238,7 @@ protected:
     // Array and struct flattening
     TIntermTyped* flattenAccess(TIntermTyped* base, int member);
-    TIntermTyped* flattenAccess(int uniqueId, int member, const TType&);
+    TIntermTyped* flattenAccess(int uniqueId, int member, const TType&, int subset = -1);
     bool shouldFlatten(const TType&) const;
     bool wasFlattened(const TIntermTyped* node) const;
     bool wasFlattened(int id) const { return flattenMap.find(id) != flattenMap.end(); }
@@ -368,7 +364,6 @@ protected:
     TMap<int, TFlattenData>     flattenMap;
     TVector<int>                flattenLevel;  // nested postfix operator level for flattening
-    TVector<int>                flattenOffset; // cumulative offset for flattening
 
     // IO-type map.  Maps a pure symbol-table form of a structure-member list into
     // each of the (up to) three kinds of IO, as each as different allowed decorations,