Reflection: Flesh out expansions of complex aggregates not fully dereferenced, visit all array elements for a variable index, and correct max array index when a later occurrence is larger than an earlier one.

git-svn-id: https://cvs.khronos.org/svn/repos/ogl/trunk/ecosystem/public/sdk/tools/glslang@24080 e7fa87d3-cd2b-0410-9028-fcbf551c1848
This commit is contained in:
John Kessenich 2013-11-15 20:41:31 +00:00
parent c17dbd516f
commit 43e43ce77a
3 changed files with 200 additions and 87 deletions

View File

@ -19,7 +19,7 @@ scalarAfterm23: offset 48, type 1404, size 1, index 0
c_m23: offset 16, type 8b67, size 1, index 2
c_scalarAfterm23: offset 64, type 1404, size 1, index 2
scalarBeforeArray: offset 96, type 1404, size 1, index 0
floatArray: offset 112, type 1406, size 3, index 0
floatArray: offset 112, type 1406, size 5, index 0
scalarAfterArray: offset 192, type 1404, size 1, index 0
ablock.memvec2: offset 48, type 8b50, size 1, index 1
ablock.memf1: offset 56, type 1406, size 1, index 1
@ -33,6 +33,49 @@ nest.foo.n1.a: offset 0, type 1406, size 1, index 3
nest.foo.n2.b: offset 16, type 1406, size 1, index 3
nest.foo.n2.c: offset 20, type 1406, size 1, index 3
nest.foo.n2.d: offset 24, type 1406, size 1, index 3
deepA[0].d2.d1[2].va: offset -1, type 8b50, size 2, index -1
deepA[1].d2.d1[2].va: offset -1, type 8b50, size 2, index -1
deepB[1].d2.d1[0].va: offset -1, type 8b50, size 2, index -1
deepB[1].d2.d1[1].va: offset -1, type 8b50, size 2, index -1
deepB[1].d2.d1[2].va: offset -1, type 8b50, size 2, index -1
deepB[1].d2.d1[3].va: offset -1, type 8b50, size 2, index -1
deepB[0].d2.d1[0].va: offset -1, type 8b50, size 2, index -1
deepB[0].d2.d1[1].va: offset -1, type 8b50, size 2, index -1
deepB[0].d2.d1[2].va: offset -1, type 8b50, size 2, index -1
deepB[0].d2.d1[3].va: offset -1, type 8b50, size 2, index -1
deepC[1].iv4: offset -1, type 8b52, size 1, index -1
deepC[1].d2.i: offset -1, type 1404, size 1, index -1
deepC[1].d2.d1[0].va: offset -1, type 8b50, size 3, index -1
deepC[1].d2.d1[0].b: offset -1, type 8b56, size 1, index -1
deepC[1].d2.d1[1].va: offset -1, type 8b50, size 3, index -1
deepC[1].d2.d1[1].b: offset -1, type 8b56, size 1, index -1
deepC[1].d2.d1[2].va: offset -1, type 8b50, size 3, index -1
deepC[1].d2.d1[2].b: offset -1, type 8b56, size 1, index -1
deepC[1].d2.d1[3].va: offset -1, type 8b50, size 3, index -1
deepC[1].d2.d1[3].b: offset -1, type 8b56, size 1, index -1
deepC[1].v3: offset -1, type 8b54, size 1, index -1
deepD[0].iv4: offset -1, type 8b52, size 1, index -1
deepD[0].d2.i: offset -1, type 1404, size 1, index -1
deepD[0].d2.d1[0].va: offset -1, type 8b50, size 3, index -1
deepD[0].d2.d1[0].b: offset -1, type 8b56, size 1, index -1
deepD[0].d2.d1[1].va: offset -1, type 8b50, size 3, index -1
deepD[0].d2.d1[1].b: offset -1, type 8b56, size 1, index -1
deepD[0].d2.d1[2].va: offset -1, type 8b50, size 3, index -1
deepD[0].d2.d1[2].b: offset -1, type 8b56, size 1, index -1
deepD[0].d2.d1[3].va: offset -1, type 8b50, size 3, index -1
deepD[0].d2.d1[3].b: offset -1, type 8b56, size 1, index -1
deepD[0].v3: offset -1, type 8b54, size 1, index -1
deepD[1].iv4: offset -1, type 8b52, size 1, index -1
deepD[1].d2.i: offset -1, type 1404, size 1, index -1
deepD[1].d2.d1[0].va: offset -1, type 8b50, size 3, index -1
deepD[1].d2.d1[0].b: offset -1, type 8b56, size 1, index -1
deepD[1].d2.d1[1].va: offset -1, type 8b50, size 3, index -1
deepD[1].d2.d1[1].b: offset -1, type 8b56, size 1, index -1
deepD[1].d2.d1[2].va: offset -1, type 8b50, size 3, index -1
deepD[1].d2.d1[2].b: offset -1, type 8b56, size 1, index -1
deepD[1].d2.d1[3].va: offset -1, type 8b50, size 3, index -1
deepD[1].d2.d1[3].b: offset -1, type 8b56, size 1, index -1
deepD[1].v3: offset -1, type 8b54, size 1, index -1
anonMember1: offset 0, type 8b51, size 1, index 0
uf1: offset -1, type 1406, size 1, index -1
uf2: offset -1, type 1406, size 1, index -1
@ -40,7 +83,7 @@ ablock.member3: offset 32, type 8b52, size 1, index 1
Uniform block reflection:
nameless: offset -1, type ffffffff, size 496, index -1
ablock: offset -1, type ffffffff, size 304, index -1
named: offset -1, type ffffffff, size 304, index -1
c_nameless: offset -1, type ffffffff, size 112, index -1
nest: offset -1, type ffffffff, size 28, index -1
nested: offset -1, type ffffffff, size 28, index -1

View File

@ -78,6 +78,24 @@ uniform sampler2DMSArray sampler_2DMSArray;
uniform mat2 dm22[10];
struct deep1 {
vec2 va[3];
bool b;
};
struct deep2 {
int i;
deep1 d1[4];
};
struct deep3 {
vec4 iv4;
deep2 d2;
ivec3 v3;
};
uniform deep3 deepA[2], deepB[2], deepC[3], deepD[2];
const bool control = true;
void deadFunction()
@ -120,6 +138,7 @@ void main()
f = c_m23[1].y + c_scalarAfterm23;
f += scalarBeforeArray;
f += floatArray[2];
f += floatArray[4];
f += scalarAfterArray;
f += ablock.memvec2.x;
f += ablock.memf1;
@ -130,6 +149,11 @@ void main()
f += dm22[3][0][1];
f += m22[2][1].y;
f += nest.foo.n1.a + nest.foo.n2.b + nest.foo.n2.c + nest.foo.n2.d;
f += deepA[i].d2.d1[2].va[1].x;
f += deepB[1].d2.d1[i].va[1].x;
f += deepB[i].d2.d1[i].va[1].x;
deep3 d = deepC[1];
deep3 da[2] = deepD;
} else
f = ufDead3;
}

View File

@ -89,14 +89,17 @@ public:
}
}
// Add a simple uniform variable reference to the uniform database, no dereference involved.
void addUniform(const TIntermSymbol& symbol)
// Add a simple reference to a uniform variable to the uniform database, no dereference involved.
// However, no dereference doesn't mean simple... it could be a complex aggregate.
void addUniform(const TIntermSymbol& base)
{
if (reflection.nameToIndex.find(symbol.getName()) == reflection.nameToIndex.end()) {
if (isReflectionGranularity(symbol.getType())) {
reflection.nameToIndex[symbol.getName()] = reflection.indexToUniform.size();
reflection.indexToUniform.push_back(TObjectReflection(symbol.getName(), -1, mapToGlType(symbol.getType()), mapToGlArraySize(symbol.getType()), -1));
}
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
// Use a degenerate (empty) set of dereferences to immediately put us at the end of
// the dereference chain expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
}
}
@ -235,6 +238,97 @@ public:
return size;
}
// Traverse the provided deref chain, including the base, and
// - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
// - recursively expand any variable array index in the middle of that traversal
// - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
//
// arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
// A value of 0 for arraySize will mean to use the full array's size.
void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
{
// The reflection name being accumulated as the dereference chain is walked.
TString name = baseName;
// Tracks the type reached after the most recent dereference processed.
const TType* terminalType = &baseType;
// Walk the (forward-ordered) dereference chain, appending to 'name' as we go.
for (; deref != derefs.end(); ++deref) {
TIntermBinary* visitNode = *deref;
terminalType = &visitNode->getType();
int index;
switch (visitNode->getOp()) {
case EOpIndexIndirect:
// A variable (run-time) array index: visit all the indices of this array,
// and for each one recursively add on the remaining dereferencing.
for (int i = 0; i < visitNode->getLeft()->getType().getArraySize(); ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
// Continue with the rest of the chain, after this dereference.
TList<TIntermBinary*>::const_iterator nextDeref = deref;
++nextDeref;
TType derefType(*terminalType, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
}
// it was all completed in the recursive calls above
return;
case EOpIndexDirect:
// A compile-time constant array index: append "[index]" to the name.
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
name.append(TString("[") + String(index) + "]");
break;
case EOpIndexDirectStruct:
// A struct-member selection: accumulate the member's block offset (when known)
// and append ".fieldName" to the name.
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (offset >= 0)
offset += getBlockMemberOffset(visitNode->getLeft()->getType(), index);
if (name.size() > 0)
name.append(".");
name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
break;
default:
break;
}
}
// if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
if (! isReflectionGranularity(*terminalType)) {
if (terminalType->isArray()) {
// Visit all the indices of this array, and for each one,
// fully explode the remaining aggregate to dereference
for (int i = 0; i < terminalType->getArraySize(); ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(*terminalType, 0);
// The chain itself is exhausted (derefs.end()); only the type is expanded further.
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
}
} else {
// Visit all members of this aggregate, and for each one,
// fully explode the remaining aggregate to dereference
const TTypeList& typeList = *terminalType->getStruct();
for (size_t i = 0; i < typeList.size(); ++i) {
TString newBaseName = name;
newBaseName.append(TString(".") + typeList[i].type->getFieldName());
TType derefType(*terminalType, i);
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
}
}
// it was all completed in the recursive calls above
return;
}
// Finally, add a full string to the reflection database, and update the array size if necessary.
// If the dereferenced entity to record is an array, compute the size and update the maximum size.
// there might not be a final array dereference, it could have been copied as an array object
if (arraySize == 0)
arraySize = mapToGlArraySize(*terminalType);
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
if (it == reflection.nameToIndex.end()) {
// First time this name is seen: record a new uniform entry.
reflection.nameToIndex[name] = reflection.indexToUniform.size();
reflection.indexToUniform.push_back(TObjectReflection(name, offset, mapToGlType(*terminalType), arraySize, blockIndex));
} else if (arraySize > 1) {
// Already recorded: keep the largest observed array size, so a later,
// larger constant index can grow the reported size.
int& reflectedArraySize = reflection.indexToUniform[it->second].size;
reflectedArraySize = std::max(arraySize, reflectedArraySize);
}
}
// Add a uniform dereference where blocks/struct/arrays are involved in the access.
// Handles the situation where the left node is at the correct or too coarse a
// granularity for reflection. (That is, further dereferences up the tree will be
@ -279,7 +373,7 @@ public:
if (block) {
// TODO: how is an array of blocks handled differently?
anonymous = base->getName().compare(0, 6, "__anon") == 0;
const TString& blockName = anonymous ? base->getType().getTypeName() : base->getName();
const TString& blockName = anonymous ? base->getType().getTypeName() : base->getType().getTypeName();
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(blockName);
if (it == reflection.nameToIndex.end()) {
blockIndex = reflection.indexToUniformBlock.size();
@ -287,81 +381,33 @@ public:
reflection.indexToUniformBlock.push_back(TObjectReflection(blockName, offset, -1, getBlockSize(base->getType()), -1));
} else
blockIndex = it->second;
}
// If the dereferenced entity to record is an array, note the maximum array size.
int maxArraySize;
const TType* reflectionType;
if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
reflectionType = &topNode->getLeft()->getType();
switch (topNode->getOp()) {
case EOpIndexIndirect:
maxArraySize = topNode->getLeft()->getType().getArraySize();
break;
case EOpIndexDirect:
maxArraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
break;
default:
assert(0);
maxArraySize = 1;
break;
}
} else {
reflectionType = &topNode->getType();
maxArraySize = 1;
}
// TODO: fully expand a partially dereferenced aggregate
// Process the dereference chain, backward, accumulating the pieces on a stack.
// If the topNode is a simple array dereference, don't include that.
if (block)
offset = 0;
std::list<TString> derefs;
}
// Process the dereference chain, backward, accumulating the pieces for later forward traversal.
// If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
TList<TIntermBinary*> derefs;
for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
if (isReflectionGranularity(visitNode->getLeft()->getType()))
continue;
derefs.push_front(visitNode);
processedDerefs.insert(visitNode);
int index;
switch (visitNode->getOp()) {
case EOpIndexIndirect:
// TODO handle indirect references in mid-chain: enumerate all possibilities?
if (! isReflectionGranularity(visitNode->getLeft()->getType()))
derefs.push_back(TString("[") + String(0) + "]");
break;
case EOpIndexDirect:
if (! isReflectionGranularity(visitNode->getLeft()->getType())) {
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
derefs.push_back(TString("[") + String(index) + "]");
}
break;
case EOpIndexDirectStruct:
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (block)
offset += getBlockMemberOffset(visitNode->getLeft()->getType(), index);
derefs.push_back(TString(""));
if (visitNode->getLeft()->getAsSymbolNode() != base || ! anonymous)
derefs.back().append(".");
derefs.back().append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName().c_str());
break;
default:
break;
}
}
processedDerefs.insert(base);
// See if we have a specific array size to stick to while enumerating the explosion of the aggregate
int arraySize = 0;
if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
if (topNode->getOp() == EOpIndexDirect)
arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
}
// Put the dereference chain together, forward (reversing the stack)
TString name;
// Put the dereference chain together, forward
TString baseName;
if (! anonymous)
name = base->getName();
while (! derefs.empty()) {
name += derefs.back();
derefs.pop_back();
}
if (name.size() > 0) {
if (reflection.nameToIndex.find(name) == reflection.nameToIndex.end()) {
reflection.nameToIndex[name] = reflection.indexToUniform.size();
reflection.indexToUniform.push_back(TObjectReflection(name, offset, mapToGlType(*reflectionType), maxArraySize, blockIndex));
}
}
baseName = base->getName();
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
}
//
@ -390,9 +436,9 @@ public:
// Return 0 if the topology does not fit this situation.
TIntermSymbol* findBase(const TIntermBinary* node)
{
TIntermSymbol *symbol = node->getLeft()->getAsSymbolNode();
if (symbol)
return symbol;
TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
if (base)
return base;
TIntermBinary* left = node->getLeft()->getAsBinaryNode();
if (! left)
return 0;
@ -643,7 +689,7 @@ public:
TFunctionStack functions;
const TIntermediate& intermediate;
TReflection& reflection;
std::set<TIntermNode*> processedDerefs;
std::set<const TIntermNode*> processedDerefs;
};
const int TLiveTraverser::baseAlignmentVec4Std140 = 16;
@ -687,12 +733,12 @@ bool LiveBinary(bool /* preVisit */, TIntermBinary* node, TIntermTraverser* it)
}
// To reflect non-dereferenced objects.
void LiveSymbol(TIntermSymbol* symbol, TIntermTraverser* it)
void LiveSymbol(TIntermSymbol* base, TIntermTraverser* it)
{
TLiveTraverser* oit = static_cast<TLiveTraverser*>(it);
if (symbol->getQualifier().storage == EvqUniform)
oit->addUniform(*symbol);
if (base->getQualifier().storage == EvqUniform)
oit->addUniform(*base);
}
// To prune semantically dead paths.