Web: Remove/rationalize a set of *_EXTENSIONS, using GLSLANG_WEB.

Focus was on the front end (not SPIR-V), minus the grammar.
Reduces #ifdef count by around 320 and makes the web build 270K smaller,
which is about 90% of the target size.

The grammar and scanner will be another step, as will the SPIR-V backend.
This makes heavy use of methods #ifdef'd to return false as a global way
of turning off code, relying on the C++ compiler's dead-code elimination
(DCE) to do the rest.
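
A minimal sketch of that pattern, with illustrative names (a simplified stand-in for the isShaderRecordNV()/lValueErrorCheck changes in the diff below, not the actual glslang declarations): the web build compiles a query method down to a constant false, so call sites need no #ifdef and the compiler's dead-code elimination removes the guarded branch.

// Sketch only: simplified from the getter pattern introduced by this commit.
#include <cstdio>

struct Qualifier {
#ifdef GLSLANG_WEB
    // Web build: the feature is compiled out; a constant false lets DCE
    // drop every guarded code path at the call sites.
    bool isShaderRecordNV() const { return false; }
#else
    bool layoutShaderRecordNV = false;
    bool isShaderRecordNV() const { return layoutShaderRecordNV; }
#endif
};

// The call site stays free of #ifdefs; with GLSLANG_WEB defined, the whole
// branch (and the string it references) folds away at compile time.
void checkWritable(const Qualifier& q)
{
    if (q.isShaderRecordNV())
        std::printf("can't modify a shaderrecordnv qualified buffer\n");
}

int main()
{
    Qualifier q;
    checkWritable(q);
    return 0;
}

Built with -DGLSLANG_WEB, the getter is a compile-time constant, so an optimizing compiler eliminates the branch entirely; without the define it reads the normal member. This is how roughly 320 scattered #ifdefs collapse into a handful of guarded getters.
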
John Kessenich 2019-08-01 03:28:08 -06:00
parent e66dace97e
commit 7015bd658e
32 changed files with 2661 additions and 2712 deletions


@ -441,7 +441,7 @@ spv::Decoration TGlslangToSpvTraverser::TranslateInterpolationDecoration(const g
if (qualifier.smooth)
// Smooth decoration doesn't exist in SPIR-V 1.0
return spv::DecorationMax;
else if (qualifier.nopersp)
else if (qualifier.isNonPerspective())
return spv::DecorationNoPerspective;
else if (qualifier.flat)
return spv::DecorationFlat;
@ -984,7 +984,7 @@ spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TTy
assert(type.getBasicType() == glslang::EbtSampler);
// Check for capabilities
switch (type.getQualifier().layoutFormat) {
switch (type.getQualifier().getFormat()) {
case glslang::ElfRg32f:
case glslang::ElfRg16f:
case glslang::ElfR11fG11fB10f:
@ -1021,7 +1021,7 @@ spv::ImageFormat TGlslangToSpvTraverser::TranslateImageFormat(const glslang::TTy
}
// do the translation
switch (type.getQualifier().layoutFormat) {
switch (type.getQualifier().getFormat()) {
case glslang::ElfNone: return spv::ImageFormatUnknown;
case glslang::ElfRgba32f: return spv::ImageFormatRgba32f;
case glslang::ElfRgba16f: return spv::ImageFormatRgba16f;
@ -1155,7 +1155,7 @@ spv::StorageClass TGlslangToSpvTraverser::TranslateStorageClass(const glslang::T
}
if (type.getQualifier().isUniformOrBuffer()) {
if (type.getQualifier().layoutPushConstant)
if (type.getQualifier().isPushConstant())
return spv::StorageClassPushConstant;
if (type.getBasicType() == glslang::EbtBlock)
return spv::StorageClassUniform;
@ -1230,10 +1230,8 @@ bool IsDescriptorResource(const glslang::TType& type)
// uniform and buffer blocks are included, unless it is a push_constant
if (type.getBasicType() == glslang::EbtBlock)
return type.getQualifier().isUniformOrBuffer() &&
#ifdef NV_EXTENSIONS
! type.getQualifier().layoutShaderRecordNV &&
#endif
! type.getQualifier().layoutPushConstant;
! type.getQualifier().isShaderRecordNV() &&
! type.getQualifier().isPushConstant();
// non block...
// basically samplerXXX/subpass/sampler/texture are all included
@ -1253,9 +1251,9 @@ void InheritQualifiers(glslang::TQualifier& child, const glslang::TQualifier& pa
if (parent.invariant)
child.invariant = true;
#ifndef GLSLANG_WEB
if (parent.nopersp)
child.nopersp = true;
#ifdef AMD_EXTENSIONS
if (parent.explicitInterp)
child.explicitInterp = true;
#endif
@ -1802,7 +1800,7 @@ bool TGlslangToSpvTraverser::visitBinary(glslang::TVisit /* visit */, glslang::T
// Load through a block reference is performed with a dot operator that
// is mapped to EOpIndexDirectStruct. When we get to the actual reference,
// do a load and reset the access chain.
if (node->getLeft()->getBasicType() == glslang::EbtReference &&
if (node->getLeft()->isReference() &&
!node->getLeft()->getType().isArray() &&
node->getOp() == glslang::EOpIndexDirectStruct)
{
@ -3560,7 +3558,7 @@ spv::Id TGlslangToSpvTraverser::convertGlslangStructToSpvType(const glslang::TTy
// Make forward pointers for any pointer members, and create a list of members to
// convert to spirv types after creating the struct.
if (glslangMember.getBasicType() == glslang::EbtReference) {
if (glslangMember.isReference()) {
if (forwardPointers.find(glslangMember.getReferentType()) == forwardPointers.end()) {
deferredForwardPointers.push_back(std::make_pair(&glslangMember, memberQualifier));
}
@ -4093,7 +4091,7 @@ void TGlslangToSpvTraverser::makeFunctions(const glslang::TIntermSequence& glslF
if (paramPrecision != spv::NoPrecision)
decorations.push_back(paramPrecision);
TranslateMemoryDecoration(type.getQualifier(), decorations, useVulkanMemoryModel);
if (type.getBasicType() == glslang::EbtReference) {
if (type.isReference()) {
// Original and non-writable params pass the pointer directly and
// use restrict/aliased, others are stored to a pointer in Function
// memory and use RestrictPointer/AliasedPointer.
@ -7741,6 +7739,7 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
}
if (symbol->getQualifier().hasAttachment())
builder.addDecoration(id, spv::DecorationInputAttachmentIndex, symbol->getQualifier().layoutAttachment);
#ifndef GLSLANG_WEB
if (glslangIntermediate->getXfbMode()) {
builder.addCapability(spv::CapabilityTransformFeedback);
if (symbol->getQualifier().hasXfbBuffer()) {
@ -7752,6 +7751,7 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
if (symbol->getQualifier().hasXfbOffset())
builder.addDecoration(id, spv::DecorationOffset, symbol->getQualifier().layoutXfbOffset);
}
#endif
if (symbol->getType().isImage()) {
std::vector<spv::Decoration> memory;
@ -7815,7 +7815,7 @@ spv::Id TGlslangToSpvTraverser::getSymbolId(const glslang::TIntermSymbol* symbol
symbol->getType().getQualifier().semanticName);
}
if (symbol->getBasicType() == glslang::EbtReference) {
if (symbol->isReference()) {
builder.addDecoration(id, symbol->getType().getQualifier().restrict ? spv::DecorationRestrictPointerEXT : spv::DecorationAliasedPointerEXT);
}


@ -50,12 +50,8 @@ namespace spv {
// Include C-based headers that don't have a namespace
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
@ -98,22 +94,17 @@ const char* ExecutionModelString(int model)
case 4: return "Fragment";
case 5: return "GLCompute";
case 6: return "Kernel";
#ifdef NV_EXTENSIONS
case ExecutionModelTaskNV: return "TaskNV";
case ExecutionModelMeshNV: return "MeshNV";
#endif
default: return "Bad";
#ifdef NV_EXTENSIONS
case ExecutionModelRayGenerationNV: return "RayGenerationNV";
case ExecutionModelIntersectionNV: return "IntersectionNV";
case ExecutionModelAnyHitNV: return "AnyHitNV";
case ExecutionModelClosestHitNV: return "ClosestHitNV";
case ExecutionModelMissNV: return "MissNV";
case ExecutionModelCallableNV: return "CallableNV";
#endif
}
}
@ -183,13 +174,11 @@ const char* ExecutionModeString(int mode)
case 4446: return "PostDepthCoverage";
#ifdef NV_EXTENSIONS
case ExecutionModeOutputLinesNV: return "OutputLinesNV";
case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV";
case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV";
case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV";
case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV";
#endif
case ExecutionModePixelInterlockOrderedEXT: return "PixelInterlockOrderedEXT";
case ExecutionModePixelInterlockUnorderedEXT: return "PixelInterlockUnorderedEXT";
@ -220,14 +209,12 @@ const char* StorageClassString(int StorageClass)
case 11: return "Image";
case 12: return "StorageBuffer";
#ifdef NV_EXTENSIONS
case StorageClassRayPayloadNV: return "RayPayloadNV";
case StorageClassHitAttributeNV: return "HitAttributeNV";
case StorageClassIncomingRayPayloadNV: return "IncomingRayPayloadNV";
case StorageClassShaderRecordBufferNV: return "ShaderRecordBufferNV";
case StorageClassCallableDataNV: return "CallableDataNV";
case StorageClassIncomingCallableDataNV: return "IncomingCallableDataNV";
#endif
case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT";
@ -289,10 +276,7 @@ const char* DecorationString(int decoration)
case DecorationCeiling:
default: return "Bad";
#ifdef AMD_EXTENSIONS
case DecorationExplicitInterpAMD: return "ExplicitInterpAMD";
#endif
#ifdef NV_EXTENSIONS
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
@ -301,7 +285,6 @@ const char* DecorationString(int decoration)
case DecorationPerViewNV: return "PerViewNV";
case DecorationPerTaskNV: return "PerTaskNV";
case DecorationPerVertexNV: return "PerVertexNV";
#endif
case DecorationNonUniformEXT: return "DecorationNonUniformEXT";
case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE";
@ -371,7 +354,6 @@ const char* BuiltInString(int builtIn)
case 4426: return "DrawIndex";
case 5014: return "FragStencilRefEXT";
#ifdef AMD_EXTENSIONS
case 4992: return "BaryCoordNoPerspAMD";
case 4993: return "BaryCoordNoPerspCentroidAMD";
case 4994: return "BaryCoordNoPerspSampleAMD";
@ -379,9 +361,6 @@ const char* BuiltInString(int builtIn)
case 4996: return "BaryCoordSmoothCentroidAMD";
case 4997: return "BaryCoordSmoothSampleAMD";
case 4998: return "BaryCoordPullModelAMD";
#endif
#ifdef NV_EXTENSIONS
case BuiltInLaunchIdNV: return "LaunchIdNV";
case BuiltInLaunchSizeNV: return "LaunchSizeNV";
case BuiltInWorldRayOriginNV: return "WorldRayOriginNV";
@ -405,14 +384,12 @@ const char* BuiltInString(int builtIn)
// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT
case BuiltInBaryCoordNV: return "BaryCoordNV";
case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
#endif
case BuiltInFragSizeEXT: return "FragSizeEXT";
case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT";
case 5264: return "FullyCoveredEXT";
#ifdef NV_EXTENSIONS
case BuiltInTaskCountNV: return "TaskCountNV";
case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
@ -421,7 +398,6 @@ const char* BuiltInString(int builtIn)
case BuiltInLayerPerViewNV: return "LayerPerViewNV";
case BuiltInMeshViewCountNV: return "MeshViewCountNV";
case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
#endif
case BuiltInWarpsPerSMNV: return "WarpsPerSMNV";
case BuiltInSMCountNV: return "SMCountNV";
case BuiltInWarpIDNV: return "WarpIDNV";
@ -780,11 +756,9 @@ const char* GroupOperationString(int gop)
case GroupOperationInclusiveScan: return "InclusiveScan";
case GroupOperationExclusiveScan: return "ExclusiveScan";
case GroupOperationClusteredReduce: return "ClusteredReduce";
#ifdef NV_EXTENSIONS
case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV";
case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV";
case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV";
#endif
default: return "Bad";
}
@ -901,17 +875,14 @@ const char* CapabilityString(int info)
case CapabilityStencilExportEXT: return "StencilExportEXT";
#ifdef AMD_EXTENSIONS
case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
case CapabilityFragmentMaskAMD: return "FragmentMaskAMD";
case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD";
#endif
case CapabilityAtomicStorageOps: return "AtomicStorageOps";
case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage";
#ifdef NV_EXTENSIONS
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
@ -926,7 +897,6 @@ const char* CapabilityString(int info)
case CapabilityImageFootprintNV: return "ImageFootprintNV";
// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by FragmentDensityEXT
case CapabilitySampleMaskOverrideCoverageNV: return "SampleMaskOverrideCoverageNV";
#endif
case CapabilityFragmentDensityEXT: return "FragmentDensityEXT";
case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT";
@ -1336,7 +1306,6 @@ const char* OpcodeString(int op)
case 4430: return "OpSubgroupAllEqualKHR";
case 4432: return "OpSubgroupReadInvocationKHR";
#ifdef AMD_EXTENSIONS
case 5000: return "OpGroupIAddNonUniformAMD";
case 5001: return "OpGroupFAddNonUniformAMD";
case 5002: return "OpGroupFMinNonUniformAMD";
@ -1348,14 +1317,12 @@ const char* OpcodeString(int op)
case 5011: return "OpFragmentMaskFetchAMD";
case 5012: return "OpFragmentFetchAMD";
#endif
case OpReadClockKHR: return "OpReadClockKHR";
case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
#ifdef NV_EXTENSIONS
case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
case OpReportIntersectionNV: return "OpReportIntersectionNV";
case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
@ -1365,7 +1332,6 @@ const char* OpcodeString(int op)
case OpExecuteCallableNV: return "OpExecuteCallableNV";
case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
#endif
case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV";
case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV";
@ -2685,7 +2651,6 @@ void Parameterize()
InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'");
#ifdef AMD_EXTENSIONS
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
@ -2724,9 +2689,7 @@ void Parameterize()
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
#endif
#ifdef NV_EXTENSIONS
InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X");
InstructionDesc[OpTypeAccelerationStructureNV].setResultAndType(true, false);
@ -2764,7 +2727,6 @@ void Parameterize()
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'");
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'");
#endif
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'");


@ -257,7 +257,6 @@ const char* GetBinaryName(EShLanguage stage)
case EShLangGeometry: name = "geom.spv"; break;
case EShLangFragment: name = "frag.spv"; break;
case EShLangCompute: name = "comp.spv"; break;
#ifdef NV_EXTENSIONS
case EShLangRayGenNV: name = "rgen.spv"; break;
case EShLangIntersectNV: name = "rint.spv"; break;
case EShLangAnyHitNV: name = "rahit.spv"; break;
@ -266,7 +265,6 @@ const char* GetBinaryName(EShLanguage stage)
case EShLangCallableNV: name = "rcall.spv"; break;
case EShLangMeshNV: name = "mesh.spv"; break;
case EShLangTaskNV: name = "task.spv"; break;
#endif
default: name = "unknown"; break;
}
} else
@ -1389,7 +1387,6 @@ EShLanguage FindLanguage(const std::string& name, bool parseStageName)
return EShLangFragment;
else if (stageName == "comp")
return EShLangCompute;
#ifdef NV_EXTENSIONS
else if (stageName == "rgen")
return EShLangRayGenNV;
else if (stageName == "rint")
@ -1406,7 +1403,6 @@ EShLanguage FindLanguage(const std::string& name, bool parseStageName)
return EShLangMeshNV;
else if (stageName == "task")
return EShLangTaskNV;
#endif
usage();
return EShLangVertex;
@ -1476,7 +1472,6 @@ void usage()
" .geom for a geometry shader\n"
" .frag for a fragment shader\n"
" .comp for a compute shader\n"
#ifdef NV_EXTENSIONS
" .mesh for a mesh shader\n"
" .task for a task shader\n"
" .rgen for a ray generation shader\n"
@ -1485,7 +1480,6 @@ void usage()
" .rchit for a ray closest hit shader\n"
" .rmiss for a ray miss shader\n"
" .rcall for a ray callable shader\n"
#endif
" .glsl for .vert.glsl, .tesc.glsl, ..., .comp.glsl compound suffixes\n"
" .hlsl for .vert.hlsl, .tesc.hlsl, ..., .comp.hlsl compound suffixes\n"
"\n"


@ -1 +1 @@
1085952 ../build/install/bin/glslangValidator.exe
812032 ../build/install/bin/glslangValidator.exe


@ -61,11 +61,7 @@ enum TBasicType {
EbtSampler,
EbtStruct,
EbtBlock,
#ifdef NV_EXTENSIONS
EbtAccStructNV,
#endif
EbtReference,
// HLSL types that live only temporarily.
@ -94,13 +90,11 @@ enum TStorageQualifier {
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
#ifdef NV_EXTENSIONS
EvqPayloadNV,
EvqPayloadInNV,
EvqHitAttrNV,
EvqCallableDataNV,
EvqCallableDataInNV,
#endif
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
@ -221,7 +215,6 @@ enum TBuiltInVariable {
EbvSampleMask,
EbvHelperInvocation,
#ifdef AMD_EXTENSIONS
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
@ -229,7 +222,6 @@ enum TBuiltInVariable {
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
#endif
EbvViewIndex,
EbvDeviceIndex,
@ -237,7 +229,6 @@ enum TBuiltInVariable {
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
#ifdef NV_EXTENSIONS
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
@ -273,7 +264,6 @@ enum TBuiltInVariable {
EbvLayerPerViewNV,
EbvMeshViewCountNV,
EbvMeshViewIndicesNV,
#endif
// sm builtins
EbvWarpsPerSM,
@ -299,6 +289,19 @@ enum TBuiltInVariable {
EbvLast
};
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
#ifdef GLSLANG_WEB
__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; }
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; }
#else
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
@ -325,13 +328,11 @@ __inline const char* GetStorageQualifierString(TStorageQualifier q)
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
#ifdef NV_EXTENSIONS
case EvqPayloadNV: return "rayPayloadNV"; break;
case EvqPayloadInNV: return "rayPayloadInNV"; break;
case EvqHitAttrNV: return "hitAttributeNV"; break;
case EvqCallableDataNV: return "callableDataNV"; break;
case EvqCallableDataInNV: return "callableDataInNV"; break;
#endif
default: return "unknown qualifier";
}
}
@ -413,7 +414,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
#ifdef AMD_EXTENSIONS
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
@ -421,7 +421,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
#endif
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
@ -429,7 +428,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvFragSizeEXT: return "FragSizeEXT";
case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
#ifdef NV_EXTENSIONS
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
@ -464,7 +462,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvLayerPerViewNV: return "LayerPerViewNV";
case EbvMeshViewCountNV: return "MeshViewCountNV";
case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
#endif
case EbvWarpsPerSM: return "WarpsPerSMNV";
case EbvSMCount: return "SMCountNV";
@ -475,15 +472,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
}
}
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch (p) {
@ -494,6 +482,7 @@ __inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
default: return "unknown precision qualifier";
}
}
#endif
__inline bool isTypeSignedInt(TBasicType type)
{


@ -209,18 +209,18 @@ struct TSampler { // misnomer now; includes images, textures without sampler,
switch (type) {
case EbtFloat: break;
#ifdef AMD_EXTENSIONS
case EbtInt: s.append("i"); break;
case EbtUint: s.append("u"); break;
#ifndef GLSLANG_WEB
case EbtFloat16: s.append("f16"); break;
#endif
case EbtInt8: s.append("i8"); break;
case EbtUint8: s.append("u8"); break;
case EbtInt16: s.append("i16"); break;
case EbtUint16: s.append("u16"); break;
case EbtInt: s.append("i"); break;
case EbtUint: s.append("u"); break;
case EbtInt64: s.append("i64"); break;
case EbtUint64: s.append("u64"); break;
default: break; // some compilers want this
#endif
default: break;
}
if (image) {
if (dim == EsdSubpass)
@ -472,11 +472,9 @@ public:
centroid = false;
smooth = false;
flat = false;
#ifndef GLSLANG_WEB
nopersp = false;
#ifdef AMD_EXTENSIONS
explicitInterp = false;
#endif
#ifdef NV_EXTENSIONS
pervertexNV = false;
perPrimitiveNV = false;
perViewNV = false;
@ -523,11 +521,9 @@ public:
bool centroid : 1;
bool smooth : 1;
bool flat : 1;
#ifndef GLSLANG_WEB
bool nopersp : 1;
#ifdef AMD_EXTENSIONS
bool explicitInterp : 1;
#endif
#ifdef NV_EXTENSIONS
bool pervertexNV : 1;
bool perPrimitiveNV : 1;
bool perViewNV : 1;
@ -558,20 +554,24 @@ public:
}
bool bufferReferenceNeedsVulkanMemoryModel() const
{
#ifdef GLSLANG_WEB
return false;
#else
// include qualifiers that map to load/store availability/visibility/nonprivate memory access operands
return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || nonprivate;
#endif
}
bool isInterpolation() const
{
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
return flat || smooth || nopersp || explicitInterp;
#else
return flat || smooth || nopersp;
return flat || smooth;
#endif
}
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
bool isExplicitInterpolation() const
{
return explicitInterp;
@ -580,10 +580,10 @@ public:
bool isAuxiliary() const
{
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
return centroid || patch || sample || pervertexNV;
#else
return centroid || patch || sample;
return centroid;
#endif
}
@ -651,33 +651,6 @@ public:
}
}
bool isPerPrimitive() const
{
#ifdef NV_EXTENSIONS
return perPrimitiveNV;
#else
return false;
#endif
}
bool isPerView() const
{
#ifdef NV_EXTENSIONS
return perViewNV;
#else
return false;
#endif
}
bool isTaskMemory() const
{
#ifdef NV_EXTENSIONS
return perTaskNV;
#else
return false;
#endif
}
bool isIo() const
{
switch (storage) {
@ -717,6 +690,15 @@ public:
}
}
#ifdef GLSLANG_WEB
bool isPerView() const { return false; }
bool isTaskMemory() const { return false; }
bool isArrayedIo(EShLanguage language) const { return false; }
#else
bool isPerPrimitive() const { return perPrimitiveNV; }
bool isPerView() const { return perViewNV; }
bool isTaskMemory() const { return perTaskNV; }
// True if this type of IO is supposed to be arrayed with extra level for per-vertex data
bool isArrayedIo(EShLanguage language) const
{
@ -727,40 +709,37 @@ public:
return ! patch && (isPipeInput() || isPipeOutput());
case EShLangTessEvaluation:
return ! patch && isPipeInput();
#ifdef NV_EXTENSIONS
case EShLangFragment:
return pervertexNV && isPipeInput();
case EShLangMeshNV:
return ! perTaskNV && isPipeOutput();
#endif
default:
return false;
}
}
#endif
// Implementing an embedded layout-qualifier class here, since C++ can't have a real class bitfield
void clearLayout() // all layout
{
clearUniformLayout();
#ifndef GLSLANG_WEB
layoutPushConstant = false;
layoutBufferReference = false;
#ifdef NV_EXTENSIONS
layoutPassthrough = false;
layoutViewportRelative = false;
// -2048 as the default value indicating layoutSecondaryViewportRelative is not set
layoutSecondaryViewportRelativeOffset = -2048;
layoutShaderRecordNV = false;
#endif
layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd;
layoutFormat = ElfNone;
#endif
clearInterstageLayout();
layoutSpecConstantId = layoutSpecConstantIdEnd;
layoutFormat = ElfNone;
}
void clearInterstageLayout()
{
@ -787,11 +766,9 @@ public:
hasAnyLocation() ||
hasStream() ||
hasFormat() ||
#ifdef NV_EXTENSIONS
layoutShaderRecordNV ||
#endif
layoutPushConstant ||
layoutBufferReference;
isShaderRecordNV() ||
isPushConstant() ||
hasBufferReference();
}
bool hasLayout() const
{
@ -836,6 +813,7 @@ public:
unsigned int layoutSpecConstantId : 11;
static const unsigned int layoutSpecConstantIdEnd = 0x7FF;
#ifndef GLSLANG_WEB
// stored as log2 of the actual alignment value
unsigned int layoutBufferReferenceAlign : 6;
static const unsigned int layoutBufferReferenceAlignEnd = 0x3F;
@ -844,8 +822,6 @@ public:
bool layoutPushConstant;
bool layoutBufferReference;
#ifdef NV_EXTENSIONS
bool layoutPassthrough;
bool layoutViewportRelative;
int layoutSecondaryViewportRelativeOffset;
@ -899,14 +875,6 @@ public:
{
return layoutLocation != layoutLocationEnd;
}
bool hasComponent() const
{
return layoutComponent != layoutComponentEnd;
}
bool hasIndex() const
{
return layoutIndex != layoutIndexEnd;
}
bool hasSet() const
{
return layoutSet != layoutSetEnd;
@ -915,6 +883,32 @@ public:
{
return layoutBinding != layoutBindingEnd;
}
#ifdef GLSLANG_WEB
bool isNonPerspective() const { return false; }
bool hasIndex() const { return false; }
bool hasComponent() const { return false; }
bool hasStream() const { return false; }
bool hasFormat() const { return false; }
bool hasXfb() const { return false; }
bool hasXfbBuffer() const { return false; }
bool hasXfbStride() const { return false; }
bool hasXfbOffset() const { return false; }
bool hasAttachment() const { return false; }
TLayoutFormat getFormat() const { return ElfNone; }
bool isPushConstant() const { return false; }
bool isShaderRecordNV() const { return false; }
bool hasBufferReference() const { return false; }
bool hasBufferReferenceAlign() const { return false; }
#else
bool isNonPerspective() const { return nopersp; }
bool hasIndex() const
{
return layoutIndex != layoutIndexEnd;
}
bool hasComponent() const
{
return layoutComponent != layoutComponentEnd;
}
bool hasStream() const
{
return layoutStream != layoutStreamEnd;
@ -945,16 +939,21 @@ public:
{
return layoutAttachment != layoutAttachmentEnd;
}
TLayoutFormat getFormat() const { return layoutFormat; }
bool isPushConstant() const { return layoutPushConstant; }
bool isShaderRecordNV() const { return layoutShaderRecordNV; }
bool hasBufferReference() const { return layoutBufferReference; }
bool hasBufferReferenceAlign() const
{
return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd;
}
#endif
bool hasSpecConstantId() const
{
// Not the same thing as being a specialization constant, this
// is just whether or not it was declared with an ID.
return layoutSpecConstantId != layoutSpecConstantIdEnd;
}
bool hasBufferReferenceAlign() const
{
return layoutBufferReferenceAlign != layoutBufferReferenceAlignEnd;
}
bool isSpecConstant() const
{
// True if type is a specialization constant, whether or not it
@ -1150,18 +1149,20 @@ struct TShaderQualifiers {
bool pointMode;
int localSize[3]; // compute shader
int localSizeSpecId[3]; // compute shader specialization id for gl_WorkGroupSize
#ifndef GLSLANG_WEB
bool earlyFragmentTests; // fragment input
bool postDepthCoverage; // fragment input
TLayoutDepth layoutDepth;
bool blendEquation; // true if any blend equation was specified
int numViews; // multiview extensions
TInterlockOrdering interlockOrdering;
#ifdef NV_EXTENSIONS
bool layoutOverrideCoverage; // true if layout override_coverage set
bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set
bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set
int primitives; // mesh shader "max_primitives"
TLayoutDepth getDepth() const { return layoutDepth; }
#else
TLayoutDepth getDepth() const { return EldNone; }
#endif
void init()
@ -1180,20 +1181,26 @@ struct TShaderQualifiers {
localSizeSpecId[0] = TQualifier::layoutNotSet;
localSizeSpecId[1] = TQualifier::layoutNotSet;
localSizeSpecId[2] = TQualifier::layoutNotSet;
#ifndef GLSLANG_WEB
earlyFragmentTests = false;
postDepthCoverage = false;
layoutDepth = EldNone;
blendEquation = false;
numViews = TQualifier::layoutNotSet;
#ifdef NV_EXTENSIONS
layoutOverrideCoverage = false;
layoutDerivativeGroupQuads = false;
layoutDerivativeGroupLinear = false;
primitives = TQualifier::layoutNotSet;
#endif
interlockOrdering = EioNone;
#endif
}
#ifdef GLSLANG_WEB
bool hasBlendEquation() const { return false; }
#else
bool hasBlendEquation() const { return blendEquation; }
#endif
// Merge in characteristics from the 'src' qualifier. They can override when
// set, but never erase when not set.
void merge(const TShaderQualifiers& src)
@ -1222,6 +1229,7 @@ struct TShaderQualifiers {
if (src.localSizeSpecId[i] != TQualifier::layoutNotSet)
localSizeSpecId[i] = src.localSizeSpecId[i];
}
#ifndef GLSLANG_WEB
if (src.earlyFragmentTests)
earlyFragmentTests = true;
if (src.postDepthCoverage)
@ -1232,7 +1240,6 @@ struct TShaderQualifiers {
blendEquation = src.blendEquation;
if (src.numViews != TQualifier::layoutNotSet)
numViews = src.numViews;
#ifdef NV_EXTENSIONS
if (src.layoutOverrideCoverage)
layoutOverrideCoverage = src.layoutOverrideCoverage;
if (src.layoutDerivativeGroupQuads)
@ -1241,10 +1248,9 @@ struct TShaderQualifiers {
layoutDerivativeGroupLinear = src.layoutDerivativeGroupLinear;
if (src.primitives != TQualifier::layoutNotSet)
primitives = src.primitives;
#endif
if (src.interlockOrdering != EioNone)
interlockOrdering = src.interlockOrdering;
#endif
}
};
@ -1580,9 +1586,9 @@ public:
}
return false;
}
virtual bool isOpaque() const { return basicType == EbtSampler || basicType == EbtAtomicUint
#ifdef NV_EXTENSIONS
|| basicType == EbtAccStructNV
virtual bool isOpaque() const { return basicType == EbtSampler
#ifndef GLSLANG_WEB
|| basicType == EbtAtomicUint || basicType == EbtAccStructNV
#endif
; }
virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; }
@ -1592,7 +1598,13 @@ public:
virtual bool isSubpass() const { return basicType == EbtSampler && getSampler().isSubpass(); }
virtual bool isTexture() const { return basicType == EbtSampler && getSampler().isTexture(); }
virtual bool isParameterized() const { return typeParameters != nullptr; }
#ifdef GLSLANG_WEB
virtual bool isCoopMat() const { return false; }
virtual bool isReference() const { return false; }
#else
virtual bool isCoopMat() const { return coopmat; }
virtual bool isReference() const { return getBasicType() == EbtReference; }
#endif
// return true if this type contains any subtype which satisfies the given predicate.
template <typename P>
@ -1673,20 +1685,29 @@ public:
return contains([](const TType* t) { return t->isArray() && t->arraySizes->isOuterSpecialization(); } );
}
#ifdef GLSLANG_WEB
virtual bool contains16BitFloat() const { return false; }
virtual bool contains16BitInt() const { return false; }
virtual bool contains8BitInt() const { return false; }
virtual bool containsCoopMat() const { return false; }
#else
virtual bool contains16BitFloat() const
{
return containsBasicType(EbtFloat16);
}
virtual bool contains16BitInt() const
{
return containsBasicType(EbtInt16) || containsBasicType(EbtUint16);
}
virtual bool contains8BitInt() const
{
return containsBasicType(EbtInt8) || containsBasicType(EbtUint8);
}
virtual bool containsCoopMat() const
{
return contains([](const TType* t) { return t->coopmat; } );
}
#endif
// Array editing methods. Array descriptors can be shared across
// type instances. This allows all uses of the same array
@ -1746,11 +1767,9 @@ public:
{
if (isUnsizedArray() && !(skipNonvariablyIndexed || isArrayVariablyIndexed()))
changeOuterArraySize(getImplicitArraySize());
#ifdef NV_EXTENSIONS
// For multi-dim per-view arrays, set unsized inner dimension size to 1
if (qualifier.isPerView() && arraySizes && arraySizes->isInnerUnsized())
arraySizes->clearInnerUnsized();
#endif
if (isStruct() && structure->size() > 0) {
int lastMember = (int)structure->size() - 1;
for (int i = 0; i < lastMember; ++i)
@ -1808,16 +1827,17 @@ public:
static const char* getBasicString(TBasicType t)
{
switch (t) {
case EbtVoid: return "void";
case EbtFloat: return "float";
case EbtInt: return "int";
case EbtUint: return "uint";
#ifndef GLSLANG_WEB
case EbtVoid: return "void";
case EbtDouble: return "double";
case EbtFloat16: return "float16_t";
case EbtInt8: return "int8_t";
case EbtUint8: return "uint8_t";
case EbtInt16: return "int16_t";
case EbtUint16: return "uint16_t";
case EbtInt: return "int";
case EbtUint: return "uint";
case EbtInt64: return "int64_t";
case EbtUint64: return "uint64_t";
case EbtBool: return "bool";
@ -1825,14 +1845,20 @@ public:
case EbtSampler: return "sampler/image";
case EbtStruct: return "structure";
case EbtBlock: return "block";
#ifdef NV_EXTENSIONS
case EbtAccStructNV: return "accelerationStructureNV";
#endif
case EbtReference: return "reference";
#endif
default: return "unknown type";
}
}
#ifdef GLSLANG_WEB
TString getCompleteString() const { return ""; }
const char* getStorageQualifierString() const { return ""; }
const char* getBuiltInVariableString() const { return ""; }
const char* getPrecisionQualifierString() const { return ""; }
TString getBasicTypeString() const { return ""; }
#else
TString getCompleteString() const
{
TString typeString;
@ -1921,7 +1947,6 @@ public:
appendUint(1u << qualifier.layoutBufferReferenceAlign);
}
#ifdef NV_EXTENSIONS
if (qualifier.layoutPassthrough)
appendStr(" passthrough");
if (qualifier.layoutViewportRelative)
@ -1932,7 +1957,6 @@ public:
}
if (qualifier.layoutShaderRecordNV)
appendStr(" shaderRecordNV");
#endif
appendStr(")");
}
@ -1950,11 +1974,8 @@ public:
appendStr(" flat");
if (qualifier.nopersp)
appendStr(" noperspective");
#ifdef AMD_EXTENSIONS
if (qualifier.explicitInterp)
appendStr(" __explicitInterpAMD");
#endif
#ifdef NV_EXTENSIONS
if (qualifier.pervertexNV)
appendStr(" pervertexNV");
if (qualifier.perPrimitiveNV)
@ -1963,7 +1984,6 @@ public:
appendStr(" perviewNV");
if (qualifier.perTaskNV)
appendStr(" taskNV");
#endif
if (qualifier.patch)
appendStr(" patch");
if (qualifier.sample)
@ -2078,6 +2098,8 @@ public:
const char* getStorageQualifierString() const { return GetStorageQualifierString(qualifier.storage); }
const char* getBuiltInVariableString() const { return GetBuiltInVariableString(qualifier.builtIn); }
const char* getPrecisionQualifierString() const { return GetPrecisionQualifierString(qualifier.precision); }
#endif
const TTypeList* getStruct() const { assert(isStruct()); return structure; }
void setStruct(TTypeList* s) { assert(isStruct()); structure = s; }
TTypeList* getWritableStruct() const { assert(isStruct()); return structure; } // This should only be used when known to not be sharing with other threads
@ -2146,10 +2168,10 @@ public:
bool sameReferenceType(const TType& right) const
{
if ((basicType == EbtReference) != (right.basicType == EbtReference))
if (isReference() != right.isReference())
return false;
if ((basicType != EbtReference) && (right.basicType != EbtReference))
if (!isReference() && !right.isReference())
return true;
assert(referentType != nullptr);
@ -2220,6 +2242,9 @@ public:
return ! operator==(right);
}
#ifdef GLSLANG_WEB
unsigned int getBufferReferenceAlignment() const { return 0; }
#else
unsigned int getBufferReferenceAlignment() const
{
if (getBasicType() == glslang::EbtReference) {
@ -2229,6 +2254,7 @@ public:
return 0;
}
}
#endif
protected:
// Require consumer to pick between deep copy and shallow copy.


@ -422,11 +422,9 @@ enum TOperator {
EOpReflect,
EOpRefract,
#ifdef AMD_EXTENSIONS
EOpMin3,
EOpMax3,
EOpMid3,
#endif
EOpDPdx, // Fragment only
EOpDPdy, // Fragment only
@ -441,10 +439,7 @@ enum TOperator {
EOpInterpolateAtCentroid, // Fragment only
EOpInterpolateAtSample, // Fragment only
EOpInterpolateAtOffset, // Fragment only
#ifdef AMD_EXTENSIONS
EOpInterpolateAtVertex,
#endif
EOpMatrixTimesMatrix,
EOpOuterProduct,
@ -534,7 +529,6 @@ enum TOperator {
EOpSubgroupQuadSwapVertical,
EOpSubgroupQuadSwapDiagonal,
#ifdef NV_EXTENSIONS
EOpSubgroupPartition,
EOpSubgroupPartitionedAdd,
EOpSubgroupPartitionedMul,
@ -557,11 +551,9 @@ enum TOperator {
EOpSubgroupPartitionedExclusiveAnd,
EOpSubgroupPartitionedExclusiveOr,
EOpSubgroupPartitionedExclusiveXor,
#endif
EOpSubgroupGuardStop,
#ifdef AMD_EXTENSIONS
EOpMinInvocations,
EOpMaxInvocations,
EOpAddInvocations,
@ -588,7 +580,6 @@ enum TOperator {
EOpCubeFaceIndex,
EOpCubeFaceCoord,
EOpTime,
#endif
EOpAtomicAdd,
EOpAtomicMin,
@ -795,10 +786,8 @@ enum TOperator {
EOpImageQuerySamples,
EOpImageLoad,
EOpImageStore,
#ifdef AMD_EXTENSIONS
EOpImageLoadLod,
EOpImageStoreLod,
#endif
EOpImageAtomicAdd,
EOpImageAtomicMin,
EOpImageAtomicMax,
@ -813,9 +802,7 @@ enum TOperator {
EOpSubpassLoad,
EOpSubpassLoadMS,
EOpSparseImageLoad,
#ifdef AMD_EXTENSIONS
EOpSparseImageLoadLod,
#endif
EOpImageGuardEnd,
@ -853,13 +840,11 @@ enum TOperator {
EOpTextureOffsetClamp,
EOpTextureGradClamp,
EOpTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
EOpTextureGatherLod,
EOpTextureGatherLodOffset,
EOpTextureGatherLodOffsets,
EOpFragmentMaskFetch,
EOpFragmentFetch,
#endif
EOpSparseTextureGuardBegin,
@ -879,15 +864,12 @@ enum TOperator {
EOpSparseTextureOffsetClamp,
EOpSparseTextureGradClamp,
EOpSparseTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
EOpSparseTextureGatherLod,
EOpSparseTextureGatherLodOffset,
EOpSparseTextureGatherLodOffsets,
#endif
EOpSparseTextureGuardEnd,
#ifdef NV_EXTENSIONS
EOpImageFootprintGuardBegin,
EOpImageSampleFootprintNV,
EOpImageSampleFootprintClampNV,
@ -895,7 +877,6 @@ enum TOperator {
EOpImageSampleFootprintGradNV,
EOpImageSampleFootprintGradClampNV,
EOpImageFootprintGuardEnd,
#endif
EOpSamplingGuardEnd,
EOpTextureGuardEnd,
@ -914,14 +895,12 @@ enum TOperator {
EOpFindLSB,
EOpFindMSB,
#ifdef NV_EXTENSIONS
EOpTraceNV,
EOpReportIntersectionNV,
EOpIgnoreIntersectionNV,
EOpTerminateRayNV,
EOpExecuteCallableNV,
EOpWritePackedPrimitiveIndices4x8NV,
#endif
//
// HLSL operations
//
@ -1110,6 +1089,7 @@ public:
virtual bool isStruct() const { return type.isStruct(); }
virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
virtual bool isReference() const { return type.isReference(); }
TString getCompleteString() const { return type.getCompleteString(); }
protected:
@ -1303,9 +1283,7 @@ struct TCrackedTextureOp {
bool grad;
bool subpass;
bool lodClamp;
#ifdef AMD_EXTENSIONS
bool fragMask;
#endif
};
//
@ -1323,9 +1301,7 @@ public:
bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
#ifdef NV_EXTENSIONS
bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
#endif
bool isSparseImage() const { return op == EOpSparseImageLoad; }
void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
@ -1356,9 +1332,7 @@ public:
cracked.grad = false;
cracked.subpass = false;
cracked.lodClamp = false;
#ifdef AMD_EXTENSIONS
cracked.fragMask = false;
#endif
switch (op) {
case EOpImageQuerySize:
@ -1467,7 +1441,6 @@ public:
cracked.gather = true;
cracked.offsets = true;
break;
#ifdef AMD_EXTENSIONS
case EOpTextureGatherLod:
case EOpSparseTextureGatherLod:
cracked.gather = true;
@ -1498,8 +1471,6 @@ public:
cracked.subpass = sampler.dim == EsdSubpass;
cracked.fragMask = true;
break;
#endif
#ifdef NV_EXTENSIONS
case EOpImageSampleFootprintNV:
break;
case EOpImageSampleFootprintClampNV:
@ -1515,7 +1486,6 @@ public:
cracked.lodClamp = true;
cracked.grad = true;
break;
#endif
case EOpSubpassLoad:
case EOpSubpassLoadMS:
cracked.subpass = true;


@ -292,13 +292,12 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
newConstArray[i].setIConst(0);
break;
} else goto modulo_default;
#ifndef GLSLANG_WEB
case EbtInt64:
if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) {
newConstArray[i].setI64Const(0);
break;
} else goto modulo_default;
#ifdef AMD_EXTENSIONS
case EbtInt16:
if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) {
newConstArray[i].setIConst(0);

File diff suppressed because it is too large.


@ -123,12 +123,12 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) {
// No addressing math on struct with unsized array.
if ((left->getBasicType() == EbtReference && left->getType().getReferentType()->containsUnsizedArray()) ||
(right->getBasicType() == EbtReference && right->getType().getReferentType()->containsUnsizedArray())) {
if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) ||
(right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) {
return nullptr;
}
if (left->getBasicType() == EbtReference && isTypeInt(right->getBasicType())) {
if (left->isReference() && isTypeInt(right->getBasicType())) {
const TType& referenceType = left->getType();
TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
@ -141,7 +141,7 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
return node;
}
if (op == EOpAdd && right->getBasicType() == EbtReference && isTypeInt(left->getBasicType())) {
if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) {
const TType& referenceType = right->getType();
TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
@ -154,7 +154,7 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
return node;
}
if (op == EOpSub && left->getBasicType() == EbtReference && right->getBasicType() == EbtReference) {
if (op == EOpSub && left->isReference() && right->isReference()) {
TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
@ -170,7 +170,7 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
}
// No other math operators supported on references
if (left->getBasicType() == EbtReference || right->getBasicType() == EbtReference) {
if (left->isReference() || right->isReference()) {
return nullptr;
}
}
@ -290,7 +290,7 @@ TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TInterm
// Convert "reference += int" to "reference = reference + int". We need this because the
// "reference + int" calculation involves a cast back to the original type, which makes it
// not an lvalue.
if ((op == EOpAddAssign || op == EOpSubAssign) && left->getBasicType() == EbtReference &&
if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference() &&
extensionRequested(E_GL_EXT_buffer_reference2)) {
if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
@ -536,9 +536,7 @@ bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
return false;
case EbtAtomicUint:
case EbtSampler:
#ifdef NV_EXTENSIONS
case EbtAccStructNV:
#endif
// opaque types can be passed to functions
if (op == EOpFunction)
break;
@ -1110,7 +1108,7 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
case EOpConstructStruct:
case EOpConstructCooperativeMatrix:
if (type.getBasicType() == EbtReference || node->getType().getBasicType() == EbtReference) {
if (type.isReference() || node->getType().isReference()) {
// types must match to assign a reference
if (type == node->getType())
return node;
@ -1670,7 +1668,7 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtFloat:
case EbtDouble:
return true;
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
@ -1688,17 +1686,15 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
return true;
case EbtBool:
return (getSource() == EShSourceHlsl);
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
#endif
case EbtFloat16:
return
#ifdef AMD_EXTENSIONS
extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
#endif
getSource() == EShSourceHlsl;
#endif
default:
return false;
}
@ -1710,7 +1706,7 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
return true;
case EbtBool:
return getSource() == EShSourceHlsl;
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
@ -1724,7 +1720,7 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
return true;
case EbtBool:
return getSource() == EShSourceHlsl;
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtInt16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
#endif
@ -1738,7 +1734,7 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtInt64:
case EbtUint64:
return true;
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
@ -1751,15 +1747,15 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtInt:
case EbtInt64:
return true;
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtInt16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
#endif
default:
return false;
}
#ifndef GLSLANG_WEB
case EbtFloat16:
#ifdef AMD_EXTENSIONS
switch (from) {
case EbtInt16:
case EbtUint16:
@ -1769,10 +1765,8 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
default:
break;
}
#endif
return false;
case EbtUint16:
#ifdef AMD_EXTENSIONS
switch (from) {
case EbtInt16:
case EbtUint16:
@ -1780,8 +1774,8 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
default:
break;
}
#endif
return false;
#endif
default:
return false;
}


@ -152,12 +152,10 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EvqBuffer:
if (node->getQualifier().readonly)
message = "can't modify a readonly buffer";
#ifdef NV_EXTENSIONS
if (node->getQualifier().layoutShaderRecordNV)
if (node->getQualifier().isShaderRecordNV())
message = "can't modify a shaderrecordnv qualified buffer";
#endif
break;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
case EvqHitAttrNV:
if (language != EShLangIntersectNV)
message = "cannot modify hitAttributeNV in this stage";
@ -178,7 +176,7 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EbtVoid:
message = "can't modify void";
break;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtAccStructNV:
message = "can't modify accelerationStructureNV";
break;

File diff suppressed because it is too large.


@ -299,10 +299,12 @@ public:
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
#ifndef GLSLANG_WEB
void makeEditable(TSymbol*&) override;
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
#endif
bool isIoResizeArray(const TType&) const;
void fixIoArraySize(const TSourceLoc&, TType&);
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
@ -443,7 +445,9 @@ protected:
bool isRuntimeLength(const TIntermTyped&) const;
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
#ifndef GLSLANG_WEB
void finish() override;
#endif
public:
//


@ -1645,7 +1645,7 @@ int TScanContext::identifierOrType()
if (const TVariable* variable = parserToken->sType.lex.symbol->getAsVariable()) {
if (variable->isUserType() &&
// treat redeclaration of forward-declared buffer/uniform reference as an identifier
!(variable->getType().getBasicType() == EbtReference && afterBuffer)) {
!(variable->getType().isReference() && afterBuffer)) {
afterType = true;
return TYPE_NAME;


@ -326,6 +326,7 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source,
infoSink, commonTable, symbolTables);
#ifndef GLSLANG_WEB
// check for tessellation
if ((profile != EEsProfile && version >= 150) ||
(profile == EEsProfile && version >= 310)) {
@ -347,7 +348,6 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source,
infoSink, commonTable, symbolTables);
#ifdef NV_EXTENSIONS
// check for ray tracing stages
if (profile != EEsProfile && version >= 450) {
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGenNV, source,
@ -581,6 +581,7 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
break;
}
#ifndef GLSLANG_WEB
// Correct for stage type...
switch (stage) {
case EShLangGeometry:
@ -612,7 +613,6 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
version = profile == EEsProfile ? 310 : 420;
}
break;
#ifdef NV_EXTENSIONS
case EShLangRayGenNV:
case EShLangIntersectNV:
case EShLangAnyHitNV:
@ -633,10 +633,10 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above");
version = profile == EEsProfile ? 320 : 450;
}
#endif
default:
break;
}
#endif
if (profile == EEsProfile && version >= 300 && versionNotFirst) {
correct = false;


@ -61,24 +61,24 @@ void TType::buildMangledName(TString& mangledName) const
switch (basicType) {
case EbtFloat: mangledName += 'f'; break;
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt: mangledName += 'i'; break;
case EbtUint: mangledName += 'u'; break;
case EbtBool: mangledName += 'b'; break;
#ifndef GLSLANG_WEB
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt8: mangledName += "i8"; break;
case EbtUint8: mangledName += "u8"; break;
case EbtInt16: mangledName += "i16"; break;
case EbtUint16: mangledName += "u16"; break;
case EbtInt64: mangledName += "i64"; break;
case EbtUint64: mangledName += "u64"; break;
case EbtBool: mangledName += 'b'; break;
case EbtAtomicUint: mangledName += "au"; break;
#ifdef NV_EXTENSIONS
case EbtAccStructNV: mangledName += "asnv"; break;
#endif
case EbtSampler:
switch (sampler.type) {
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtFloat16: mangledName += "f16"; break;
#endif
case EbtInt: mangledName += "i"; break;


@ -145,6 +145,8 @@
namespace glslang {
#ifndef GLSLANG_WEB
//
// Initialize all extensions, almost always to 'disable', as once their features
// are incorporated into a core version, their features are supported through allowing that
@ -221,7 +223,6 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable;
extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable;
#ifdef AMD_EXTENSIONS
extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable;
@ -232,9 +233,7 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable;
extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable;
#endif
#ifdef NV_EXTENSIONS
extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable;
extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable;
extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable;
@ -250,7 +249,6 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable;
extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable;
extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable;
#endif
extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable;
extensionBehavior[E_GL_NV_shader_sm_builtins] = EBhDisable;
@ -302,6 +300,7 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable;
}
#endif // GLSLANG_WEB
// Get code that is not part of a shared symbol table, is specific to this shader,
// or needed by the preprocessor (which does not use a shared symbol table).
@ -311,6 +310,9 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble =
"#define GL_ES 1\n"
"#define GL_FRAGMENT_PRECISION_HIGH 1\n"
#ifdef GLSLANG_WEB
;
#else
"#define GL_OES_texture_3D 1\n"
"#define GL_OES_standard_derivatives 1\n"
"#define GL_EXT_frag_depth 1\n"
@ -350,11 +352,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_shader_non_constant_global_initializers 1\n"
;
#ifdef NV_EXTENSIONS
if (profile == EEsProfile && version >= 300) {
preamble += "#define GL_NV_shader_noperspective_interpolation 1\n";
}
#endif
} else {
preamble =
@ -412,7 +412,6 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define E_GL_EXT_shader_atomic_int64 1\n"
"#define E_GL_EXT_shader_realtime_clock 1\n"
#ifdef AMD_EXTENSIONS
"#define GL_AMD_shader_ballot 1\n"
"#define GL_AMD_shader_trinary_minmax 1\n"
"#define GL_AMD_shader_explicit_vertex_parameter 1\n"
@ -423,9 +422,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_AMD_shader_image_load_store_lod 1\n"
"#define GL_AMD_shader_fragment_mask 1\n"
"#define GL_AMD_gpu_shader_half_float_fetch 1\n"
#endif
#ifdef NV_EXTENSIONS
"#define GL_NV_sample_mask_override_coverage 1\n"
"#define GL_NV_geometry_shader_passthrough 1\n"
"#define GL_NV_viewport_array2 1\n"
@ -438,7 +435,6 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_NV_compute_shader_derivatives 1\n"
"#define GL_NV_shader_texture_footprint 1\n"
"#define GL_NV_mesh_shader 1\n"
#endif
"#define GL_NV_cooperative_matrix 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types 1\n"
@ -458,8 +454,10 @@ void TParseVersions::getPreamble(std::string& preamble)
if (profile == ECompatibilityProfile)
preamble += "#define GL_compatibility_profile 1\n";
}
#endif // GLSLANG_WEB
}
#ifndef GLSLANG_WEB
if ((profile != EEsProfile && version >= 140) ||
(profile == EEsProfile && version >= 310)) {
preamble +=
@ -481,6 +479,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_GOOGLE_cpp_style_line_directive 1\n"
"#define GL_GOOGLE_include_directive 1\n"
;
#endif
// #define VULKAN XXXX
const int numberBufSize = 12;
@ -491,6 +490,8 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += numberBuf;
preamble += "\n";
}
#ifndef GLSLANG_WEB
// #define GL_SPIRV XXXX
if (spvVersion.openGl > 0) {
preamble += "#define GL_SPIRV ";
@ -498,7 +499,7 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += numberBuf;
preamble += "\n";
}
#endif
}
//
@ -528,7 +529,6 @@ const char* StageName(EShLanguage stage)
case EShLangGeometry: return "geometry";
case EShLangFragment: return "fragment";
case EShLangCompute: return "compute";
#ifdef NV_EXTENSIONS
case EShLangRayGenNV: return "ray-generation";
case EShLangIntersectNV: return "intersection";
case EShLangAnyHitNV: return "any-hit";
@ -537,7 +537,6 @@ const char* StageName(EShLanguage stage)
case EShLangCallableNV: return "callable";
case EShLangMeshNV: return "mesh";
case EShLangTaskNV: return "task";
#endif
default: return "unknown stage";
}
}
@ -609,6 +608,13 @@ void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, cons
requireStage(loc, static_cast<EShLanguageMask>(1 << stage), featureDesc);
}
#ifndef GLSLANG_WEB
void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
{
error(loc, "feature not yet implemented", featureDesc, "");
}
//
// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether
// a future compatibility context is being used.
@ -642,11 +648,6 @@ void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, i
}
}
void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
{
error(loc, "feature not yet implemented", featureDesc, "");
}
// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false.
// Warns appropriately if the requested behavior of an extension is "warn".
bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
@ -815,10 +816,8 @@ void TParseVersions::updateExtensionBehavior(int line, const char* extension, co
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#ifdef NV_EXTENSIONS
else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#endif
else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0)
updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString);
}
@ -866,7 +865,6 @@ void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBe
// Check if extension is used with correct shader stage.
void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension)
{
#ifdef NV_EXTENSIONS
// GL_NV_mesh_shader extension is only allowed in task/mesh shaders
if (strcmp(extension, "GL_NV_mesh_shader") == 0) {
requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask),
@ -874,7 +872,6 @@ void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * con
profileRequires(loc, ECoreProfile, 450, 0, "#extension GL_NV_mesh_shader");
profileRequires(loc, EEsProfile, 320, 0, "#extension GL_NV_mesh_shader");
}
#endif
}
// Call for any operation needing full GLSL integer data-type support.
@ -896,9 +893,7 @@ void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool bu
{
if (!builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
@ -908,9 +903,7 @@ void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool bu
bool TParseVersions::float16Arithmetic()
{
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
@ -919,9 +912,7 @@ bool TParseVersions::float16Arithmetic()
bool TParseVersions::int16Arithmetic()
{
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
@ -943,9 +934,7 @@ void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char*
combined += featureDesc;
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
@ -959,9 +948,7 @@ void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* o
combined += featureDesc;
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
@ -984,9 +971,7 @@ void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char*
{
if (!builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_16bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
@ -1026,7 +1011,6 @@ void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bo
}
}
#ifdef AMD_EXTENSIONS
// Call for any operation needing GLSL float16 opaque-type support
void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
@ -1036,16 +1020,13 @@ void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, b
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
}
}
#endif
// Call for any operation needing GLSL explicit int16 data-type support.
void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
@ -1056,9 +1037,7 @@ void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* o
{
if (! builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_16bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
@ -1107,6 +1086,7 @@ void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool b
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
#endif // GLSLANG_WEB
// Call for any operation removed because SPIR-V is in use.
void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op)

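The feature checks in this file (float16Check, int16ScalarVectorCheck, and the rest) all follow one idiom: build a local array of candidate extension-name strings and hand it, with a sizeof-derived count, to a single require/turned-on helper. A minimal standalone sketch of that idiom follows; the names (enabledExtensions, anyExtensionEnabled, float16ArithmeticAllowed) are hypothetical and not the actual glslang API.

```cpp
#include <set>
#include <string>

// Hypothetical stand-in for the set of extensions the shader has enabled via #extension.
static const std::set<std::string> enabledExtensions = {
    "GL_EXT_shader_explicit_arithmetic_types"
};

// Returns true if at least one of the listed extensions is enabled, mirroring the
// "local array + sizeof-derived count" pattern used by the checks above.
static bool anyExtensionEnabled(int numExtensions, const char* const extensions[])
{
    for (int i = 0; i < numExtensions; ++i)
        if (enabledExtensions.count(extensions[i]) > 0)
            return true;
    return false;
}

static bool float16ArithmeticAllowed()
{
    const char* const extensions[] = {
        "GL_EXT_shader_explicit_arithmetic_types",
        "GL_EXT_shader_explicit_arithmetic_types_float16" };
    return anyExtensionEnabled(sizeof(extensions) / sizeof(extensions[0]), extensions);
}

int main() { return float16ArithmeticAllowed() ? 0 : 1; }
```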
View File

@ -193,7 +193,6 @@ const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multi
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
#ifdef AMD_EXTENSIONS
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
@ -204,9 +203,6 @@ const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_sh
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
#endif
#ifdef NV_EXTENSIONS
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
@ -228,7 +224,6 @@ const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_sh
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
#endif
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";

View File

@ -78,7 +78,6 @@
#define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E
#ifdef AMD_EXTENSIONS
// Those constants are borrowed from extension NV_gpu_shader5
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
@ -94,7 +93,6 @@
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#endif
#define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E

View File

@ -309,12 +309,16 @@ primary_expression
$$ = $1;
}
| INT32CONSTANT {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit signed literal");
$$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true);
#endif
}
| UINT32CONSTANT {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit signed literal");
$$ = parseContext.intermediate.addConstantUnion($1.u, $1.loc, true);
#endif
}
| INTCONSTANT {
$$ = parseContext.intermediate.addConstantUnion($1.i, $1.loc, true);
@ -332,12 +336,16 @@ primary_expression
$$ = parseContext.intermediate.addConstantUnion($1.u64, $1.loc, true);
}
| INT16CONSTANT {
#ifndef GLSLANG_WEB
parseContext.explicitInt16Check($1.loc, "16-bit integer literal");
$$ = parseContext.intermediate.addConstantUnion((short)$1.i, $1.loc, true);
#endif
}
| UINT16CONSTANT {
#ifndef GLSLANG_WEB
parseContext.explicitInt16Check($1.loc, "16-bit unsigned integer literal");
$$ = parseContext.intermediate.addConstantUnion((unsigned short)$1.u, $1.loc, true);
#endif
}
| FLOATCONSTANT {
$$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat, $1.loc, true);
@ -347,8 +355,10 @@ primary_expression
$$ = parseContext.intermediate.addConstantUnion($1.d, EbtDouble, $1.loc, true);
}
| FLOAT16CONSTANT {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float literal");
$$ = parseContext.intermediate.addConstantUnion($1.d, EbtFloat16, $1.loc, true);
#endif
}
| BOOLCONSTANT {
$$ = parseContext.intermediate.addConstantUnion($1.b, $1.loc, true);
@ -1136,15 +1146,13 @@ interpolation_qualifier
$$.qualifier.flat = true;
}
| NOPERSPECTIVE {
#ifndef GLSLANG_WEB
parseContext.globalCheck($1.loc, "noperspective");
#ifdef NV_EXTENSIONS
parseContext.profileRequires($1.loc, EEsProfile, 0, E_GL_NV_shader_noperspective_interpolation, "noperspective");
#else
parseContext.requireProfile($1.loc, ~EEsProfile, "noperspective");
#endif
parseContext.profileRequires($1.loc, ENoProfile, 130, 0, "noperspective");
$$.init($1.loc);
$$.qualifier.nopersp = true;
#endif
}
| EXPLICITINTERPAMD {
#ifdef AMD_EXTENSIONS
@ -1463,16 +1471,20 @@ storage_qualifier
$$.qualifier.writeonly = true;
}
| SUBROUTINE {
#ifndef GLSLANG_WEB
parseContext.spvRemoved($1.loc, "subroutine");
parseContext.globalCheck($1.loc, "subroutine");
parseContext.unimplemented($1.loc, "subroutine");
$$.init($1.loc);
#endif
}
| SUBROUTINE LEFT_PAREN type_name_list RIGHT_PAREN {
#ifndef GLSLANG_WEB
parseContext.spvRemoved($1.loc, "subroutine");
parseContext.globalCheck($1.loc, "subroutine");
parseContext.unimplemented($1.loc, "subroutine");
$$.init($1.loc);
#endif
}
;
@ -1583,19 +1595,25 @@ type_specifier_nonarray
$$.basicType = EbtDouble;
}
| FLOAT16_T {
#ifndef GLSLANG_WEB
parseContext.float16ScalarVectorCheck($1.loc, "float16_t", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
#endif
}
| FLOAT32_T {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
#endif
}
| FLOAT64_T {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
#endif
}
| INT {
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
@ -1607,34 +1625,46 @@ type_specifier_nonarray
$$.basicType = EbtUint;
}
| INT8_T {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt8;
#endif
}
| UINT8_T {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint8;
#endif
}
| INT16_T {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt16;
#endif
}
| UINT16_T {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint16;
#endif
}
| INT32_T {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit signed integer", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt;
#endif
}
| UINT32_T {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint;
#endif
}
| INT64_T {
parseContext.int64Check($1.loc, "64-bit integer", parseContext.symbolTable.atBuiltInLevel());
@ -1684,40 +1714,52 @@ type_specifier_nonarray
$$.setVector(4);
}
| F16VEC2 {
#ifndef GLSLANG_WEB
parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setVector(2);
#endif
}
| F16VEC3 {
#ifndef GLSLANG_WEB
parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setVector(3);
#endif
}
| F16VEC4 {
#ifndef GLSLANG_WEB
parseContext.float16ScalarVectorCheck($1.loc, "half float vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setVector(4);
#endif
}
| F32VEC2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setVector(2);
#endif
}
| F32VEC3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setVector(3);
#endif
}
| F32VEC4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setVector(4);
#endif
}
| F64VEC2 {
parseContext.explicitFloat64Check($1.loc, "float64_t vector", parseContext.symbolTable.atBuiltInLevel());
@ -1768,58 +1810,76 @@ type_specifier_nonarray
$$.setVector(4);
}
| I8VEC2 {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt8;
$$.setVector(2);
#endif
}
| I8VEC3 {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt8;
$$.setVector(3);
#endif
}
| I8VEC4 {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt8;
$$.setVector(4);
#endif
}
| I16VEC2 {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt16;
$$.setVector(2);
#endif
}
| I16VEC3 {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt16;
$$.setVector(3);
#endif
}
| I16VEC4 {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt16;
$$.setVector(4);
#endif
}
| I32VEC2 {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt;
$$.setVector(2);
#endif
}
| I32VEC3 {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt;
$$.setVector(3);
#endif
}
| I32VEC4 {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit signed integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtInt;
$$.setVector(4);
#endif
}
| I64VEC2 {
parseContext.int64Check($1.loc, "64-bit integer vector", parseContext.symbolTable.atBuiltInLevel());
@ -1858,58 +1918,76 @@ type_specifier_nonarray
$$.setVector(4);
}
| U8VEC2 {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint8;
$$.setVector(2);
#endif
}
| U8VEC3 {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint8;
$$.setVector(3);
#endif
}
| U8VEC4 {
#ifndef GLSLANG_WEB
parseContext.int8ScalarVectorCheck($1.loc, "8-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint8;
$$.setVector(4);
#endif
}
| U16VEC2 {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint16;
$$.setVector(2);
#endif
}
| U16VEC3 {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint16;
$$.setVector(3);
#endif
}
| U16VEC4 {
#ifndef GLSLANG_WEB
parseContext.int16ScalarVectorCheck($1.loc, "16-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint16;
$$.setVector(4);
#endif
}
| U32VEC2 {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint;
$$.setVector(2);
#endif
}
| U32VEC3 {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint;
$$.setVector(3);
#endif
}
| U32VEC4 {
#ifndef GLSLANG_WEB
parseContext.explicitInt32Check($1.loc, "32-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtUint;
$$.setVector(4);
#endif
}
| U64VEC2 {
parseContext.int64Check($1.loc, "64-bit unsigned integer vector", parseContext.symbolTable.atBuiltInLevel());
@ -2062,220 +2140,292 @@ type_specifier_nonarray
$$.setMatrix(4, 4);
}
| F16MAT2 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 2);
#endif
}
| F16MAT3 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 3);
#endif
}
| F16MAT4 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 4);
#endif
}
| F16MAT2X2 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 2);
#endif
}
| F16MAT2X3 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 3);
#endif
}
| F16MAT2X4 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(2, 4);
#endif
}
| F16MAT3X2 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 2);
#endif
}
| F16MAT3X3 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 3);
#endif
}
| F16MAT3X4 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(3, 4);
#endif
}
| F16MAT4X2 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 2);
#endif
}
| F16MAT4X3 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 3);
#endif
}
| F16MAT4X4 {
#ifndef GLSLANG_WEB
parseContext.float16Check($1.loc, "half float matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat16;
$$.setMatrix(4, 4);
#endif
}
| F32MAT2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(2, 2);
#endif
}
| F32MAT3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(3, 3);
#endif
}
| F32MAT4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(4, 4);
#endif
}
| F32MAT2X2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(2, 2);
#endif
}
| F32MAT2X3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(2, 3);
#endif
}
| F32MAT2X4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(2, 4);
#endif
}
| F32MAT3X2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(3, 2);
#endif
}
| F32MAT3X3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(3, 3);
#endif
}
| F32MAT3X4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(3, 4);
#endif
}
| F32MAT4X2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(4, 2);
#endif
}
| F32MAT4X3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(4, 3);
#endif
}
| F32MAT4X4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat32Check($1.loc, "float32_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.setMatrix(4, 4);
#endif
}
| F64MAT2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(2, 2);
#endif
}
| F64MAT3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(3, 3);
#endif
}
| F64MAT4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(4, 4);
#endif
}
| F64MAT2X2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(2, 2);
#endif
}
| F64MAT2X3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(2, 3);
#endif
}
| F64MAT2X4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(2, 4);
#endif
}
| F64MAT3X2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(3, 2);
#endif
}
| F64MAT3X3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(3, 3);
#endif
}
| F64MAT3X4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(3, 4);
#endif
}
| F64MAT4X2 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(4, 2);
#endif
}
| F64MAT4X3 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(4, 3);
#endif
}
| F64MAT4X4 {
#ifndef GLSLANG_WEB
parseContext.explicitFloat64Check($1.loc, "float64_t matrix", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtDouble;
$$.setMatrix(4, 4);
#endif
}
| ACCSTRUCTNV {
#ifdef NV_EXTENSIONS
@ -3215,10 +3365,12 @@ type_specifier_nonarray
$$.sampler.setSubpass(EbtUint, true);
}
| FCOOPMATNV {
#ifndef GLSLANG_WEB
parseContext.fcoopmatCheck($1.loc, "fcoopmatNV", parseContext.symbolTable.atBuiltInLevel());
$$.init($1.loc, parseContext.symbolTable.atGlobalLevel());
$$.basicType = EbtFloat;
$$.coopmat = true;
#endif
}
| struct_specifier {
$$ = $1;

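Each of the non-web type keywords above gets the same treatment: the whole semantic-action body is wrapped in #ifndef GLSLANG_WEB, so the token still parses but the web build performs no checks and builds no type. A rough C++ analogue of one such action, using hypothetical simplified stand-ins (PublicType, float16Check, handleF16Vec2) rather than the real parser context:

```cpp
// Hypothetical, much-simplified stand-ins for the parser's public-type record and checks.
struct PublicType {
    int basicType  = 0;   // 0 = none; 1 stands in for a float16 enumerant
    int vectorSize = 1;
    void init()           { basicType = 0; vectorSize = 1; }
    void setVector(int s) { vectorSize = s; }
};

void float16Check(const char* /*featureDesc*/) { /* extension/profile checks elided */ }

// Roughly what an action such as "| F16VEC2 { ... }" reduces to: the web build compiles
// the body away, so the rule still exists but performs no checks and builds no type.
void handleF16Vec2(PublicType& result)
{
#ifndef GLSLANG_WEB
    float16Check("half float vector");
    result.init();
    result.basicType = 1;
    result.setVector(2);
#else
    (void)result;   // nothing to build in the web configuration
#endif
}
```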
File diff suppressed because it is too large

View File

@ -35,6 +35,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "localintermediate.h"
#include "../Include/InfoSink.h"
@ -174,7 +176,7 @@ bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
case EOpIndexIndirect: out.debug << "indirect index"; break;
case EOpIndexDirectStruct:
{
bool reference = node->getLeft()->getType().getBasicType() == EbtReference;
bool reference = node->getLeft()->getType().isReference();
const TTypeList *members = reference ? node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct();
out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
out.debug << ": direct index for structure"; break;
@ -615,7 +617,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
#ifdef NV_EXTENSIONS
case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
@ -638,7 +639,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
#endif
case EOpClip: out.debug << "clip"; break;
case EOpIsFinite: out.debug << "isfinite"; break;
@ -648,7 +648,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break;
#ifdef AMD_EXTENSIONS
case EOpMinInvocations: out.debug << "minInvocations"; break;
case EOpMaxInvocations: out.debug << "maxInvocations"; break;
case EOpAddInvocations: out.debug << "addInvocations"; break;
@ -677,7 +676,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break;
case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break;
#endif
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
@ -863,7 +861,6 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpReadInvocation: out.debug << "readInvocation"; break;
#ifdef AMD_EXTENSIONS
case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break;
case EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break;
case EOpWriteInvocation: out.debug << "writeInvocation"; break;
@ -871,9 +868,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpMin3: out.debug << "min3"; break;
case EOpMax3: out.debug << "max3"; break;
case EOpMid3: out.debug << "mid3"; break;
case EOpTime: out.debug << "time"; break;
#endif
case EOpAtomicAdd: out.debug << "AtomicAdd"; break;
case EOpAtomicMin: out.debug << "AtomicMin"; break;
@ -910,10 +905,8 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break;
case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break;
case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break;
#ifdef AMD_EXTENSIONS
case EOpImageLoadLod: out.debug << "imageLoadLod"; break;
case EOpImageStoreLod: out.debug << "imageStoreLod"; break;
#endif
case EOpTextureQuerySize: out.debug << "textureSize"; break;
case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
@ -940,11 +933,9 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break;
case EOpTextureGradClamp: out.debug << "textureGradClamp"; break;
case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break;
#ifdef AMD_EXTENSIONS
case EOpTextureGatherLod: out.debug << "textureGatherLod"; break;
case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break;
case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break;
#endif
case EOpSparseTexture: out.debug << "sparseTexture"; break;
case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break;
@ -962,19 +953,15 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break;
case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break;
case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClamp"; break;
#ifdef AMD_EXTENSIONS
case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break;
case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break;
case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break;
case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break;
#endif
#ifdef NV_EXTENSIONS
case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break;
case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break;
case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break;
case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break;
case EOpImageSampleFootprintGradClampNV: out.debug << "imageSampleFootprintGradClampNV"; break;
#endif
case EOpAddCarry: out.debug << "addCarry"; break;
case EOpSubBorrow: out.debug << "subBorrow"; break;
case EOpUMulExtended: out.debug << "uMulExtended"; break;
@ -988,9 +975,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break;
case EOpInterpolateAtOffset: out.debug << "interpolateAtOffset"; break;
#ifdef AMD_EXTENSIONS
case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break;
#endif
case EOpSinCos: out.debug << "sincos"; break;
case EOpGenMul: out.debug << "mul"; break;
@ -1057,7 +1042,6 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
#ifdef NV_EXTENSIONS
case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
@ -1080,19 +1064,16 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
#endif
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
#ifdef NV_EXTENSIONS
case EOpTraceNV: out.debug << "traceNV"; break;
case EOpReportIntersectionNV: out.debug << "reportIntersectionNV"; break;
case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break;
case EOpTerminateRayNV: out.debug << "terminateRayNV"; break;
case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break;
case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break;
#endif
case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break;
case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break;
@ -1509,16 +1490,13 @@ void TIntermediate::output(TInfoSink& infoSink, bool tree)
infoSink.debug << "interlock ordering = " << TQualifier::getInterlockOrderingString(interlockOrdering) << "\n";
break;
#ifdef NV_EXTENSIONS
case EShLangMeshNV:
infoSink.debug << "max_vertices = " << vertices << "\n";
infoSink.debug << "max_primitives = " << primitives << "\n";
infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
// Fall through
case EShLangTaskNV:
// Fall through
#endif
case EShLangCompute:
infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n";
{
@ -1547,3 +1525,5 @@ void TIntermediate::output(TInfoSink& infoSink, bool tree)
}
} // end namespace glslang
#endif // not GLSLANG_WEB

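This file is bracketed by a single #ifndef GLSLANG_WEB / #endif pair, so the entire debug-dump implementation drops out of the web build while the header keeps the interface alive with an empty inline body. A hedged sketch of that header/source split, with hypothetical names (Dumper, dump, InfoSink) rather than the real classes:

```cpp
// ---- dumper.h (sketch) ----
// The declaration is always visible; the web build gets an empty inline stub,
// so callers compile unchanged and the linker never needs the real body.
struct InfoSink { /* ... */ };

class Dumper {
public:
#ifdef GLSLANG_WEB
    void dump(InfoSink&, bool /*tree*/) { }   // no-op: debug output is not shipped
#else
    void dump(InfoSink&, bool tree);          // defined in dumper.cpp
#endif
};

// ---- dumper.cpp (sketch) ----
#ifndef GLSLANG_WEB
void Dumper::dump(InfoSink& /*sink*/, bool /*tree*/)
{
    // full tree/operator printing lives here and is compiled out for the web target
}
#endif  // not GLSLANG_WEB
```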
View File

@ -75,7 +75,7 @@ public:
target = &inputList;
else if (base->getQualifier().storage == EvqVaryingOut)
target = &outputList;
else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().layoutPushConstant)
else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().isPushConstant())
target = &uniformList;
if (target) {
TVarEntryInfo ent = {base->getId(), base, ! traverseAll};
@ -355,7 +355,7 @@ struct TSymbolValidater
}
return;
}
} else if (base->getQualifier().isUniformOrBuffer() && ! base->getQualifier().layoutPushConstant) {
} else if (base->getQualifier().isUniformOrBuffer() && ! base->getQualifier().isPushConstant()) {
// validate uniform type;
for (int i = 0; i < EShLangCount; i++) {
if (i != currentStage && outVarMaps[i] != nullptr) {

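The traverser at the top of this file routes each linker object into one of three lists purely from its storage qualifier, now asking isPushConstant() instead of reading the layout flag directly. A minimal sketch of that bucketing, with hypothetical stripped-down types (Qualifier, Symbol, bucket) standing in for the real ones:

```cpp
#include <vector>

// Hypothetical, stripped-down qualifier; the real one exposes the same kind of predicates.
struct Qualifier {
    enum Storage { In, Out, Uniform, Buffer, Other } storage = Other;
    bool pushConstant = false;
    bool isUniformOrBuffer() const { return storage == Uniform || storage == Buffer; }
    bool isPushConstant()    const { return pushConstant; }
};

struct Symbol { int id; Qualifier qualifier; };

// Route a symbol into the input, output, or uniform list; push-constant blocks
// are deliberately excluded from the uniform list, as in the traverser above.
void bucket(const Symbol& s,
            std::vector<Symbol>& inputs,
            std::vector<Symbol>& outputs,
            std::vector<Symbol>& uniforms)
{
    if (s.qualifier.storage == Qualifier::In)
        inputs.push_back(s);
    else if (s.qualifier.storage == Qualifier::Out)
        outputs.push_back(s);
    else if (s.qualifier.isUniformOrBuffer() && !s.qualifier.isPushConstant())
        uniforms.push_back(s);
}
```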
View File

@ -78,11 +78,15 @@ void TIntermediate::warn(TInfoSink& infoSink, const char* message)
//
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
#ifndef GLSLANG_WEB
mergeCallGraphs(infoSink, unit);
mergeModes(infoSink, unit);
mergeTrees(infoSink, unit);
#endif
}
#ifndef GLSLANG_WEB
void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
{
if (unit.getNumEntryPoints() > 0) {
@ -142,18 +146,13 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
if (vertices == TQualifier::layoutNotSet)
vertices = unit.vertices;
else if (vertices != unit.vertices) {
if (language == EShLangGeometry
#ifdef NV_EXTENSIONS
|| language == EShLangMeshNV
#endif
)
if (language == EShLangGeometry || language == EShLangMeshNV)
error(infoSink, "Contradictory layout max_vertices values");
else if (language == EShLangTessControl)
error(infoSink, "Contradictory layout vertices values");
else
assert(0);
}
#ifdef NV_EXTENSIONS
if (primitives == TQualifier::layoutNotSet)
primitives = unit.primitives;
else if (primitives != unit.primitives) {
@ -162,7 +161,6 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
else
assert(0);
}
#endif
if (inputPrimitive == ElgNone)
inputPrimitive = unit.inputPrimitive;
@ -224,21 +222,16 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
if (unit.xfbBuffers[b].contains64BitType)
xfbBuffers[b].contains64BitType = true;
#ifdef AMD_EXTENSIONS
if (unit.xfbBuffers[b].contains32BitType)
xfbBuffers[b].contains32BitType = true;
if (unit.xfbBuffers[b].contains16BitType)
xfbBuffers[b].contains16BitType = true;
#endif
// TODO: 4.4 link: enhanced layouts: compare ranges
}
MERGE_TRUE(multiStream);
#ifdef NV_EXTENSIONS
MERGE_TRUE(layoutOverrideCoverage);
MERGE_TRUE(geoPassthroughEXT);
#endif
for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
if (unit.shiftBinding[i] > 0)
@ -287,13 +280,8 @@ void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
}
// Getting this far means we have two existing trees to merge...
#ifdef NV_EXTENSIONS
numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
#endif
#ifdef NV_EXTENSIONS
numTaskNVBlocks += unit.numTaskNVBlocks;
#endif
// Get the top-level globals of each unit
TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
@ -493,6 +481,7 @@ void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
for (int i = 0; i < (int)type.getStruct()->size(); ++i)
mergeImplicitArraySizes(*(*type.getStruct())[i].type, *(*unitType.getStruct())[i].type);
}
#endif // not GLSLANG_WEB
//
// Compare two global objects from two compilation units and see if they match
@ -547,7 +536,7 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
symbol.getQualifier().flat != unitSymbol.getQualifier().flat ||
symbol.getQualifier().sample != unitSymbol.getQualifier().sample ||
symbol.getQualifier().patch != unitSymbol.getQualifier().patch ||
symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) {
symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective()) {
error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
writeTypeComparison = true;
}
@ -615,7 +604,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
warn(infoSink, "Entry point not found");
}
if (numPushConstants > 1)
if (getNumPushConstants() > 1)
error(infoSink, "Only one push_constant block is allowed per stage");
// recursion and missing body checking
@ -629,6 +618,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
if (invocations == TQualifier::layoutNotSet)
invocations = 1;
#ifndef GLSLANG_WEB
if (inIoAccessed("gl_ClipDistance") && inIoAccessed("gl_ClipVertex"))
error(infoSink, "Can only use one of gl_ClipDistance or gl_ClipVertex (gl_ClipDistance is preferred)");
if (inIoAccessed("gl_CullDistance") && inIoAccessed("gl_ClipVertex"))
@ -642,12 +632,10 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
for (size_t b = 0; b < xfbBuffers.size(); ++b) {
if (xfbBuffers[b].contains64BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 8);
#ifdef AMD_EXTENSIONS
else if (xfbBuffers[b].contains32BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 4);
else if (xfbBuffers[b].contains16BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 2);
#endif
// "It is a compile-time or link-time error to have
// any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
@ -668,16 +656,11 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
#ifdef AMD_EXTENSIONS
} else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
#else
} else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
#endif
error(infoSink, "xfb_stride must be multiple of 4:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
#ifdef AMD_EXTENSIONS
// "If the buffer is capturing any
// outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
@ -686,7 +669,6 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
#endif
// "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
@ -730,8 +712,6 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
break;
case EShLangCompute:
break;
#ifdef NV_EXTENSIONS
case EShLangRayGenNV:
case EShLangIntersectNV:
case EShLangAnyHitNV:
@ -764,8 +744,6 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
if (numTaskNVBlocks > 1)
error(infoSink, "Only one taskNV interface block is allowed per shader");
break;
#endif
default:
error(infoSink, "Unknown Stage.");
break;
@ -787,6 +765,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
} finalLinkTraverser;
treeRoot->traverse(&finalLinkTraverser);
#endif
}
//
@ -1187,14 +1166,10 @@ int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
// TODO: are there valid cases of having an unsized array with a location? If so, running this code too early.
TType elementType(type, 0);
if (type.isSizedArray()
#ifdef NV_EXTENSIONS
&& !type.getQualifier().isPerView()
#endif
)
if (type.isSizedArray() && !type.getQualifier().isPerView())
return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
else {
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
// unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
elementType.getQualifier().perViewNV = false;
#endif
@ -1273,6 +1248,7 @@ int TIntermediate::computeTypeUniformLocationSize(const TType& type)
return 1;
}
#ifndef GLSLANG_WEB
// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
@ -1285,11 +1261,7 @@ int TIntermediate::addXfbBufferOffset(const TType& type)
TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
// compute the range
#ifdef AMD_EXTENSIONS
unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
#else
unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType);
#endif
buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
@ -1309,15 +1281,10 @@ int TIntermediate::addXfbBufferOffset(const TType& type)
// Recursively figure out how many bytes of xfb buffer are used by the given type.
// Return the size of type, in bytes.
// Sets contains64BitType to true if the type contains a 64-bit data type.
#ifdef AMD_EXTENSIONS
// Sets contains32BitType to true if the type contains a 32-bit data type.
// Sets contains16BitType to true if the type contains a 16-bit data type.
// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
#else
// N.B. Caller must set contains64BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType) const
#endif
{
// "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8.
@ -1330,44 +1297,32 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
assert(type.isSizedArray());
TType elementType(type, 0);
#ifdef AMD_EXTENSIONS
return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
#else
return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType);
#endif
}
if (type.isStruct()) {
unsigned int size = 0;
bool structContains64BitType = false;
#ifdef AMD_EXTENSIONS
bool structContains32BitType = false;
bool structContains16BitType = false;
#endif
for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
TType memberType(type, member);
// "... if applied to
// an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8."
bool memberContains64BitType = false;
#ifdef AMD_EXTENSIONS
bool memberContains32BitType = false;
bool memberContains16BitType = false;
int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
#else
int memberSize = computeTypeXfbSize(memberType, memberContains64BitType);
#endif
if (memberContains64BitType) {
structContains64BitType = true;
RoundToPow2(size, 8);
#ifdef AMD_EXTENSIONS
} else if (memberContains32BitType) {
structContains32BitType = true;
RoundToPow2(size, 4);
} else if (memberContains16BitType) {
structContains16BitType = true;
RoundToPow2(size, 2);
#endif
}
size += memberSize;
}
@ -1375,14 +1330,12 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
if (structContains64BitType) {
contains64BitType = true;
RoundToPow2(size, 8);
#ifdef AMD_EXTENSIONS
} else if (structContains32BitType) {
contains32BitType = true;
RoundToPow2(size, 4);
} else if (structContains16BitType) {
contains16BitType = true;
RoundToPow2(size, 2);
#endif
}
return size;
}
@ -1402,7 +1355,6 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
contains64BitType = true;
return 8 * numComponents;
#ifdef AMD_EXTENSIONS
} else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
contains16BitType = true;
return 2 * numComponents;
@ -1412,11 +1364,8 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
contains32BitType = true;
return 4 * numComponents;
}
#else
} else
return 4 * numComponents;
#endif
}
#endif // not GLSLANG_WEB
const int baseAlignmentVec4Std140 = 16;
@ -1741,7 +1690,7 @@ int TIntermediate::getBlockSize(const TType& blockType)
int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
{
assert(type.getBasicType() == EbtReference);
assert(type.isReference());
int size = getBlockSize(*type.getReferentType());
int align = type.getBufferReferenceAlignment();

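computeTypeXfbSize above applies the rule spelled out in its comments: a captured aggregate containing a 64-bit type rounds offsets and its total size to a multiple of 8, 32-bit components to a multiple of 4, and 16-bit components to a multiple of 2. A self-contained sketch of that accounting over a hypothetical flat member list (not the real TType machinery), assuming the simplified names roundToPow2, Member, and xfbSize:

```cpp
#include <cstdio>
#include <vector>

// Round v up to the next multiple of the power of two 'pow2'.
static void roundToPow2(unsigned int& v, unsigned int pow2) { v = (v + pow2 - 1) & ~(pow2 - 1); }

// Hypothetical flattened member: component byte width (2, 4, or 8) and component count.
struct Member { unsigned int componentBytes; unsigned int components; };

// Accumulate the xfb byte size of a struct-like list of members, aligning each member's
// offset and the final size to the widest component width, mirroring the rounding above.
static unsigned int xfbSize(const std::vector<Member>& members)
{
    unsigned int size = 0;
    unsigned int maxComponentBytes = 0;
    for (const Member& m : members) {
        roundToPow2(size, m.componentBytes);        // align the member's offset
        size += m.componentBytes * m.components;
        if (m.componentBytes > maxComponentBytes)
            maxComponentBytes = m.componentBytes;
    }
    if (maxComponentBytes > 0)
        roundToPow2(size, maxComponentBytes);       // the whole aggregate is padded too
    return size;
}

int main()
{
    // double (8 bytes) followed by a 16-bit vec2: the vec2 lands at offset 8 and the
    // total rounds up to a multiple of 8, giving 16 bytes.
    std::vector<Member> s = { { 8, 1 }, { 2, 2 } };
    std::printf("xfb size = %u bytes\n", xfbSize(s));   // prints 16
    return 0;
}
```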
View File

@ -149,20 +149,14 @@ struct TOffsetRange {
// Things that need to be tracked per xfb buffer.
struct TXfbBuffer {
#ifdef AMD_EXTENSIONS
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false),
contains32BitType(false), contains16BitType(false) { }
#else
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false) { }
#endif
std::vector<TRange> ranges; // byte offsets that have already been assigned
unsigned int stride;
unsigned int implicitStride;
bool contains64BitType;
#ifdef AMD_EXTENSIONS
bool contains32BitType;
bool contains16BitType;
#endif
};
// Track a set of strings describing how the module was processed.
@ -217,7 +211,6 @@ class TSymbolTable;
class TSymbol;
class TVariable;
#ifdef NV_EXTENSIONS
//
// Texture and Sampler transformation mode.
//
@ -226,7 +219,6 @@ enum ComputeDerivativeMode {
LayoutDerivativeGroupQuads, // derivative_group_quadsNV
LayoutDerivativeGroupLinear, // derivative_group_linearNV
};
#endif
//
// Set of helper functions to help parse and build the tree.
@ -248,7 +240,7 @@ public:
postDepthCoverage(false), depthLayout(EldNone), depthReplacing(false),
hlslFunctionality1(false),
blendEquations(0), xfbMode(false), multiStream(false),
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
layoutOverrideCoverage(false),
geoPassthroughEXT(false),
numShaderRecordNVBlocks(0),
@ -286,7 +278,11 @@ public:
void setLimits(const TBuiltInResource& r) { resources = r; }
bool postProcess(TIntermNode*, EShLanguage);
#ifdef GLSLANG_WEB
void output(TInfoSink&, bool tree) { }
#else
void output(TInfoSink&, bool tree);
#endif
void removeTree();
#ifdef ENABLE_HLSL
@ -480,7 +476,12 @@ public:
int getNumEntryPoints() const { return numEntryPoints; }
int getNumErrors() const { return numErrors; }
void addPushConstantCount() { ++numPushConstants; }
#ifdef NV_EXTENSIONS
#ifdef GLSLANG_WEB
int getNumPushConstants() const { return 0; }
void addShaderRecordNVCount() { }
void addTaskNVCount() { }
#else
int getNumPushConstants() const { return numPushConstants; }
void addShaderRecordNVCount() { ++numShaderRecordNVBlocks; }
void addTaskNVCount() { ++numTaskNVBlocks; }
#endif
@ -702,6 +703,7 @@ public:
static int computeTypeLocationSize(const TType&, EShLanguage);
static int computeTypeUniformLocationSize(const TType&);
#ifndef GLSLANG_WEB
bool setXfbBufferStride(int buffer, unsigned stride)
{
if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
@ -711,9 +713,7 @@ public:
}
unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
int addXfbBufferOffset(const TType&);
#ifdef AMD_EXTENSIONS
unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const;
#else
unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const;
#endif
static int getBaseAlignmentScalar(const TType&, int& size);
@ -727,7 +727,7 @@ public:
static int computeBufferReferenceTypeSize(const TType&);
bool promote(TIntermOperator*);
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
@ -796,25 +796,27 @@ public:
const char* const implicitCounterName;
// Certain explicit conversions are allowed conditionally
#ifdef GLSLANG_WEB
bool getArithemeticInt8Enabled() const { return false; }
bool getArithemeticInt16Enabled() const { return false; }
bool getArithemeticFloat16Enabled() const { return false; }
#else
bool getArithemeticInt8Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
}
bool getArithemeticInt16Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
#ifdef AMD_EXTENSIONS
extensionRequested(E_GL_AMD_gpu_shader_int16) ||
#endif
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
}
bool getArithemeticFloat16Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
#ifdef AMD_EXTENSIONS
extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
#endif
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
}
#endif
protected:
TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
@ -847,7 +849,15 @@ protected:
bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const;
std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
#ifdef GLSLANG_WEB
bool extensionRequested(const char *extension) const { return false; }
#else
// I think this function should go away.
// This data structure is just a log to pass on to back ends.
// Versioning and extensions are handled in Version.cpp, which has a rich
// set of functions for querying stages, versions, and whether extensions are enabled or disabled.
bool extensionRequested(const char *extension) const {return requestedExtensions.find(extension) != requestedExtensions.end();}
#endif
static const char* getResourceName(TResourceType);
const EShLanguage language; // stage, known at construction time
@ -891,7 +901,7 @@ protected:
std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
bool multiStream;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
bool layoutOverrideCoverage;
bool geoPassthroughEXT;
int numShaderRecordNVBlocks;
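All of the header changes above reduce to one pattern: under GLSLANG_WEB the accessor either returns a constant or does nothing, so call sites written without #ifdefs constant-fold and their dead branches disappear. A minimal sketch of that shape, with invented names rather than the actual glslang declarations:

// Sketch only: TFeatureQueries and its members are invented for illustration.
class TFeatureQueries {
public:
#ifdef GLSLANG_WEB
    // Web build: constant/no-op stubs, so every caller's branch folds away.
    bool float16Enabled() const { return false; }
    void addShaderRecordCount() { }
#else
    bool float16Enabled() const { return float16; }
    void addShaderRecordCount() { ++numShaderRecords; }
protected:
    bool float16 = false;
    int numShaderRecords = 0;
#endif
};

// Callers stay free of #ifdefs; in the web build the condition is a
// compile-time false and dead-code elimination drops the whole branch:
//     if (queries.float16Enabled())
//         emitFloat16Path();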

View File

@ -61,22 +61,55 @@ public:
spvVersion(spvVersion), forwardCompatible(forwardCompatible),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
virtual ~TParseVersions() { }
virtual void initializeExtensionBehavior();
virtual void requireProfile(const TSourceLoc&, int queryProfiles, const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, const char* const extension, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
#ifdef GLSLANG_WEB
virtual void initializeExtensionBehavior() { }
virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc) { }
virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc) { }
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
virtual TExtensionBehavior getExtensionBehavior(const char*) { return EBhMissing; }
virtual bool extensionTurnedOn(const char* const extension) { return false; }
virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]) { return false; }
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior) { }
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior) { }
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension) { }
virtual void fullIntegerCheck(const TSourceLoc&, const char* op) { }
virtual void doubleCheck(const TSourceLoc&, const char* op) { }
virtual bool float16Arithmetic() { return false; }
virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
virtual bool int16Arithmetic() { return false; }
virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
virtual bool int8Arithmetic() { return false; }
virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
#else
virtual void initializeExtensionBehavior();
virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual TExtensionBehavior getExtensionBehavior(const char*);
virtual bool extensionTurnedOn(const char* const extension);
virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void doubleCheck(const TSourceLoc&, const char* op);
virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
@ -88,23 +121,19 @@ public:
virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int8Arithmetic();
virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
#ifdef AMD_EXTENSIONS
virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
#endif
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
#endif // GLSLANG_WEB
virtual void spvRemoved(const TSourceLoc&, const char* op);
virtual void vulkanRemoved(const TSourceLoc&, const char* op);
virtual void requireVulkan(const TSourceLoc&, const char* op);
virtual void requireSpv(const TSourceLoc&, const char* op);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
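The TParseVersions checks get the same treatment: each requireX/xCheck becomes an empty inline body and each xArithmetic() query returns false, so the error paths and extension tables they would otherwise pull in are never referenced. Roughly, with invented names and the virtual-ness of the real methods dropped for brevity:

// Sketch only: ParseChecks and handleLiteral are not the real glslang interface.
struct ParseChecks {
#ifdef GLSLANG_WEB
    void doubleCheck(const char* /*op*/) { }   // nothing to enforce on the web
    bool int16Arithmetic() { return false; }   // feature can never be enabled
#else
    void doubleCheck(const char* op) { (void)op; /* full build: real profile/extension check */ }
    bool int16Arithmetic() { return int16Requested; }
    bool int16Requested = false;
#endif
};

void handleLiteral(ParseChecks& pc)
{
    pc.doubleCheck("double literal");          // no-op in the web build
    if (pc.int16Arithmetic()) {
        // statically unreachable in the web build, so the optimizer
        // is free to discard the 16-bit handling entirely
    }
}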

View File

@ -792,10 +792,8 @@ int TPpContext::CPPpragma(TPpToken* ppToken)
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
@ -963,9 +961,11 @@ int TPpContext::readCPPline(TPpToken* ppToken)
case PpAtomLine:
token = CPPline(ppToken);
break;
#ifndef GLSLANG_WEB
case PpAtomPragma:
token = CPPpragma(ppToken);
break;
#endif
case PpAtomUndef:
token = CPPundef(ppToken);
break;
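Guarding just the case label is enough here: with GLSLANG_WEB defined there is no remaining reference to CPPpragma, and dead-code elimination can discard it. The shape, as a self-contained sketch rather than the real preprocessor:

// Sketch: compiling out one switch case so its handler is never referenced.
enum Atom { AtomPragma, AtomUndef, AtomOther };

static int handlePragma() { return 1; }   // unreferenced (and removable) in the web build
static int handleUndef()  { return 2; }

int dispatch(Atom atom)
{
    switch (atom) {
#ifndef GLSLANG_WEB
    case AtomPragma:
        return handlePragma();   // only the full build keeps this reference
#endif
    case AtomUndef:
        return handleUndef();
    default:
        return 0;
    }
}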

View File

@ -259,6 +259,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
// Suffix:
bool isDouble = false;
bool isFloat16 = false;
#ifndef GLSLANG_WEB
if (ch == 'l' || ch == 'L') {
if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
parseContext.doubleCheck(ppToken->loc, "double floating-point suffix");
@ -297,11 +298,15 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
saveName(ch);
isFloat16 = true;
}
} else if (ch == 'f' || ch == 'F') {
} else
#endif
if (ch == 'f' || ch == 'F') {
#ifndef GLSLANG_WEB
if (ifdepth == 0)
parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
if (ifdepth == 0 && !parseContext.relaxedErrors())
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
#endif
if (ifdepth == 0 && !hasDecimalOrExponent)
parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
saveName(ch);
@ -470,9 +475,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
static const char* const Int16_Extensions[] = {
#ifdef AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16 };
static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
@ -581,6 +584,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -589,7 +593,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -598,12 +601,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -614,6 +615,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ungetch();
ppToken->name[len] = '\0';
#ifndef GLSLANG_WEB
if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (pp->ifdepth == 0) {
pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
@ -634,7 +636,9 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
}
ppToken->ival = (int)ival;
return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
} else {
} else
#endif
{
if (ival > 0xffffffffu && !AlreadyComplained)
pp->parseContext.ppError(ppToken->loc, "hexadecimal literal too big", "", "");
ppToken->ival = (int)ival;
@ -699,7 +703,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -708,12 +712,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -730,6 +732,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
if (octalOverflow)
pp->parseContext.ppError(ppToken->loc, "octal literal too big", "", "");
#ifndef GLSLANG_WEB
if (isInt64 && pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (pp->ifdepth == 0) {
pp->parseContext.requireProfile(ppToken->loc, ~EEsProfile,
@ -750,7 +753,9 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
}
ppToken->ival = (int)ival;
return isUnsigned ? PpAtomConstUint16 : PpAtomConstInt16;
} else {
} else
#endif
{
ppToken->ival = (int)ival;
return isUnsigned ? PpAtomConstUint : PpAtomConstInt;
}
@ -790,7 +795,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -799,12 +804,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
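The scanner edits rely on one bracketing idiom worth calling out: the desktop-only else-if ladder ends with "} else" right before the #endif, and the shared fallback block sits outside the guard, so both builds close on the same braces. A stripped-down sketch of that shape, not the real literal scanner:

// Sketch: keeping a shared fallback after a conditionally compiled ladder.
int classifySuffix(int ch)
{
    (void)ch;                      // unused when GLSLANG_WEB is defined
#ifndef GLSLANG_WEB
    if (ch == 'l' || ch == 'L') {
        return 64;                 // 64-bit suffix, desktop-only
    } else if (ch == 's' || ch == 'S') {
        return 16;                 // 16-bit suffix, desktop-only
    } else
#endif
    {
        return 32;                 // default path, built everywhere
    }
}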

View File

@ -41,9 +41,7 @@ namespace {
using CompileToAstTest = GlslangTest<::testing::TestWithParam<std::string>>;
#ifdef NV_EXTENSIONS
using CompileToAstTestNV = GlslangTest<::testing::TestWithParam<std::string>>;
#endif
TEST_P(CompileToAstTest, FromFile)
{
@ -52,7 +50,6 @@ TEST_P(CompileToAstTest, FromFile)
Target::AST);
}
#ifdef NV_EXTENSIONS
// Compiling GLSL to SPIR-V under OpenGL semantics (NV extensions enabled).
TEST_P(CompileToAstTestNV, FromFile)
{
@ -60,7 +57,6 @@ TEST_P(CompileToAstTestNV, FromFile)
Source::GLSL, Semantics::OpenGL, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::AST);
}
#endif
// clang-format off
INSTANTIATE_TEST_CASE_P(
@ -281,7 +277,6 @@ INSTANTIATE_TEST_CASE_P(
FileNameAsCustomTestSuffix
);
#ifdef NV_EXTENSIONS
INSTANTIATE_TEST_CASE_P(
Glsl, CompileToAstTestNV,
::testing::ValuesIn(std::vector<std::string>({
@ -289,7 +284,6 @@ INSTANTIATE_TEST_CASE_P(
})),
FileNameAsCustomTestSuffix
);
#endif
// clang-format on
} // anonymous namespace

View File

@ -72,12 +72,8 @@ using OpenGLSemantics = GlslangTest<::testing::TestWithParam<std::string>>;
using VulkanAstSemantics = GlslangTest<::testing::TestWithParam<std::string>>;
using HlslIoMap = GlslangTest<::testing::TestWithParam<IoMapData>>;
using GlslIoMap = GlslangTest<::testing::TestWithParam<IoMapData>>;
#ifdef AMD_EXTENSIONS
using CompileVulkanToSpirvTestAMD = GlslangTest<::testing::TestWithParam<std::string>>;
#endif
#ifdef NV_EXTENSIONS
using CompileVulkanToSpirvTestNV = GlslangTest<::testing::TestWithParam<std::string>>;
#endif
using CompileUpgradeTextureToSampledTextureAndDropSamplersTest = GlslangTest<::testing::TestWithParam<std::string>>;
// Compiling GLSL to SPIR-V under Vulkan semantics. Expected to successfully
@ -179,7 +175,6 @@ TEST_P(GlslIoMap, FromFile)
GetParam().flattenUniforms);
}
#ifdef AMD_EXTENSIONS
// Compiling GLSL to SPIR-V under Vulkan semantics (AMD extensions enabled).
// Expected to successfully generate SPIR-V.
TEST_P(CompileVulkanToSpirvTestAMD, FromFile)
@ -188,9 +183,7 @@ TEST_P(CompileVulkanToSpirvTestAMD, FromFile)
Source::GLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::Spv);
}
#endif
#ifdef NV_EXTENSIONS
// Compiling GLSL to SPIR-V under Vulkan semantics (NV extensions enabled).
// Expected to successfully generate SPIR-V.
TEST_P(CompileVulkanToSpirvTestNV, FromFile)
@ -199,7 +192,6 @@ TEST_P(CompileVulkanToSpirvTestNV, FromFile)
Source::GLSL, Semantics::Vulkan, glslang::EShTargetVulkan_1_0, glslang::EShTargetSpv_1_0,
Target::Spv);
}
#endif
TEST_P(CompileUpgradeTextureToSampledTextureAndDropSamplersTest, FromFile)
{
@ -572,7 +564,6 @@ INSTANTIATE_TEST_CASE_P(
FileNameAsCustomTestSuffix
);
#ifdef AMD_EXTENSIONS
INSTANTIATE_TEST_CASE_P(
Glsl, CompileVulkanToSpirvTestAMD,
::testing::ValuesIn(std::vector<std::string>({
@ -588,9 +579,7 @@ INSTANTIATE_TEST_CASE_P(
})),
FileNameAsCustomTestSuffix
);
#endif
#ifdef NV_EXTENSIONS
INSTANTIATE_TEST_CASE_P(
Glsl, CompileVulkanToSpirvTestNV,
::testing::ValuesIn(std::vector<std::string>({
@ -638,7 +627,6 @@ INSTANTIATE_TEST_CASE_P(
})),
FileNameAsCustomTestSuffix
);
#endif
INSTANTIATE_TEST_CASE_P(
Glsl, CompileUpgradeTextureToSampledTextureAndDropSamplersTest,

View File

@ -60,7 +60,6 @@ EShLanguage GetShaderStage(const std::string& stage)
return EShLangFragment;
} else if (stage == "comp") {
return EShLangCompute;
#ifdef NV_EXTENSIONS
} else if (stage == "rgen") {
return EShLangRayGenNV;
} else if (stage == "rint") {
@ -77,7 +76,6 @@ EShLanguage GetShaderStage(const std::string& stage)
return EShLangTaskNV;
} else if (stage == "mesh") {
return EShLangMeshNV;
#endif
} else {
assert(0 && "Unknown shader stage");
return EShLangCount;

View File

@ -8703,25 +8703,19 @@ void HlslParseContext::fixXfbOffsets(TQualifier& qualifier, TTypeList& typeList)
for (unsigned int member = 0; member < typeList.size(); ++member) {
TQualifier& memberQualifier = typeList[member].type->getQualifier();
bool contains64BitType = false;
#ifdef AMD_EXTENSIONS
bool contains32BitType = false;
bool contains16BitType = false;
int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType, contains32BitType, contains16BitType);
#else
int memberSize = intermediate.computeTypeXfbSize(*typeList[member].type, contains64BitType);
#endif
// see if we need to auto-assign an offset to this member
if (! memberQualifier.hasXfbOffset()) {
// "if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8"
if (contains64BitType)
RoundToPow2(nextOffset, 8);
#ifdef AMD_EXTENSIONS
else if (contains32BitType)
RoundToPow2(nextOffset, 4);
// "if applied to an aggregate containing a half float or 16-bit integer, the offset must also be a multiple of 2"
else if (contains16BitType)
RoundToPow2(nextOffset, 2);
#endif
memberQualifier.layoutXfbOffset = nextOffset;
} else
nextOffset = memberQualifier.layoutXfbOffset;
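For the xfb hunk above, the rule being applied is plain power-of-two rounding before each auto-assigned offset: up to a multiple of 8 if the member contains a 64-bit type, 4 for a 32-bit type, and 2 for a 16-bit type. A small standalone illustration, using a local roundToPow2 helper rather than the glslang one:

#include <cassert>

// Round 'offset' up to the next multiple of the power-of-two 'align'.
static unsigned roundToPow2(unsigned offset, unsigned align)
{
    return (offset + align - 1) & ~(align - 1);
}

int main()
{
    // e.g. a float (4 bytes) followed by a member containing a double:
    unsigned next = 4;
    next = roundToPow2(next, 8);   // contains a 64-bit type -> multiple of 8
    assert(next == 8);             // that member's xfb offset becomes 8

    // a 16-bit member after an odd offset only needs 2-byte rounding:
    assert(roundToPow2(9, 2) == 10);
    return 0;
}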