initial commit
548
source/util/glsl_compiler.cpp
Normal file
@@ -0,0 +1,548 @@

#include "iwa/util/glsl_compiler.hpp"

#include <filesystem>
#include <utility>
#include <glslang/Include/InfoSink.h>
#include <glslang/Public/ShaderLang.h>
#include <glslang/MachineIndependent/iomapper.h>
#include <glslang/MachineIndependent/localintermediate.h>
#include <glslang/Public/ResourceLimits.h>
#include <glslang/SPIRV/GlslangToSpv.h>
#include <yaml-cpp/yaml.h>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
#include "iwa/log.hpp"
#include "iwa/util/dir_stack_file_includer.hpp"
#include "iwa/util/reflect_glsl.hpp"

namespace fs = std::filesystem;

namespace iwa
{
namespace
{
class CustomFileIncluder : public impl::DirStackFileIncluder
{
private:
    fs::path workingDir;
    mijin::FileSystemAdapter& mFsAdapter;
public:
    explicit CustomFileIncluder(mijin::FileSystemAdapter& fsAdapter) noexcept: mFsAdapter(fsAdapter)
    {}

public:
    void setWorkingDir(const fs::path& workingDir_)
    { workingDir = workingDir_; }

protected: // overrides
    IncludeResult* readLocalPath(const char* headerName, const char* includerName, int depth) override
    {
        // Discard popped include directories, and
        // initialize when at parse-time first level.
        directoryStack.resize(depth + externalLocalDirectoryCount);
        if (depth == 1)
        {
            directoryStack.back() = getDirectory(includerName);
        }

        // Find a directory that works, using a reverse search of the include stack.
        for (auto it = directoryStack.rbegin(); it != directoryStack.rend(); ++it)
        {
            std::string path = *it + '/' + headerName;
            std::replace(path.begin(), path.end(), '\\', '/');

            std::unique_ptr<mijin::Stream> stream;
            mijin::StreamError error = mijin::StreamError::UNKNOWN_ERROR;
            if (workingDir != fs::path())
            {
                // try relative include first
                error = mFsAdapter.open(workingDir / path, mijin::FileOpenMode::READ, stream);
            }
            if (error != mijin::StreamError::SUCCESS)
            {
                error = mFsAdapter.open(path, mijin::FileOpenMode::READ, stream);
            }
            if (error == mijin::StreamError::SUCCESS)
            {
                directoryStack.push_back(getDirectory(path));
                includedFiles.insert(path);
                return newCustomIncludeResult(path, *stream);
            }
        }

        return nullptr;
    }

    // Do actual reading of the file, filling in a new include result.
    IncludeResult* newCustomIncludeResult(const std::string& path, mijin::Stream& stream) const
    {
        (void) stream.seek(0, mijin::SeekMode::RELATIVE_TO_END);
        const std::size_t length = stream.tell();
        (void) stream.seek(0);

        char* content = new tUserDataElement[length]; // NOLINT(cppcoreguidelines-owning-memory)
        const mijin::StreamError error = stream.readRaw(content, length);
        if (error != mijin::StreamError::SUCCESS)
        {
            logAndDie("Error reading include file.");
        }
        return new IncludeResult(path, content, length, content); // NOLINT(cppcoreguidelines-owning-memory)
    }
};

class SemanticIoResolver : public glslang::TDefaultIoResolverBase
{
private:
    const std::vector<GLSLSemanticMapping>& mMappings;
public:
    SemanticIoResolver(const glslang::TIntermediate& intermediate, const std::vector<GLSLSemanticMapping>& mappings)
        : glslang::TDefaultIoResolverBase(intermediate), mMappings(mappings) {}

    bool validateBinding(EShLanguage /* stage */, glslang::TVarEntryInfo& /* ent */) override { return true; }

    glslang::TResourceType getResourceType(const glslang::TType& type) override {
        if (isImageType(type)) {
            return glslang::EResImage;
        }
        if (isTextureType(type)) {
            return glslang::EResTexture;
        }
        if (isSsboType(type)) {
            return glslang::EResSsbo;
        }
        if (isSamplerType(type)) {
            return glslang::EResSampler;
        }
        if (isUboType(type)) {
            return glslang::EResUbo;
        }
        return glslang::EResCount;
    }

    int resolveBinding(EShLanguage stage, glslang::TVarEntryInfo& ent) override
    {
        const glslang::TType& type = ent.symbol->getType();
        if (type.getQualifier().hasSemantic())
        {
            const unsigned semantic = type.getQualifier().layoutSemantic;
            const unsigned semanticIdx = type.getQualifier().hasSemanticIndex() ? type.getQualifier().layoutSemanticIndex : 0;
            auto it = std::ranges::find_if(mMappings, [&](const GLSLSemanticMapping& mapping)
            {
                return mapping.semantic == semantic && mapping.semanticIdx == semanticIdx;
            });
            if (it != mMappings.end()) {
                return ent.newBinding = it->newBinding;
            }
        }

        // default implementation
        const int set = getLayoutSet(type);
        // On OpenGL, arrays of opaque types take a separate binding for each element.
        const int numBindings = referenceIntermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1;
        const glslang::TResourceType resource = getResourceType(type);
        if (resource < glslang::EResCount) {
            if (type.getQualifier().hasBinding()) {
                return ent.newBinding = reserveSlot(
                    set, getBaseBinding(stage, resource, set) + type.getQualifier().layoutBinding, numBindings);
            }
            if (ent.live && doAutoBindingMapping()) {
                // Find a free slot. The caller made sure to pass all variables with an explicit
                // binding first; now all remaining variables that still need a binding are passed.
                return ent.newBinding = getFreeSlot(set, getBaseBinding(stage, resource, set), numBindings);
            }
        }
        return ent.newBinding = -1;
    }
    int resolveSet(EShLanguage stage, glslang::TVarEntryInfo& ent) override
    {
        const glslang::TType& type = ent.symbol->getType();
        if (type.getQualifier().hasSemantic())
        {
            const unsigned semantic = type.getQualifier().layoutSemantic;
            const unsigned semanticIdx = type.getQualifier().hasSemanticIndex() ? type.getQualifier().layoutSemanticIndex : 0;
            auto it = std::ranges::find_if(mMappings, [&](const GLSLSemanticMapping& mapping)
            {
                return mapping.semantic == semantic && mapping.semanticIdx == semanticIdx;
            });
            if (it == mMappings.end()) {
                return glslang::TDefaultIoResolverBase::resolveSet(stage, ent);
            }
            return ent.newSet = it->newSet;
        }
        return glslang::TDefaultIoResolverBase::resolveSet(stage, ent);
    }
    void addStage(EShLanguage stage, glslang::TIntermediate& stageIntermediate) override
    {
        nextInputLocation = nextOutputLocation = 0;
        glslang::TDefaultIoResolverBase::addStage(stage, stageIntermediate);
    }
};

void initGlslang() noexcept
{
    static bool inited = false;
    if (inited)
    {
        return;
    }
    inited = true;
    if (!glslang::InitializeProcess())
    {
        logAndDie("Error initializing Glslang.");
    }
}
} // namespace

EShLanguage typeToGlslang(vk::ShaderStageFlagBits type) noexcept
{
    switch (type)
    {
    case vk::ShaderStageFlagBits::eCompute:
        return EShLangCompute;
    case vk::ShaderStageFlagBits::eVertex:
        return EShLangVertex;
    case vk::ShaderStageFlagBits::eFragment:
        return EShLangFragment;
    case vk::ShaderStageFlagBits::eRaygenKHR:
        return EShLangRayGen;
    case vk::ShaderStageFlagBits::eClosestHitKHR:
        return EShLangClosestHit;
    case vk::ShaderStageFlagBits::eAnyHitKHR:
        return EShLangAnyHit;
    case vk::ShaderStageFlagBits::eMissKHR:
        return EShLangMiss;
    case vk::ShaderStageFlagBits::eIntersectionKHR:
        return EShLangIntersect;
    case vk::ShaderStageFlagBits::eCallableKHR:
        return EShLangCallable;
    case vk::ShaderStageFlagBits::eTaskEXT:
        return EShLangTask;
    case vk::ShaderStageFlagBits::eMeshEXT:
        return EShLangMesh;
    case vk::ShaderStageFlagBits::eTessellationControl:
        return EShLangTessControl;
    case vk::ShaderStageFlagBits::eTessellationEvaluation:
        return EShLangTessEvaluation;
    case vk::ShaderStageFlagBits::eGeometry:
        return EShLangGeometry;
    case vk::ShaderStageFlagBits::eAllGraphics:
    case vk::ShaderStageFlagBits::eAll:
    case vk::ShaderStageFlagBits::eSubpassShadingHUAWEI:
    case vk::ShaderStageFlagBits::eClusterCullingHUAWEI:
        break; // let it fail
    }

    logAndDie("Invalid value passed to typeToGlslang!");
}

ShaderSource ShaderSource::fromStream(mijin::Stream& stream, std::string fileName)
{
    ShaderSource result = {
        .fileName = std::move(fileName)
    };
    if (const mijin::StreamError error = stream.readAsString(result.code); error != mijin::StreamError::SUCCESS)
    {
        // TODO: custom exception type, for stacktrace and stuff
        throw std::runtime_error("Error reading shader source.");
    }
    return result;
}

ShaderSource ShaderSource::fromFile(const mijin::PathReference& file)
{
    std::unique_ptr<mijin::Stream> stream;
    if (const mijin::StreamError error = file.open(mijin::FileOpenMode::READ, stream); error != mijin::StreamError::SUCCESS)
    {
        throw std::runtime_error("Error opening file for reading shader source.");
    }
    return fromStream(*stream, file.getPath().string());
}

ShaderSource ShaderSource::fromYaml(const YAML::Node& node, const mijin::PathReference& yamlFile)
{
    if (node.Tag() == "!load")
    {
        return fromFile(yamlFile.getAdapter()->getPath(node.as<std::string>()));
    }
    const std::string& source = node["source"].as<std::string>();
    std::string fileName;
    if (const YAML::Node fileNameNode = node["fileName"]; !fileNameNode.IsNull())
    {
        fileName = fileNameNode.as<std::string>();
    }
    return {
        .code = source,
        .fileName = std::move(fileName)
    };
}
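
// Illustrative sketch, not part of the original file: the two YAML forms accepted by
// ShaderSource::fromYaml() above, inferred from the parsing code. The document key and the
// file path below are made up for the example; only the "!load" tag and the "source" /
// "fileName" keys come from the code.
//
//   shader: !load "example.frag"   # tag form: the scalar is a path resolved via the YAML file's adapter
//
//   shader:                        # inline form
//     source: |
//       #version 450
//       void main() {}
//     fileName: example.frag       # optional, only used as a display name for diagnostics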

GLSLShader::GLSLShader(ObjectPtr<Instance> owner, GLSLShaderCreationArgs args) noexcept
    : super_t(std::move(owner)), mType(args.type), mSources(std::move(args.sources)), mDefines(std::move(args.defines))
{
    MIJIN_ASSERT(!mSources.empty(), "Cannot compile without sources.");
    compile();
}

GLSLShader::~GLSLShader() noexcept = default;

std::unique_ptr<glslang::TShader> GLSLShader::releaseHandle()
{
    if (mHandle == nullptr) {
        compile();
    }
    return std::exchange(mHandle, nullptr);
}

ShaderMeta GLSLShader::getPartialMeta()
{
    if (mHandle == nullptr) {
        compile();
    }
    return reflectShader(*mHandle);
}

void GLSLShader::compile()
{
    initGlslang();

    const EShLanguage stage = typeToGlslang(mType);

    std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage); // NOLINT(cppcoreguidelines-owning-memory)

    std::vector<const char*> sources;
    std::vector<int> lengths;
    std::vector<const char*> names;
    sources.reserve(mSources.size() + 1);
    lengths.reserve(mSources.size() + 1);
    names.reserve(mSources.size() + 1);

    std::string preamble = getOwner()->getInstanceExtension<GLSLCompilerSettings>().getCommonPreamble();
    for (const std::string& define : mDefines) {
        preamble.append(fmt::format("\n#define {}\n", define));
    }
    sources.push_back(preamble.c_str());
    lengths.push_back(static_cast<int>(preamble.size()));
    names.push_back("<preamble>");

    for (const ShaderSource& source : mSources)
    {
        sources.push_back(source.code.c_str());
        lengths.push_back(static_cast<int>(source.code.size()));
        names.push_back(source.fileName.c_str());
    }
    shader->setStringsWithLengthsAndNames(sources.data(), lengths.data(), names.data(), static_cast<int>(sources.size()));
    shader->setDebugInfo(true);
    shader->setEnvInput(glslang::EShSourceGlsl, stage, glslang::EShClientVulkan, glslang::EShTargetVulkan_1_3);
    shader->setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_3);
    shader->setEnvTarget(glslang::EShTargetLanguage::EShTargetSpv, glslang::EShTargetSpv_1_6);
    shader->setAutoMapLocations(true);
    shader->setAutoMapBindings(true);

    const EShMessages PREPROCESS_MESSAGES = static_cast<EShMessages>(EShMsgDefault
#if !defined(KAZAN_RELEASE)
        | EShMsgDebugInfo
#endif
    );
    std::string completeCode;
    CustomFileIncluder includer(getOwner()->getPrimaryFSAdapter()); // TODO: this type seems to be doing stupid things, investigate
    const std::string sourceFileAbsStr = mSources[0].fileName; // just for you MSVC <3
    if (!sourceFileAbsStr.empty()) {
        includer.setWorkingDir(fs::path(sourceFileAbsStr).parent_path());
    }
    const bool couldPreprocess = shader->preprocess(
        /* builtInResources = */ GetDefaultResources(),
        /* defaultVersion = */ 450,
        /* defaultProfile = */ ECoreProfile,
        /* forceDefaultVersionAndProfile = */ false,
        /* forwardCompatible = */ false,
        /* message = */ PREPROCESS_MESSAGES,
        /* outputString = */ &completeCode,
        /* includer = */ includer
    );
    if (!couldPreprocess)
    {
        logMsg("GLSL preprocessing failed:\ninfo log:\n{}\ndebug log:\n{}",
            shader->getInfoLog(), shader->getInfoDebugLog()
        );
        logAndDie("Error preprocessing shader.");
    }

#if 0
    ShaderPreprocessResult preprocessResult = preprocessShader(completeCode);

    for (std::string& module : preprocessResult.importedModules) {
        importedModules.insert(std::move(module));
    }

    for (std::string& option : preprocessResult.supportedOptions) {
        supportedOptions.insert(std::move(option));
    }
#endif

    const char* newSource = completeCode.c_str();
#if defined(KAZAN_RELEASE)
    shader->setStrings(&newSource, 1); // replace source with the preprocessed one
#else
    const int newSourceLen = static_cast<int>(std::strlen(newSource));
    const char* newSourceName = sourceFileAbsStr.c_str();
    shader->setStringsWithLengthsAndNames(&newSource, &newSourceLen, &newSourceName, 1);
#endif
    const EShMessages PARSE_MESSAGES = static_cast<EShMessages>(EShMsgDefault
#if !defined(KAZAN_RELEASE)
        | EShMsgDebugInfo
#endif
    );
    const bool couldParse = shader->parse(
        /* builtinResources = */ GetDefaultResources(),
        /* defaultVersion = */ 450,
        /* forwardCompatible = */ false,
        /* messages = */ PARSE_MESSAGES
    );
    if (!couldParse)
    {
        logMsg("GLSL parsing failed:\ninfo log:\n{}\ndebug log:\n{}",
            shader->getInfoLog(), shader->getInfoDebugLog()
        );
        logAndDie("Error parsing shader.");
    }

    mHandle = std::move(shader);
}

GLSLShaderProgram::GLSLShaderProgram(ObjectPtr<Device> owner, GLSLShaderProgramCreationArgs args)
    : super_t(std::move(owner)), mLinkFlags(args.linkFlags)
{
    MIJIN_ASSERT_FATAL(!args.shaders.empty(), "At least one shader per program is required!");

    mHandle = std::make_unique<glslang::TProgram>();
    for (const ObjectPtr<GLSLShader>& shader : args.shaders)
    {
        mShaderHandles.push_back(shader->releaseHandle());
        mHandle->addShader(mShaderHandles.back().get());
    }

    const EShMessages linkMessages = static_cast<EShMessages>(EShMsgSpvRules | EShMsgVulkanRules
        | (args.linkFlags.withDebugInfo ? EShMsgDebugInfo : EShMessages(0)));

    if (!mHandle->link(linkMessages))
    {
        logAndDie("GLSL linking failed!\ninfo log:\n{}\ndebug log:\n{}",
            mHandle->getInfoLog(), mHandle->getInfoDebugLog()
        );
    }

    glslang::TIntermediate* referenceIntermediate = mHandle->getIntermediate(typeToGlslang(args.shaders[0]->getType()));
    SemanticIoResolver ioResolver(*referenceIntermediate, args.semanticMappings);
    if (!mHandle->mapIO(&ioResolver))
    {
        logAndDie("GLSL io mapping failed!\ninfo log:\n{}\ndebug log:\n{}",
            mHandle->getInfoLog(), mHandle->getInfoDebugLog()
        );
    }

    mMeta = reflectProgram(*mHandle);
}

std::vector<std::uint32_t> GLSLShaderProgram::generateSpirv(vk::ShaderStageFlagBits stage) const
{
    const EShLanguage glslLang = typeToGlslang(stage);

    glslang::SpvOptions options = {};
    if (mLinkFlags.withDebugInfo)
    {
        options.generateDebugInfo = true;
        options.stripDebugInfo = false;
        options.disableOptimizer = true;
        options.emitNonSemanticShaderDebugInfo = true;
        options.emitNonSemanticShaderDebugSource = false; // TODO: this should be true, but makes GLSLang crash
    }
    else
    {
        options.generateDebugInfo = false;
        options.stripDebugInfo = true;
        options.disableOptimizer = false;
        options.emitNonSemanticShaderDebugInfo = true; // TODO: this should be false, but that also crashes GLSLang ...
        options.emitNonSemanticShaderDebugSource = false;
    }
    options.optimizeSize = false;
    options.disassemble = false;
    options.validate = true;

    spv::SpvBuildLogger logger;
    const glslang::TIntermediate* intermediate = mHandle->getIntermediate(glslLang);
    if (intermediate == nullptr)
    {
        throw std::runtime_error("Attempting to generate SpirV from invalid shader stage.");
    }
    std::vector<std::uint32_t> spirv;
    glslang::GlslangToSpv(*intermediate, spirv, &logger, &options);

    const std::string messages = logger.getAllMessages();
    if (!messages.empty())
    {
        logMsg("SpirV messages: {}", messages);
    }

    return spirv;
}

std::vector<PipelineStage> GLSLShaderProgram::generatePipelineStages() const
{
    std::vector<PipelineStage> stages;
    for (const vk::ShaderStageFlagBits stage : mMeta.stages)
    {
        const std::vector<std::uint32_t> spirv = generateSpirv(stage);
        stages.push_back({
            .shader = getOwner()->createChild<ShaderModule>(ShaderModuleCreationArgs{.code = spirv}),
            .stage = stage
        });
    }
    return stages;
}

GraphicsPipelineCreationArgs GLSLShaderProgram::prepareGraphicsPipeline(PrepareGraphicsPipelineArgs& args) const
{
    args.pipelineLayoutMeta = mMeta.generatePipelineLayout(args.layoutArgs);
    args.layouts = args.pipelineLayoutMeta.createPipelineLayout(*getOwner());
    return {
        .stages = generatePipelineStages(),
        .vertexInput = mMeta.generateVertexInputFromLayout(args.vertexLayout),
        .layout = args.layouts.pipelineLayout
    };
}

ComputePipelineCreationArgs GLSLShaderProgram::prepareComputePipeline(PrepareComputePipelineArgs& args) const
{
    args.pipelineLayoutMeta = mMeta.generatePipelineLayout(args.layoutArgs);
    args.layouts = args.pipelineLayoutMeta.createPipelineLayout(*getOwner());
    return {
        .stage = generatePipelineStages()[0],
        .layout = args.layouts.pipelineLayout
    };
}
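
// Illustrative usage sketch, not part of the original file: how the classes above are meant to fit
// together, based only on the constructors and methods defined in this translation unit. The
// instance/device variables and the shader path are assumptions for the example.
//
//   ObjectPtr<GLSLShader> vertexShader = instance->createChild<GLSLShader>(GLSLShaderCreationArgs{
//       .type = vk::ShaderStageFlagBits::eVertex,
//       .sources = {ShaderSource::fromFile(vertexPath)}
//   });
//   ObjectPtr<GLSLShader> fragmentShader = /* same for the fragment stage */;
//
//   ObjectPtr<GLSLShaderProgram> program = device->createChild<GLSLShaderProgram>(GLSLShaderProgramCreationArgs{
//       .shaders = {vertexShader, fragmentShader}
//   });
//
//   PrepareGraphicsPipelineArgs prepareArgs = {/* layout + vertex layout arguments */};
//   GraphicsPipelineCreationArgs pipelineArgs = program->prepareGraphicsPipeline(prepareArgs);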

// GraphicsPipelineCreationArgs prepareGLSLGraphicsPipeline(const PrepareGLSLGraphicsPipelineArgs& args)
// {
//     return {
//         .stages =
//         {
//             PipelineStage{.shader = vertexShaderModule, .stage = vk::ShaderStageFlagBits::eVertex},
//             PipelineStage{.shader = fragmentShaderModule, .stage = vk::ShaderStageFlagBits::eFragment}
//         },
//         .vertexInput = std::move(vertexInput),
//         .inputAssembly =
//         {
//             .topology = vk::PrimitiveTopology::eTriangleStrip
//         },
//         .colorBlend =
//         {
//             .attachements = {DEFAULT_BLEND_ATTACHMENT}
//         },
//         .renderingInfo = GraphicsPipelineRenderingInfo{
//             .colorAttachmentFormats = {mRenderTarget->getFormat()}
//         },
//         .layout = mPipelineLayout
//     };
// }
} // namespace iwa
33
source/util/growing_descriptor_pool.cpp
Normal file
@@ -0,0 +1,33 @@

#include "iwa/util/growing_descriptor_pool.hpp"

#include <utility>

#include "iwa/device.hpp"

namespace iwa
{
GrowingDescriptorPool::GrowingDescriptorPool(ObjectPtr<Device> owner, GrowingDescriptorPoolCreationArgs args)
    : super_t(std::move(owner)), mCreationArgs(std::move(args))
{
}

ObjectPtr<DescriptorSet> GrowingDescriptorPool::allocateDescriptorSet(const DescriptorSetAllocateArgs& args)
{
    for (const ObjectPtr<DescriptorPool>& pool : mPools)
    {
        try
        {
            return pool->allocateDescriptorSet(args);
        }
        catch (vk::FragmentedPoolError&) {}
        catch (vk::OutOfPoolMemoryError&) {}
        // any other error shall be forwarded
    }

    // couldn't allocate in any of the existing pools, create a new one
    mPools.push_back(getOwner()->createChild<DescriptorPool>(mCreationArgs));
    return mPools.back()->allocateDescriptorSet(args); // raise any error that may occur
}
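
// Illustrative usage sketch, not part of the original file: allocation is first retried in every
// existing pool, and a new DescriptorPool child is only created once all of them throw
// vk::FragmentedPoolError or vk::OutOfPoolMemoryError. Variable names are assumptions.
//
//   ObjectPtr<GrowingDescriptorPool> pool = device->createChild<GrowingDescriptorPool>(poolCreationArgs);
//   ObjectPtr<DescriptorSet> set = pool->allocateDescriptorSet({.layout = descriptorSetLayout});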
} // namespace iwa
147
source/util/image_reference.cpp
Normal file
@@ -0,0 +1,147 @@

#include "iwa/util/image_reference.hpp"

#include <mijin/util/iterators.hpp>
#include "iwa/command.hpp"
#include "iwa/device.hpp"

namespace iwa
{
ImageReference::ImageReference(ObjectPtr<Device> owner) : super_t(std::move(owner))
{
}

void ImageReference::finalize(ImageReferenceFinalizeArgs& /* args */) {}

mijin::Task<> ImageReference::c_present()
{
    co_return;
}

SwapchainImageReference::SwapchainImageReference(ObjectPtr<Device> owner, SwapchainImageReferenceCreationArgs args)
    : super_t(std::move(owner)), mSwapchain(std::move(args.swapchain))
{
    mPresentReadySemaphores.resize(mSwapchain->getNumParallelFrames());
    for (ObjectPtr<Semaphore>& semaphore : mPresentReadySemaphores)
    {
        semaphore = getOwner()->createChild<Semaphore>();
    }
    createImageViews();
    mSwapchain->recreated.connect([this]()
    {
        createImageViews();
    });
}

vk::Format SwapchainImageReference::getFormat()
{
    return mSwapchain->getFormat();
}

vk::Extent2D SwapchainImageReference::getExtent()
{
    return mSwapchain->getExtent();
}

ImageReferenceFrame SwapchainImageReference::getCurrentFrame()
{
    return ImageReferenceFrame{
        .image = mSwapchain->getCurrentImage().getRaw(),
        .imageView = mImageViews[mSwapchain->getCurrentImageIdx()].getRaw()
    };
}

void SwapchainImageReference::finalize(ImageReferenceFinalizeArgs& args)
{
    args.waitSemaphores.push_back(*mSwapchain->getCurrentAvailableSemaphore());
    args.signalSemaphores.push_back(*mPresentReadySemaphores[mSwapchain->getCurrentFrameIdx()]);

    mSwapchain->getCurrentImage()->applyTransition(args.cmdBuffer, ImageTransition{
        .stages = vk::PipelineStageFlagBits::eBottomOfPipe,
        .layout = vk::ImageLayout::ePresentSrcKHR,
        .access = {}
    });
}

mijin::Task<> SwapchainImageReference::c_present()
{
    // and present
    co_await mSwapchain->c_present({
        .queue = getOwner()->getGraphicsQueue(),
        .waitSemaphores = {mPresentReadySemaphores[mSwapchain->getCurrentFrameIdx()]->getVkHandle()}
    });
}

void SwapchainImageReference::createImageViews()
{
    mImageViews.resize(mSwapchain->getImages().size());
    for (auto [image, imageView] : mijin::zip(mSwapchain->getImages(), mImageViews))
    {
        imageView = image->createImageView();
    }
}


DirectImageReference::DirectImageReference(ObjectPtr<Device> owner, DirectImageReferenceCreationArgs args)
    : super_t(std::move(owner)), mImage(std::move(args.image)), mImageView(std::move(args.imageView))
{
}

vk::Format DirectImageReference::getFormat()
{
    return mImage->getFormat();
}

vk::Extent2D DirectImageReference::getExtent()
{
    return {
        .width = mImage->getSize().width,
        .height = mImage->getSize().height
    };
}

ImageReferenceFrame DirectImageReference::getCurrentFrame()
{
    return ImageReferenceFrame{
        .image = mImage.getRaw(),
        .imageView = mImageView.getRaw()
    };
}

AutoResizeImageReference::AutoResizeImageReference(ObjectPtr<Device> owner, AutoResizeImageReferenceCreationArgs args)
    : super_t(std::move(owner), DirectImageReferenceCreationArgs{}), mReferenceImageRef(std::move(args.referenceImageRef)),
      mImageCreationArgs(std::move(args.imageCreationArgs)), mImageViewCreationArgs(args.imageViewCreationArgs)
{
    createImage();
}

vk::Extent2D AutoResizeImageReference::getExtent()
{
    return mReferenceImageRef->getExtent();
}

ImageReferenceFrame AutoResizeImageReference::getCurrentFrame()
{
    const vk::Extent2D extent = mReferenceImageRef->getExtent();
    if (extent.width != mImage->getSize().width || extent.height != mImage->getSize().height) {
        createImage();
    }
    return ImageReferenceFrame{
        .image = mImage.getRaw(),
        .imageView = mImageView.getRaw()
    };
}

void AutoResizeImageReference::createImage()
{
    const vk::Extent2D extent = mReferenceImageRef->getExtent();
    mImageCreationArgs.extent.width = extent.width;
    mImageCreationArgs.extent.height = extent.height;
    mImageCreationArgs.extent.depth = 1;
    mImage = getOwner()->createChild<Image>(mImageCreationArgs);
    mImage->allocateMemory();
    mImageView = mImage->createImageView(mImageViewCreationArgs);
}
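
// Illustrative usage sketch, not part of the original file: an AutoResizeImageReference that tracks
// a swapchain reference and recreates its image (and view) whenever the reference extent changes.
// Variable names and the elided creation-args values are assumptions for the example.
//
//   ObjectPtr<SwapchainImageReference> swapchainRef = device->createChild<SwapchainImageReference>(
//       SwapchainImageReferenceCreationArgs{.swapchain = swapchain});
//   ObjectPtr<AutoResizeImageReference> colorRef = device->createChild<AutoResizeImageReference>(
//       AutoResizeImageReferenceCreationArgs{
//           .referenceImageRef = swapchainRef,
//           .imageCreationArgs = /* format, usage, ... */,
//           .imageViewCreationArgs = /* subresource range, ... */
//       });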
} // namespace iwa
587
source/util/reflect_glsl.cpp
Normal file
@@ -0,0 +1,587 @@

#include "iwa/util/reflect_glsl.hpp"

#include <glslang/Include/InfoSink.h>
#include <glslang/Public/ShaderLang.h>
#include <glslang/MachineIndependent/localintermediate.h>
#include <glslang/Public/ResourceLimits.h>

namespace iwa
{
namespace
{
class MetaCollectingTraverser : public glslang::TIntermTraverser
{
private:
    ShaderMeta& meta;
    vk::ShaderStageFlagBits shaderType;
public:
    inline MetaCollectingTraverser(ShaderMeta& meta_, vk::ShaderStageFlagBits shaderType_) : meta(meta_), shaderType(shaderType_)
    {}

    bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override;
    bool visitUnary(glslang::TVisit, glslang::TIntermUnary* node) override;
    bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override;
    bool visitSelection(glslang::TVisit, glslang::TIntermSelection* node) override;
    void visitConstantUnion(glslang::TIntermConstantUnion* node) override;
    void visitSymbol(glslang::TIntermSymbol* node) override;
    bool visitLoop(glslang::TVisit, glslang::TIntermLoop* node) override;
    bool visitBranch(glslang::TVisit, glslang::TIntermBranch* node) override;
    bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch* node) override;
};

vk::Format convertGlslangBaseType(const glslang::TType& type)
{
    switch (type.getBasicType())
    {
    case glslang::EbtInt:
        return vk::Format::eR32Sint;
    case glslang::EbtUint:
        return vk::Format::eR32Uint;
    case glslang::EbtFloat:
        return vk::Format::eR32Sfloat;
    case glslang::EbtDouble:
        return vk::Format::eR64Sfloat;
    default:
        break;
    }

    logAndDie("Don't know how to convert Glslang basic type :*(");
}

vk::Format convertGlslangLayoutFormat(glslang::TLayoutFormat layoutFormat)
{
    switch (layoutFormat)
    {
    case glslang::TLayoutFormat::ElfNone:
        return vk::Format::eUndefined;

    // Float image
    case glslang::TLayoutFormat::ElfRgba32f:
        return vk::Format::eR32G32B32A32Sfloat;
    case glslang::TLayoutFormat::ElfRgba16f:
        return vk::Format::eR16G16B16A16Sfloat;
    case glslang::TLayoutFormat::ElfR32f:
        return vk::Format::eR32Sfloat;
    case glslang::TLayoutFormat::ElfRgba8:
        return vk::Format::eR8G8B8A8Unorm;
    case glslang::TLayoutFormat::ElfRgba8Snorm:
        return vk::Format::eR8G8B8A8Snorm;

    case glslang::TLayoutFormat::ElfRg32f:
        return vk::Format::eR32G32Sfloat;
    case glslang::TLayoutFormat::ElfRg16f:
        return vk::Format::eR16G16Sfloat;
    case glslang::TLayoutFormat::ElfR11fG11fB10f:
        return vk::Format::eB10G11R11UfloatPack32; // TODO: ?
    case glslang::TLayoutFormat::ElfR16f:
        return vk::Format::eR16Sfloat;
    case glslang::TLayoutFormat::ElfRgba16:
        return vk::Format::eR16G16B16A16Unorm;
    case glslang::TLayoutFormat::ElfRgb10A2:
        return vk::Format::eA2R10G10B10SnormPack32; // TODO: ?
    case glslang::TLayoutFormat::ElfRg16:
        return vk::Format::eR16G16Unorm;
    case glslang::TLayoutFormat::ElfRg8:
        return vk::Format::eR8G8Unorm;
    case glslang::TLayoutFormat::ElfR16:
        return vk::Format::eR16Unorm;
    case glslang::TLayoutFormat::ElfR8:
        return vk::Format::eR8Unorm;
    case glslang::TLayoutFormat::ElfRgba16Snorm:
        return vk::Format::eR16G16B16A16Snorm;
    case glslang::TLayoutFormat::ElfRg16Snorm:
        return vk::Format::eR16G16Snorm;
    case glslang::TLayoutFormat::ElfRg8Snorm:
        return vk::Format::eR8G8Snorm;
    case glslang::TLayoutFormat::ElfR16Snorm:
        return vk::Format::eR16Snorm;
    case glslang::TLayoutFormat::ElfR8Snorm:
        return vk::Format::eR8Snorm;

    // Int image
    case glslang::TLayoutFormat::ElfRgba32i:
        return vk::Format::eR32G32B32A32Sint;
    case glslang::TLayoutFormat::ElfRgba16i:
        return vk::Format::eR16G16B16A16Sint;
    case glslang::TLayoutFormat::ElfRgba8i:
        return vk::Format::eR8G8B8A8Sint;
    case glslang::TLayoutFormat::ElfR32i:
        return vk::Format::eR32Sint;

    case glslang::TLayoutFormat::ElfRg32i:
        return vk::Format::eR32G32Sint;
    case glslang::TLayoutFormat::ElfRg16i:
        return vk::Format::eR16G16Sint;
    case glslang::TLayoutFormat::ElfRg8i:
        return vk::Format::eR8G8Sint;
    case glslang::TLayoutFormat::ElfR16i:
        return vk::Format::eR16Sint;
    case glslang::TLayoutFormat::ElfR8i:
        return vk::Format::eR8Sint;
    case glslang::TLayoutFormat::ElfR64i:
        return vk::Format::eR64Sint;

    // Uint image
    case glslang::TLayoutFormat::ElfRgba32ui:
        return vk::Format::eR32G32B32A32Uint;
    case glslang::TLayoutFormat::ElfRgba16ui:
        return vk::Format::eR16G16B16A16Uint;
    case glslang::TLayoutFormat::ElfRgba8ui:
        return vk::Format::eR8G8B8A8Uint;
    case glslang::TLayoutFormat::ElfR32ui:
        return vk::Format::eR32Uint;

    case glslang::TLayoutFormat::ElfRg32ui:
        return vk::Format::eR32G32Uint;
    case glslang::TLayoutFormat::ElfRg16ui:
        return vk::Format::eR16G16Uint;
    case glslang::TLayoutFormat::ElfRgb10a2ui:
        return vk::Format::eA2R10G10B10UintPack32;
    case glslang::TLayoutFormat::ElfRg8ui:
        return vk::Format::eR8G8Uint;
    case glslang::TLayoutFormat::ElfR16ui:
        return vk::Format::eR16Uint;
    case glslang::TLayoutFormat::ElfR8ui:
        return vk::Format::eR8Uint;
    case glslang::TLayoutFormat::ElfR64ui:
        return vk::Format::eR64Uint;

    // other/unknown
    case glslang::TLayoutFormat::ElfSize1x8:
    case glslang::TLayoutFormat::ElfSize1x16:
    case glslang::TLayoutFormat::ElfSize1x32:
    case glslang::TLayoutFormat::ElfSize2x32:
    case glslang::TLayoutFormat::ElfSize4x32:
    case glslang::TLayoutFormat::ElfEsFloatGuard:
    case glslang::TLayoutFormat::ElfFloatGuard:
    case glslang::TLayoutFormat::ElfEsIntGuard:
    case glslang::TLayoutFormat::ElfIntGuard:
    case glslang::TLayoutFormat::ElfEsUintGuard:
    case glslang::TLayoutFormat::ElfExtSizeGuard:
    case glslang::TLayoutFormat::ElfCount:
        break;
    }

    logAndDie("Unexpected format in convertGlslangLayoutFormat()."); // : {}", layoutFormat);
}

vk::Format convertGlslangVectorType(glslang::TBasicType basicType, int vectorSize)
{
    switch (basicType)
    {
    case glslang::EbtFloat:
        switch (vectorSize)
        {
        case 2:
            return vk::Format::eR32G32Sfloat;
        case 3:
            return vk::Format::eR32G32B32Sfloat;
        case 4:
            return vk::Format::eR32G32B32A32Sfloat;
        default:
            break;
        }
        break;
    case glslang::EbtDouble:
        switch (vectorSize)
        {
        case 2:
            return vk::Format::eR64G64Sfloat;
        case 3:
            return vk::Format::eR64G64B64Sfloat;
        case 4:
            return vk::Format::eR64G64B64A64Sfloat;
        default:
            break;
        }
        break;
    case glslang::EbtInt:
        switch (vectorSize)
        {
        case 2:
            return vk::Format::eR32G32Sint;
        case 3:
            return vk::Format::eR32G32B32Sint;
        case 4:
            return vk::Format::eR32G32B32A32Sint;
        default:
            break;
        }
        break;
    case glslang::EbtUint:
        switch (vectorSize)
        {
        case 2:
            return vk::Format::eR32G32Uint;
        case 3:
            return vk::Format::eR32G32B32Uint;
        case 4:
            return vk::Format::eR32G32B32A32Uint;
        default:
            break;
        }
        break;
    case glslang::EbtBool: // NOLINT(bugprone-branch-clone) TODO: ???
        break;
    default:
        break;
    }

    logAndDie("Don't know how to convert Glslang vector type :(");
}

vk::Format convertGlslangVectorType(const glslang::TType& type)
{
    assert(type.isVector());
    return convertGlslangVectorType(type.getBasicType(), type.getVectorSize());
}

ShaderVariableMatrixType convertGlslangMatrixType(const glslang::TType& type)
{
    assert(type.isMatrix());
    assert(type.getMatrixCols() == type.getMatrixRows()); // only supported types yet...
    switch (type.getMatrixCols())
    {
    case 2:
        return ShaderVariableMatrixType::MAT2;
    case 3:
        return ShaderVariableMatrixType::MAT3;
    case 4:
        return ShaderVariableMatrixType::MAT4;
    default:
        break;
    }

    logAndDie("Don't know how to convert Glslang matrix type -.-");
}

ImageDim convertGlslangSamplerDim(glslang::TSamplerDim dim)
{
    switch (dim)
    {
    case glslang::TSamplerDim::Esd1D:
        return ImageDim::ONE;
    case glslang::TSamplerDim::Esd2D:
        return ImageDim::TWO;
    case glslang::TSamplerDim::Esd3D:
        return ImageDim::THREE;
    case glslang::TSamplerDim::EsdCube:
        return ImageDim::CUBE;
    default:
        break;
    }

    logAndDie("Don't know how to convert Glslang sampler dimensions ...");
}

ShaderVariableType convertGlslangType(const glslang::TType& type)
{
    ShaderVariableType result;
    if (type.isVector())
    {
        result.baseType = ShaderVariableBaseType::SIMPLE;
        result.simple.format = convertGlslangVectorType(type);
    } else if (type.isMatrix())
    {
        result.baseType = ShaderVariableBaseType::MATRIX;
        result.matrixType = convertGlslangMatrixType(type);
    } else if (type.isStruct())
    {
        const std::size_t numMembers = type.getStruct()->size();
        result.baseType = ShaderVariableBaseType::STRUCT;
        result.struct_.members.reserve(numMembers);

        std::size_t currentOffset = 0;
        for (const glslang::TTypeLoc& typeLoc: *type.getStruct())
        {
            ShaderVariableStructMember& member = result.struct_.members.emplace_back();
            member.name = typeLoc.type->getFieldName();
            member.type = convertGlslangType(*typeLoc.type);
            member.offset = currentOffset;
            if (typeLoc.type->getQualifier().hasSemantic())
            {
                member.semantic = typeLoc.type->getQualifier().layoutSemantic;
            }
            if (typeLoc.type->getQualifier().hasSemanticIndex())
            {
                member.semanticIdx = typeLoc.type->getQualifier().layoutSemanticIndex;
            }
            currentOffset = member.offset + calcShaderTypeSize(member.type); // TODO: padding
        }

    } else if (type.getBasicType() == glslang::EbtSampler)
    {
        const glslang::TSampler& sampler = type.getSampler();
        result.baseType = ShaderVariableBaseType::IMAGE;
        result.image.dimensions = convertGlslangSamplerDim(sampler.dim);
        result.image.format = convertGlslangLayoutFormat(type.getQualifier().layoutFormat);
    } else
    {
        result.baseType = ShaderVariableBaseType::SIMPLE;
        result.simple.format = convertGlslangBaseType(type);
    }

    if (type.isArray())
    {
        if (type.isArrayVariablyIndexed())
        {
            result.arraySize = 0;
            result.dynamicArraySize = true;
        } else
        {
            assert(type.getArraySizes()->getNumDims() == 1); // don't support multi dimensional arrays yet
            result.arraySize = type.getOuterArraySize();
        }
    }

    return result;
}

vk::DescriptorType getGlslangDescriptorType(const glslang::TType& type)
{
    if (type.getBasicType() == glslang::EbtSampler)
    {
        if (type.getSampler().combined)
        {
            return vk::DescriptorType::eCombinedImageSampler;
        }
        if (type.getSampler().isImage())
        {
            return vk::DescriptorType::eStorageImage;
        }
    } else if (type.isStruct())
    {
        if (type.getQualifier().isUniform())
        {
            return vk::DescriptorType::eUniformBuffer;
        }
        return vk::DescriptorType::eStorageBuffer;
    }
    logAndDie("No idea what to do with this type :/");
}

bool MetaCollectingTraverser::visitBinary(glslang::TVisit, glslang::TIntermBinary* node)
{
    (void) node;
    return false;
}

bool MetaCollectingTraverser::visitUnary(glslang::TVisit, glslang::TIntermUnary* node)
{
    (void) node;
    return false;
}

bool MetaCollectingTraverser::visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node)
{
    switch (node->getOp())
    {
    case glslang::EOpSequence:
        return true;
    case glslang::EOpFunction:
        break;
    case glslang::EOpLinkerObjects:
        return true;
    default:
        break;
    }
    return false;
}

bool MetaCollectingTraverser::visitSelection(glslang::TVisit, glslang::TIntermSelection* node)
{
    (void) node;
    return false;
}

void MetaCollectingTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node)
{
    (void) node;
}

void MetaCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node)
{
    const bool isLinkerObject = getParentNode()
        && getParentNode()->getAsAggregate()
        && getParentNode()->getAsAggregate()->getOp() == glslang::EOpLinkerObjects;
    if (isLinkerObject)
    {
        if (node->getQualifier().builtIn)
        {
            return;
        }

        if (node->getQualifier().isUniformOrBuffer())
        {
            if (node->getQualifier().isPushConstant())
            {
                ShaderPushConstantBlock pushConstantBlock;
                pushConstantBlock.type = convertGlslangType(node->getType());
                assert(pushConstantBlock.type.baseType == ShaderVariableBaseType::STRUCT);
                meta.extendPushConstant(pushConstantBlock, ShaderTypeBits::make(shaderType));
                return;
            }
            const unsigned setIdx = node->getQualifier().hasSet() ? node->getQualifier().layoutSet : UNSPECIFIED_INDEX;
            const unsigned binding = node->getQualifier().hasBinding() ? node->getQualifier().layoutBinding : UNSPECIFIED_INDEX;
            ShaderVariableSet& set = meta.getOrCreateInterfaceVariableSet(setIdx);
            assert(setIdx == UNSPECIFIED_INDEX || !set.getVariableAtBindingOpt(binding)); // multiple bindings at the same index?
            set.usedInStages.set(shaderType, true);

            ShaderVariable& var = set.variables.emplace_back();
            var.binding = binding;
            var.name = node->getName();
            if (node->getQualifier().hasSemantic())
            {
                var.semantic = node->getQualifier().layoutSemantic;
            }
            if (node->getQualifier().hasSemanticIndex())
            {
                var.semanticIndex = node->getQualifier().layoutSemanticIndex;
            }

            // uniform blocks are identified by the name of their type
            if (var.name.empty() || var.name.starts_with("anon@"))
            {
                const glslang::TString& typeName = node->getType().getTypeName();
                if (!typeName.empty())
                {
                    var.name = typeName;
                }
            }
            var.descriptorType = getGlslangDescriptorType(node->getType());
            var.type = convertGlslangType(node->getType());
        }
        else if (node->getQualifier().storage == glslang::EvqVaryingIn)
        {
            ShaderAttribute attribute;
            attribute.stage = shaderType;
            attribute.type = convertGlslangType(node->getType());
            attribute.location = node->getQualifier().hasLocation() ? node->getQualifier().layoutLocation : UNSPECIFIED_INDEX;
            attribute.name = node->getName();
            if (node->getQualifier().hasSemantic())
            {
                attribute.semantic = node->getQualifier().layoutSemantic;
            }
            if (node->getQualifier().hasSemanticIndex())
            {
                attribute.semanticIndex = node->getQualifier().layoutSemanticIndex;
            }
            meta.addInputAttribute(std::move(attribute));
        }
        else if (node->getQualifier().storage == glslang::EvqVaryingOut)
        {
            ShaderAttribute attribute;
            attribute.stage = shaderType;
            attribute.type = convertGlslangType(node->getType());
            attribute.location = node->getQualifier().hasLocation() ? node->getQualifier().layoutLocation : UNSPECIFIED_INDEX;
            attribute.name = node->getName();
            if (node->getQualifier().hasSemantic())
            {
                attribute.semantic = node->getQualifier().layoutSemantic;
            }
            if (node->getQualifier().hasSemanticIndex())
            {
                attribute.semanticIndex = node->getQualifier().layoutSemanticIndex;
            }
            meta.addOutputAttribute(std::move(attribute));
        }
    }
}

bool MetaCollectingTraverser::visitLoop(glslang::TVisit, glslang::TIntermLoop* node)
{
    (void) node;
    return false;
}

bool MetaCollectingTraverser::visitBranch(glslang::TVisit, glslang::TIntermBranch* node)
{
    (void) node;
    return false;
}

bool MetaCollectingTraverser::visitSwitch(glslang::TVisit, glslang::TIntermSwitch* node)
{
    (void) node;
    return false;
}

vk::ShaderStageFlagBits shaderStageFromGlslang(EShLanguage language)
{
    switch (language)
    {
    case EShLangVertex:
        return vk::ShaderStageFlagBits::eVertex;
    case EShLangTessControl:
        return vk::ShaderStageFlagBits::eTessellationControl;
    case EShLangTessEvaluation:
        return vk::ShaderStageFlagBits::eTessellationEvaluation;
    case EShLangGeometry:
        return vk::ShaderStageFlagBits::eGeometry;
    case EShLangFragment:
        return vk::ShaderStageFlagBits::eFragment;
    case EShLangCompute:
        return vk::ShaderStageFlagBits::eCompute;
    case EShLangRayGen:
        return vk::ShaderStageFlagBits::eRaygenKHR;
    case EShLangIntersect:
        return vk::ShaderStageFlagBits::eIntersectionKHR;
    case EShLangAnyHit:
        return vk::ShaderStageFlagBits::eAnyHitKHR;
    case EShLangClosestHit:
        return vk::ShaderStageFlagBits::eClosestHitKHR;
    case EShLangMiss:
        return vk::ShaderStageFlagBits::eMissKHR;
    case EShLangCallable:
        return vk::ShaderStageFlagBits::eCallableKHR;
    case EShLangTask:
        return vk::ShaderStageFlagBits::eTaskEXT;
    case EShLangMesh:
        return vk::ShaderStageFlagBits::eMeshEXT;
    case EShLangCount:
        break; // fall through
    }
    logAndDie("Invalid value passed to shaderStageFromGlslang!");
}
}

ShaderMeta reflectShader(glslang::TShader& shader)
{
    return reflectIntermediate(*shader.getIntermediate(), shaderStageFromGlslang(shader.getStage()));
}

ShaderMeta reflectProgram(glslang::TProgram& program)
{
    ShaderMeta result;
    for (int stage = 0; stage < EShLangCount; ++stage)
    {
        glslang::TIntermediate* intermediate = program.getIntermediate(static_cast<EShLanguage>(stage));
        if (intermediate == nullptr) {
            continue;
        }
        result.extend(reflectIntermediate(*intermediate, shaderStageFromGlslang(static_cast<EShLanguage>(stage))));
    }
    return result;
}

ShaderMeta reflectIntermediate(glslang::TIntermediate& intermediate, vk::ShaderStageFlagBits stage)
{
    ShaderMeta meta;
    MetaCollectingTraverser traverser(meta, stage);
    intermediate.getTreeRoot()->traverse(&traverser);

    meta.stages.set(stage, true);

    if (stage == vk::ShaderStageFlagBits::eCompute)
    {
        meta.localSizeX = static_cast<unsigned>(intermediate.getLocalSize(0));
        meta.localSizeY = static_cast<unsigned>(intermediate.getLocalSize(1));
        meta.localSizeZ = static_cast<unsigned>(intermediate.getLocalSize(2));
    }

    return meta;
}
} // namespace iwa
128
source/util/render_loop.cpp
Normal file
@@ -0,0 +1,128 @@

#include "iwa/util/render_loop.hpp"

#include <mijin/async/task_mutex.hpp>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"

namespace iwa
{
namespace
{
// BIG BIG TODO: This is a dumb workaround for sharing images (e.g. the UI image) between multiple renderers.
// The reason is that the layout change mechanism doesn't work if multiple command buffers (that are executed
// sequentially) are recorded in parallel.
// A possible fix could be to move the state tracking mechanism to the renderer and generate the barriers
// before submitting.
mijin::TaskMutex gRenderMutex;
}

RenderLoop::RenderLoop(ObjectPtr<Device> owner, RenderLoopCreationArgs args)
    : super_t(std::move(owner)), mAdvanceDeleteQueue(args.flags.advanceDeleteQueue)
{
    mAlternating.resize(args.parallelFrames);

    ObjectPtr<CommandPool> commandPool = std::move(args.commandPool);
    if (!commandPool)
    {
        commandPool = getOwner()->createChild<CommandPool>(CommandPoolCreationArgs{
            .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
            .queueFamilyIndex = getOwner()->getDeviceInfo().graphicsQueueFamily
        });
    }
    for (Alternating& alt : mAlternating)
    {
        alt.commandBuffer = commandPool->allocateCommandBuffer();
        alt.renderDoneFence = getOwner()->createChild<Fence>(FenceCreationArgs{.flags = vk::FenceCreateFlagBits::eSignaled});
    }
}

void RenderLoop::start() noexcept
{
    addTask(c_renderLoop());
}

mijin::Task<> RenderLoop::c_init()
{
    co_return;
}

mijin::SimpleTaskLoop& RenderLoop::getTaskLoop() const noexcept
{
    return getOwner()->getOwner()->getMainTaskLoop();
}

mijin::Task<> RenderLoop::c_renderLoop()
{
    co_await c_init();

    while (!getOwner()->getOwner()->isQuitRequested())
    {
        Alternating& alt = mAlternating.at(mFrameIdx);

        // wait for the command buffer to be ready
        co_await alt.renderDoneFence->c_wait();

        // reset the fence
        alt.renderDoneFence->reset();

        vk::CommandBuffer cmdBuffer = alt.commandBuffer->getVkHandle();
        cmdBuffer.begin(vk::CommandBufferBeginInfo{
            .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit
        });

        // record the commands
        RenderLoopRenderArgs renderArgs = {
            .cmdBuffer = *alt.commandBuffer,
            .frameIdx = mFrameIdx
        };
        { // gRenderMutex lock
            const mijin::TaskMutexLock lock = co_await gRenderMutex.c_lock();
            co_await c_render(renderArgs);

            std::vector<vk::Semaphore> waitSemaphores;
            std::vector<vk::Semaphore> signalSemaphores;

            ImageReferenceFinalizeArgs finalizeArgs{
                .cmdBuffer = *alt.commandBuffer,
                .waitSemaphores = waitSemaphores,
                .signalSemaphores = signalSemaphores
            };
            for (const ObjectPtr<ImageReference>& imageRef: renderArgs.usedImageReferences)
            {
                imageRef->finalize(finalizeArgs);
            }

            cmdBuffer.end();

            // submit them
            const vk::PipelineStageFlags waitStage = vk::PipelineStageFlagBits::eFragmentShader;
            getOwner()->getGraphicsQueue().submit(vk::SubmitInfo{
                .waitSemaphoreCount = static_cast<std::uint32_t>(waitSemaphores.size()),
                .pWaitSemaphores = waitSemaphores.data(),
                .pWaitDstStageMask = &waitStage,
                .commandBufferCount = 1,
                .pCommandBuffers = &cmdBuffer,
                .signalSemaphoreCount = static_cast<std::uint32_t>(signalSemaphores.size()),
                .pSignalSemaphores = signalSemaphores.data()
            }, *alt.renderDoneFence);
        } // gRenderMutex lock

        // finally present
        for (const ObjectPtr<ImageReference>& imageRef : renderArgs.usedImageReferences)
        {
            co_await imageRef->c_present();
        }

        // tick deleters
        // TODO: what if there are multiple render loops?
        if (mAdvanceDeleteQueue)
        {
            getOwner()->getOwner()->tickDeleteQueue();
        }

        mFrameIdx = (mFrameIdx + 1) % mAlternating.size();
    }
    co_return;
}
} // namespace iwa
676
source/util/shader_meta.cpp
Normal file
@@ -0,0 +1,676 @@

#include "iwa/util/shader_meta.hpp"

#include "iwa/log.hpp"
#include "kazan/resource/mesh.hpp"
#include "iwa/util/glsl_compiler.hpp"
#include "iwa/util/vkutil.hpp"

namespace
{
template<typename T>
inline std::size_t calcCrcSizeAppend(T, std::size_t) noexcept
{
    MIJIN_TRAP(); // TODO
    return 0;
}
}

namespace iwa
{
namespace
{
vk::ShaderStageFlags typeBitsToVkStages(ShaderTypeBits bits)
{
    vk::ShaderStageFlags flags = {};
    if (bits.compute)
    {
        flags |= vk::ShaderStageFlagBits::eCompute;
    }
    if (bits.vertex)
    {
        flags |= vk::ShaderStageFlagBits::eVertex;
    }
    if (bits.fragment)
    {
        flags |= vk::ShaderStageFlagBits::eFragment;
    }
    if (bits.rayGeneration)
    {
        flags |= vk::ShaderStageFlagBits::eRaygenKHR;
    }
    if (bits.rayClosestHit)
    {
        flags |= vk::ShaderStageFlagBits::eClosestHitKHR;
    }
    if (bits.rayAnyHit)
    {
        flags |= vk::ShaderStageFlagBits::eAnyHitKHR;
    }
    if (bits.rayMiss)
    {
        flags |= vk::ShaderStageFlagBits::eMissKHR;
    }
    if (bits.rayIntersection)
    {
        flags |= vk::ShaderStageFlagBits::eIntersectionKHR;
    }
    if (bits.callable)
    {
        flags |= vk::ShaderStageFlagBits::eCallableKHR;
    }
    return flags;
}

void addShaderAttribute(std::vector<ShaderAttribute>& attributes, ShaderAttribute&& attribute)
{
    bool doInsert = true;
    for (const ShaderAttribute& myAttribute: attributes)
    {
        if (myAttribute.stage == attribute.stage && myAttribute.location == attribute.location && myAttribute.location != UNSPECIFIED_INDEX)
        {
            // same location, type must be the same
            if (myAttribute.type != attribute.type)
            {
                logAndDie(
                    "Attempting to merge incompatible shader metas, attributes {} and {} are incompatible. {} != {}",
                    myAttribute.name, attribute.name, myAttribute.type, attribute.type);
            }
            doInsert = false; // member already exists, don't insert
            continue;
        }
    }

    if (!doInsert)
    {
        return;
    }

    auto it = attributes.begin();
    for (; it != attributes.end(); ++it)
    {
        if (static_cast<unsigned>(it->stage) > static_cast<unsigned>(attribute.stage)
            || (it->stage == attribute.stage && it->location > attribute.location))
        {
            break; // insert here
        }
    }
    attributes.insert(it, std::move(attribute));
}
}
ShaderVariableStructType::ShaderVariableStructType() {} // NOLINT(modernize-use-equals-default)
ShaderVariableStructType::~ShaderVariableStructType() {} // NOLINT(modernize-use-equals-default)

void ShaderMeta::extendPushConstant(ShaderPushConstantBlock pushConstantBlock_, ShaderTypeBits stages)
{
    if (pushConstantBlock_.type.baseType == ShaderVariableBaseType::NONE) {
        return;
    }
    if (pushConstantBlock.type.baseType == ShaderVariableBaseType::NONE)
    {
        pushConstantBlock = std::move(pushConstantBlock_);
        pushConstantStages = stages;
        return;
    }

    // now comes the actual merging
    assert(pushConstantBlock.type.baseType == ShaderVariableBaseType::STRUCT);
    assert(pushConstantBlock_.type.baseType == ShaderVariableBaseType::STRUCT);
    assert(stages);

    for (ShaderVariableStructMember& member : pushConstantBlock_.type.struct_.members)
    {
        bool doInsert = true;
        for (const ShaderVariableStructMember& myMember : pushConstantBlock.type.struct_.members)
        {
            if (myMember.offset == member.offset)
            {
                // same offset, type must be the same
                if (myMember.type != member.type)
                {
                    logAndDie("Attempting to merge incompatible push constant blocks, members {} and {} are incompatible. {} != {}",
                        myMember.name, member.name, myMember.type, member.type);
                }
                doInsert = false; // member already exists, don't insert
                continue;
            }

            // otherwise check for overlaps
            if ((myMember.offset < member.offset && myMember.offset + calcShaderTypeSize(myMember.type) > member.offset)
                || (myMember.offset > member.offset && myMember.offset < member.offset + calcShaderTypeSize(member.type)))
            {
                logAndDie("Attempting to merge incompatible push constant blocks, members {} and {} are overlapping.",
                    myMember.name, member.name);
            }
        }

        if (!doInsert) {
            continue;
        }

        auto it = pushConstantBlock.type.struct_.members.begin();
        for (; it != pushConstantBlock.type.struct_.members.end(); ++it)
        {
            if (it->offset > member.offset) {
                break; // insert here
            }
        }
        pushConstantBlock.type.struct_.members.insert(it, std::move(member));
    }

    pushConstantStages |= stages;
}

void ShaderMeta::addInputAttribute(ShaderAttribute attribute)
{
    addShaderAttribute(inputAttributes, std::move(attribute));
}

void ShaderMeta::addOutputAttribute(ShaderAttribute attribute)
{
    addShaderAttribute(outputAttributes, std::move(attribute));
}

ObjectPtr<DescriptorSetLayout> DescriptorSetMeta::createDescriptorSetLayout(Device& device) const
{
    assert(bindings.size() == bindingFlags.size());
    return device.createChild<DescriptorSetLayout>(DescriptorSetLayoutCreationArgs{
        .bindings = bindings,
        .bindingFlags = bindingFlags,
        .flags = flags,
    });
}

std::vector<ObjectPtr<DescriptorSet>> PipelineAndDescriptorSetLayouts::createDescriptorSets(DescriptorPool& pool) const
{
    std::vector<ObjectPtr<DescriptorSet>> result;
    result.reserve(descriptorSetLayouts.size());

    for (const ObjectPtr<DescriptorSetLayout>& layout : descriptorSetLayouts)
    {
        result.push_back(pool.allocateDescriptorSet({
            .layout = layout
        }));
    }

    return result;
}

ObjectPtr<DescriptorSet> PipelineAndDescriptorSetLayouts::createDescriptorSet(DescriptorPool& pool, unsigned setIdx) const
{
    MIJIN_ASSERT(setIdx < descriptorSetLayouts.size(), "Invalid set index.");
    return pool.allocateDescriptorSet({
        .layout = descriptorSetLayouts[setIdx]
    });
}

PipelineAndDescriptorSetLayouts PipelineLayoutMeta::createPipelineLayout(Device& device) const
{
    std::vector<ObjectPtr<DescriptorSetLayout>> descSetLayouts;
    descSetLayouts.reserve(descriptorSets.size());

    for (const DescriptorSetMeta& dslMeta : descriptorSets)
    {
        descSetLayouts.push_back(dslMeta.createDescriptorSetLayout(device));
    }

    std::vector<vk::PushConstantRange> pushConstantRanges;
    if (pushConstantRange.stageFlags)
    {
        pushConstantRanges.push_back(pushConstantRange);
    }

    ObjectPtr<PipelineLayout> pipelineLayout = device.createChild<PipelineLayout>(PipelineLayoutCreationArgs{
        .setLayouts = descSetLayouts,
        .pushConstantRanges = std::move(pushConstantRanges)
    });
    return
    {
        .descriptorSetLayouts = std::move(descSetLayouts),
        .pipelineLayout = std::move(pipelineLayout)
    };
}
|
||||
|
||||
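// Verifies that two reflected variables sharing a binding/semantic are interchangeable;
// differing names only emit a warning, any other mismatch aborts.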
void ShaderVariable::verifyCompatible(const ShaderVariable& other) const
|
||||
{
|
||||
std::vector<std::string> errors;
|
||||
if (other.binding != binding) {
|
||||
errors.push_back(fmt::format("Variable bindings do not match: {} != {}.", binding, other.binding)); // NOLINT
|
||||
}
|
||||
if (other.descriptorType != descriptorType) {
|
||||
errors.push_back(fmt::format("Descriptor types do not match: {} != {}.",
|
||||
magic_enum::enum_name(descriptorType),
|
||||
magic_enum::enum_name(other.descriptorType)));
|
||||
}
|
||||
if (other.name != name) {
|
||||
logMsg("Warning: shader variable names do not match, variable will only be referrable to by one of them! ({} != {})",
|
||||
name, other.name);
|
||||
}
|
||||
if (other.type != type) {
|
||||
errors.push_back(fmt::format("Variable types do not match: {} != {}.", type, other.type));
|
||||
}
|
||||
|
||||
if (errors.empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
logMsg("Error(s) verifying shader variable compatibility:");
|
||||
for (const std::string& error : errors) {
|
||||
logMsg(error);
|
||||
}
|
||||
std::abort();
|
||||
}
|
||||
|
||||
std::size_t ShaderVariable::calcHash(std::size_t appendTo) const
|
||||
{
|
||||
(void) appendTo;
|
||||
MIJIN_TRAP(); // TODO
|
||||
return 0;
|
||||
#if 0
|
||||
std::size_t hash = appendTo;
|
||||
hash = type.calcHash(hash);
|
||||
hash = calcCrcSizeAppend(descriptorType, hash);
|
||||
hash = calcCrcSizeAppend(binding, hash);
|
||||
hash = calcCrcSizeAppend(name, hash);
|
||||
return hash;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if 0
|
||||
ShaderSource ShaderSource::fromFile(std::string fileName, std::string name)
|
||||
{
|
||||
(void) fileName;
|
||||
(void) name;
|
||||
MIJIN_TRAP(); // TODO
|
||||
return {};
|
||||
std::string code = readFileText(fileName);
|
||||
return {
|
||||
.code = std::move(code),
|
||||
.fileName = std::move(fileName),
|
||||
#if !defined(KAZAN_RELEASE)
|
||||
.name = std::move(name)
|
||||
#endif
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
||||
bool ShaderVariableSet::find(std::string_view varName, ShaderVariableFindResult& outResult) const noexcept
|
||||
{
|
||||
for (const ShaderVariable& var : variables)
|
||||
{
|
||||
if (var.name == varName)
|
||||
{
|
||||
outResult.setIndex = setIndex;
|
||||
outResult.bindIndex = var.binding;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ShaderVariableSet::find(unsigned semantic, unsigned semanticIdx, ShaderVariableFindResult& outResult) const noexcept
|
||||
{
|
||||
for (const ShaderVariable& var : variables)
|
||||
{
|
||||
if (var.semantic == semantic && var.semanticIndex == semanticIdx)
|
||||
{
|
||||
outResult.setIndex = setIndex;
|
||||
outResult.bindIndex = var.binding;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
const ShaderVariable& ShaderVariableSet::getVariableAtBinding(unsigned bindingIdx) const
|
||||
{
|
||||
for (const ShaderVariable& var : variables)
|
||||
{
|
||||
if (var.binding == bindingIdx)
|
||||
{
|
||||
return var;
|
||||
}
|
||||
}
|
||||
|
||||
logAndDie("Could not find shader variable with binding {}!", bindingIdx);
|
||||
}
|
||||
|
||||
const ShaderVariable* ShaderVariableSet::getVariableAtBindingOpt(unsigned bindingIdx) const
|
||||
{
|
||||
for (const ShaderVariable& var : variables)
|
||||
{
|
||||
if (var.binding == bindingIdx)
|
||||
{
|
||||
return &var;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const ShaderVariable* ShaderVariableSet::getVariableAtSemanticOpt(unsigned semantic, unsigned semanticIdx) const
|
||||
{
|
||||
for (const ShaderVariable& var : variables)
|
||||
{
|
||||
if (var.semantic == semantic && var.semanticIndex == semanticIdx)
|
||||
{
|
||||
return &var;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::size_t ShaderVariableSet::calcHash(std::size_t appendTo) const
|
||||
{
|
||||
std::size_t hash = appendTo;
|
||||
for (const ShaderVariable& var : variables) {
|
||||
hash = var.calcHash(hash);
|
||||
}
|
||||
return hash;
|
||||
}
|
||||
|
||||
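// Merges another shader meta into this one: variable sets are combined per set index
// (matching by binding or semantic and verifying compatibility), attributes and the
// push constant block are merged, stages are OR-ed and the cached hash is invalidated.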
void ShaderMeta::extend(ShaderMeta other)
|
||||
{
|
||||
for (ShaderVariableSet& set : other.interfaceVariableSets)
|
||||
{
|
||||
ShaderVariableSet& mySet = getOrCreateInterfaceVariableSet(set.setIndex);
|
||||
mySet.usedInStages.bits |= set.usedInStages.bits;
|
||||
|
||||
for (ShaderVariable& variable : set.variables)
|
||||
{
|
||||
const ShaderVariable* myVariable = nullptr;
|
||||
if (variable.binding != UNSPECIFIED_INDEX)
|
||||
{
|
||||
myVariable = mySet.getVariableAtBindingOpt(variable.binding);
|
||||
}
|
||||
else if (variable.semantic != UNSPECIFIED_INDEX)
|
||||
{
|
||||
myVariable = mySet.getVariableAtSemanticOpt(variable.semantic, variable.semanticIndex);
|
||||
}
|
||||
if (myVariable)
|
||||
{
|
||||
myVariable->verifyCompatible(variable);
|
||||
continue;
|
||||
}
|
||||
mySet.variables.push_back(std::move(variable));
|
||||
}
|
||||
}
|
||||
|
||||
for (ShaderAttribute& attribute : other.inputAttributes)
|
||||
{
|
||||
addInputAttribute(std::move(attribute));
|
||||
}
|
||||
|
||||
for (ShaderAttribute& attribute : other.outputAttributes)
|
||||
{
|
||||
addOutputAttribute(std::move(attribute));
|
||||
}
|
||||
|
||||
extendPushConstant(std::move(other.pushConstantBlock), other.pushConstantStages);
|
||||
stages |= other.stages;
|
||||
|
||||
if (localSizeX == 0 && localSizeY == 0 && localSizeZ == 0)
|
||||
{
|
||||
localSizeX = other.localSizeX;
|
||||
localSizeY = other.localSizeY;
|
||||
localSizeZ = other.localSizeZ;
|
||||
}
|
||||
else if ((other.localSizeX != 0 || other.localSizeY != 0 || other.localSizeZ != 0) &&
|
||||
(localSizeX != other.localSizeX || localSizeY != other.localSizeY || localSizeZ != other.localSizeZ))
|
||||
{
|
||||
logAndDie("Error merging shader metas, conflicting local size!");
|
||||
}
|
||||
|
||||
hash = 0;
|
||||
}
|
||||
|
||||
bool ShaderMeta::findInterfaceVariable(std::string_view varName, ShaderVariableFindResult& outResult) const noexcept
|
||||
{
|
||||
for (const ShaderVariableSet& set : interfaceVariableSets)
|
||||
{
|
||||
if (set.find(varName, outResult)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ShaderMeta::findInterfaceVariable(unsigned semantic, unsigned semanticIdx, ShaderVariableFindResult& outResult) const noexcept
|
||||
{
|
||||
for (const ShaderVariableSet& set : interfaceVariableSets)
|
||||
{
|
||||
if (set.find(semantic, semanticIdx, outResult)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
const ShaderVariableSet& ShaderMeta::getInterfaceVariableSet(unsigned setIdx) const
|
||||
{
|
||||
const ShaderVariableSet* variableSet = getInterfaceVariableSetOpt(setIdx);
|
||||
MIJIN_ASSERT(variableSet != nullptr, "Could not find interface variable set.");
|
||||
return *variableSet;
|
||||
}
|
||||
|
||||
const ShaderVariableSet* ShaderMeta::getInterfaceVariableSetOpt(unsigned setIdx) const
|
||||
{
|
||||
for (const ShaderVariableSet& set : interfaceVariableSets)
|
||||
{
|
||||
if (set.setIndex == setIdx) {
|
||||
return &set;
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const ShaderVariableType& ShaderMeta::getInterfaceVariableType(unsigned setIdx, unsigned bindingIdx) const
|
||||
{
|
||||
return getInterfaceVariableSet(setIdx).getVariableAtBinding(bindingIdx).type;
|
||||
}
|
||||
|
||||
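// Builds the vertex input state by matching the vertex-stage input attributes against
// the caller-supplied named attributes.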
VertexInput ShaderMeta::generateVertexInput(const NamedVertexInput& namedInput) const noexcept
|
||||
{
|
||||
VertexInput result{
|
||||
.bindings = namedInput.bindings
|
||||
};
|
||||
|
||||
for (const ShaderAttribute& attribute : inputAttributes)
|
||||
{
|
||||
if (attribute.stage != vk::ShaderStageFlagBits::eVertex) {
|
||||
continue;
|
||||
}
|
||||
MIJIN_ASSERT_FATAL(attribute.type.baseType == ShaderVariableBaseType::SIMPLE, "Vertex shader input must be a simple type.");
|
||||
auto itAttribute = namedInput.attributes.find(attribute.name);
|
||||
MIJIN_ASSERT_FATAL(itAttribute != namedInput.attributes.end(), "Missing attribute in input.");
|
||||
result.attributes.push_back(vk::VertexInputAttributeDescription{
|
||||
.location = attribute.location,
|
||||
.binding = itAttribute->second.binding,
|
||||
.format = attribute.type.simple.format,
|
||||
.offset = itAttribute->second.offset
|
||||
});
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
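// Builds the vertex input state from a single-binding vertex layout, matching
// attributes by semantic and semantic index; attributes without a semantic are skipped.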
VertexInput ShaderMeta::generateVertexInputFromLayout(const VertexLayout& layout) const noexcept
|
||||
{
|
||||
VertexInput result{
|
||||
.bindings = {
|
||||
vk::VertexInputBindingDescription{
|
||||
.binding = 0,
|
||||
.stride = layout.stride,
|
||||
.inputRate = vk::VertexInputRate::eVertex
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
for (const ShaderAttribute& attribute : inputAttributes)
|
||||
{
|
||||
if (attribute.stage != vk::ShaderStageFlagBits::eVertex) {
|
||||
continue;
|
||||
}
|
||||
if (attribute.semantic == UNSPECIFIED_INDEX) {
|
||||
continue;
|
||||
}
|
||||
MIJIN_ASSERT_FATAL(attribute.type.baseType == ShaderVariableBaseType::SIMPLE, "Vertex shader input must be a simple type.");
|
||||
auto itAttribute = std::ranges::find_if(layout.attributes, [&attribute](const VertexAttribute& attrib) {
|
||||
return static_cast<unsigned>(attrib.semantic) == attribute.semantic && attrib.semanticIdx == attribute.semanticIndex;
|
||||
});
|
||||
MIJIN_ASSERT_FATAL(itAttribute != layout.attributes.end(), "Missing attribute in vertex layout.");
|
||||
result.attributes.push_back(vk::VertexInputAttributeDescription{
|
||||
.location = attribute.location,
|
||||
.binding = 0,
|
||||
.format = attribute.type.simple.format,
|
||||
.offset = itAttribute->offset
|
||||
});
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
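// Translates one interface variable set into descriptor set layout bindings; an entry
// in args.descriptorCounts turns a binding into a partially bound descriptor array.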
DescriptorSetMeta ShaderMeta::generateDescriptorSetLayout(const ShaderVariableSet& set, const GenerateDescriptorSetLayoutArgs& args) const
|
||||
{
|
||||
DescriptorSetMeta setInfo{
|
||||
.flags = args.flags
|
||||
};
|
||||
|
||||
for (const ShaderVariable& var : set.variables)
|
||||
{
|
||||
auto itVar = std::ranges::find_if(setInfo.bindings, [&](const vk::DescriptorSetLayoutBinding& binding) {
|
||||
return binding.binding == var.binding;
|
||||
});
|
||||
assert(itVar == setInfo.bindings.end()); // should have been merged!
|
||||
if (itVar != setInfo.bindings.end())
|
||||
{
|
||||
itVar->stageFlags |= typeBitsToVkStages(set.usedInStages);
|
||||
continue; // TODO: verify the bindings are compatible
|
||||
}
|
||||
vk::DescriptorSetLayoutBinding& binding = setInfo.bindings.emplace_back();
|
||||
vk::DescriptorBindingFlags& flags = setInfo.bindingFlags.emplace_back();
|
||||
binding.binding = var.binding;
|
||||
binding.descriptorType = var.descriptorType;
|
||||
binding.descriptorCount = 1;
|
||||
binding.stageFlags = typeBitsToVkStages(set.usedInStages);
|
||||
|
||||
// support for dynamically sized descriptors
|
||||
auto itCounts = args.descriptorCounts.find(var.binding);
|
||||
if (itCounts != args.descriptorCounts.end() && itCounts->second > 0)
|
||||
{
|
||||
binding.descriptorCount = itCounts->second;
|
||||
flags |= vk::DescriptorBindingFlagBits::ePartiallyBound;
|
||||
}
|
||||
|
||||
if (setInfo.descriptorTypes.size() <= var.binding) {
|
||||
setInfo.descriptorTypes.resize(var.binding + 1);
|
||||
}
|
||||
setInfo.descriptorTypes[var.binding] = var.descriptorType;
|
||||
}
|
||||
|
||||
return setInfo;
|
||||
}
|
||||
|
||||
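// Generates one descriptor set meta per interface variable set and, if a push constant
// block is present, a push constant range covering it.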
PipelineLayoutMeta ShaderMeta::generatePipelineLayout(const GeneratePipelineLayoutArgs& args) const
|
||||
{
|
||||
static const std::vector<std::uint32_t> NO_DESCRIPTOR_COUNTS = {};
|
||||
static const GenerateDescriptorSetLayoutArgs NO_DESCRIPTOR_SET_ARGS = {};
|
||||
|
||||
PipelineLayoutMeta result;
|
||||
for (const ShaderVariableSet& set : interfaceVariableSets)
|
||||
{
|
||||
if (set.setIndex >= result.descriptorSets.size()) {
|
||||
result.descriptorSets.resize(set.setIndex + 1);
|
||||
}
|
||||
auto itSet = args.descriptorSets.find(set.setIndex);
|
||||
const GenerateDescriptorSetLayoutArgs setArgs =
|
||||
itSet != args.descriptorSets.end()
|
||||
? itSet->second
|
||||
: NO_DESCRIPTOR_SET_ARGS;
|
||||
result.descriptorSets[set.setIndex] = generateDescriptorSetLayout(set, setArgs);
|
||||
}
|
||||
|
||||
if (pushConstantBlock.type.baseType != ShaderVariableBaseType::NONE)
|
||||
{
|
||||
assert(pushConstantStages);
|
||||
result.pushConstantRange.stageFlags = typeBitsToVkStages(pushConstantStages);
|
||||
result.pushConstantRange.size = pushConstantBlock.offset + calcShaderTypeSize(pushConstantBlock.type);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
bool ShaderMeta::empty() const
|
||||
{
|
||||
static_assert(ShaderMeta::STRUCT_VERSION == 1, "Update me");
|
||||
return interfaceVariableSets.empty()
|
||||
&& inputAttributes.empty()
|
||||
&& outputAttributes.empty()
|
||||
&& pushConstantStages == ShaderTypeBits()
|
||||
&& pushConstantBlock.type.baseType == ShaderVariableBaseType::NONE
|
||||
&& localSizeX == 0
|
||||
&& localSizeY == 0
|
||||
&& localSizeZ == 0;
|
||||
}
|
||||
|
||||
std::size_t ShaderMeta::getHash() const
|
||||
{
|
||||
if (hash == 0)
|
||||
{
|
||||
hash = 1; // TODO
|
||||
MIJIN_TRAP();
|
||||
#if 0
|
||||
for (const ShaderVariableSet& variableSet : interfaceVariableSets) {
|
||||
hash = variableSet.calcHash(hash);
|
||||
}
|
||||
hash = calcCrcSizeAppend(pushConstantStages.bits, hash);
|
||||
hash = pushConstantBlock.type.calcHash(hash);
|
||||
hash = calcCrcSizeAppend(pushConstantBlock.offset, hash);
|
||||
hash = calcCrcSizeAppend(localSizeX, hash);
|
||||
hash = calcCrcSizeAppend(localSizeY, hash);
|
||||
hash = calcCrcSizeAppend(localSizeZ, hash);
|
||||
#endif
|
||||
}
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
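// Returns the size of a shader variable type in bytes. Matrix sizes assume tightly
// packed columns (e.g. mat3 = 36 bytes) rather than std140/std430 alignment.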
unsigned calcShaderTypeSize(const ShaderVariableType& type, bool ignoreArraySize) noexcept
|
||||
{
|
||||
unsigned size = 0;
|
||||
switch (type.baseType)
|
||||
{
|
||||
case ShaderVariableBaseType::SIMPLE:
|
||||
size = vkFormatSize(type.simple.format);
|
||||
break;
|
||||
case ShaderVariableBaseType::MATRIX:
|
||||
switch (type.matrixType)
|
||||
{
|
||||
case ShaderVariableMatrixType::MAT2:
|
||||
size = 16;
|
||||
break;
|
||||
case ShaderVariableMatrixType::MAT3:
|
||||
size = 36;
|
||||
break;
|
||||
case ShaderVariableMatrixType::MAT4:
|
||||
size = 64;
|
||||
break;
|
||||
default:
|
||||
logAndDie("Lol, what's this?");
|
||||
}
|
||||
break;
|
||||
case ShaderVariableBaseType::STRUCT:
|
||||
assert(!type.struct_.members.empty());
|
||||
size = static_cast<unsigned>(type.struct_.members.back().offset + calcShaderTypeSize(type.struct_.members.back().type));
|
||||
break;
|
||||
default:
|
||||
logAndDie("How would I know?");
|
||||
}
|
||||
if (!ignoreArraySize) {
|
||||
size *= type.arraySize;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
} // namespace iwa
|
||||
402
source/util/texture_atlas.cpp
Normal file
@@ -0,0 +1,402 @@
|
||||
|
||||
#include "iwa/util/texture_atlas.hpp"
|
||||
|
||||
#include <bit>
|
||||
#include "iwa/device.hpp"
|
||||
#include "iwa/instance.hpp"
|
||||
#include "iwa/resource/bitmap.hpp"
|
||||
|
||||
namespace iwa
|
||||
{
|
||||
TextureSlot::TextureSlot(ObjectPtr<TextureAtlas> owner, const TextureSlotCreationArgs& args)
|
||||
: super_t(std::move(owner)), mUsedSpace(args.usedSpace), mLayer(args.layer), mUvOffset(args.uvOffset), mUvScale(args.uvScale)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
TextureAtlas::TextureAtlas(ObjectPtr<> owner, const TextureAtlasCreationArgs& args)
|
||||
: super_t(std::move(owner)), mLayerSize(args.layerSize)
|
||||
{
|
||||
// start with a single layer with one free space that takes up the entire layer
|
||||
mLayers.push_back({
|
||||
.freeSpaces = {
|
||||
vk::Rect2D{
|
||||
.offset = { .x = 0, .y = 0 },
|
||||
.extent = args.layerSize
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
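// Best-fit rectangle packing: the free rectangle wasting the least area is removed and
// split into up to three non-overlapping free rectangles (right, bottom, corner);
// a new layer is appended when nothing fits.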
ObjectPtr<TextureSlot> TextureAtlas::allocateSlot(vk::Extent2D slotSize)
|
||||
{
|
||||
// round the requested size up to the next power of two
|
||||
// TODO: check if this actually improves the results
|
||||
const vk::Extent2D size = {
|
||||
.width = std::bit_ceil(slotSize.width),
|
||||
.height = std::bit_ceil(slotSize.height)
|
||||
};
|
||||
|
||||
// check if it can even fit
|
||||
if (size.width > mLayerSize.width || size.height > mLayerSize.height) {
|
||||
throw std::runtime_error("Cannot allocate texture slot, size too big.");
|
||||
}
|
||||
|
||||
// find the best fit (minimize product of "wasted" space)
|
||||
unsigned lowestWasteSum = std::numeric_limits<unsigned>::max();
|
||||
unsigned lowestWasteProduct = std::numeric_limits<unsigned>::max();
|
||||
std::vector<TextureAtlasLayer>::iterator foundLayer = mLayers.end();
|
||||
std::vector<vk::Rect2D>::iterator foundSpace;
|
||||
for (auto itLayer = mLayers.begin(); itLayer != mLayers.end(); ++itLayer)
|
||||
{
|
||||
for (auto itSpace = itLayer->freeSpaces.begin(); itSpace != itLayer->freeSpaces.end(); ++itSpace)
|
||||
{
|
||||
if (itSpace->extent.width < size.width || itSpace->extent.height < size.height) {
|
||||
continue;
|
||||
}
|
||||
const unsigned wasteWidth = itSpace->extent.width - size.width;
|
||||
const unsigned wasteHeight = itSpace->extent.height - size.height;
|
||||
const unsigned wasteProduct = wasteWidth * wasteHeight;
|
||||
if (wasteProduct <= lowestWasteProduct)
|
||||
{
|
||||
const unsigned wasteSum = wasteWidth + wasteHeight;
|
||||
if (wasteProduct < lowestWasteProduct || wasteSum < lowestWasteSum)
|
||||
{
|
||||
lowestWasteSum = wasteSum;
|
||||
lowestWasteProduct = wasteProduct;
|
||||
foundLayer = itLayer;
|
||||
foundSpace = itSpace;
|
||||
}
|
||||
}
|
||||
} // for (itLayer->freeSpaces)
|
||||
} // for (mLayers)
|
||||
|
||||
// if no space was found, make space
|
||||
if (foundLayer == mLayers.end())
|
||||
{
|
||||
mLayers.emplace_back();
|
||||
mLayers.back().freeSpaces.push_back({
|
||||
.offset = { .x = 0, .y = 0},
|
||||
.extent = mLayerSize
|
||||
});
|
||||
foundLayer = std::prev(mLayers.end());
|
||||
foundSpace = foundLayer->freeSpaces.begin();
|
||||
}
|
||||
|
||||
// save in case the iterator gets invalidated
|
||||
const vk::Rect2D space = *foundSpace;
|
||||
|
||||
// remove it
|
||||
foundLayer->freeSpaces.erase(foundSpace);
|
||||
|
||||
// now split the space, if necessary
|
||||
const bool splitX = space.extent.width > size.width;
|
||||
const bool splitY = space.extent.height > size.height;
|
||||
if (splitX)
|
||||
{
|
||||
foundLayer->freeSpaces.push_back({
|
||||
.offset = {
|
||||
.x = static_cast<std::int32_t>(space.offset.x + size.width),
|
||||
.y = space.offset.y
|
||||
},
|
||||
.extent = {
|
||||
.width = space.extent.width - size.width,
|
||||
.height = size.height
|
||||
}
|
||||
});
|
||||
}
|
||||
if (splitY)
|
||||
{
|
||||
foundLayer->freeSpaces.push_back({
|
||||
.offset = {
|
||||
.x = space.offset.x,
|
||||
.y = static_cast<std::int32_t>(space.offset.y + size.height)
|
||||
},
|
||||
.extent = {
|
||||
.width = size.width,
|
||||
.height = space.extent.height - size.height
|
||||
}
|
||||
});
|
||||
}
|
||||
if (splitX && splitY)
|
||||
{
|
||||
foundLayer->freeSpaces.push_back({
|
||||
.offset = {
|
||||
.x = static_cast<std::int32_t>(space.offset.x + size.width),
|
||||
.y = static_cast<std::int32_t>(space.offset.y + size.height)
|
||||
},
|
||||
.extent = {
|
||||
.width = space.extent.width - size.width,
|
||||
.height = space.extent.height - size.height
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// return the result
|
||||
return createChild<TextureSlot>(TextureSlotCreationArgs{
|
||||
.usedSpace = {
|
||||
.offset = space.offset,
|
||||
.extent = slotSize
|
||||
},
|
||||
.layer = static_cast<unsigned>(std::distance(mLayers.begin(), foundLayer)),
|
||||
.uvOffset = {
|
||||
static_cast<float>(space.offset.x) / static_cast<float>(mLayerSize.width),
|
||||
static_cast<float>(space.offset.y) / static_cast<float>(mLayerSize.height)
|
||||
},
|
||||
.uvScale = {
|
||||
static_cast<float>(slotSize.width) / static_cast<float>(mLayerSize.width),
|
||||
static_cast<float>(slotSize.height) / static_cast<float>(mLayerSize.height)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
AtlasedImage::AtlasedImage(ObjectPtr<Device> owner, const AtlasedImageCreationArgs& args)
|
||||
: super_t(std::move(owner)), mAtlas(TextureAtlas::create(TextureAtlasCreationArgs{.layerSize = args.size})),
|
||||
mFormat(args.format), mMipLevels(args.mipLevels), mUsage(args.usage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst)
|
||||
{
|
||||
mImage = allocateImage(args.initialLayers);
|
||||
mImageView = mImage->createImageView({
|
||||
.viewType = vk::ImageViewType::e2DArray
|
||||
});
|
||||
}
|
||||
|
||||
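// Allocates an atlas slot; if it lands on a layer the image does not have yet, the
// backing image is recreated with more array layers and all mip levels are copied over.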
mijin::Task<ObjectPtr<TextureSlot>> AtlasedImage::c_allocateSlot(vk::Extent2D slotSize)
|
||||
{
|
||||
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
|
||||
|
||||
ObjectPtr<TextureSlot> slot = mAtlas->allocateSlot(slotSize);
|
||||
if (slot->getLayer() >= mImage->getArrayLayers())
|
||||
{
|
||||
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
|
||||
|
||||
// image is too small, resize it
|
||||
// this includes a complete copy of the existing image
|
||||
ObjectPtr<Image> newImage = allocateImage(slot->getLayer() + 1);
|
||||
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
|
||||
vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
|
||||
mImage->applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_READ);
|
||||
newImage->applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_WRITE);
|
||||
|
||||
// copy ALL the mip levels
|
||||
std::vector<vk::ImageCopy> regions;
|
||||
regions.reserve(mImage->getMipLevels());
|
||||
for (unsigned level = 0; level < mImage->getMipLevels(); ++level)
|
||||
{
|
||||
const vk::ImageSubresourceLayers copySubresource{
|
||||
.aspectMask = vk::ImageAspectFlagBits::eColor,
|
||||
.mipLevel = level,
|
||||
.baseArrayLayer = 0,
|
||||
.layerCount = mImage->getArrayLayers()
|
||||
};
|
||||
regions.push_back({
|
||||
.srcSubresource = copySubresource,
|
||||
.srcOffset = {.x = 0, .y = 0, .z = 0},
|
||||
.dstSubresource = copySubresource,
|
||||
.dstOffset = {.x = 0, .y = 0, .z = 0},
|
||||
.extent = {
|
||||
.width = mAtlas->getLayerSize().width,
|
||||
.height = mAtlas->getLayerSize().height,
|
||||
.depth = 1
|
||||
}
|
||||
});
|
||||
}
|
||||
cmdBuffer.copyImage(
|
||||
/* srcImage = */ *mImage,
|
||||
/* srcImageLayout = */ vk::ImageLayout::eTransferSrcOptimal,
|
||||
/* dstImage = */ *newImage,
|
||||
/* dstImageLayout = */ vk::ImageLayout::eTransferDstOptimal,
|
||||
/* regions = */ regions
|
||||
);
|
||||
co_await getOwner()->endScratchCommandBuffer(cmdBufferPtr);
|
||||
mImage = std::move(newImage);
|
||||
mImageView = mImage->createImageView({
|
||||
.viewType = vk::ImageViewType::e2DArray
|
||||
});
|
||||
imageRecreated.emit();
|
||||
}
|
||||
co_return slot;
|
||||
}
|
||||
|
||||
mijin::Task<> AtlasedImage::c_upload(const TextureSlot& slot, const Bitmap& bitmap) const noexcept
|
||||
{
|
||||
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
|
||||
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= bitmap.getSize().width
|
||||
&& slot.getUsedSpace().extent.height >= bitmap.getSize().height, "Can't upload image, invalid size.");
|
||||
|
||||
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
|
||||
co_await mImage->c_upload(
|
||||
/* bitmap = */ bitmap,
|
||||
/* imageOffset = */ {
|
||||
.x = slot.getUsedSpace().offset.x,
|
||||
.y = slot.getUsedSpace().offset.y,
|
||||
.z = 0
|
||||
},
|
||||
/* baseLayer = */ slot.getLayer()
|
||||
);
|
||||
}
|
||||
|
||||
mijin::Task<> AtlasedImage::c_upload(const TextureSlot& slot, const void* data, std::size_t bytes, const vk::Extent2D& bufferImageSize) const noexcept
|
||||
{
|
||||
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
|
||||
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= bufferImageSize.width
|
||||
&& slot.getUsedSpace().extent.height >= bufferImageSize.height, "Can't upload image, invalid size.");
|
||||
|
||||
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
|
||||
co_await mImage->c_upload(
|
||||
/* data = */ data,
|
||||
/* bytes = */ bytes,
|
||||
/* bufferImageSize = */ {
|
||||
.width = bufferImageSize.width,
|
||||
.height = bufferImageSize.height,
|
||||
.depth = 1
|
||||
},
|
||||
/* imageOffset = */ {
|
||||
.x = slot.getUsedSpace().offset.x,
|
||||
.y = slot.getUsedSpace().offset.y,
|
||||
.z = 0
|
||||
},
|
||||
/* baseLayer = */ slot.getLayer()
|
||||
);
|
||||
}
|
||||
|
||||
mijin::Task<> AtlasedImage::c_blit(const TextureSlot& slot, Image& srcImage) const noexcept
|
||||
{
|
||||
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
|
||||
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= srcImage.getSize().width
|
||||
&& slot.getUsedSpace().extent.height >= srcImage.getSize().height
|
||||
&& srcImage.getSize().depth == 1, "Can't upload image, invalid size.");
|
||||
|
||||
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
|
||||
co_await mImage->c_blitFrom(
|
||||
/* srcImage = */ srcImage,
|
||||
/* regions = */ {
|
||||
vk::ImageBlit{
|
||||
.srcSubresource = DEFAULT_SUBRESOURCE_LAYERS,
|
||||
.srcOffsets = std::array{
|
||||
vk::Offset3D{
|
||||
.x = 0, .y = 0, .z = 0
|
||||
},
|
||||
vk::Offset3D{
|
||||
.x = static_cast<std::int32_t>(srcImage.getSize().width),
|
||||
.y = static_cast<std::int32_t>(srcImage.getSize().height),
|
||||
.z = 1
|
||||
}
|
||||
},
|
||||
.dstSubresource = vk::ImageSubresourceLayers{
|
||||
.aspectMask = vk::ImageAspectFlagBits::eColor,
|
||||
.mipLevel = 0,
|
||||
.baseArrayLayer = slot.getLayer(),
|
||||
.layerCount = 1
|
||||
},
|
||||
.dstOffsets = std::array{
|
||||
vk::Offset3D{
|
||||
.x = slot.getUsedSpace().offset.x,
|
||||
.y = slot.getUsedSpace().offset.y,
|
||||
.z = 0
|
||||
},
|
||||
vk::Offset3D{
|
||||
.x = slot.getUsedSpace().offset.x + static_cast<std::int32_t>(slot.getUsedSpace().extent.width),
|
||||
.y = slot.getUsedSpace().offset.y + static_cast<std::int32_t>(slot.getUsedSpace().extent.height),
|
||||
.z = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
mijin::Task<> AtlasedImage::c_blit(const TextureSlot& slot, const Bitmap& bitmap) const noexcept
|
||||
{
|
||||
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
|
||||
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= bitmap.getSize().width
|
||||
&& slot.getUsedSpace().extent.height >= bitmap.getSize().height, "Can't upload image, invalid size.");
|
||||
|
||||
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
|
||||
co_await mImage->c_blitFrom(
|
||||
/* bitmap = */ bitmap,
|
||||
/* regions = */ {
|
||||
vk::ImageBlit{
|
||||
.srcSubresource = DEFAULT_SUBRESOURCE_LAYERS,
|
||||
.srcOffsets = std::array{
|
||||
vk::Offset3D{
|
||||
.x = 0, .y = 0, .z = 0
|
||||
},
|
||||
vk::Offset3D{
|
||||
.x = static_cast<std::int32_t>(bitmap.getSize().width),
|
||||
.y = static_cast<std::int32_t>(bitmap.getSize().height),
|
||||
.z = 1
|
||||
}
|
||||
},
|
||||
.dstSubresource = vk::ImageSubresourceLayers{
|
||||
.aspectMask = vk::ImageAspectFlagBits::eColor,
|
||||
.mipLevel = 0,
|
||||
.baseArrayLayer = slot.getLayer(),
|
||||
.layerCount = 1
|
||||
},
|
||||
.dstOffsets = std::array{
|
||||
vk::Offset3D{
|
||||
.x = slot.getUsedSpace().offset.x,
|
||||
.y = slot.getUsedSpace().offset.y,
|
||||
.z = 0
|
||||
},
|
||||
vk::Offset3D{
|
||||
.x = slot.getUsedSpace().offset.x + static_cast<std::int32_t>(slot.getUsedSpace().extent.width),
|
||||
.y = slot.getUsedSpace().offset.y + static_cast<std::int32_t>(slot.getUsedSpace().extent.height),
|
||||
.z = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
mijin::Task<> AtlasedImage::c_copy(const TextureSlot& slot, Image& srcImage) const noexcept
|
||||
{
|
||||
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
|
||||
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= srcImage.getSize().width
|
||||
&& slot.getUsedSpace().extent.height >= srcImage.getSize().height
|
||||
&& srcImage.getSize().depth == 1, "Can't upload image, invalid size.");
|
||||
|
||||
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
|
||||
co_await mImage->c_copyFrom(
|
||||
/* srcImage = */ srcImage,
|
||||
/* regions = */ {
|
||||
vk::ImageCopy{
|
||||
.srcSubresource = DEFAULT_SUBRESOURCE_LAYERS,
|
||||
.srcOffset = {
|
||||
.x = 0, .y = 0, .z = 0
|
||||
},
|
||||
.dstSubresource = vk::ImageSubresourceLayers{
|
||||
.aspectMask = vk::ImageAspectFlagBits::eColor,
|
||||
.mipLevel = 0,
|
||||
.baseArrayLayer = slot.getLayer(),
|
||||
.layerCount = 1
|
||||
},
|
||||
.dstOffset = {
|
||||
.x = slot.getUsedSpace().offset.x,
|
||||
.y = slot.getUsedSpace().offset.y,
|
||||
.z = 0
|
||||
},
|
||||
.extent = srcImage.getSize()
|
||||
}
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
ObjectPtr<Image> AtlasedImage::allocateImage(unsigned layers)
|
||||
{
|
||||
ObjectPtr<Image> image = getOwner()->createChild<Image>(ImageCreationArgs{
|
||||
.format = mFormat,
|
||||
.extent = {
|
||||
.width = mAtlas->getLayerSize().width,
|
||||
.height = mAtlas->getLayerSize().height,
|
||||
.depth = 1
|
||||
},
|
||||
.mipLevels = mMipLevels,
|
||||
.arrayLayers = layers,
|
||||
.usage = mUsage
|
||||
});
|
||||
image->allocateMemory();
|
||||
return image;
|
||||
}
|
||||
} // namespace iwa
|
||||
29
source/util/vertex_layout.cpp
Normal file
@@ -0,0 +1,29 @@
|
||||
|
||||
#include "iwa/util/vertex_layout.hpp"
|
||||
|
||||
namespace iwa
|
||||
{
|
||||
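// Linear search for an attribute by semantic and semantic index.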
mijin::Optional<VertexAttribute&> VertexLayout::findAttribute(VertexAttributeSemantic semantic, unsigned semanticIdx) noexcept
|
||||
{
|
||||
for (VertexAttribute& attribute : attributes)
|
||||
{
|
||||
if (attribute.semantic == semantic && attribute.semanticIdx == semanticIdx)
|
||||
{
|
||||
return attribute;
|
||||
}
|
||||
}
|
||||
return mijin::NULL_OPTIONAL;
|
||||
}
|
||||
|
||||
mijin::Optional<const VertexAttribute&> VertexLayout::findAttribute(VertexAttributeSemantic semantic, unsigned semanticIdx) const noexcept
|
||||
{
|
||||
for (const VertexAttribute& attribute : attributes)
|
||||
{
|
||||
if (attribute.semantic == semantic && attribute.semanticIdx == semanticIdx)
|
||||
{
|
||||
return attribute;
|
||||
}
|
||||
}
|
||||
return mijin::NULL_OPTIONAL;
|
||||
}
|
||||
} // namespace iwa
|
||||
275
source/util/vkutil.cpp
Normal file
@@ -0,0 +1,275 @@
|
||||
|
||||
#include "iwa/util/vkutil.hpp"
|
||||
|
||||
#include "iwa/device.hpp"
|
||||
#include "iwa/log.hpp"
|
||||
|
||||
namespace iwa
|
||||
{
|
||||
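// Returns the tightly packed byte size of a single element of the given format; only
// plain R/RG/RGB/RGBA integer and float formats are handled.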
unsigned vkFormatSize(vk::Format format) noexcept
|
||||
{
|
||||
switch (format)
|
||||
{
|
||||
// 8 bit integer
|
||||
case vk::Format::eR8Uint:
|
||||
case vk::Format::eR8Sint:
|
||||
case vk::Format::eR8Unorm:
|
||||
case vk::Format::eR8Srgb:
|
||||
return 1;
|
||||
case vk::Format::eR8G8Uint:
|
||||
case vk::Format::eR8G8Sint:
|
||||
case vk::Format::eR8G8Unorm:
|
||||
case vk::Format::eR8G8Srgb:
|
||||
return 2;
|
||||
case vk::Format::eR8G8B8Uint:
|
||||
case vk::Format::eR8G8B8Sint:
|
||||
case vk::Format::eR8G8B8Unorm:
|
||||
case vk::Format::eR8G8B8Srgb:
|
||||
return 3;
|
||||
case vk::Format::eR8G8B8A8Uint:
|
||||
case vk::Format::eR8G8B8A8Sint:
|
||||
case vk::Format::eR8G8B8A8Unorm:
|
||||
case vk::Format::eR8G8B8A8Srgb:
|
||||
return 4;
|
||||
// 16 bit integer
|
||||
case vk::Format::eR16Uint:
|
||||
case vk::Format::eR16Sint:
|
||||
case vk::Format::eR16Unorm:
|
||||
return 2;
|
||||
case vk::Format::eR16G16Uint:
|
||||
case vk::Format::eR16G16Sint:
|
||||
case vk::Format::eR16G16Unorm:
|
||||
return 4;
|
||||
case vk::Format::eR16G16B16Uint:
|
||||
case vk::Format::eR16G16B16Sint:
|
||||
case vk::Format::eR16G16B16Unorm:
|
||||
return 6;
|
||||
case vk::Format::eR16G16B16A16Uint:
|
||||
case vk::Format::eR16G16B16A16Sint:
|
||||
case vk::Format::eR16G16B16A16Unorm:
|
||||
return 8;
|
||||
// 32 bit integer
|
||||
case vk::Format::eR32Uint:
|
||||
case vk::Format::eR32Sint:
|
||||
return 4;
|
||||
case vk::Format::eR32G32Uint:
|
||||
case vk::Format::eR32G32Sint:
|
||||
return 8;
|
||||
case vk::Format::eR32G32B32Uint:
|
||||
case vk::Format::eR32G32B32Sint:
|
||||
return 12;
|
||||
case vk::Format::eR32G32B32A32Uint:
|
||||
case vk::Format::eR32G32B32A32Sint:
|
||||
return 16;
|
||||
// 64 bit integer
|
||||
case vk::Format::eR64Uint:
|
||||
case vk::Format::eR64Sint:
|
||||
return 8;
|
||||
case vk::Format::eR64G64Uint:
|
||||
case vk::Format::eR64G64Sint:
|
||||
return 16;
|
||||
case vk::Format::eR64G64B64Uint:
|
||||
case vk::Format::eR64G64B64Sint:
|
||||
return 24;
|
||||
case vk::Format::eR64G64B64A64Uint:
|
||||
case vk::Format::eR64G64B64A64Sint:
|
||||
return 32;
|
||||
// 16 bit float
|
||||
case vk::Format::eR16Sfloat:
|
||||
return 2;
|
||||
case vk::Format::eR16G16Sfloat:
|
||||
return 4;
|
||||
case vk::Format::eR16G16B16Sfloat:
|
||||
return 6;
|
||||
case vk::Format::eR16G16B16A16Sfloat:
|
||||
return 8;
|
||||
// 32 bit float
|
||||
case vk::Format::eR32Sfloat:
|
||||
return 4;
|
||||
case vk::Format::eR32G32Sfloat:
|
||||
return 8;
|
||||
case vk::Format::eR32G32B32Sfloat:
|
||||
return 12;
|
||||
case vk::Format::eR32G32B32A32Sfloat:
|
||||
return 16;
|
||||
// 64 bit float
|
||||
case vk::Format::eR64Sfloat:
|
||||
return 8;
|
||||
case vk::Format::eR64G64Sfloat:
|
||||
return 16;
|
||||
case vk::Format::eR64G64B64Sfloat:
|
||||
return 24;
|
||||
case vk::Format::eR64G64B64A64Sfloat:
|
||||
return 32;
|
||||
default:
|
||||
logAndDie("I've never seen this format :(");
|
||||
}
|
||||
}
|
||||
|
||||
unsigned vkIndexTypeSize(vk::IndexType indexType) noexcept
|
||||
{
|
||||
switch (indexType)
|
||||
{
|
||||
case vk::IndexType::eNoneKHR:
|
||||
return 0;
|
||||
case vk::IndexType::eUint8EXT:
|
||||
return 1;
|
||||
case vk::IndexType::eUint16:
|
||||
return 2;
|
||||
case vk::IndexType::eUint32:
|
||||
return 4;
|
||||
default:
|
||||
logAndDie("What is this sorcery?");
|
||||
}
|
||||
}
|
||||
|
||||
bool isDepthFormat(vk::Format format) noexcept
|
||||
{
|
||||
for (const vk::Format depthFormat : DEPTH_FORMATS) {
|
||||
if (format == depthFormat) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isStencilFormat(vk::Format format) noexcept
|
||||
{
|
||||
for (const vk::Format stencilFormat : STENCIL_FORMATS) {
|
||||
if (format == stencilFormat) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#if 0
|
||||
std::string formatVkVariant(const VkVariantMWN& variant)
|
||||
{
|
||||
switch (variant.type)
|
||||
{
|
||||
case VK_VARIANT_TYPE_UNKNOWN_MWN:
|
||||
return "???";
|
||||
case VK_VARIANT_TYPE_NONE_MWN:
|
||||
return "<none>";
|
||||
case VK_VARIANT_TYPE_BOOL_MWN:
|
||||
return variant.uintValue ? "true" : "false";
|
||||
case VK_VARIANT_TYPE_UINT8_MWN:
|
||||
case VK_VARIANT_TYPE_UINT16_MWN:
|
||||
case VK_VARIANT_TYPE_UINT32_MWN:
|
||||
case VK_VARIANT_TYPE_UINT64_MWN:
|
||||
return std::to_string(variant.uintValue);
|
||||
case VK_VARIANT_TYPE_INT8_MWN:
|
||||
case VK_VARIANT_TYPE_INT16_MWN:
|
||||
case VK_VARIANT_TYPE_INT32_MWN:
|
||||
case VK_VARIANT_TYPE_INT64_MWN:
|
||||
return std::to_string(variant.intValue);
|
||||
case VK_VARIANT_TYPE_FLOAT_MWN:
|
||||
case VK_VARIANT_TYPE_DOUBLE_MWN:
|
||||
return std::to_string(variant.doubleValue);
|
||||
case VK_VARIANT_TYPE_STRING_MWN:
|
||||
return fmt::format("\"{}\"", variant.stringValue);
|
||||
case VK_VARIANT_TYPE_VOID_POINTER_MWN:
|
||||
return fmt::format("{}", fmt::ptr(variant.voidPointerValue)); // TODO: this doesnt make sense, store the original pointer!
|
||||
case VK_VARIANT_TYPE_POINTER_MWN:
|
||||
return fmt::format("{}", fmt::ptr(variant.pointerValue)); // TODO: this doesnt make sense, store the original pointer!
|
||||
case VK_VARIANT_TYPE_ARRAY_MWN:
|
||||
return fmt::format("<array of {}>", variant.arrayValue.numElements);
|
||||
case VK_VARIANT_TYPE_IN_STRUCTURE_MWN:
|
||||
return "<in struct>";
|
||||
case VK_VARIANT_TYPE_OUT_STRUCTURE_MWN:
|
||||
return "<out struct>";
|
||||
case VK_VARIANT_TYPE_OBJECT_MWN:
|
||||
return "<handle>";
|
||||
default:
|
||||
assert(0);
|
||||
return "???";
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
std::size_t calcVkStructHash(const void* structure, std::size_t appendTo)
|
||||
{
|
||||
if (structure == nullptr) {
|
||||
return appendTo;
|
||||
}
|
||||
|
||||
const vk::BaseInStructure* inStruct = static_cast<const vk::BaseInStructure*>(structure);
|
||||
|
||||
std::size_t hash = appendTo;
|
||||
switch (inStruct->sType)
|
||||
{
|
||||
case vk::StructureType::eDescriptorSetLayoutBindingFlagsCreateInfo: {
|
||||
const auto& flagsInfo = *static_cast<const vk::DescriptorSetLayoutBindingFlagsCreateInfo*>(structure);
|
||||
for (std::uint32_t bindingIdx = 0; bindingIdx < flagsInfo.bindingCount; ++bindingIdx) {
|
||||
hash = calcCrcSizeAppend(flagsInfo.pBindingFlags[bindingIdx], hash);
|
||||
}
|
||||
}
|
||||
break;
|
||||
default:
|
||||
assert(false); // missing struct here, bad
|
||||
break;
|
||||
}
|
||||
|
||||
return calcVkStructHash(inStruct->pNext, hash);
|
||||
}
|
||||
#endif
|
||||
|
||||
vk::SampleCountFlagBits samplesToVk(unsigned samples) noexcept
|
||||
{
|
||||
switch (samples)
|
||||
{
|
||||
case 1:
|
||||
return vk::SampleCountFlagBits::e1;
|
||||
case 2:
|
||||
return vk::SampleCountFlagBits::e2;
|
||||
case 4:
|
||||
return vk::SampleCountFlagBits::e4;
|
||||
case 8:
|
||||
return vk::SampleCountFlagBits::e8;
|
||||
case 16:
|
||||
return vk::SampleCountFlagBits::e16;
|
||||
case 32:
|
||||
return vk::SampleCountFlagBits::e32;
|
||||
case 64:
|
||||
return vk::SampleCountFlagBits::e64;
|
||||
default:
|
||||
logAndDie("Invalid sample count: {}.", samples);
|
||||
}
|
||||
}
|
||||
|
||||
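// Returns the first entry of DEPTH_FORMATS usable as an optimally tiled depth/stencil
// attachment with the requested sample count, or eUndefined if none is supported.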
vk::Format detectDepthBufferFormat(Device& device, unsigned samples) noexcept
|
||||
{
|
||||
const vk::SampleCountFlagBits sampleCount = samplesToVk(samples);
|
||||
for (const vk::Format depthFormat : DEPTH_FORMATS)
|
||||
{
|
||||
try
|
||||
{
|
||||
const vk::ImageFormatProperties props = device.getVkPhysicalDevice().getImageFormatProperties(depthFormat, vk::ImageType::e2D, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eDepthStencilAttachment);
|
||||
if (props.sampleCounts & sampleCount) {
|
||||
return depthFormat;
|
||||
}
|
||||
}
|
||||
catch(vk::FormatNotSupportedError&)
|
||||
{
|
||||
continue; // not supported
|
||||
}
|
||||
}
|
||||
return vk::Format::eUndefined;
|
||||
}
|
||||
|
||||
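// Returns every sample count for which a usable depth buffer format exists; 1 is
// always included.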
std::vector<unsigned> detectSupportedSampleCounts(Device& device) noexcept
|
||||
{
|
||||
std::vector<unsigned> result = {1};
|
||||
|
||||
for (const unsigned samples : {2, 4, 8, 16, 32, 64})
|
||||
{
|
||||
if (detectDepthBufferFormat(device, samples) != vk::Format::eUndefined) {
|
||||
result.push_back(samples);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
} // namespace iwa
|
||||