initial commit

Patrick 2024-04-06 14:11:26 +02:00
commit 1d44ecc0ee
85 changed files with 11573 additions and 0 deletions

60
.gitignore vendored Normal file
View File

@@ -0,0 +1,60 @@
# Compile commands
compile_commands.json
# Tooling cache (e.g. clangd index)
.cache
# Environment setup
/.env
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# Debug Info
*.pdb
# for projects that use SCons for building: http://www.scons.org/
.sconsign.dblite
/.sconf_temp
/config.log
# Byte-compiled / optimized python files
__pycache__/
*.py[cod]
# Backup files
*.bak
# Generated files (but not their templates)
*.gen.*
!*.gen.*.jinja

92
LibConf Normal file
View File

@@ -0,0 +1,92 @@
Import('env')
# LibFmt
lib_fmt = env.Cook('fmt', git_ref = 'refs/tags/10.0.0')
# magic_enum
lib_magic_enum = env.Cook('magic_enum', git_ref = 'refs/tags/v0.9.3')
# Glslang
lib_glslang = env.Cook('glslang', remote = 'mewin')
# GLM
env.Append(CPPDEFINES = ['GLM_FORCE_DEPTH_ZERO_TO_ONE', 'GLM_FORCE_RADIANS'])
lib_glm = env.Cook('glm', remote = 'mewin')
# Mijin
lib_mijin = env.Cook('mijin')
# SDL
lib_sdl = env.Cook('SDL', git_ref = 'refs/tags/release-2.28.4')
# stb
lib_stb = env.Cook('stb')
# VulkanHeaders
lib_vulkan_headers = env.Cook('VulkanHeaders', remote = 'mewin', git_ref = 'refs/tags/v1.3.271-2')
# Argparse
lib_argparse = env.Cook('argparse', git_ref = 'refs/tags/v3.0')
# yaml-cpp
lib_yaml_cpp = env.Cook('yaml-cpp', git_ref = 'refs/tags/0.8.0')
# Generated files
env.Jinja('public/iwa/util/vertex_attribute_semantic.gen.hpp')
# Iwa
src_files = Split("""
source/addon.cpp
source/buffer.cpp
source/command.cpp
source/descriptor_set.cpp
source/device.cpp
source/device_memory.cpp
source/event.cpp
source/fence.cpp
source/image.cpp
source/input.cpp
source/instance.cpp
source/object.cpp
source/pipeline.cpp
source/render_pass.cpp
source/semaphore.cpp
source/shader_module.cpp
source/swapchain.cpp
source/texture.cpp
source/window.cpp
source/app/vulkan_application.cpp
source/io/bitmap.cpp
source/io/font.cpp
source/io/mesh.cpp
source/resource/bitmap.cpp
source/resource/font.cpp
source/util/glsl_compiler.cpp
source/util/growing_descriptor_pool.cpp
source/util/image_reference.cpp
source/util/reflect_glsl.cpp
source/util/render_loop.cpp
source/util/shader_meta.cpp
source/util/texture_atlas.cpp
source/util/vertex_layout.cpp
source/util/vkutil.cpp
""")
lib_iwa = env.UnityStaticLibrary(
target = env['LIB_DIR'] + '/iwa',
source = src_files,
dependencies = [lib_fmt, lib_glslang, lib_glm, lib_magic_enum, lib_mijin, lib_sdl, lib_stb, lib_vulkan_headers,
lib_argparse, lib_yaml_cpp]
)
LIB_CONFIG = {
'CPPPATH': [env.Dir('include')],
'DEPENDENCIES': [lib_iwa]
}
Return('LIB_CONFIG')

29
include/iwa/addon.hpp Normal file
View File

@@ -0,0 +1,29 @@
#pragma once
#if !defined(IWA_ADDON_HPP_INCLUDED)
#define IWA_ADDON_HPP_INCLUDED
#include <functional>
#include <span>
#include "iwa/instance.hpp"
namespace iwa
{
struct AddonInitArgs
{
ObjectPtr<Instance> instance;
InstanceCreationArgs& instanceCreationArgs;
};
class Addon
{
public:
Addon();
virtual ~Addon() = default;
virtual void init(const AddonInitArgs& args) = 0;
virtual void cleanup() = 0;
};
[[nodiscard]] std::span<Addon* const> getAddons() noexcept;
} // namespace iwa
#endif // !defined(IWA_ADDON_HPP_INCLUDED)
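
A minimal sketch of implementing the Addon interface above (illustrative only, not part of the commit). NoopAddon is a hypothetical name, and it is assumed that the Addon() base constructor handles registration so the instance later shows up in getAddons():

class NoopAddon final : public iwa::Addon
{
public:
    void init(const iwa::AddonInitArgs& /*args*/) override {} // nothing to set up
    void cleanup() override {}                                // nothing to tear down
};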

View File

@@ -0,0 +1,62 @@
#pragma once
#if !defined(IWA_ADDONS_IMGUI_ADDON_HPP_INCLUDED)
#define IWA_ADDONS_IMGUI_ADDON_HPP_INCLUDED
#include <memory>
#include <type_traits>
#include <vector>
#include "iwa/addon.hpp"
#include "iwa/descriptor_set.hpp"
#include "iwa/swapchain.hpp"
#include "iwa/window.hpp"
#include "iwa/addons/imgui/widget.hpp"
namespace iwa
{
struct ImguiCreateResourcesArgs
{
Swapchain& swapchain;
vk::Format format;
};
class ImGuiAddon : public Addon
{
private:
ObjectPtr<DescriptorPool> mDescriptorPool;
std::vector<std::unique_ptr<ImGuiWidget>> mWidgets;
public:
void init(const AddonInitArgs& args) override;
void cleanup() override;
mijin::Task<> c_createResources(const ImguiCreateResourcesArgs& args);
void renderFrame(vk::CommandBuffer cmdBuffer);
template<typename TWidget> requires (std::is_base_of_v<ImGuiWidget, TWidget>)
TWidget* addWidget(std::unique_ptr<TWidget>&& widget)
{
TWidget* raw = widget.get();
mWidgets.push_back(std::move(widget));
return raw;
}
template<typename TWidget, typename... TArgs> requires (std::is_base_of_v<ImGuiWidget, TWidget>)
TWidget* emplaceWidget(TArgs&&... args)
{
return addWidget(std::make_unique<TWidget>(std::forward<TArgs>(args)...));
}
void removeWidget(ImGuiWidget* widget);
private:
void beginFrame() noexcept;
void handleKeyChanged(const KeyEvent& event);
void handleMouseButtonChanged(const MouseButtonEvent& event);
void handleMouseMoved(const MouseMoveEvent& event);
void handleMouseScrolled(const MouseWheelEvent& event);
void handleTextEntered(const TextInputEvent& event);
public:
static ImGuiAddon& get() noexcept;
};
} // namespace iwa
#endif // !defined(IWA_ADDONS_IMGUI_ADDON_HPP_INCLUDED)
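
For illustration (not part of the commit): widgets are registered with the singleton via emplaceWidget, e.g. the FPS widget declared in a later header. It is assumed here that ImGuiFpsWidget is default-constructible and that the addon keeps ownership of the widget:

void setupImGuiWidgets()
{
    // returns a raw pointer that stays valid while the addon owns the widget
    iwa::ImGuiFpsWidget* fps = iwa::ImGuiAddon::get().emplaceWidget<iwa::ImGuiFpsWidget>();
    (void) fps;
}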

View File

@@ -0,0 +1,22 @@
#pragma once
#if !defined(IWA_ADDONS_IMGUI_FPS_WIDGET_HPP_INCLUDED)
#define IWA_ADDONS_IMGUI_FPS_WIDGET_HPP_INCLUDED
#include <chrono>
#include "iwa/addons/imgui/widget.hpp"
#include "iwa/util/fps_calculator.hpp"
namespace iwa
{
class ImGuiFpsWidget : public ImGuiWidget
{
private:
FpsCalculator<100> mFpsCalculator;
public:
void draw() override;
};
} // namespace iwa
#endif // !defined(IWA_ADDONS_IMGUI_FPS_WIDGET_HPP_INCLUDED)

View File

@@ -0,0 +1,34 @@
#pragma once
#if !defined(IWA_ADDONS_IMGUI_MISC_WIDGETS_HPP_INCLUDED)
#define IWA_ADDONS_IMGUI_MISC_WIDGETS_HPP_INCLUDED
#include <string>
#include <imgui.h>
#include <magic_enum.hpp>
namespace iwa
{
template<typename TEnum>
bool MagicCombo(const char* label, TEnum& currentValue)
{
const std::string currentValueName(magic_enum::enum_name(currentValue));
bool result = false;
if (ImGui::BeginCombo(label, currentValueName.c_str()))
{
for (TEnum value : magic_enum::enum_values<TEnum>())
{
const std::string valueName(magic_enum::enum_name(value));
if (ImGui::Selectable(valueName.c_str())) {
currentValue = value;
result = true;
}
}
ImGui::EndCombo();
}
return result;
}
} // namespace iwa
#endif // !defined(IWA_ADDONS_IMGUI_MISC_WIDGETS_HPP_INCLUDED)
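
Illustrative usage of MagicCombo (not part of the commit); SampleMode and drawSampleModeCombo are made-up names, and the call is assumed to happen inside an active ImGui frame:

enum class SampleMode { Nearest, Linear, Anisotropic };

void drawSampleModeCombo(SampleMode& mode)
{
    // returns true only on the frame in which a new value was picked
    if (iwa::MagicCombo("Sample mode", mode))
    {
        // react to the change here
    }
}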

View File

@@ -0,0 +1,20 @@
#pragma once
#if !defined(IWA_ADDONS_IMGUI_WIDGET_HPP_INCLUDED)
#define IWA_ADDONS_IMGUI_WIDGET_HPP_INCLUDED
#include "iwa/object.hpp"
namespace iwa
{
class ImGuiWidget
{
public:
virtual ~ImGuiWidget() = default;
virtual void draw() = 0;
};
} // namespace iwa
#endif // !defined(IWA_ADDONS_IMGUI_WIDGET_HPP_INCLUDED)

View File

@@ -0,0 +1,45 @@
#pragma once
#if !defined(IWA_APP_VULKAN_APPLICATION_HPP_INCLUDED)
#define IWA_APP_VULKAN_APPLICATION_HPP_INCLUDED
#include <mijin/util/bitflags.hpp>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
#include "iwa/swapchain.hpp"
namespace iwa
{
struct ApplicationCreationFlags
{
std::uint8_t skipDefaultArgs : 1 = 0;
};
struct ApplicationCreationArgs
{
ApplicationCreationFlags flags;
InstanceCreationArgs instanceArgs;
DeviceCreationArgs deviceArgs;
WindowCreationArgs mainWindowArgs;
SwapchainCreationArgs mainWindowSwapchainArgs;
fs::path assetPath = fs::current_path() / "assets";
};
class VulkanApplication : public Object<VulkanApplication>
{
protected:
ObjectPtr<Instance> mInstance;
ObjectPtr<Device> mDevice;
ObjectPtr<Window> mMainWindow;
ObjectPtr<Swapchain> mMainWindowSwapchain;
protected:
explicit VulkanApplication(const ApplicationCreationArgs& args, ObjectPtr<> owner = nullptr);
public:
virtual mijin::Task<> c_init() = 0;
[[nodiscard]] int execute(int argc, char** argv); // NOLINT
};
} // namespace iwa
#endif // !defined(IWA_APP_VULKAN_APPLICATION_HPP_INCLUDED)

152
include/iwa/buffer.hpp Normal file
View File

@@ -0,0 +1,152 @@
#pragma once
#if !defined(IWA_BUFFER_HPP_INCLUDED)
#define IWA_BUFFER_HPP_INCLUDED
#include <ranges>
#include <mijin/async/coroutine.hpp>
#include <mijin/container/stride_span.hpp>
#include <mijin/container/typeless_buffer.hpp>
#include <mijin/util/flag.hpp>
#include "iwa/device_memory.hpp"
#include "iwa/util/vkutil.hpp"
namespace iwa
{
MIJIN_DEFINE_FLAG(HostVisible);
MIJIN_DEFINE_FLAG(HostCoherent);
struct BufferCreationArgs
{
vk::BufferCreateFlags flags;
vk::DeviceSize size;
vk::BufferUsageFlags usage;
vk::SharingMode sharingMode = vk::SharingMode::eExclusive;
std::vector<std::uint32_t> queueFamilyIndices;
};
class Buffer : public Object<Buffer, BaseObject, class Device>, public MixinVulkanObject<vk::Buffer>
{
public:
ObjectPtr<DeviceMemory> mMemory;
vk::DeviceSize mBytesSize;
public:
Buffer(ObjectPtr<class Device> owner, const BufferCreationArgs& args);
~Buffer() noexcept override;
[[nodiscard]] const ObjectPtr<DeviceMemory>& getMemory() const noexcept { return mMemory; }
void allocateMemory(HostVisible hostVisible = HostVisible::NO, HostCoherent hostCoherent = HostCoherent::YES); // the latter is ignored if the first one is NO
void bindMemory(ObjectPtr<DeviceMemory> memory, vk::DeviceSize offset = 0);
mijin::Task<> c_fill(std::uint32_t data, std::size_t bytes = VK_WHOLE_SIZE, std::size_t byteOffset = 0);
mijin::Task<> c_copyFrom(vk::Buffer srcBuffer, vk::BufferCopy region);
mijin::Task<> c_upload(const void* data, std::size_t bytes, std::size_t byteOffset = 0);
mijin::Task<> c_upload(const mijin::TypelessBuffer& data, std::size_t byteOffset = 0);
template<std::ranges::contiguous_range TRange>
mijin::Task<> c_upload(const TRange& range, std::size_t byteOffset = 0)
{
return c_upload(std::ranges::cdata(range), std::ranges::size(range) * sizeof(std::ranges::range_value_t<TRange>), byteOffset);
}
};
template<typename T>
struct TypedBufferCreationArgs
{
vk::BufferCreateFlags flags;
vk::BufferUsageFlags usage;
unsigned arraySize = 1;
vk::DeviceSize stride = sizeof(T);
vk::SharingMode sharingMode = vk::SharingMode::eExclusive;
std::vector<std::uint32_t> queueFamilyIndices;
};
template<typename Type>
class TypedBuffer : public Object<TypedBuffer<Type>, Buffer>
{
public:
using super_t = typename Object<TypedBuffer<Type>, Buffer>::super_t;
protected:
unsigned mArraySize = 0;
vk::DeviceSize mStride = 0;
public:
TypedBuffer(ObjectPtr<class Device> owner, const TypedBufferCreationArgs<Type>& args)
: super_t(std::move(owner), BufferCreationArgs{
.flags = args.flags,
.size = sizeof(Type) + (args.arraySize - 1) * args.stride,
.usage = args.usage,
.sharingMode = args.sharingMode,
.queueFamilyIndices = args.queueFamilyIndices
}), mArraySize(args.arraySize), mStride(args.stride) {}
[[nodiscard]] unsigned getArraySize() const noexcept { return mArraySize; }
[[nodiscard]] vk::DeviceSize getStride() const noexcept { return mStride; }
[[nodiscard]] mijin::StrideSpan<Type> mapToSpan() const;
};
template<typename T>
struct TypedUniformBufferCreationArgs
{
vk::BufferCreateFlags flags;
vk::BufferUsageFlags usage;
unsigned arraySize = 1;
vk::SharingMode sharingMode = vk::SharingMode::eExclusive;
std::vector<std::uint32_t> queueFamilyIndices;
};
template<typename Type>
class TypedUniformBuffer : public Object<TypedUniformBuffer<Type>, TypedBuffer<Type>>
{
public:
using super_t = typename Object<TypedUniformBuffer<Type>, TypedBuffer<Type>>::super_t;
public:
TypedUniformBuffer(ObjectPtr<class Device> owner, const TypedUniformBufferCreationArgs<Type>& args)
: super_t(owner, TypedBufferCreationArgs<Type>{
.flags = args.flags,
.usage = args.usage | vk::BufferUsageFlagBits::eUniformBuffer,
.arraySize = args.arraySize,
.stride = calcVkUniformStride<Type>(*owner),
.sharingMode = args.sharingMode,
.queueFamilyIndices = args.queueFamilyIndices
}) {}
};
template<typename Type>
using TypedStorageBufferCreationArgs = TypedUniformBufferCreationArgs<Type>;
template<typename Type>
class TypedStorageBuffer : public Object<TypedStorageBuffer<Type>, TypedBuffer<Type>>
{
public:
using super_t = typename Object<TypedStorageBuffer<Type>, TypedBuffer<Type>>::super_t;
public:
TypedStorageBuffer(ObjectPtr<class Device> owner, const TypedUniformBufferCreationArgs<Type>& args)
: super_t(owner, TypedBufferCreationArgs<Type>{
.flags = args.flags,
.usage = args.usage | vk::BufferUsageFlagBits::eStorageBuffer,
.arraySize = args.arraySize,
.stride = calcVkStorageBufferStride<Type>(*owner),
.sharingMode = args.sharingMode,
.queueFamilyIndices = args.queueFamilyIndices
}) {}
};
template<typename Type>
mijin::StrideSpan<Type> TypedBuffer<Type>::mapToSpan() const
{
std::byte* rawMemory = static_cast<std::byte*>(this->getOwner()->getVkHandle().mapMemory(
/* memory = */ *this->getMemory(),
/* offset = */ 0,
/* size = */ VK_WHOLE_SIZE
));
return mijin::StrideSpan<Type>(
std::bit_cast<Type*>(rawMemory),
std::bit_cast<Type*>(rawMemory + mArraySize * mStride),
mStride
);
}
} // namespace iwa
#endif // !defined(IWA_BUFFER_HPP_INCLUDED)
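
A sketch of how the typed buffers above might be used (illustrative only, not part of the commit). CameraData and c_createCameraBuffer are made-up names; it is assumed that IWA_NEW_OBJECT (from object.hpp) yields an ObjectPtr to the concrete type, that mijin::Task<> is co_await-able, and that c_upload needs eTransferDst usage:

struct CameraData { float viewProj[16]; };

mijin::Task<> c_createCameraBuffer(iwa::ObjectPtr<iwa::Device> device, CameraData data)
{
    auto buffer = IWA_NEW_OBJECT(iwa::TypedUniformBuffer<CameraData>, device,
        iwa::TypedUniformBufferCreationArgs<CameraData>{
            .usage = vk::BufferUsageFlagBits::eTransferDst // assumed to be required for c_upload
        });
    buffer->allocateMemory();                              // device-local by default (HostVisible::NO)
    co_await buffer->c_upload(&data, sizeof(CameraData));
}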

41
include/iwa/command.hpp Normal file
View File

@@ -0,0 +1,41 @@
#pragma once
#if !defined(IWA_COMMAND_HPP_INCLUDED)
#define IWA_COMMAND_HPP_INCLUDED
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct CommandPoolCreationArgs
{
vk::CommandPoolCreateFlags flags;
std::uint32_t queueFamilyIndex;
};
struct CommandBufferAllocateArgs
{
vk::CommandBufferLevel level = vk::CommandBufferLevel::ePrimary;
};
class CommandPool : public Object<CommandPool, BaseObject, class Device>, public MixinVulkanObject<vk::CommandPool>
{
public:
CommandPool(ObjectPtr<class Device> owner, CommandPoolCreationArgs args);
~CommandPool() noexcept override;
[[nodiscard]] ObjectPtr<class CommandBuffer> allocateCommandBuffer(const CommandBufferAllocateArgs& args = {});
// [[nodiscard]] std::vector<vk::CommandBuffer> allocateCommandBuffers(std::size_t count, const CommandBufferAllocateArgs& args = {}) const noexcept;
};
class CommandBuffer : public Object<CommandBuffer, BaseObject, CommandPool>, public MixinVulkanObject<vk::CommandBuffer>
{
public:
CommandBuffer(ObjectPtr<CommandPool> owner, vk::CommandBuffer handle);
~CommandBuffer() noexcept override;
};
} // namespace iwa
#endif // !defined(IWA_COMMAND_HPP_INCLUDED)

View File

@@ -0,0 +1,61 @@
#pragma once
#if !defined(IWA_DESCRIPTOR_SET_HPP_INCLUDED)
#define IWA_DESCRIPTOR_SET_HPP_INCLUDED
#include <vector>
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct DescriptorSetLayoutCreationArgs
{
std::vector<vk::DescriptorSetLayoutBinding> bindings;
std::vector<vk::DescriptorBindingFlags> bindingFlags;
vk::DescriptorSetLayoutCreateFlags flags = {};
};
class DescriptorSetLayout : public Object<DescriptorSetLayout, BaseObject, class Device>, public MixinVulkanObject<vk::DescriptorSetLayout>
{
public:
DescriptorSetLayout(ObjectPtr<class Device> owner, const DescriptorSetLayoutCreationArgs& args);
~DescriptorSetLayout() noexcept override;
};
struct DescriptorPoolCreationArgs
{
vk::DescriptorPoolCreateFlags flags = {};
unsigned maxSets = 0;
std::vector<vk::DescriptorPoolSize> poolSizes;
};
struct DescriptorSetAllocateArgs
{
ObjectPtr<DescriptorSetLayout> layout;
std::uint32_t variableDescriptorCount = 0;
};
class DescriptorPool : public Object<DescriptorPool, BaseObject, class Device>, public MixinVulkanObject<vk::DescriptorPool>
{
private:
bool mCanFree = false;
public:
DescriptorPool(ObjectPtr<class Device> owner, const DescriptorPoolCreationArgs& args);
~DescriptorPool() noexcept override;
[[nodiscard]] ObjectPtr<class DescriptorSet> allocateDescriptorSet(const DescriptorSetAllocateArgs& args);
[[nodiscard]] bool getCanFree() const noexcept { return mCanFree; }
};
class DescriptorSet : public Object<DescriptorSet, BaseObject, DescriptorPool>, public MixinVulkanObject<vk::DescriptorSet>
{
public:
DescriptorSet(ObjectPtr<DescriptorPool> owner, vk::DescriptorSet handle);
~DescriptorSet() noexcept override;
};
} // namespace iwa
#endif // !defined(IWA_DESCRIPTOR_SET_HPP_INCLUDED)
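
Illustrative only (not part of the commit): allocating a single descriptor set from a pool using the types above; 'pool' and 'layout' are assumed to have been created elsewhere:

iwa::ObjectPtr<iwa::DescriptorSet> allocateSet(
    const iwa::ObjectPtr<iwa::DescriptorPool>& pool,
    iwa::ObjectPtr<iwa::DescriptorSetLayout> layout)
{
    // variableDescriptorCount keeps its default of 0
    return pool->allocateDescriptorSet({ .layout = std::move(layout) });
}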

164
include/iwa/device.hpp Normal file
View File

@@ -0,0 +1,164 @@
#pragma once
#ifndef IWA_VULKAN_DEVICE_HPP_INCLUDED
#define IWA_VULKAN_DEVICE_HPP_INCLUDED
#include <limits>
#include <shared_mutex>
#include <mijin/util/bitflags.hpp>
#include <mijin/util/flag.hpp>
#include "iwa/command.hpp"
#include "iwa/device_memory.hpp"
#include "iwa/fence.hpp"
#include "iwa/vkwrapper.hpp"
#include "iwa/util/task_runner.hpp"
#define IWA_DELETE_DEVICE_OBJECT(device, var, deleter) \
if (var) \
{ \
(device)->queueDelete([handle=(var), dev=(device)->getVkHandle()]() \
{ \
dev.deleter(handle); \
}); \
}
namespace iwa
{
struct ExtensionInfo // TODO: where to put this?
{
const char* name;
union
{
// same thing, one for initialisation, one for runtime
bool required;
bool enabled;
};
};
using LayerInfo = ExtensionInfo;
struct RendererFeatures : mijin::BitFlags<RendererFeatures>
{
bool rayTracing : 1 = false;
bool meshShaders : 1 = false;
};
struct PhysicalDeviceInfo
{
vk::PhysicalDevice device;
vk::PhysicalDeviceProperties properties;
vk::SurfaceCapabilitiesKHR surfaceCapabilities;
vk::PhysicalDeviceFeatures features;
vk::PhysicalDeviceVulkan11Features vulkan11Features;
vk::PhysicalDeviceVulkan12Features vulkan12Features;
vk::PhysicalDeviceVulkan13Features vulkan13Features;
vk::PhysicalDeviceAccelerationStructureFeaturesKHR accelerationStructureFeatures;
vk::PhysicalDeviceRayTracingPipelineFeaturesKHR rayTracingPipelineFeatures;
vk::PhysicalDeviceMeshShaderFeaturesEXT meshShaderFeatures;
vk::PhysicalDeviceMemoryProperties memoryProperties;
vk::PhysicalDeviceRayTracingPipelinePropertiesKHR rayTracingProperties;
std::vector<vk::ExtensionProperties> extensions;
std::uint32_t graphicsQueueFamily = std::numeric_limits<std::uint32_t>::max();
std::uint32_t computeQueueFamily = std::numeric_limits<std::uint32_t>::max();
RendererFeatures availableFeatures = {};
};
struct PhysicalDeviceCriteria
{
template<typename T>
using feature_flag_t = vk::Bool32 T::*;
std::vector<feature_flag_t<vk::PhysicalDeviceFeatures>> requiredFeatures;
std::vector<feature_flag_t<vk::PhysicalDeviceVulkan11Features>> requiredVulkan11Features;
std::vector<feature_flag_t<vk::PhysicalDeviceVulkan12Features>> requiredVulkan12Features;
std::vector<feature_flag_t<vk::PhysicalDeviceVulkan13Features>> requiredVulkan13Features;
std::vector<feature_flag_t<vk::PhysicalDeviceAccelerationStructureFeaturesKHR>> requiredAccelerationStructureFeatures;
std::vector<feature_flag_t<vk::PhysicalDeviceDescriptorIndexingFeatures>> requiredDescriptorIndexingFeatures;
std::vector<feature_flag_t<vk::PhysicalDeviceRayTracingPipelineFeaturesKHR>> requiredRayTracingPipelineFeatures;
std::vector<feature_flag_t<vk::PhysicalDeviceMeshShaderFeaturesEXT>> requiredMeshShaderFeatures;
};
struct DeviceCreationFlags : mijin::BitFlags<DeviceCreationFlags>
{
/** Optimize Vulkan API calls for this device. Set to NO if you plan to create multiple devices. */
std::uint8_t singleDevice : 1 = 1;
std::uint8_t noDefaultExtensions : 1 = 0;
};
struct DeviceCreationArgs
{
PhysicalDeviceCriteria physicalDeviceCriteria;
std::vector<ExtensionInfo> extensions;
DeviceCreationFlags flags;
};
class ScratchCommandPool
{
public:
struct Buffer
{
ObjectPtr<CommandBuffer> cmdBuffer;
mijin::FuturePtr<void> doneFuture;
};
private:
ObjectPtr<CommandPool> mCommandPool;
std::vector<Buffer> mBuffers;
public:
explicit ScratchCommandPool(Device& device);
ScratchCommandPool(const ScratchCommandPool&) = default;
ScratchCommandPool(ScratchCommandPool&&) = default;
ScratchCommandPool& operator=(const ScratchCommandPool&) = default;
ScratchCommandPool& operator=(ScratchCommandPool&&) = default;
[[nodiscard]] ObjectPtr<CommandBuffer> allocateCommandBuffer();
[[nodiscard]] mijin::FuturePtr<void> getFuture(const ObjectPtr<CommandBuffer>& cmdBuffer) noexcept;
};
class Device : public Object<Device, BaseObject, class Instance>, public MixinVulkanObject<vk::Device>
{
private:
struct PendingCommandBuffer
{
ObjectPtr<CommandBuffer> cmdBuffer;
ObjectPtr<Fence> doneFence;
mijin::FuturePtr<void> future;
};
struct PendingSubmit
{
vk::Queue queue;
ObjectPtr<CommandBuffer> cmdBuffer;
mijin::FuturePtr<void> future;
};
vk::Queue mGraphicsQueue;
vk::Queue mComputeQueue;
std::vector<ExtensionInfo> mExtensions;
const PhysicalDeviceInfo* mDeviceInfo;
std::unordered_map<std::thread::id, ScratchCommandPool> mScratchCommandPools;
std::shared_mutex mScratchCommandPoolsMutex;
std::vector<PendingCommandBuffer> mPendingScratchCmdBuffers;
std::mutex mPendingScratchCmdBuffersMutex;
mijin::TaskHandle mUpdateLoopHandle;
mijin::MessageQueue<PendingSubmit> pendingSubmits;
public:
Device(ObjectPtr<class Instance> owner, DeviceCreationArgs args);
~Device() noexcept override;
[[nodiscard]] vk::Queue getGraphicsQueue() const noexcept { return mGraphicsQueue; }
[[nodiscard]] vk::Queue getComputeQueue() const noexcept { return mComputeQueue; }
[[nodiscard]] vk::PhysicalDevice getVkPhysicalDevice() const noexcept { return mDeviceInfo->device; }
[[nodiscard]] const PhysicalDeviceInfo& getDeviceInfo() const noexcept { return *mDeviceInfo; }
[[nodiscard]] ObjectPtr<DeviceMemory> allocateDeviceMemory(const DeviceMemoryAllocationArgs& args);
[[nodiscard]] ObjectPtr<CommandBuffer> beginScratchCommandBuffer();
mijin::FuturePtr<void> endScratchCommandBuffer(ObjectPtr<CommandBuffer> cmdBuffer);
void queueDelete(std::function<void()> deleter) noexcept;
private:
mijin::Task<> c_updateLoop() noexcept;
};
}
#endif // IWA_VULKAN_DEVICE_HPP_INCLUDED
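
Illustrative only (not part of the commit): the scratch-command-buffer flow declared above, i.e. begin, record, end, and keep the returned future to know when the GPU work has finished. submitScratchWork is a made-up name and the recorded commands are omitted:

void submitScratchWork(iwa::Device& device)
{
    iwa::ObjectPtr<iwa::CommandBuffer> cmd = device.beginScratchCommandBuffer();
    // ... record commands through cmd->getVkHandle() ...
    mijin::FuturePtr<void> done = device.endScratchCommandBuffer(std::move(cmd));
    (void) done; // assumed to become ready once the submitted work completes
}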

View File

@@ -0,0 +1,29 @@
#pragma once
#if !defined(IWA_DEVICE_MEMORY_HPP_INCLUDED)
#define IWA_DEVICE_MEMORY_HPP_INCLUDED
#include <optional>
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct DeviceMemoryAllocationArgs
{
vk::DeviceSize allocationSize;
std::uint32_t memoryTypeIndex;
};
class DeviceMemory : public Object<DeviceMemory, BaseObject, class Device>, public MixinVulkanObject<vk::DeviceMemory>
{
public:
DeviceMemory(ObjectPtr<class Device> owner, const DeviceMemoryAllocationArgs& args);
~DeviceMemory() noexcept override;
};
[[nodiscard]] std::optional<std::uint32_t> findMemoryType(class Device& device, const vk::MemoryRequirements& requirements, vk::MemoryPropertyFlags properties);
} // namespace iwa
#endif // !defined(IWA_DEVICE_MEMORY_HPP_INCLUDED)

26
include/iwa/event.hpp Normal file
View File

@@ -0,0 +1,26 @@
#if !defined(IWA_EVENT_HPP_INCLUDED)
#define IWA_EVENT_HPP_INCLUDED
#include <mijin/async/coroutine.hpp>
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct EventCreationArgs
{
vk::EventCreateFlags flags;
};
class Event : public Object<Event, BaseObject, class Device>, public MixinVulkanObject<vk::Event>
{
public:
explicit Event(ObjectPtr<class Device> owner, const EventCreationArgs& args = {});
~Event() noexcept override;
mijin::Task<> c_wait();
};
}
#endif // !defined(IWA_EVENT_HPP_INCLUDED)

30
include/iwa/fence.hpp Normal file
View File

@@ -0,0 +1,30 @@
#pragma once
#if !defined(IWA_FENCE_HPP_INCLUDED)
#define IWA_FENCE_HPP_INCLUDED
#include <mijin/async/coroutine.hpp>
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct FenceCreationArgs
{
vk::FenceCreateFlags flags;
};
class Fence : public Object<Fence, BaseObject, class Device>, public MixinVulkanObject<vk::Fence>
{
public:
explicit Fence(ObjectPtr<class Device> owner, const FenceCreationArgs& args = {});
~Fence() noexcept override;
[[nodiscard]] bool isDone() const;
[[nodiscard]] mijin::Task<> c_wait() const;
void reset() const;
};
} // namespace iwa
#endif // !defined(IWA_FENCE_HPP_INCLUDED)

270
include/iwa/image.hpp Normal file
View File

@@ -0,0 +1,270 @@
#pragma once
#ifndef IWA_IMAGE_HPP_INCLUDED
#define IWA_IMAGE_HPP_INCLUDED
#include <mijin/async/coroutine.hpp>
#include <mijin/util/bitflags.hpp>
#include <mijin/util/flag.hpp>
#include "iwa/device_memory.hpp"
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
#include "iwa/util/vkutil.hpp"
namespace iwa
{
inline constexpr std::uint32_t MAX_MIP_LEVELS = std::numeric_limits<std::uint32_t>::max();
static constexpr vk::ComponentMapping DEFAULT_COMPONENT_MAPPING = {
.r = vk::ComponentSwizzle::eIdentity,
.g = vk::ComponentSwizzle::eIdentity,
.b = vk::ComponentSwizzle::eIdentity,
.a = vk::ComponentSwizzle::eIdentity
};
static constexpr vk::ImageSubresourceRange DEFAULT_SUBRESOURCE_RANGE = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = VK_REMAINING_MIP_LEVELS,
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS
};
static constexpr vk::ImageSubresourceRange DEFAULT_DEPTH_SUBRESOURCE_RANGE = {
.aspectMask = vk::ImageAspectFlagBits::eDepth,
.baseMipLevel = 0,
.levelCount = VK_REMAINING_MIP_LEVELS,
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS
};
static constexpr vk::ImageSubresourceLayers DEFAULT_SUBRESOURCE_LAYERS = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1
};
struct ImageTransition
{
vk::PipelineStageFlags stages;
vk::ImageLayout layout;
vk::AccessFlags access;
vk::ImageSubresourceRange subResourceRange = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.baseMipLevel = 0,
.levelCount = VK_REMAINING_MIP_LEVELS,
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS
};
};
static constexpr ImageTransition IMAGE_TRANSITION_FRAGMENT_READ = {
.stages = vk::PipelineStageFlagBits::eFragmentShader,
.layout = vk::ImageLayout::eShaderReadOnlyOptimal,
.access = vk::AccessFlagBits::eShaderRead
};
static constexpr ImageTransition IMAGE_TRANSITION_COMPUTE_WRITE = {
.stages = vk::PipelineStageFlagBits::eComputeShader,
.layout = vk::ImageLayout::eGeneral,
.access = vk::AccessFlagBits::eShaderWrite
};
static constexpr ImageTransition IMAGE_TRANSITION_COMPUTE_READ = {
.stages = vk::PipelineStageFlagBits::eComputeShader,
.layout = vk::ImageLayout::eGeneral,
.access = vk::AccessFlagBits::eShaderRead
};
static constexpr ImageTransition IMAGE_TRANSITION_COLOR_ATTACHMENT = {
.stages = vk::PipelineStageFlagBits::eColorAttachmentOutput,
.layout = vk::ImageLayout::eColorAttachmentOptimal,
.access = vk::AccessFlagBits::eColorAttachmentWrite | vk::AccessFlagBits::eColorAttachmentRead
};
static constexpr ImageTransition IMAGE_TRANSITION_TRANSFER_READ = {
.stages = vk::PipelineStageFlagBits::eTransfer,
.layout = vk::ImageLayout::eTransferSrcOptimal,
.access = vk::AccessFlagBits::eTransferRead
};
static constexpr ImageTransition IMAGE_TRANSITION_TRANSFER_WRITE = {
.stages = vk::PipelineStageFlagBits::eTransfer,
.layout = vk::ImageLayout::eTransferDstOptimal,
.access = vk::AccessFlagBits::eTransferWrite
};
struct ImageCreationArgs
{
vk::ImageCreateFlags flags = {};
vk::ImageType imageType = vk::ImageType::e2D;
vk::Format format = vk::Format::eR8G8B8A8Unorm;
vk::Extent3D extent = {};
uint32_t mipLevels = 1;
uint32_t arrayLayers = 1;
vk::SampleCountFlagBits samples = vk::SampleCountFlagBits::e1;
vk::ImageTiling tiling = vk::ImageTiling::eOptimal;
vk::ImageUsageFlags usage = {};
vk::SharingMode sharingMode = vk::SharingMode::eExclusive;
std::vector<std::uint32_t> queueFamilyIndices;
vk::ImageLayout initialLayout = vk::ImageLayout::eUndefined;
};
struct ImageWrapArgs
{
vk::Image handle;
vk::ImageType type;
vk::Format format;
vk::ImageUsageFlags usage;
vk::Extent3D size;
unsigned mipLevels = 1;
};
struct ImageFromBitmapArgs
{
const class Bitmap* bitmap;
vk::ImageCreateFlags flags = {};
vk::ImageTiling tiling = vk::ImageTiling::eOptimal;
vk::ImageUsageFlags usage = {};
vk::SharingMode sharingMode = vk::SharingMode::eExclusive;
std::vector<std::uint32_t> queueFamilyIndices;
vk::ImageLayout initialLayout = vk::ImageLayout::eUndefined;
};
struct ImageViewCreationArgs
{
vk::ImageViewCreateFlags flags;
vk::ImageViewType viewType = vk::ImageViewType::e2D;
vk::Format format = vk::Format::eUndefined;
vk::ComponentMapping components = DEFAULT_COMPONENT_MAPPING;
vk::ImageSubresourceRange subresourceRange = DEFAULT_SUBRESOURCE_RANGE;
};
MIJIN_DEFINE_FLAG(ResetLayout);
class Image : public Object<Image, BaseObject, class Device>, public MixinVulkanObject<vk::Image>
{
private:
vk::ImageCreateFlags mFlags;
vk::ImageType mType;
vk::Format mFormat;
vk::ImageTiling mTiling;
vk::ImageUsageFlags mUsage;
vk::Extent3D mSize;
unsigned mArrayLayers;
unsigned mMipLevels;
bool mWrapped = false;
ObjectPtr<DeviceMemory> mMemory;
vk::ImageLayout currentLayout = vk::ImageLayout::eUndefined;
vk::PipelineStageFlags lastUsageStages = vk::PipelineStageFlagBits::eTopOfPipe;
vk::AccessFlags lastAccess = {};
public:
Image(ObjectPtr<class Device> owner, ImageCreationArgs args);
Image(ObjectPtr<class Device> owner, ImageWrapArgs args);
~Image() noexcept override;
[[nodiscard]] vk::ImageType getType() const noexcept { return mType; }
[[nodiscard]] vk::Format getFormat() const noexcept { return mFormat; }
[[nodiscard]] vk::ImageUsageFlags getUsage() const noexcept { return mUsage; }
[[nodiscard]] const vk::Extent3D& getSize() const noexcept { return mSize; }
[[nodiscard]] unsigned getArrayLayers() const noexcept { return mArrayLayers; }
[[nodiscard]] unsigned getMipLevels() const noexcept { return mMipLevels; }
void allocateMemory();
void bindMemory(ObjectPtr<DeviceMemory> memory, vk::DeviceSize offset = 0);
void resetUsage(ResetLayout resetLayout = ResetLayout::NO) noexcept;
void applyTransition(vk::CommandBuffer cmdBuffer, const ImageTransition& transition);
[[nodiscard]] ObjectPtr<class ImageView> createImageView(const ImageViewCreationArgs& args = {});
mijin::Task<> c_doTransition(const ImageTransition& transition);
mijin::Task<> c_upload(const void* data, std::size_t bytes, vk::Extent3D bufferImageSize, vk::Offset3D imageOffset, unsigned baseLayer = 0, unsigned layerCount = 1);
mijin::Task<> c_upload(const class Bitmap& bitmap, vk::Offset3D imageOffset = {}, unsigned baseLayer = 0, unsigned layerCount = 1);
mijin::Task<> c_blitFrom(Image& srcImage, std::vector<vk::ImageBlit> regions, vk::Filter filter = vk::Filter::eNearest);
mijin::Task<> c_blitFrom(const class Bitmap& bitmap, std::vector<vk::ImageBlit> regions, vk::Filter filter = vk::Filter::eNearest);
mijin::Task<> c_copyFrom(Image& srcImage, std::vector<vk::ImageCopy> regions);
private:
[[nodiscard]] std::uint32_t clampMipLevels(std::uint32_t levels) const;
void generateMipMaps(vk::CommandBuffer cmdBuffer);
public:
static mijin::Task<ObjectPtr<Image>> c_create(ObjectPtr<class Device> owner, ImageFromBitmapArgs args);
};
class ImageView : public Object<ImageView, BaseObject, Image>, public MixinVulkanObject<vk::ImageView>
{
public:
ImageView(ObjectPtr<Image> owner, const ImageViewCreationArgs& args = {});
~ImageView() noexcept override;
};
struct SamplerCreationOptions : mijin::BitFlags<SamplerCreationOptions>
{
std::uint8_t anisotropyEnable : 1 = 0;
std::uint8_t compareEnable : 1 = 0;
std::uint8_t unnormalizedCoordinates : 1 = 0;
};
struct SamplerCreationArgs
{
vk::SamplerCreateFlags flags = {};
SamplerCreationOptions options;
vk::Filter magFilter = vk::Filter::eLinear;
vk::Filter minFilter = vk::Filter::eNearest;
vk::SamplerMipmapMode mipmapMode = vk::SamplerMipmapMode::eLinear;
vk::SamplerAddressMode addressModeU = vk::SamplerAddressMode::eRepeat;
vk::SamplerAddressMode addressModeV = vk::SamplerAddressMode::eRepeat;
vk::SamplerAddressMode addressModeW = vk::SamplerAddressMode::eRepeat;
float mipLodBias = 0.f;
float maxAnisotropy = 1.f;
vk::CompareOp compareOp = vk::CompareOp::eAlways;
float minLod = 0.f;
float maxLod = VK_LOD_CLAMP_NONE;
vk::BorderColor borderColor = vk::BorderColor::eFloatTransparentBlack;
};
class Sampler : public Object<Sampler, BaseObject, class Device>, public MixinVulkanObject<vk::Sampler>
{
public:
Sampler(ObjectPtr<class Device> owner, const SamplerCreationArgs& args = {});
~Sampler() noexcept override;
};
inline vk::ImageSubresourceRange defaultDepthSubresourceRange(vk::Format format, bool withStencil = true)
{
return {
.aspectMask = vk::ImageAspectFlagBits::eDepth | (withStencil && isStencilFormat(format) ? vk::ImageAspectFlagBits::eStencil : vk::ImageAspectFlagBits()),
.baseMipLevel = 0,
.levelCount = 1, // not really any miplevels for depth textures
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS
};
}
inline vk::ImageSubresourceLayers defaultDepthSubresourceLayers(vk::Format format)
{
return {
.aspectMask = vk::ImageAspectFlagBits::eDepth | (isStencilFormat(format) ? vk::ImageAspectFlagBits::eStencil : vk::ImageAspectFlagBits()),
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = VK_REMAINING_ARRAY_LAYERS
};
}
inline ImageTransition imageTransitionDepthAttachment(vk::Format depthFormat)
{
return {
.stages = vk::PipelineStageFlagBits::eEarlyFragmentTests | vk::PipelineStageFlagBits::eLateFragmentTests,
.layout = isStencilFormat(depthFormat) ? vk::ImageLayout::eDepthAttachmentStencilReadOnlyOptimal : vk::ImageLayout::eDepthAttachmentOptimal,
.access = vk::AccessFlagBits::eDepthStencilAttachmentRead | vk::AccessFlagBits::eDepthStencilAttachmentWrite,
.subResourceRange = defaultDepthSubresourceRange(depthFormat)
};
}
} // namespace iwa
#endif // !defined(IWA_IMAGE_HPP_INCLUDED)
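
Illustrative only (not part of the commit): applying one of the predefined transitions above before recording a copy into the image; 'image' and 'cmdBuffer' are assumed to come from the surrounding frame code:

void prepareForUpload(iwa::Image& image, vk::CommandBuffer cmdBuffer)
{
    // records a barrier moving the image into eTransferDstOptimal for transfer writes
    image.applyTransition(cmdBuffer, iwa::IMAGE_TRANSITION_TRANSFER_WRITE);
}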

483
include/iwa/input.hpp Normal file
View File

@@ -0,0 +1,483 @@
#pragma once
#if !defined(IWA_INPUT_HPP_INCLUDED)
#define IWA_INPUT_HPP_INCLUDED
#include <cstdint>
#include <string>
#include <SDL.h>
namespace iwa
{
// <editor-fold desc="KeyCode and ScanCode enums">
enum class KeyCode
{
UNKNOWN = -1,
RETURN = SDLK_RETURN,
ESCAPE = SDLK_ESCAPE,
BACKSPACE = SDLK_BACKSPACE,
TAB = SDLK_TAB,
SPACE = SDLK_SPACE,
EXCLAIM = SDLK_EXCLAIM,
QUOTEDBL = SDLK_QUOTEDBL,
HASH = SDLK_HASH,
PERCENT = SDLK_PERCENT,
DOLLAR = SDLK_DOLLAR,
AMPERSAND = SDLK_AMPERSAND,
QUOTE = SDLK_QUOTE,
LEFTPAREN = SDLK_LEFTPAREN,
RIGHTPAREN = SDLK_RIGHTPAREN,
ASTERISK = SDLK_ASTERISK,
PLUS = SDLK_PLUS,
COMMA = SDLK_COMMA,
MINUS = SDLK_MINUS,
PERIOD = SDLK_PERIOD,
SLASH = SDLK_SLASH,
_0 = SDLK_0,
_1 = SDLK_1,
_2 = SDLK_2,
_3 = SDLK_3,
_4 = SDLK_4,
_5 = SDLK_5,
_6 = SDLK_6,
_7 = SDLK_7,
_8 = SDLK_8,
_9 = SDLK_9,
COLON = SDLK_COLON,
SEMICOLON = SDLK_SEMICOLON,
LESS = SDLK_LESS,
EQUALS = SDLK_EQUALS,
GREATER = SDLK_GREATER,
QUESTION = SDLK_QUESTION,
AT = SDLK_AT,
/*
Skip uppercase letters
*/
LEFTBRACKET = SDLK_LEFTBRACKET,
BACKSLASH = SDLK_BACKSLASH,
RIGHTBRACKET = SDLK_RIGHTBRACKET,
CARET = SDLK_CARET,
UNDERSCORE = SDLK_UNDERSCORE,
BACKQUOTE = SDLK_BACKQUOTE,
A = SDLK_a,
B = SDLK_b,
C = SDLK_c,
D = SDLK_d,
E = SDLK_e,
F = SDLK_f,
G = SDLK_g,
H = SDLK_h,
I = SDLK_i,
J = SDLK_j,
K = SDLK_k,
L = SDLK_l,
M = SDLK_m,
N = SDLK_n,
O = SDLK_o,
P = SDLK_p,
Q = SDLK_q,
R = SDLK_r,
S = SDLK_s,
T = SDLK_t,
U = SDLK_u,
V = SDLK_v,
W = SDLK_w,
X = SDLK_x,
Y = SDLK_y,
Z = SDLK_z,
CAPSLOCK = SDLK_CAPSLOCK,
F1 = SDLK_F1,
F2 = SDLK_F2,
F3 = SDLK_F3,
F4 = SDLK_F4,
F5 = SDLK_F5,
F6 = SDLK_F6,
F7 = SDLK_F7,
F8 = SDLK_F8,
F9 = SDLK_F9,
F10 = SDLK_F10,
F11 = SDLK_F11,
F12 = SDLK_F12,
PRINTSCREEN = SDLK_PRINTSCREEN,
SCROLLLOCK = SDLK_SCROLLLOCK,
PAUSE = SDLK_PAUSE,
INSERT = SDLK_INSERT,
HOME = SDLK_HOME,
PAGEUP = SDLK_PAGEUP,
DELETE = SDLK_DELETE,
END = SDLK_END,
PAGEDOWN = SDLK_PAGEDOWN,
RIGHT = SDLK_RIGHT,
LEFT = SDLK_LEFT,
DOWN = SDLK_DOWN,
UP = SDLK_UP,
NUMLOCKCLEAR = SDLK_NUMLOCKCLEAR,
KP_DIVIDE = SDLK_KP_DIVIDE,
KP_MULTIPLY = SDLK_KP_MULTIPLY,
KP_MINUS = SDLK_KP_MINUS,
KP_PLUS = SDLK_KP_PLUS,
KP_ENTER = SDLK_KP_ENTER,
KP_1 = SDLK_KP_1,
KP_2 = SDLK_KP_2,
KP_3 = SDLK_KP_3,
KP_4 = SDLK_KP_4,
KP_5 = SDLK_KP_5,
KP_6 = SDLK_KP_6,
KP_7 = SDLK_KP_7,
KP_8 = SDLK_KP_8,
KP_9 = SDLK_KP_9,
KP_0 = SDLK_KP_0,
KP_PERIOD = SDLK_KP_PERIOD,
APPLICATION = SDLK_APPLICATION,
POWER = SDLK_POWER,
KP_EQUALS = SDLK_KP_EQUALS,
F13 = SDLK_F13,
F14 = SDLK_F14,
F15 = SDLK_F15,
F16 = SDLK_F16,
F17 = SDLK_F17,
F18 = SDLK_F18,
F19 = SDLK_F19,
F20 = SDLK_F20,
F21 = SDLK_F21,
F22 = SDLK_F22,
F23 = SDLK_F23,
F24 = SDLK_F24,
EXECUTE = SDLK_EXECUTE,
HELP = SDLK_HELP,
MENU = SDLK_MENU,
SELECT = SDLK_SELECT,
STOP = SDLK_STOP,
AGAIN = SDLK_AGAIN,
UNDO = SDLK_UNDO,
CUT = SDLK_CUT,
COPY = SDLK_COPY,
PASTE = SDLK_PASTE,
FIND = SDLK_FIND,
MUTE = SDLK_MUTE,
VOLUMEUP = SDLK_VOLUMEUP,
VOLUMEDOWN = SDLK_VOLUMEDOWN,
KP_COMMA = SDLK_KP_COMMA,
KP_EQUALSAS400 = SDLK_KP_EQUALSAS400,
ALTERASE = SDLK_ALTERASE,
SYSREQ = SDLK_SYSREQ,
CANCEL = SDLK_CANCEL,
CLEAR = SDLK_CLEAR,
PRIOR = SDLK_PRIOR,
RETURN2 = SDLK_RETURN2,
SEPARATOR = SDLK_SEPARATOR,
OUT = SDLK_OUT,
OPER = SDLK_OPER,
CLEARAGAIN = SDLK_CLEARAGAIN,
CRSEL = SDLK_CRSEL,
EXSEL = SDLK_EXSEL,
KP_00 = SDLK_KP_00,
KP_000 = SDLK_KP_000,
THOUSANDSSEPARATOR = SDLK_THOUSANDSSEPARATOR,
DECIMALSEPARATOR = SDLK_DECIMALSEPARATOR,
CURRENCYUNIT = SDLK_CURRENCYUNIT,
CURRENCYSUBUNIT = SDLK_CURRENCYSUBUNIT,
KP_LEFTPAREN = SDLK_KP_LEFTPAREN,
KP_RIGHTPAREN = SDLK_KP_RIGHTPAREN,
KP_LEFTBRACE = SDLK_KP_LEFTBRACE,
KP_RIGHTBRACE = SDLK_KP_RIGHTBRACE,
KP_TAB = SDLK_KP_TAB,
KP_BACKSPACE = SDLK_KP_BACKSPACE,
KP_A = SDLK_KP_A,
KP_B = SDLK_KP_B,
KP_C = SDLK_KP_C,
KP_D = SDLK_KP_D,
KP_E = SDLK_KP_E,
KP_F = SDLK_KP_F,
KP_XOR = SDLK_KP_XOR,
KP_POWER = SDLK_KP_POWER,
KP_PERCENT = SDLK_KP_PERCENT,
KP_LESS = SDLK_KP_LESS,
KP_GREATER = SDLK_KP_GREATER,
KP_AMPERSAND = SDLK_KP_AMPERSAND,
KP_DBLAMPERSAND = SDLK_KP_DBLAMPERSAND,
KP_VERTICALBAR = SDLK_KP_VERTICALBAR,
KP_DBLVERTICALBAR = SDLK_KP_DBLVERTICALBAR,
KP_COLON = SDLK_KP_COLON,
KP_HASH = SDLK_KP_HASH,
KP_SPACE = SDLK_KP_SPACE,
KP_AT = SDLK_KP_AT,
KP_EXCLAM = SDLK_KP_EXCLAM,
KP_MEMSTORE = SDLK_KP_MEMSTORE,
KP_MEMRECALL = SDLK_KP_MEMRECALL,
KP_MEMCLEAR = SDLK_KP_MEMCLEAR,
KP_MEMADD = SDLK_KP_MEMADD,
KP_MEMSUBTRACT = SDLK_KP_MEMSUBTRACT,
KP_MEMMULTIPLY = SDLK_KP_MEMMULTIPLY,
KP_MEMDIVIDE = SDLK_KP_MEMDIVIDE,
KP_PLUSMINUS = SDLK_KP_PLUSMINUS,
KP_CLEAR = SDLK_KP_CLEAR,
KP_CLEARENTRY = SDLK_KP_CLEARENTRY,
KP_BINARY = SDLK_KP_BINARY,
KP_OCTAL = SDLK_KP_OCTAL,
KP_DECIMAL = SDLK_KP_DECIMAL,
KP_HEXADECIMAL = SDLK_KP_HEXADECIMAL,
LCTRL = SDLK_LCTRL,
LSHIFT = SDLK_LSHIFT,
LALT = SDLK_LALT,
LGUI = SDLK_LGUI,
RCTRL = SDLK_RCTRL,
RSHIFT = SDLK_RSHIFT,
RALT = SDLK_RALT,
RGUI = SDLK_RGUI,
MODE = SDLK_MODE,
AUDIONEXT = SDLK_AUDIONEXT,
AUDIOPREV = SDLK_AUDIOPREV,
AUDIOSTOP = SDLK_AUDIOSTOP,
AUDIOPLAY = SDLK_AUDIOPLAY,
AUDIOMUTE = SDLK_AUDIOMUTE,
MEDIASELECT = SDLK_MEDIASELECT,
WWW = SDLK_WWW,
MAIL = SDLK_MAIL,
CALCULATOR = SDLK_CALCULATOR,
COMPUTER = SDLK_COMPUTER,
AC_SEARCH = SDLK_AC_SEARCH,
AC_HOME = SDLK_AC_HOME,
AC_BACK = SDLK_AC_BACK,
AC_FORWARD = SDLK_AC_FORWARD,
AC_STOP = SDLK_AC_STOP,
AC_REFRESH = SDLK_AC_REFRESH,
AC_BOOKMARKS = SDLK_AC_BOOKMARKS,
BRIGHTNESSDOWN = SDLK_BRIGHTNESSDOWN,
BRIGHTNESSUP = SDLK_BRIGHTNESSUP,
DISPLAYSWITCH = SDLK_DISPLAYSWITCH,
KBDILLUMTOGGLE = SDLK_KBDILLUMTOGGLE,
KBDILLUMDOWN = SDLK_KBDILLUMDOWN,
KBDILLUMUP = SDLK_KBDILLUMUP,
EJECT = SDLK_EJECT,
SLEEP = SDLK_SLEEP,
APP1 = SDLK_APP1,
APP2 = SDLK_APP2,
AUDIOREWIND = SDLK_AUDIOREWIND,
AUDIOFASTFORWARD = SDLK_AUDIOFASTFORWARD,
SOFTLEFT = SDLK_SOFTLEFT,
SOFTRIGHT = SDLK_SOFTRIGHT,
CALL = SDLK_CALL,
ENDCALL = SDLK_ENDCALL
};
enum class ScanCode
{
UNKNOWN = -1,
/* Printable keys */
SPACE = SDL_SCANCODE_SPACE,
APOSTROPHE = SDL_SCANCODE_APOSTROPHE,
COMMA = SDL_SCANCODE_COMMA,
MINUS = SDL_SCANCODE_MINUS,
PERIOD = SDL_SCANCODE_PERIOD,
SLASH = SDL_SCANCODE_SLASH,
_0 = SDL_SCANCODE_0,
_1 = SDL_SCANCODE_1,
_2 = SDL_SCANCODE_2,
_3 = SDL_SCANCODE_3,
_4 = SDL_SCANCODE_4,
_5 = SDL_SCANCODE_5,
_6 = SDL_SCANCODE_6,
_7 = SDL_SCANCODE_7,
_8 = SDL_SCANCODE_8,
_9 = SDL_SCANCODE_9,
SEMICOLON = SDL_SCANCODE_SEMICOLON,
EQUALS = SDL_SCANCODE_EQUALS,
A = SDL_SCANCODE_A,
B = SDL_SCANCODE_B,
C = SDL_SCANCODE_C,
D = SDL_SCANCODE_D,
E = SDL_SCANCODE_E,
F = SDL_SCANCODE_F,
G = SDL_SCANCODE_G,
H = SDL_SCANCODE_H,
I = SDL_SCANCODE_I,
J = SDL_SCANCODE_J,
K = SDL_SCANCODE_K,
L = SDL_SCANCODE_L,
M = SDL_SCANCODE_M,
N = SDL_SCANCODE_N,
O = SDL_SCANCODE_O,
P = SDL_SCANCODE_P,
Q = SDL_SCANCODE_Q,
R = SDL_SCANCODE_R,
S = SDL_SCANCODE_S,
T = SDL_SCANCODE_T,
U = SDL_SCANCODE_U,
V = SDL_SCANCODE_V,
W = SDL_SCANCODE_W,
X = SDL_SCANCODE_X,
Y = SDL_SCANCODE_Y,
Z = SDL_SCANCODE_Z,
LEFTBRACKET = SDL_SCANCODE_LEFTBRACKET,
BACKSLASH = SDL_SCANCODE_BACKSLASH,
RIGHTBRACKET = SDL_SCANCODE_RIGHTBRACKET,
GRAVE = SDL_SCANCODE_GRAVE,
// WORLD_1 = SDL_SCANCODE_WORLD_1,
// WORLD_2 = SDL_SCANCODE_WORLD_2,
/* Function keys */
ESCAPE = SDL_SCANCODE_ESCAPE,
ENTER = SDL_SCANCODE_RETURN,
TAB = SDL_SCANCODE_TAB,
BACKSPACE = SDL_SCANCODE_BACKSPACE,
INSERT = SDL_SCANCODE_INSERT,
DELETE = SDL_SCANCODE_DELETE,
RIGHT = SDL_SCANCODE_RIGHT,
LEFT = SDL_SCANCODE_LEFT,
DOWN = SDL_SCANCODE_DOWN,
UP = SDL_SCANCODE_UP,
PAGEUP = SDL_SCANCODE_PAGEUP,
PAGEDOWN = SDL_SCANCODE_PAGEDOWN,
HOME = SDL_SCANCODE_HOME,
END = SDL_SCANCODE_END,
CAPSLOCK = SDL_SCANCODE_CAPSLOCK,
SCROLLLOCK = SDL_SCANCODE_SCROLLLOCK,
NUMLOCK = SDL_SCANCODE_NUMLOCKCLEAR,
PRINTSCREEN = SDL_SCANCODE_PRINTSCREEN,
PAUSE = SDL_SCANCODE_PAUSE,
F1 = SDL_SCANCODE_F1,
F2 = SDL_SCANCODE_F2,
F3 = SDL_SCANCODE_F3,
F4 = SDL_SCANCODE_F4,
F5 = SDL_SCANCODE_F5,
F6 = SDL_SCANCODE_F6,
F7 = SDL_SCANCODE_F7,
F8 = SDL_SCANCODE_F8,
F9 = SDL_SCANCODE_F9,
F10 = SDL_SCANCODE_F10,
F11 = SDL_SCANCODE_F11,
F12 = SDL_SCANCODE_F12,
F13 = SDL_SCANCODE_F13,
F14 = SDL_SCANCODE_F14,
F15 = SDL_SCANCODE_F15,
F16 = SDL_SCANCODE_F16,
F17 = SDL_SCANCODE_F17,
F18 = SDL_SCANCODE_F18,
F19 = SDL_SCANCODE_F19,
F20 = SDL_SCANCODE_F20,
F21 = SDL_SCANCODE_F21,
F22 = SDL_SCANCODE_F22,
F23 = SDL_SCANCODE_F23,
F24 = SDL_SCANCODE_F24,
KP_0 = SDL_SCANCODE_KP_0,
KP_1 = SDL_SCANCODE_KP_1,
KP_2 = SDL_SCANCODE_KP_2,
KP_3 = SDL_SCANCODE_KP_3,
KP_4 = SDL_SCANCODE_KP_4,
KP_5 = SDL_SCANCODE_KP_5,
KP_6 = SDL_SCANCODE_KP_6,
KP_7 = SDL_SCANCODE_KP_7,
KP_8 = SDL_SCANCODE_KP_8,
KP_9 = SDL_SCANCODE_KP_9,
KP_DECIMAL = SDL_SCANCODE_KP_DECIMAL,
KP_DIVIDE = SDL_SCANCODE_KP_DIVIDE,
KP_MULTIPLY = SDL_SCANCODE_KP_MULTIPLY,
KP_MINUS = SDL_SCANCODE_KP_MINUS,
KP_PLUS = SDL_SCANCODE_KP_PLUS,
KP_ENTER = SDL_SCANCODE_KP_ENTER,
KP_EQUALS = SDL_SCANCODE_KP_EQUALS,
LSHIFT = SDL_SCANCODE_LSHIFT,
LCONTROL = SDL_SCANCODE_LCTRL,
LALT = SDL_SCANCODE_LALT,
LGUI = SDL_SCANCODE_LGUI,
RSHIFT = SDL_SCANCODE_RSHIFT,
RCONTROL = SDL_SCANCODE_RCTRL,
RALT = SDL_SCANCODE_RALT,
RGUI = SDL_SCANCODE_RGUI,
MENU = SDL_SCANCODE_MENU,
};
// </editor-fold>
struct KeyModifiers
{
bool leftShift : 1;
bool rightShift : 1;
bool leftCtrl : 1;
bool rightCtrl : 1;
bool leftAlt : 1;
bool rightAlt : 1;
bool leftMeta : 1;
bool rightMeta : 1;
};
struct InputEvent
{
};
struct KeyEvent : InputEvent
{
KeyCode keyCode;
ScanCode scanCode;
KeyModifiers modifiers;
bool down : 1;
bool repeat : 1;
};
enum class MouseButton
{
LEFT = SDL_BUTTON_LEFT,
MIDDLE = SDL_BUTTON_MIDDLE,
RIGHT = SDL_BUTTON_RIGHT,
EXTRA_1 = SDL_BUTTON_X1,
EXTRA_2 = SDL_BUTTON_X2
};
struct MouseMoveEvent
{
int relativeX;
int relativeY;
int absoluteX;
int absoluteY;
bool warped;
};
struct MouseWheelEvent
{
int relativeX;
int relativeY;
};
struct MouseButtonEvent
{
MouseButton button;
std::uint8_t clicks : 7;
bool down : 1;
};
struct TextInputEvent
{
std::string text;
};
struct KeyState
{
bool pressed;
};
[[nodiscard]] KeyState getKeyState(ScanCode scanCode) noexcept;
void captureMouse() noexcept;
void uncaptureMouse() noexcept;
[[nodiscard]] std::pair<int, int> getMouseScreenPosition() noexcept;
} // namespace iwa
#endif // !defined(IWA_INPUT_HPP_INCLUDED)

157
include/iwa/instance.hpp Normal file
View File

@@ -0,0 +1,157 @@
#pragma once
#ifndef IWA_VULKAN_INSTANCE_HPP_INCLUDED
#define IWA_VULKAN_INSTANCE_HPP_INCLUDED
#include <thread>
#include <typeindex>
#include <unordered_map>
#include <mijin/async/coroutine.hpp>
#include <mijin/util/bitflags.hpp>
#include <mijin/virtual_filesystem/stacked.hpp>
#include "iwa/object.hpp"
#include "iwa/device.hpp"
#include "iwa/window.hpp"
#include "iwa/vkwrapper.hpp"
#define IWA_DELETE_INSTANCE_OBJECT(instance, var, deleter) \
(instance)->queueDelete([handle=(var), inst=(instance)]() \
{ \
inst->getVkInstance().deleter(handle); \
})
#define IWA_CORO_ENSURE_MAIN_THREAD(instance) \
if (!(instance).isOnMainThread()) { \
co_await mijin::switchContext((instance).getMainTaskLoop()); \
}
namespace iwa
{
struct InstanceCreationFlags : mijin::BitFlags<InstanceCreationFlags>
{
std::uint8_t noDefaultExtensions : 1 = 0;
std::uint8_t noDefaultLayers : 1 = 0;
};
struct InstanceCreationArgs
{
vk::ApplicationInfo applicationInfo = {
.pApplicationName = "Iwa VulkanApplication",
.applicationVersion = 0,
.pEngineName = "Iwa Engine",
.engineVersion = 0,
.apiVersion = VK_MAKE_API_VERSION(0, 1, 3, 0)
};
std::vector<ExtensionInfo> extensions;
std::vector<LayerInfo> layers;
InstanceCreationFlags flags;
};
class InstanceExtension : public AbstractObject<InstanceExtension>
{
protected:
InstanceExtension() noexcept : super_t(nullptr) {}
};
class Instance : public Object<Instance>, public MixinVulkanObject<vk::Instance>
{
public:
using deleter_t = std::function<void()>;
private:
struct DeleteQueueEntry
{
deleter_t deleter;
int remainingFrames;
};
mijin::SimpleTaskLoop mMainTaskLoop;
mijin::MultiThreadedTaskLoop mWorkerTaskLoop;
mijin::StackedFileSystemAdapter mPrimaryFSAdapter;
std::vector<ExtensionInfo> mExtensions;
std::vector<LayerInfo> mLayers;
std::vector<PhysicalDeviceInfo> mPhysicalDevices;
std::vector<DeleteQueueEntry> mDeleteQueue;
std::unordered_map<std::type_index, ObjectPtr<InstanceExtension>> mInstanceExtensions;
vk::DebugUtilsMessengerEXT mDebugMessenger = VK_NULL_HANDLE;
bool mQuitRequested = false;
std::thread::id mMainThread;
public:
explicit Instance(InstanceCreationArgs args = {});
~Instance() noexcept override;
[[nodiscard]] const std::vector<PhysicalDeviceInfo>& getPhysicalDevices() const noexcept { return mPhysicalDevices; }
[[nodiscard]] inline bool isQuitRequested() const noexcept { return mQuitRequested; }
[[nodiscard]] inline mijin::SimpleTaskLoop& getMainTaskLoop() noexcept { return mMainTaskLoop; }
[[nodiscard]] inline mijin::MultiThreadedTaskLoop& getWorkerTaskLoop() noexcept { return mWorkerTaskLoop; }
[[nodiscard]] inline mijin::StackedFileSystemAdapter& getPrimaryFSAdapter() noexcept { return mPrimaryFSAdapter; }
[[nodiscard]] bool isExtensionEnabled(const char* name) const noexcept;
[[nodiscard]] bool isLayerEnabled(const char* name) const noexcept;
ObjectPtr<Window> createWindow(const WindowCreationArgs& args = {});
ObjectPtr<Device> createDevice(DeviceCreationArgs args = {});
void queueDelete(deleter_t deleter) noexcept;
void tickDeleteQueue();
void setMainThread();
void requestQuit() noexcept;
[[nodiscard]] bool isOnMainThread() const noexcept { return mMainThread == std::this_thread::get_id(); }
template<typename TFunction>
mijin::FuturePtr<std::invoke_result_t<TFunction>> runOnMainThread(TFunction&& function);
template<typename TExtension> requires (std::is_base_of_v<InstanceExtension, TExtension>)
[[nodiscard]] TExtension& getInstanceExtension()
{
ObjectPtr<InstanceExtension>& extensionPtr = mInstanceExtensions[typeid(TExtension)];
if (extensionPtr == nullptr)
{
extensionPtr = TExtension::create();
}
return static_cast<TExtension&>(*extensionPtr);
}
private:
void runDeleters(bool runAll = false);
public:
mijin::Signal<> quitRequested;
mijin::Signal<Device&> deviceCreated;
mijin::Signal<Window&> windowCreated;
};
template<typename TFunction>
mijin::FuturePtr<std::invoke_result_t<TFunction>> Instance::runOnMainThread(TFunction&& function)
{
using result_t = std::invoke_result_t<TFunction>;
if (std::this_thread::get_id() == mMainThread)
{
mijin::FuturePtr<result_t> future = std::make_shared<mijin::Future<result_t>>();
if constexpr (std::is_same_v<result_t, void>)
{
function();
future->set();
}
else
{
future->set(function());
}
return future;
}
return getMainTaskLoop().addTask([](std::function<result_t()> function) -> mijin::Task<result_t>
{
if constexpr (std::is_same_v<result_t, void>)
{
function();
co_return;
}
else
{
co_return function();
}
}(std::forward<TFunction>(function)));
}
}
#endif // IWA_VULKAN_INSTANCE_HPP_INCLUDED
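
Illustrative only (not part of the commit): hopping to the main thread via runOnMainThread from a worker thread; the lambda body is made up, and the returned future is assumed to become ready once the call has executed on the main task loop:

void requestQuitFromWorker(iwa::Instance& instance)
{
    mijin::FuturePtr<void> done = instance.runOnMainThread([&instance]()
    {
        instance.requestQuit();
    });
    (void) done;
}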

123
include/iwa/io/bitmap.hpp Normal file
View File

@@ -0,0 +1,123 @@
#pragma once
#if !defined(IWA_IO_BITMAP_HPP)
#define IWA_IO_BITMAP_HPP
#include <cstdint>
#include <filesystem>
#include <span>
#include <string>
#include <mijin/io/stream.hpp>
#include <mijin/util/hash.hpp>
#include <mijin/virtual_filesystem/filesystem.hpp>
#include "iwa/resource/bitmap.hpp"
namespace iwa
{
namespace fs = std::filesystem;
enum class ImageFormatType
{
UNORM = 0,
UINT = 1,
SRGB = 2
};
enum class BitmapCodec
{
NONE = 0,
PNG = 1,
JPEG = 2
};
struct BitmapLoadOptions
{
mijin::Stream* stream;
BitmapCodec codec = BitmapCodec::NONE;
ImageFormatType formatType = ImageFormatType::UNORM;
};
struct BitmapLoadFileOptions
{
mijin::PathReference path;
BitmapCodec codec = BitmapCodec::NONE;
ImageFormatType formatType = ImageFormatType::UNORM;
auto operator<=>(const BitmapLoadFileOptions&) const noexcept = default;
};
struct BitmapLoadMemoryOptions
{
std::span<std::uint8_t> data;
BitmapCodec codec = BitmapCodec::NONE;
ImageFormatType formatType = ImageFormatType::UNORM;
};
struct BitmapSaveOptions
{
std::string fileName = {};
BitmapCodec codec = BitmapCodec::NONE;
int jpegQuality = 90;
auto operator<=>(const BitmapSaveOptions&) const noexcept = default;
};
//
// public functions
//
[[nodiscard]] ObjectPtr<Bitmap> loadBitmap(const BitmapLoadOptions& options);
[[nodiscard]] inline ObjectPtr<Bitmap> loadBitmapFromFile(const BitmapLoadFileOptions& options)
{
std::unique_ptr<mijin::Stream> stream;
const mijin::StreamError error = options.path.open(mijin::FileOpenMode::READ, stream);
if (error != mijin::StreamError::SUCCESS) {
throw std::runtime_error("Error opening bitmap for reading.");
}
return loadBitmap({
.stream = stream.get(),
.codec = options.codec,
.formatType = options.formatType
});
}
[[nodiscard]] inline ObjectPtr<Bitmap> loadBitmapFromMemory(const BitmapLoadMemoryOptions& options)
{
mijin::MemoryStream stream;
stream.openRO(options.data);
return loadBitmap({
.stream = &stream,
.codec = options.codec,
.formatType = options.formatType
});
}
void saveBitmap(const Bitmap& bitmap, const BitmapSaveOptions& options);
constexpr BitmapCodec bitmapCodecFromMimeType(std::string_view mimeType)
{
if (mimeType == "image/png") {
return BitmapCodec::PNG;
}
if (mimeType == "image/jpeg") {
return BitmapCodec::JPEG;
}
return BitmapCodec::NONE;
}
} // namespace iwa
template<>
struct std::hash<iwa::BitmapLoadFileOptions>
{
std::size_t operator()(const iwa::BitmapLoadFileOptions& options) const noexcept
{
std::size_t hash = 0;
mijin::hashCombine(hash, options.path);
mijin::hashCombine(hash, options.codec);
mijin::hashCombine(hash, options.formatType);
return hash;
}
};
#endif // !defined(IWA_IO_BITMAP_HPP)
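
Illustrative only (not part of the commit): decoding an in-memory PNG through the helpers above; decodePng is a made-up name and the byte span is assumed to hold a complete PNG file:

iwa::ObjectPtr<iwa::Bitmap> decodePng(std::span<std::uint8_t> bytes)
{
    return iwa::loadBitmapFromMemory({
        .data = bytes,
        .codec = iwa::BitmapCodec::PNG // skip codec detection; formatType stays UNORM
    });
}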

55
include/iwa/io/font.hpp Normal file
View File

@@ -0,0 +1,55 @@
#pragma once
#if !defined(IWA_IO_FONT_HPP_INCLUDED)
#define IWA_IO_FONT_HPP_INCLUDED
#include <mijin/io/stream.hpp>
#include <mijin/virtual_filesystem/filesystem.hpp>
#include "iwa/resource/font.hpp"
namespace iwa
{
struct FontLoadOptions
{
mijin::Stream* stream;
float size = 20.f;
};
struct FontLoadFileOptions
{
mijin::PathReference path;
float size = 20.f;
auto operator<=>(const FontLoadFileOptions& other) const noexcept = default;
};
[[nodiscard]] ObjectPtr<Font> loadFont(const FontLoadOptions& options);
[[nodiscard]] inline ObjectPtr<Font> loadFontFromFile(const FontLoadFileOptions& options)
{
std::unique_ptr<mijin::Stream> stream;
const mijin::StreamError error = options.path.open(mijin::FileOpenMode::READ, stream);
if (error != mijin::StreamError::SUCCESS) {
throw std::runtime_error("Error opening font file.");
}
return loadFont({
.stream = stream.get(),
.size = options.size
});
}
} // namespace iwa
template<>
struct std::hash<iwa::FontLoadFileOptions>
{
std::size_t operator()(const iwa::FontLoadFileOptions& options) const noexcept
{
std::size_t hash = 0;
mijin::hashCombine(hash, options.path);
mijin::hashCombine(hash, options.size);
return hash;
}
};
#endif // !defined(IWA_IO_FONT_HPP_INCLUDED)

11
include/iwa/io/mesh.hpp Normal file
View File

@@ -0,0 +1,11 @@
#pragma once
#if !defined(IWA_IO_MESH_HPP_INCLUDED)
#define IWA_IO_MESH_HPP_INCLUDED
namespace iwa
{
} // namespace iwa
#endif // !defined(IWA_IO_MESH_HPP_INCLUDED)

57
include/iwa/log.hpp Normal file
View File

@@ -0,0 +1,57 @@
#pragma once
#ifndef IWA_LOG_HPP_INCLUDED
#define IWA_LOG_HPP_INCLUDED
#include <cstdio>
#include <fmt/format.h>
#include <fmt/ranges.h>
#include <mijin/debug/stacktrace.hpp>
#include <mijin/util/iterators.hpp>
namespace iwa
{
inline void logMsg(const std::string& msg) noexcept
{
std::puts(msg.c_str());
(void) std::fflush(stdout);
}
template<typename... TArgs>
inline void logMsg(std::string_view msg, TArgs&&... args) noexcept
{
fmt::print(stdout, fmt::runtime(msg), std::forward<TArgs>(args)...);
(void) std::fputc('\n', stdout);
(void) std::fflush(stdout);
}
inline void vlogMsg(std::string_view msg, fmt::format_args args)
{
fmt::vprint(stdout, msg, args);
(void) std::fputc('\n', stdout);
(void) std::fflush(stdout);
}
template<typename... TArgs>
[[noreturn]]
inline void logAndDie(std::string_view msg, TArgs&&... args) noexcept
{
fmt::print(stderr, fmt::runtime(msg), std::forward<TArgs>(args)...);
(void) std::fputc('\n', stderr);
(void) std::fflush(stderr);
std::abort();
}
template<typename... TArgs>
inline void logVerbose([[maybe_unused]] std::string_view msg, [[maybe_unused]] TArgs&&... args) noexcept
{
#if defined(KAZAN_VERBOSE_LOGGING)
std::printf("[VERBOSE] ");
logMsg(msg, std::forward<TArgs>(args)...);
#endif
}
}
#endif // IWA_LOG_HPP_INCLUDED
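
Illustrative only (not part of the commit): the formatted overload of logMsg with fmt-style placeholders; reportStartup and its arguments are made up:

void reportStartup(std::size_t assetCount, double elapsedMs)
{
    iwa::logMsg("loaded {} assets in {:.1f} ms", assetCount, elapsedMs);
}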

566
include/iwa/object.hpp Normal file
View File

@@ -0,0 +1,566 @@
#pragma once
#ifndef IWA_OBJECT_HPP_INCLUDED
#define IWA_OBJECT_HPP_INCLUDED
#include <atomic>
#include <cstdint>
#include <functional>
#include <list>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>
#include <mijin/debug/assert.hpp>
#if !defined(IWA_OBJECTPTR_TRACKING)
# if !defined(KAZAN_RELEASE)
# define IWA_OBJECTPTR_TRACKING 1
# else
# define IWA_OBJECTPTR_TRACKING 0
# endif
#endif
#define IWA_NEW_OBJECT(type, ...) (new type(__VA_ARGS__))->getPointer(/* skipCheck = */ true)
#if IWA_OBJECTPTR_TRACKING > 1
#include <mijin/debug/stacktrace.hpp>
#endif
namespace iwa
{
using object_id_t = std::uint64_t;
using object_destruction_handler_t = std::function<void()>;
class BaseObject;
template<typename TObject>
class WeakObjectPtr;
#if IWA_OBJECTPTR_TRACKING
namespace impl
{
struct ObjectPtrAllocation
{
class BaseObject* object;
#if IWA_OBJECTPTR_TRACKING > 1
mijin::Result<mijin::Stacktrace> stacktrace;
#endif
};
using objectptr_allocation_handle_t = std::optional<std::list<ObjectPtrAllocation>::iterator>;
#if IWA_OBJECTPTR_TRACKING > 1
objectptr_allocation_handle_t trackObjectPtr(class BaseObject* object, mijin::Result<mijin::Stacktrace>&& stacktrace) noexcept;
#else
objectptr_allocation_handle_t trackObjectPtr(class BaseObject* object) noexcept;
#endif
void untrackObjectPtr(objectptr_allocation_handle_t handle) noexcept;
}
#if IWA_OBJECTPTR_TRACKING > 1
#define IWA_DECLARE_OBJECTPTR_TRACKING() impl::objectptr_allocation_handle_t mAllocationHandle;
#define IWA_TRACK_OBJECTPTR() mAllocationHandle = impl::trackObjectPtr(mObject, mijin::captureStacktrace(1));
#define IWA_TRACK_OBJECTPTR_NEW() ptr.mAllocationHandle = impl::trackObjectPtr(ptr.mObject, mijin::captureStacktrace(1));
#define IWA_UNTRACK_OBJECTPTR() \
if (mAllocationHandle != impl::objectptr_allocation_handle_t()) \
{ \
impl::untrackObjectPtr(mAllocationHandle); \
mAllocationHandle = impl::objectptr_allocation_handle_t(); \
}
#define IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE() , mAllocationHandle(std::exchange(otherPtr.mAllocationHandle, impl::objectptr_allocation_handle_t()))
#define IWA_MOVE_ASSIGN_OBJECTPTR_HANDLE() mAllocationHandle = std::exchange(otherPtr.mAllocationHandle, impl::objectptr_allocation_handle_t());
#define IWA_UPDATE_OBJECTPTR_HANDLE() \
if (mAllocationHandle != impl::objectptr_allocation_handle_t()) \
{ \
(*mAllocationHandle)->stacktrace = mijin::captureStacktrace(1); \
}
#else
#define IWA_DECLARE_OBJECTPTR_TRACKING() impl::objectptr_allocation_handle_t mAllocationHandle;
#define IWA_TRACK_OBJECTPTR() mAllocationHandle = impl::trackObjectPtr(mObject);
#define IWA_TRACK_OBJECTPTR_NEW() ptr.mAllocationHandle = impl::trackObjectPtr(ptr.mObject);
#define IWA_UNTRACK_OBJECTPTR() \
if (mAllocationHandle != impl::objectptr_allocation_handle_t()) \
{ \
impl::untrackObjectPtr(mAllocationHandle); \
mAllocationHandle = impl::objectptr_allocation_handle_t(); \
}
#define IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE() , mAllocationHandle(std::exchange(otherPtr.mAllocationHandle, impl::objectptr_allocation_handle_t()))
#define IWA_MOVE_ASSIGN_OBJECTPTR_HANDLE() mAllocationHandle = std::exchange(otherPtr.mAllocationHandle, impl::objectptr_allocation_handle_t());
#define IWA_UPDATE_OBJECTPTR_HANDLE()
#endif
#else // IWA_OBJECTPTR_TRACKING
#define IWA_DECLARE_OBJECTPTR_TRACKING()
#define IWA_TRACK_OBJECTPTR()
#define IWA_TRACK_OBJECTPTR_NEW()
#define IWA_UNTRACK_OBJECTPTR()
#define IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE()
#define IWA_MOVE_ASSIGN_OBJECTPTR_HANDLE()
#define IWA_UPDATE_OBJECTPTR_HANDLE()
#endif // else IWA_OBJECTPTR_TRACKING
template<typename TObject = class BaseObject>
class ObjectPtr
{
private:
TObject* mObject = nullptr;
IWA_DECLARE_OBJECTPTR_TRACKING()
private:
// ObjectPtr(TObject* object) noexcept : mObject(object) {}
public:
/** default construction */
ObjectPtr() = default;
/** nullptr initialization */
ObjectPtr(std::nullptr_t) noexcept : mObject(nullptr) {}
/** copy */
ObjectPtr(const ObjectPtr& otherPtr) : mObject(otherPtr.mObject)
{
if (mObject != nullptr)
{
mObject->increaseReferenceCount();
IWA_TRACK_OBJECTPTR()
}
}
/** move */
ObjectPtr(ObjectPtr&& otherPtr) noexcept : mObject(std::exchange(otherPtr.mObject, nullptr))
IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE()
{
IWA_UPDATE_OBJECTPTR_HANDLE()
}
/** implicit conversion */
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
ObjectPtr(const ObjectPtr<TOtherObject>& otherPtr) noexcept : mObject(otherPtr.mObject)
{
if (mObject != nullptr)
{
mObject->increaseReferenceCount();
IWA_TRACK_OBJECTPTR()
}
}
/** implicit conversion */
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
ObjectPtr(ObjectPtr<TOtherObject>&& otherPtr) noexcept : mObject(std::exchange(otherPtr.mObject, nullptr))
IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE()
{
IWA_UPDATE_OBJECTPTR_HANDLE()
}
/** explicit conversion */
template<typename TOtherObject> requires(!std::is_same_v<TObject, TOtherObject> && std::is_base_of_v<TOtherObject, TObject>)
explicit ObjectPtr(const ObjectPtr<TOtherObject>& otherPtr) noexcept : mObject(static_cast<TObject*>(otherPtr.mObject))
{
if (mObject != nullptr)
{
mObject->increaseReferenceCount();
IWA_TRACK_OBJECTPTR()
}
}
/** explicit conversion */
template<typename TOtherObject> requires(!std::is_same_v<TObject, TOtherObject> && std::is_base_of_v<TOtherObject, TObject>)
explicit ObjectPtr(ObjectPtr<TOtherObject>&& otherPtr) noexcept : mObject(static_cast<TObject*>(std::exchange(otherPtr.mObject, nullptr)))
IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE()
{
IWA_UPDATE_OBJECTPTR_HANDLE()
}
~ObjectPtr() noexcept
{
if (mObject != nullptr)
{
mObject->decreaseReferenceCount();
}
IWA_UNTRACK_OBJECTPTR()
}
ObjectPtr& operator=(const ObjectPtr& otherPtr) noexcept
{
if (this != &otherPtr)
{
if (mObject != nullptr)
{
mObject->decreaseReferenceCount();
}
IWA_UNTRACK_OBJECTPTR()
mObject = otherPtr.mObject;
if (mObject != nullptr)
{
mObject->increaseReferenceCount();
IWA_TRACK_OBJECTPTR()
}
}
return *this;
}
ObjectPtr& operator=(ObjectPtr&& otherPtr) noexcept
{
if (this != &otherPtr)
{
if (mObject != nullptr)
{
mObject->decreaseReferenceCount();
}
IWA_UNTRACK_OBJECTPTR()
mObject = std::exchange(otherPtr.mObject, nullptr);
IWA_MOVE_ASSIGN_OBJECTPTR_HANDLE()
IWA_UPDATE_OBJECTPTR_HANDLE()
}
return *this;
}
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
ObjectPtr& operator=(const ObjectPtr<TOtherObject>& otherPtr) noexcept
{
if (static_cast<const void*>(this) != static_cast<const void*>(&otherPtr))
{
if (mObject != nullptr)
{
mObject->decreaseReferenceCount();
}
IWA_UNTRACK_OBJECTPTR()
mObject = otherPtr.mObject;
if (mObject != nullptr)
{
mObject->increaseReferenceCount();
IWA_TRACK_OBJECTPTR()
}
}
return *this;
}
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
ObjectPtr& operator=(ObjectPtr<TOtherObject>&& otherPtr) noexcept
{
if (static_cast<void*>(this) != static_cast<void*>(&otherPtr))
{
if (mObject != nullptr)
{
mObject->decreaseReferenceCount();
}
IWA_UNTRACK_OBJECTPTR()
mObject = std::exchange(otherPtr.mObject, nullptr);
IWA_MOVE_ASSIGN_OBJECTPTR_HANDLE()
IWA_UPDATE_OBJECTPTR_HANDLE()
}
return *this;
}
ObjectPtr& operator=(std::nullptr_t) noexcept
{
if (mObject != nullptr)
{
mObject->decreaseReferenceCount();
}
IWA_UNTRACK_OBJECTPTR()
mObject = nullptr;
return *this;
}
[[nodiscard]] bool operator!() const noexcept { return mObject == nullptr; }
[[nodiscard]] TObject& operator*() const noexcept { return *mObject; }
[[nodiscard]] TObject* operator->() const noexcept { return mObject; }
[[nodiscard]] auto operator<=>(const ObjectPtr&) const noexcept = default;
[[nodiscard]] operator bool() const noexcept { return mObject != nullptr; }
[[nodiscard]] bool operator==(std::nullptr_t) const noexcept { return mObject == nullptr; }
[[nodiscard]] bool operator!=(std::nullptr_t) const noexcept { return mObject != nullptr; }
#define MAKE_OPERATOR(op) \
template<typename TOtherObject> requires (std::is_base_of_v<TObject, TOtherObject> || std::is_base_of_v<TOtherObject, TObject>) \
auto operator op (const ObjectPtr<TOtherObject>& otherPtr) const noexcept \
{ \
return mObject op otherPtr.mObject; \
}
MAKE_OPERATOR(<=>)
MAKE_OPERATOR(==)
MAKE_OPERATOR(!=)
MAKE_OPERATOR(<)
MAKE_OPERATOR(<=)
MAKE_OPERATOR(>)
MAKE_OPERATOR(>=)
#undef MAKE_OPERATOR
[[nodiscard]] TObject* getRaw() const noexcept { return mObject; }
[[nodiscard]] WeakObjectPtr<TObject> makeWeak() const noexcept;
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
[[nodiscard]] ObjectPtr<TOtherObject> dynamicCast() const noexcept;
friend class BaseObject;
template<typename TConcrete, typename TBase, typename TOwner>
friend class Object;
template<typename TOtherObject>
friend class ObjectPtr;
};
namespace impl
{
[[nodiscard]] object_id_t nextObjectId() noexcept;
void registerObject(BaseObject* object) noexcept;
void unregisterObject(BaseObject* object) noexcept;
[[nodiscard]] ObjectPtr<BaseObject> getRegisteredObject(object_id_t objectId) noexcept;
}
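/**
 * Root of the reference-counted object hierarchy. Every object receives a unique
 * id and registers itself in a global registry, so that WeakObjectPtr can later
 * resolve the id back to a live object via impl::getRegisteredObject().
 */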
class BaseObject
{
public:
using owner_t = BaseObject;
private:
object_id_t mId = 0;
std::atomic_uint32_t mReferenceCount = 1;
protected:
ObjectPtr<BaseObject> mOwner = nullptr;
#if !defined(KAZAN_RELEASE)
// std::vector<BaseObject*> mChildren;
#endif
protected:
explicit BaseObject(ObjectPtr<BaseObject> owner = nullptr) noexcept : mId(impl::nextObjectId()), mOwner(std::move(owner))
{
impl::registerObject(this);
}
public:
BaseObject(const BaseObject&) = delete;
BaseObject(BaseObject&&) = delete;
virtual ~BaseObject() noexcept
{
impl::unregisterObject(this);
}
BaseObject& operator=(const BaseObject&) = delete;
BaseObject& operator=(BaseObject&&) = delete;
[[nodiscard]] inline bool canCreatePointer() const noexcept { return mReferenceCount > 0; }
[[nodiscard]] inline object_id_t getId() const noexcept { return mId; }
[[nodiscard]] inline ObjectPtr<BaseObject> getPointer(bool skipCheck = false) noexcept;
[[nodiscard]] inline WeakObjectPtr<BaseObject> getWeakPointer() noexcept;
protected:
inline void increaseReferenceCount() noexcept
{
++mReferenceCount;
}
inline void decreaseReferenceCount() noexcept
{
if (--mReferenceCount == 0)
{
delete this;
}
}
[[nodiscard]] inline std::uint32_t getReferenceCount() const noexcept { return mReferenceCount; }
template<typename TObject>
friend class ObjectPtr;
};
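/**
 * CRTP convenience base: adds typed getPointer()/getWeakPointer(), a typed owner
 * accessor, createChild() and the static create() factory that adopts the initial
 * reference (see Object::create() further below).
 */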
template<typename TConcrete, typename TBase = BaseObject, typename TOwner = TBase::owner_t>
class Object : public TBase
{
public:
using super_t = Object<TConcrete, TBase, TOwner>;
using owner_t = TOwner;
protected:
template<typename... TArgs>
explicit Object(TArgs&&... args) : TBase(std::forward<TArgs>(args)...) {}
public:
[[nodiscard]] TOwner* getOwner() const noexcept { return static_cast<TOwner*>(TBase::mOwner.getRaw()); }
[[nodiscard]] ObjectPtr<TConcrete> getPointer(bool skipCheck = false) noexcept { return static_cast<ObjectPtr<TConcrete>>(BaseObject::getPointer(skipCheck)); }
[[nodiscard]] WeakObjectPtr<TConcrete> getWeakPointer() noexcept { return static_cast<WeakObjectPtr<TConcrete>>(BaseObject::getWeakPointer()); }
inline ObjectPtr<TConcrete> makeUnique()
{
if (TBase::getReferenceCount() == 1)
{
return getPointer();
}
else
{
return static_cast<TConcrete*>(this)->clone();
}
}
template<typename TObject, typename... TArgs>
ObjectPtr<TObject> createChild(TArgs&&... args)
{
ObjectPtr<TObject> child = TObject::create(static_cast<TConcrete*>(this)->getPointer(), std::forward<TArgs>(args)...);
#if !defined(KAZAN_RELEASE)
// mChildren.push_back(&*child);
#endif
return child;
}
public:
template<typename... TArgs>
static ObjectPtr<TConcrete> create(TArgs&&... args) noexcept;
};
template<typename TConcrete, typename TBase = BaseObject, typename TOwner = TBase::owner_t>
class AbstractObject : public TBase
{
public:
using super_t = AbstractObject<TConcrete, TBase, TOwner>;
using owner_t = TOwner;
protected:
template<typename... TArgs>
explicit AbstractObject(TArgs&&... args) noexcept : TBase(std::forward<TArgs>(args)...) {}
public:
[[nodiscard]] TOwner* getOwner() const noexcept { return static_cast<TOwner*>(TBase::mOwner.getRaw()); }
[[nodiscard]] ObjectPtr<TConcrete> getPointer(bool skipCheck = false) noexcept { return static_cast<ObjectPtr<TConcrete>>(BaseObject::getPointer(skipCheck)); }
[[nodiscard]] WeakObjectPtr<TConcrete> getWeakPointer() noexcept { return static_cast<WeakObjectPtr<TConcrete>>(BaseObject::getWeakPointer()); }
};
template<typename TObject>
class WeakObjectPtr;
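/**
 * Non-owning handle that stores only the object id; pin() looks the id up in the
 * global registry and returns a (possibly null) ObjectPtr.
 */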
template<typename TObject>
class WeakObjectPtr
{
private:
object_id_t mId = 0;
public:
/** default construction */
WeakObjectPtr() = default;
/** copy */
WeakObjectPtr(const WeakObjectPtr&) = default;
/** implicit conversion */
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
WeakObjectPtr(const WeakObjectPtr<TOtherObject>& otherPtr) noexcept : mId(otherPtr.mId) {}
/** explicit conversion */
template<typename TOtherObject> requires(!std::is_same_v<TObject, TOtherObject> && std::is_base_of_v<TOtherObject, TObject>)
explicit WeakObjectPtr(const WeakObjectPtr<TOtherObject>& otherPtr) noexcept : mId(otherPtr.mId) {}
/** construction from a regular ObjectPtr */
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
WeakObjectPtr(const ObjectPtr<TOtherObject>& otherPtr) noexcept : mId(otherPtr ? otherPtr->getId() : 0) {}
/** construction directly from the object */
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
WeakObjectPtr(const TOtherObject& object) noexcept : mId(object.getId()) {}
WeakObjectPtr& operator=(const WeakObjectPtr&) noexcept = default;
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
WeakObjectPtr& operator=(const WeakObjectPtr<TOtherObject>& otherPtr) noexcept
{
mId = otherPtr.mId;
return *this;
}
WeakObjectPtr& operator=(std::nullptr_t) noexcept
{
mId = 0;
return *this;
}
[[nodiscard]] bool operator!() const noexcept { return mId == 0; }
[[nodiscard]] auto operator<=>(const WeakObjectPtr&) const noexcept = default;
[[nodiscard]] bool operator==(std::nullptr_t) const noexcept { return mId == 0; }
[[nodiscard]] bool operator!=(std::nullptr_t) const noexcept { return mId != 0; }
[[nodiscard]] operator bool() const noexcept { return mId != 0; }
#define MAKE_OPERATOR(op) \
template<typename TOtherObject> requires (std::is_base_of_v<TObject, TOtherObject> || std::is_base_of_v<TOtherObject, TObject>) \
auto operator op (const WeakObjectPtr<TOtherObject>& otherPtr) const noexcept \
{ \
return mId op otherPtr.mId; \
}
MAKE_OPERATOR(<=>)
MAKE_OPERATOR(==)
MAKE_OPERATOR(!=)
MAKE_OPERATOR(<)
MAKE_OPERATOR(<=)
MAKE_OPERATOR(>)
MAKE_OPERATOR(>=)
#undef MAKE_OPERATOR
[[nodiscard]] object_id_t getId() const noexcept { return mId; }
[[nodiscard]] ObjectPtr<TObject> pin() const noexcept
{
return static_cast<ObjectPtr<TObject>>(impl::getRegisteredObject(mId));
}
template<typename TOtherObject>
friend class WeakObjectPtr;
};
ObjectPtr<BaseObject> BaseObject::getPointer([[maybe_unused]] bool skipCheck) noexcept
{
// MIJIN_ASSERT(skipCheck || canCreatePointer(), "Cannot create an object pointer for an object that has not been created using ::create()!");
if (!skipCheck)
{
increaseReferenceCount();
}
ObjectPtr<BaseObject> ptr;
ptr.mObject = this;
IWA_TRACK_OBJECTPTR_NEW();
return ptr;
}
WeakObjectPtr<BaseObject> BaseObject::getWeakPointer() noexcept
{
return WeakObjectPtr<BaseObject>(*this);
}
template<typename TObject>
WeakObjectPtr<TObject> ObjectPtr<TObject>::makeWeak() const noexcept
{
return WeakObjectPtr<TObject>(*this);
}
template<typename TObject>
template<typename TOtherObject> requires(std::is_base_of_v<TObject, TOtherObject>)
ObjectPtr<TOtherObject> ObjectPtr<TObject>::dynamicCast() const noexcept
{
if (dynamic_cast<const TOtherObject*>(mObject)) {
return ObjectPtr<TOtherObject>(*this);
}
return nullptr;
}
template<typename TConcrete, typename TBase, typename TOwner>
template<typename... TArgs>
ObjectPtr<TConcrete> Object<TConcrete, TBase, TOwner>::create(TArgs&&... args) noexcept
{
return (new TConcrete(std::forward<TArgs>(args)...))->getPointer(/* skipCheck = */ true);
}
void registerObjectDestructionHandler(const BaseObject& object, object_destruction_handler_t handler) noexcept;
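// Usage sketch (illustrative only; MyThing is a hypothetical user type):
//     class MyThing : public Object<MyThing> { /* ... */ };
//     ObjectPtr<MyThing> thing = MyThing::create();
//     WeakObjectPtr<MyThing> weak = thing.makeWeak();
//     if (ObjectPtr<MyThing> pinned = weak.pin()) { /* object is still alive */ }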
} // namespace iwa
template<typename TObject>
struct std::hash<iwa::ObjectPtr<TObject>> // NOLINT false positive
{
std::size_t operator()(const iwa::ObjectPtr<TObject>& ptr) const noexcept
{
return std::hash<void*>()(ptr.getRaw());
}
};
template<typename TObject>
struct std::hash<iwa::WeakObjectPtr<TObject>> // NOLINT false positive
{
std::size_t operator()(const iwa::WeakObjectPtr<TObject>& ptr) const noexcept
{
return std::hash<iwa::object_id_t>()(ptr.getId());
}
};
#undef IWA_DECLARE_OBJECTPTR_TRACKING
#undef IWA_TRACK_OBJECTPTR
#undef IWA_TRACK_OBJECTPTR_NEW
#undef IWA_UNTRACK_OBJECTPTR
#undef IWA_MOVE_CONSTRUCT_OBJECTPTR_HANDLE
#undef IWA_MOVE_ASSIGN_OBJECTPTR_HANDLE
#undef IWA_UPDATE_OBJECTPTR_HANDLE
#endif // IWA_OBJECT_HPP_INCLUDED

136
include/iwa/pipeline.hpp Normal file
View File

@ -0,0 +1,136 @@
#pragma once
#if !defined(IWA_PIPELINE_HPP_INCLUDED)
#define IWA_PIPELINE_HPP_INCLUDED
#include <optional>
#include "iwa/descriptor_set.hpp"
#include "iwa/object.hpp"
#include "iwa/render_pass.hpp"
#include "iwa/shader_module.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
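// Ready-made color blend attachment states: DEFAULT_BLEND_ATTACHMENT enables
// conventional source-alpha blending, DISABLED_BLEND_ATTACHMENT writes all four
// channels with blending turned off.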
inline constexpr vk::PipelineColorBlendAttachmentState DEFAULT_BLEND_ATTACHMENT =
{
.blendEnable = VK_TRUE,
.srcColorBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha,
.colorBlendOp = vk::BlendOp::eAdd,
.srcAlphaBlendFactor = vk::BlendFactor::eSrcAlpha,
.dstAlphaBlendFactor = vk::BlendFactor::eDstAlpha,
.alphaBlendOp = vk::BlendOp::eAdd,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG
| vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA
};
inline constexpr vk::PipelineColorBlendAttachmentState DISABLED_BLEND_ATTACHMENT =
{
.blendEnable = VK_FALSE,
.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG
| vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA
};
struct PipelineLayoutCreationArgs
{
std::vector<ObjectPtr<DescriptorSetLayout>> setLayouts;
std::vector<vk::PushConstantRange> pushConstantRanges;
vk::PipelineLayoutCreateFlags flags = {};
};
class PipelineLayout : public Object<PipelineLayout, BaseObject, class Device>, public MixinVulkanObject<vk::PipelineLayout>
{
public:
PipelineLayout(ObjectPtr<class Device> owner, const PipelineLayoutCreationArgs& args);
~PipelineLayout() noexcept override;
};
class Pipeline : public AbstractObject<Pipeline, BaseObject, class Device>, public MixinVulkanObject<vk::Pipeline>
{
protected:
explicit Pipeline(ObjectPtr<class Device> owner) noexcept;
public:
~Pipeline() noexcept override;
};
struct PipelineStage
{
ObjectPtr<ShaderModule> shader;
vk::ShaderStageFlagBits stage;
std::string name = "main";
};
struct GraphicsPipelineRenderingInfo
{
std::uint32_t viewMask = 0;
std::vector<vk::Format> colorAttachmentFormats;
vk::Format depthFormat = vk::Format::eUndefined;
vk::Format stencilFormat = vk::Format::eUndefined;
};
struct VertexInput
{
std::vector<vk::VertexInputBindingDescription> bindings;
std::vector<vk::VertexInputAttributeDescription> attributes;
};
struct GraphicsPipelineCreationArgs
{
std::vector<PipelineStage> stages;
VertexInput vertexInput;
vk::PipelineInputAssemblyStateCreateInfo inputAssembly = { .topology = vk::PrimitiveTopology::eTriangleList };
vk::PipelineViewportStateCreateInfo viewport = { .viewportCount = 1, .scissorCount = 1 };
vk::PipelineRasterizationStateCreateInfo rasterization =
{ .rasterizerDiscardEnable = VK_FALSE, .polygonMode = vk::PolygonMode::eFill, .cullMode = vk::CullModeFlagBits::eBack,
.frontFace = vk::FrontFace::eCounterClockwise, .depthBiasEnable = VK_FALSE, .lineWidth = 1.f };
vk::PipelineMultisampleStateCreateInfo multisample =
{ .rasterizationSamples = vk::SampleCountFlagBits::e1, .sampleShadingEnable = VK_FALSE, .minSampleShading = 0.2f,
.pSampleMask = nullptr, .alphaToCoverageEnable = VK_FALSE, .alphaToOneEnable = VK_FALSE };
vk::PipelineDepthStencilStateCreateInfo depthStencil =
{ .depthTestEnable = false, .depthWriteEnable = false, .depthCompareOp = vk::CompareOp::eLess,
.depthBoundsTestEnable = VK_FALSE, .minDepthBounds = 0.0, .maxDepthBounds = 1.0 };
struct
{
std::optional<vk::LogicOp> logicOp;
std::vector<vk::PipelineColorBlendAttachmentState> attachements;
} colorBlend;
std::vector<vk::DynamicState> dynamicState = {vk::DynamicState::eViewport, vk::DynamicState::eScissor};
std::optional<GraphicsPipelineRenderingInfo> renderingInfo;
ObjectPtr<PipelineLayout> layout;
ObjectPtr<RenderPass> renderPass;
std::uint32_t subpass = 0;
};
class GraphicsPipeline : public Object<GraphicsPipeline, Pipeline>
{
public:
GraphicsPipeline(ObjectPtr<class Device> owner, const GraphicsPipelineCreationArgs& args) noexcept;
};
struct ComputePipelineCreationArgs
{
PipelineStage stage;
ObjectPtr<PipelineLayout> layout;
};
class ComputePipeline : public Object<ComputePipeline, Pipeline>
{
public:
ComputePipeline(ObjectPtr<class Device> owner, const ComputePipelineCreationArgs& args) noexcept;
};
struct RayTracingPipelineCreationArgs
{
};
class RayTracingPipeline : public Object<RayTracingPipeline, Pipeline>
{
public:
RayTracingPipeline(ObjectPtr<class Device> owner, const RayTracingPipelineCreationArgs& args) noexcept;
};
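// Usage sketch (illustrative; device, renderPass, layout and the shader modules
// are assumed to exist elsewhere):
//     GraphicsPipelineCreationArgs args;
//     args.stages = {{.shader = vertexModule, .stage = vk::ShaderStageFlagBits::eVertex},
//                    {.shader = fragmentModule, .stage = vk::ShaderStageFlagBits::eFragment}};
//     args.colorBlend.attachements = {DEFAULT_BLEND_ATTACHMENT};
//     args.layout = layout;
//     args.renderPass = renderPass;
//     ObjectPtr<GraphicsPipeline> pipeline = GraphicsPipeline::create(device, args);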
} // namespace iwa
#endif // !defined(IWA_PIPELINE_HPP_INCLUDED)

View File

@ -0,0 +1,59 @@
#pragma once
#if !defined(IWA_RENDER_PASS_HPP_INCLUDED)
#define IWA_RENDER_PASS_HPP_INCLUDED
#include <optional>
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct SubpassDescription
{
vk::SubpassDescriptionFlags flags = {};
vk::PipelineBindPoint pipelineBindPoint;
std::vector<vk::AttachmentReference> inputAttachments;
std::vector<vk::AttachmentReference> colorAttachments;
std::vector<vk::AttachmentReference> resolveAttachments;
std::optional<vk::AttachmentReference> depthStencilAttachment;
std::vector<std::uint32_t> preserveAttachments;
};
struct RenderPassCreationArgs
{
vk::RenderPassCreateFlags flags = {};
std::vector<vk::AttachmentDescription> attachments;
std::vector<SubpassDescription> subpasses;
std::vector<vk::SubpassDependency> dependencies;
};
class RenderPass : public Object<RenderPass, BaseObject, class Device>, public MixinVulkanObject<vk::RenderPass>
{
public:
RenderPass(ObjectPtr<class Device> owner, const RenderPassCreationArgs& args);
~RenderPass() noexcept override;
};
struct FramebufferCreationArgs
{
vk::FramebufferCreateFlags flags = {};
ObjectPtr<RenderPass> renderPass;
std::vector<ObjectPtr<class ImageView>> attachments;
std::uint32_t width = 0;
std::uint32_t height = 0;
std::uint32_t layers = 1;
};
class Framebuffer : public Object<Framebuffer, BaseObject, class Device>, public MixinVulkanObject<vk::Framebuffer>
{
private:
std::vector<ObjectPtr<class ImageView>> mImageViews;
public:
Framebuffer(ObjectPtr<class Device> owner, const FramebufferCreationArgs& args);
~Framebuffer() noexcept override;
};
} // namespace iwa
#endif // !defined(IWA_RENDER_PASS_HPP_INCLUDED)

View File

@ -0,0 +1,73 @@
#pragma once
#if !defined(IWA_RESOURCE_BITMAP_HPP_INCLUDED)
#define IWA_RESOURCE_BITMAP_HPP_INCLUDED
#include <memory>
#include <span>
#include <vector>
#include <glm/vec4.hpp>
#include <mijin/container/optional.hpp>
#include <mijin/container/typeless_buffer.hpp>
#include "../object.hpp"
#include "../vkwrapper.hpp"
namespace iwa
{
enum class ColorChannel
{
R = 0,
G = 1,
B = 2,
A = 3
};
struct ChannelMapping
{
ColorChannel from;
ColorChannel to;
};
struct BitmapCreationArgs
{
vk::Format format;
vk::Extent2D size;
mijin::Optional<mijin::TypelessBuffer> initialData;
};
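/**
 * CPU-side pixel storage in a Vulkan format with per-pixel access and simple
 * fill/multiply/channel-copy operations; format handling is delegated to an
 * internal BitmapViewBase created by createView().
 */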
class Bitmap : public Object<Bitmap>
{
private:
mijin::TypelessBuffer mData;
vk::Format mFormat;
vk::Extent2D mSize;
std::unique_ptr<class BitmapViewBase> mView;
public:
explicit Bitmap(BitmapCreationArgs args, ObjectPtr<BaseObject> owner = nullptr);
~Bitmap() override;
// properties
inline vk::Format getFormat() const { return mFormat; }
inline vk::Extent2D getSize() const { return mSize; }
inline std::span<std::uint8_t> getData() { return mData.makeSpan<std::uint8_t>(); }
inline std::span<const std::uint8_t> getData() const { return mData.makeSpan<const std::uint8_t>(); }
// access
// TODO: maybe add accessors for whole rows or the whole image?
[[nodiscard]] glm::vec4 getPixel(unsigned x, unsigned y) const;
[[nodiscard]] std::vector<glm::vec4> getPixels(unsigned x, unsigned y, unsigned width, unsigned height) const;
[[nodiscard]] inline std::vector<glm::vec4> getAllPixels() const {
return getPixels(0, 0, mSize.width, mSize.height);
}
// drawing
void fill(const glm::vec4& color);
void copyChannels(const Bitmap& other, const std::vector<ChannelMapping>& mappings);
void multiply(const glm::vec4& color);
private:
void createView();
};
} // namespace iwa
#endif // !defined(IWA_RESOURCE_BITMAP_HPP_INCLUDED)

View File

@ -0,0 +1,55 @@
#pragma once
#if !defined(IWA_RESOURCE_FONT_HPP_INCLUDED)
#define IWA_RESOURCE_FONT_HPP_INCLUDED
#include <unordered_map>
#include <glm/vec2.hpp>
#include "iwa/object.hpp"
#include "iwa/resource/bitmap.hpp"
namespace iwa
{
struct GlyphInfo
{
glm::vec2 uvPos0;
glm::vec2 uvPos1;
float xOffsetBefore;
float xOffsetAfter;
float yOffsetBefore;
float yOffsetAfter;
float xAdvance;
};
struct FontMetrics
{
float ascent;
float descent;
float lineGap;
float sizeFactor;
};
struct FontCreationArgs
{
ObjectPtr<Bitmap> bitmap;
std::unordered_map<char32_t, GlyphInfo> glyphMap;
FontMetrics metrics;
};
class Font : public Object<Font>
{
private:
ObjectPtr<Bitmap> mBitmap;
std::unordered_map<char32_t, GlyphInfo> mGlyphMap;
FontMetrics mMetrics;
public:
explicit Font(FontCreationArgs args);
[[nodiscard]] const ObjectPtr<Bitmap>& getBitmap() const noexcept { return mBitmap; }
[[nodiscard]] const std::unordered_map<char32_t, GlyphInfo>& getGlyphMap() const noexcept { return mGlyphMap; }
[[nodiscard]] const FontMetrics& getMetrics() const noexcept { return mMetrics; }
};
} // namespace iwa
#endif // !defined(IWA_RESOURCE_FONT_HPP_INCLUDED)

25
include/iwa/semaphore.hpp Normal file
View File

@ -0,0 +1,25 @@
#pragma once
#if !defined(IWA_SEMAPHORE_HPP_INCLUDED)
#define IWA_SEMAPHORE_HPP_INCLUDED
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct SemaphoreCreationArgs
{
vk::SemaphoreCreateFlags flags = {};
};
class Semaphore : public Object<Semaphore, BaseObject, class Device>, public MixinVulkanObject<vk::Semaphore>
{
public:
Semaphore(ObjectPtr<class Device> owner, const SemaphoreCreationArgs& args = {});
~Semaphore() noexcept override;
};
}
#endif // !defined(IWA_SEMAPHORE_HPP_INCLUDED)

View File

@ -0,0 +1,27 @@
#pragma once
#if !defined(IWA_SHADER_MODULE_HPP_INCLUDED)
#define IWA_SHADER_MODULE_HPP_INCLUDED
#include <span>
#include <mijin/container/typeless_buffer.hpp>
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct ShaderModuleCreationArgs
{
std::span<const std::uint32_t> code;
};
class ShaderModule : public Object<ShaderModule, BaseObject, class Device>, public MixinVulkanObject<vk::ShaderModule>
{
public:
ShaderModule(ObjectPtr<class Device> owner, const ShaderModuleCreationArgs& args);
~ShaderModule() noexcept override;
};
} // namespace iwa
#endif // !defined(IWA_SHADER_MODULE_HPP_INCLUDED)

68
include/iwa/swapchain.hpp Normal file
View File

@ -0,0 +1,68 @@
#pragma once
#ifndef IWA_SWAPCHAIN_HPP_INCLUDED
#define IWA_SWAPCHAIN_HPP_INCLUDED
#include <limits>
#include <vector>
#include <mijin/async/signal.hpp>
#include "iwa/image.hpp"
#include "iwa/object.hpp"
#include "iwa/semaphore.hpp"
#include "iwa/vkwrapper.hpp"
#include "iwa/window.hpp"
namespace iwa
{
struct SwapchainCreationArgs
{
ObjectPtr<Window> window;
std::uint32_t parallelFrames = 3;
vk::ImageUsageFlags imageUsage = vk::ImageUsageFlagBits::eColorAttachment;
};
struct PresentArgs
{
vk::Queue queue;
std::vector<vk::Semaphore> waitSemaphores;
};
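/**
 * Wraps vk::SwapchainKHR together with its images and one image-available
 * semaphore per parallel frame; the `recreated` signal notifies listeners
 * whenever the swapchain is rebuilt.
 */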
class Swapchain : public Object<Swapchain, BaseObject, class Device>, public MixinVulkanObject<vk::SwapchainKHR>
{
private:
static inline std::uint32_t INVALID_IMAGE_INDEX = std::numeric_limits<std::uint32_t>::max();
std::uint32_t mCurrentImageIdx = INVALID_IMAGE_INDEX;
unsigned mCurrentFrameIdx = 0;
ObjectPtr<Window> mWindow;
std::vector<ObjectPtr<Image>> mImages;
std::vector<ObjectPtr<Semaphore>> mImageAvailableSemaphores;
const vk::ImageUsageFlags mImageUsage;
vk::Format mFormat = vk::Format::eUndefined;
vk::Extent2D mExtent;
public:
Swapchain(ObjectPtr<class Device> owner, SwapchainCreationArgs args);
~Swapchain() noexcept override;
[[nodiscard]] const ObjectPtr<Window>& getWindow() const noexcept { return mWindow; }
[[nodiscard]] std::size_t getNumParallelFrames() const noexcept { return mImageAvailableSemaphores.size(); }
[[nodiscard]] const ObjectPtr<Semaphore>& getCurrentAvailableSemaphore() const noexcept { return mImageAvailableSemaphores[mCurrentFrameIdx]; }
[[nodiscard]] unsigned getCurrentFrameIdx() const noexcept { return mCurrentFrameIdx; }
[[nodiscard]] const std::vector<ObjectPtr<Image>>& getImages() const noexcept { return mImages; }
[[nodiscard]] std::uint32_t getCurrentImageIdx() const noexcept { return mCurrentImageIdx; }
[[nodiscard]] const ObjectPtr<Image>& getCurrentImage() const noexcept { return mImages[mCurrentImageIdx]; }
[[nodiscard]] vk::Format getFormat() const noexcept { return mFormat; }
[[nodiscard]] const vk::Extent2D& getExtent() const noexcept { return mExtent; }
mijin::Task<> c_present(const PresentArgs& args);
private:
void recreate();
void acquireImage();
mijin::Task<> c_acquireImage();
public: // signals
mijin::Signal<> recreated;
};
}
#endif // IWA_SWAPCHAIN_HPP_INCLUDED

72
include/iwa/texture.hpp Normal file
View File

@ -0,0 +1,72 @@
#pragma once
#if !defined(IWA_TEXTURE_HPP_INCLUDED)
#define IWA_TEXTURE_HPP_INCLUDED
#include <glm/vec4.hpp>
#include <glm/gtx/hash.hpp>
#include <glm/gtx/spaceship.hpp>
#include <mijin/async/coroutine.hpp>
#include <mijin/util/hash.hpp>
#include "iwa/image.hpp"
namespace iwa
{
struct TextureCreationArgs
{
ObjectPtr<Image> image;
ImageViewCreationArgs imageViewArgs = {};
SamplerCreationArgs samplerArgs = {};
};
struct SingleColorTextureArgs
{
glm::vec4 color;
vk::Format format = vk::Format::eR8G8B8A8Unorm;
auto operator <=>(const SingleColorTextureArgs&) const noexcept = default;
};
struct TextureFromBitmapArgs
{
const Bitmap& bitmap;
ImageCreationArgs imageArgs = {};
ImageViewCreationArgs imageViewArgs = {};
SamplerCreationArgs samplerArgs = {};
};
class Texture : public Object<Texture>
{
private:
ObjectPtr<Image> mImage;
ObjectPtr<ImageView> mImageView;
ObjectPtr<Sampler> mSampler;
public:
explicit Texture(TextureCreationArgs args);
[[nodiscard]] const ObjectPtr<Image>& getImage() const noexcept { return mImage; }
[[nodiscard]] const ObjectPtr<ImageView>& getImageView() const noexcept { return mImageView; }
[[nodiscard]] const ObjectPtr<Sampler>& getSampler() const noexcept { return mSampler; }
static mijin::Task<ObjectPtr<Texture>> c_createSingleColor(const ObjectPtr<Device>& device, const SingleColorTextureArgs& args);
static mijin::Task<ObjectPtr<Texture>> c_createFromBitmap(const ObjectPtr<Device>& device, const TextureFromBitmapArgs& args);
};
} // namespace iwa
namespace std
{
template<>
struct hash<iwa::SingleColorTextureArgs>
{
std::size_t operator()(const iwa::SingleColorTextureArgs& args) const noexcept
{
std::size_t hash = 0;
mijin::hashCombine(hash, args.color);
mijin::hashCombine(hash, args.format);
return hash;
}
};
}
#endif // !defined(IWA_TEXTURE_HPP_INCLUDED)

View File

@ -0,0 +1,47 @@
#pragma once
#if !defined(IWA_UTIL_COLOR_HPP_INCLUDED)
#define IWA_UTIL_COLOR_HPP_INCLUDED
#include <cmath>
#include <glm/vec3.hpp>
#include <glm/vec4.hpp>
namespace iwa
{
[[nodiscard]] inline float linearToSrgb(float value) noexcept
{
// piecewise sRGB transfer function (linear -> sRGB encode)
if (value < 0.f) {
return 0.f;
}
if (value < 0.0031308f) {
return value * 12.92f;
}
if (value < 1.f) {
return 1.055f * std::pow(value, 1.f / 2.4f) - 0.055f;
}
return 1.f;
}
[[nodiscard]] inline glm::vec3 linearToSrgb(const glm::vec3& vector) noexcept
{
return glm::vec3(
linearToSrgb(vector[0]),
linearToSrgb(vector[1]),
linearToSrgb(vector[2])
);
}
[[nodiscard]] inline glm::vec4 linearToSrgb(const glm::vec4& vector) noexcept
{
return glm::vec4(
linearToSrgb(vector[0]),
linearToSrgb(vector[1]),
linearToSrgb(vector[2]),
vector.a
);
}
} // namespace iwa
#endif // !defined(IWA_UTIL_COLOR_HPP_INCLUDED)

View File

@ -0,0 +1,116 @@
// copied from Glslang source and adjusted just a bit
#pragma once
#if !defined(KAZAN_DIR_STACK_FILE_INCLUDER_HPP_INCLUDED)
#define KAZAN_DIR_STACK_FILE_INCLUDER_HPP_INCLUDED
#include <algorithm>
#include <fstream>
#include <string>
#include <unordered_set>
#include <vector>
#include <glslang/Public/ShaderLang.h>
namespace iwa::impl
{
class DirStackFileIncluder : public glslang::TShader::Includer
{
protected:
using tUserDataElement = char;
std::vector<std::string> directoryStack;
int externalLocalDirectoryCount = 0;
std::unordered_set<std::string> includedFiles;
public:
IncludeResult* includeLocal(const char* headerName, const char* includerName, size_t inclusionDepth) override;
IncludeResult* includeSystem(const char* headerName, const char* /*includerName*/, size_t /*inclusionDepth*/) override;
// Externally set directories. E.g., from a command-line -I<dir>.
// - Most-recently pushed are checked first.
// - All these are checked after the parse-time stack of local directories
// is checked.
// - This only applies to the "local" form of #include.
// - Makes its own copy of the path.
virtual void pushExternalLocalDirectory(const std::string& dir)
{
directoryStack.push_back(dir);
externalLocalDirectoryCount = (int)directoryStack.size();
}
void releaseInclude(IncludeResult* result) override
{
if (result != nullptr)
{
delete [] static_cast<tUserDataElement*>(result->userData);
delete result;
}
}
virtual std::unordered_set<std::string> getIncludedFiles() noexcept
{
return includedFiles;
}
protected:
// Search for a valid "local" path based on combining the stack of include
// directories and the nominal name of the header.
virtual IncludeResult* readLocalPath(const char* headerName, const char* includerName, int depth)
{
// Discard popped include directories, and
// initialize when at parse-time first level.
directoryStack.resize(depth + externalLocalDirectoryCount);
if (depth == 1)
directoryStack.back() = getDirectory(includerName);
// Find a directory that works, using a reverse search of the include stack.
for (auto it = directoryStack.rbegin(); it != directoryStack.rend(); ++it) {
std::string path = *it + '/' + headerName;
std::replace(path.begin(), path.end(), '\\', '/');
std::ifstream file(path, std::ios_base::binary | std::ios_base::ate);
if (file) {
directoryStack.push_back(getDirectory(path));
includedFiles.insert(path);
return newIncludeResult(path, file, (int)file.tellg());
}
}
return nullptr;
}
// Search for a valid <system> path.
// Not implemented yet; returning nullptr signals failure to find.
virtual IncludeResult* readSystemPath(const char* /*headerName*/) const
{
return nullptr;
}
// Do actual reading of the file, filling in a new include result.
virtual IncludeResult* newIncludeResult(const std::string& path, std::ifstream& file, int length) const
{
char* content = new tUserDataElement [length];
file.seekg(0, file.beg);
file.read(content, length);
return new IncludeResult(path, content, length, content);
}
// If no path markers, return current working directory.
// Otherwise, strip file name and return path leading up to it.
virtual std::string getDirectory(const std::string& path) const
{
const std::size_t last = path.find_last_of("/\\");
return last == std::string::npos ? "." : path.substr(0, last);
}
};
// defined in the header, so the definitions must be inline to avoid multiple-definition errors
inline glslang::TShader::Includer::IncludeResult*
DirStackFileIncluder::includeLocal(const char* headerName, const char* includerName, size_t inclusionDepth)
{
return readLocalPath(headerName, includerName, (int)inclusionDepth);
}
inline glslang::TShader::Includer::IncludeResult*
DirStackFileIncluder::includeSystem(const char* headerName, const char*, size_t)
{
return readSystemPath(headerName);
}
} // namespace iwa::impl
#endif // !defined(KAZAN_DIR_STACK_FILE_INCLUDER_HPP_INCLUDED)

View File

@ -0,0 +1,49 @@
#pragma once
#if !defined(IWA_UTIL_FPS_CALCULATOR_HPP_INCLUDED)
#define IWA_UTIL_FPS_CALCULATOR_HPP_INCLUDED
#include <array>
#include <chrono>
namespace iwa
{
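// Rolling FPS counter: tickFrame() stores the instantaneous FPS of the last frame
// in a ring buffer of NUM_VALUES samples and getFps() returns their average.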
template<unsigned NUM_VALUES = 20>
class FpsCalculator
{
private:
using clock_t = std::chrono::high_resolution_clock;
using doubledur_t = std::chrono::duration<double>;
clock_t::time_point mLastFrame = clock_t::now();
std::array<double, NUM_VALUES> mLatestValues = {};
unsigned mNextValueIndex = 0;
public:
inline void tickFrame() noexcept;
[[nodiscard]] inline double getFps() const noexcept;
};
template<unsigned NUM_VALUES>
void FpsCalculator<NUM_VALUES>::tickFrame() noexcept
{
const clock_t::time_point currentFrame = clock_t::now();
const clock_t::duration difference = currentFrame - mLastFrame;
mLastFrame = currentFrame;
mLatestValues[mNextValueIndex] = 1.0 / std::chrono::duration_cast<doubledur_t>(difference).count();
mNextValueIndex = (mNextValueIndex + 1) % NUM_VALUES;
}
template<unsigned NUM_VALUES>
double FpsCalculator<NUM_VALUES>::getFps() const noexcept
{
double sum = 0;
for (const double fps : mLatestValues)
{
sum += fps;
}
return sum / NUM_VALUES;
}
} // namespace iwa
#endif // !defined(IWA_UTIL_FPS_CALCULATOR_HPP_INCLUDED)

View File

@ -0,0 +1,152 @@
#pragma once
#if !defined(IWA_UTIL_GLSL_COMPILER_HPP_INCLUDED)
#define IWA_UTIL_GLSL_COMPILER_HPP_INCLUDED
#include <memory>
#include <string>
#include <vector>
#include <mijin/container/optional.hpp>
#include <mijin/util/bitflags.hpp>
#include <mijin/virtual_filesystem/filesystem.hpp>
#include "iwa/instance.hpp"
#include "iwa/object.hpp"
#include "iwa/pipeline.hpp"
#include "iwa/shader_module.hpp"
#include "iwa/vkwrapper.hpp"
#include "iwa/util/shader_meta.hpp"
namespace glslang
{
class TShader;
class TProgram;
}
namespace YAML
{
class Node;
}
namespace iwa
{
struct ShaderSource
{
std::string code;
std::string fileName = {}; // also required for parsing (e.g. relative includes)
#if !defined(KAZAN_RELEASE)
std::string name = {}; // only for debug information!
#endif
[[nodiscard]] static ShaderSource fromStream(mijin::Stream& stream, std::string fileName = {});
[[nodiscard]] static ShaderSource fromFile(const mijin::PathReference& file);
[[nodiscard]] static ShaderSource fromYaml(const YAML::Node& node, const mijin::PathReference& yamlFile);
// static ShaderSource fromFile(std::string fileName, std::string name = "");
};
class GLSLCompilerSettings : public Object<GLSLCompilerSettings, InstanceExtension>
{
private:
std::string mCommonPreamble;
public:
[[nodiscard]] const std::string& getCommonPreamble() const noexcept { return mCommonPreamble; }
void setCommonPreamble(std::string preamble) noexcept { mCommonPreamble = std::move(preamble); }
};
struct GLSLShaderCreationArgs
{
std::vector<ShaderSource> sources;
std::vector<std::string> defines;
vk::ShaderStageFlagBits type;
};
struct GLSLShaderLinkFlags : mijin::BitFlags<GLSLShaderLinkFlags>
{
#if !defined(KAZAN_RELEASE)
std::uint8_t withDebugInfo : 1 = 1;
#else
std::uint8_t withDebugInfo : 1 = 0;
#endif
};
class GLSLShader : public Object<GLSLShader, BaseObject, class Instance>
{
private:
std::unique_ptr<glslang::TShader> mHandle;
vk::ShaderStageFlagBits mType;
std::vector<ShaderSource> mSources;
std::vector<std::string> mDefines;
public:
GLSLShader(ObjectPtr<class Instance> owner, GLSLShaderCreationArgs args) noexcept;
~GLSLShader() noexcept override;
[[nodiscard]] std::unique_ptr<glslang::TShader> releaseHandle();
[[nodiscard]] vk::ShaderStageFlagBits getType() const noexcept { return mType; }
[[nodiscard]] ShaderMeta getPartialMeta();
private:
void compile();
};
struct GLSLSemanticMapping
{
unsigned semantic;
unsigned semanticIdx = 0;
int newSet = -1;
int newBinding = -1;
};
struct GLSLShaderProgramCreationArgs
{
std::vector<ObjectPtr<GLSLShader>> shaders;
GLSLShaderLinkFlags linkFlags;
std::vector<GLSLSemanticMapping> semanticMappings;
};
struct PrepareGraphicsPipelineArgs
{
const struct VertexLayout& vertexLayout;
GeneratePipelineLayoutArgs layoutArgs;
PipelineLayoutMeta pipelineLayoutMeta;
PipelineAndDescriptorSetLayouts layouts;
};
struct PrepareComputePipelineArgs
{
GeneratePipelineLayoutArgs layoutArgs;
PipelineLayoutMeta pipelineLayoutMeta;
PipelineAndDescriptorSetLayouts layouts;
};
class GLSLShaderProgram : public Object<GLSLShaderProgram, BaseObject, class Device>
{
private:
std::vector<std::unique_ptr<glslang::TShader>> mShaderHandles; // must keep the TShaders alive
std::unique_ptr<glslang::TProgram> mHandle;
ShaderMeta mMeta;
GLSLShaderLinkFlags mLinkFlags;
public:
GLSLShaderProgram(ObjectPtr<class Device> owner, GLSLShaderProgramCreationArgs args);
[[nodiscard]] glslang::TProgram* getHandle() const noexcept { return mHandle.get(); }
[[nodiscard]] const ShaderMeta& getMeta() const noexcept { return mMeta; }
[[nodiscard]] std::vector<std::uint32_t> generateSpirv(vk::ShaderStageFlagBits stage) const;
[[nodiscard]] std::vector<PipelineStage> generatePipelineStages() const;
[[nodiscard]] GraphicsPipelineCreationArgs prepareGraphicsPipeline(PrepareGraphicsPipelineArgs& args) const;
[[nodiscard]] ComputePipelineCreationArgs prepareComputePipeline(PrepareComputePipelineArgs& args) const;
};
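// Usage sketch (illustrative; instance, device and the ShaderSource objects are
// assumed to exist elsewhere):
//     auto vert = GLSLShader::create(instance, GLSLShaderCreationArgs{.sources = {vertSource}, .type = vk::ShaderStageFlagBits::eVertex});
//     auto frag = GLSLShader::create(instance, GLSLShaderCreationArgs{.sources = {fragSource}, .type = vk::ShaderStageFlagBits::eFragment});
//     auto program = GLSLShaderProgram::create(device, GLSLShaderProgramCreationArgs{.shaders = {vert, frag}});
//     std::vector<PipelineStage> stages = program->generatePipelineStages();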
// struct PrepareGLSLGraphicsPipelineArgs
// {
// std::vector<ObjectPtr<GLSLShader>> shaders;
// };
//
// struct PrepareGLSLGraphicsPipelineResult
// {
// std::vector<PipelineStage> stages;
// ShaderMeta
// };
//
// [[nodiscard]] PrepareGLSLGraphicsPipelineResult prepareGLSLGraphicsPipeline(const PrepareGLSLGraphicsPipelineArgs& args);
} // namespace iwa
#endif // !defined(IWA_UTIL_GLSL_COMPILER_HPP_INCLUDED)

View File

@ -0,0 +1,27 @@
#pragma once
#if !defined(IWA_UTIL_GROWING_DESCRIPTOR_POOL_HPP_INCLUDED)
#define IWA_UTIL_GROWING_DESCRIPTOR_POOL_HPP_INCLUDED
#include "iwa/descriptor_set.hpp"
namespace iwa
{
using GrowingDescriptorPoolCreationArgs = DescriptorPoolCreationArgs;
class GrowingDescriptorPool : public Object<GrowingDescriptorPool, BaseObject, class Device>
{
private:
std::vector<ObjectPtr<DescriptorPool>> mPools;
DescriptorPoolCreationArgs mCreationArgs;
public:
GrowingDescriptorPool(ObjectPtr<class Device> owner, GrowingDescriptorPoolCreationArgs args);
[[nodiscard]] ObjectPtr<class DescriptorSet> allocateDescriptorSet(const DescriptorSetAllocateArgs& args);
[[nodiscard]] bool getCanFree() const noexcept { return (mCreationArgs.flags & vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet) != vk::DescriptorPoolCreateFlags(); }
};
} // namespace iwa
#endif // !defined(IWA_UTIL_GROWING_DESCRIPTOR_POOL_HPP_INCLUDED)

View File

@ -0,0 +1,106 @@
#pragma once
#if !defined(IWA_UTIL_RENDER_TARGET_HPP_INCLUDED)
#define IWA_UTIL_RENDER_TARGET_HPP_INCLUDED
#include <mijin/container/optional.hpp>
#include "iwa/fence.hpp"
#include "iwa/object.hpp"
#include "iwa/semaphore.hpp"
#include "iwa/swapchain.hpp"
namespace iwa
{
struct ImageReferenceFrame
{
Image* image;
ImageView* imageView;
vk::Offset2D offset = {0, 0};
};
struct ImageReferenceFinalizeArgs
{
const class CommandBuffer& cmdBuffer;
std::vector<vk::Semaphore>& waitSemaphores;
std::vector<vk::Semaphore>& signalSemaphores;
};
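/**
 * Abstraction over "something that can be rendered to": a swapchain image, a
 * plain image/view pair, or an image that is recreated to follow another
 * reference's extent (see the concrete subclasses below).
 */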
class ImageReference : public AbstractObject<ImageReference, BaseObject, class Device>
{
protected:
explicit ImageReference(ObjectPtr<class Device> owner);
public:
[[nodiscard]] virtual vk::Format getFormat() = 0;
[[nodiscard]] virtual vk::Extent2D getExtent() = 0;
virtual ImageReferenceFrame getCurrentFrame() = 0;
virtual void finalize(ImageReferenceFinalizeArgs& args);
virtual mijin::Task<> c_present();
};
struct SwapchainImageReferenceCreationArgs
{
ObjectPtr<Swapchain> swapchain;
};
class SwapchainImageReference : public Object<SwapchainImageReference, ImageReference>
{
private:
std::vector<ObjectPtr<Semaphore>> mPresentReadySemaphores;
std::vector<ObjectPtr<ImageView>> mImageViews;
ObjectPtr<Swapchain> mSwapchain;
public:
SwapchainImageReference(ObjectPtr<class Device> owner, SwapchainImageReferenceCreationArgs args);
[[nodiscard]] vk::Format getFormat() override;
[[nodiscard]] vk::Extent2D getExtent() override;
ImageReferenceFrame getCurrentFrame() override;
void finalize(ImageReferenceFinalizeArgs& args) override;
mijin::Task<> c_present() override;
private:
void createImageViews();
};
struct DirectImageReferenceCreationArgs
{
ObjectPtr<Image> image;
ObjectPtr<ImageView> imageView;
};
class DirectImageReference : public Object<DirectImageReference, ImageReference>
{
protected:
ObjectPtr<Image> mImage;
ObjectPtr<ImageView> mImageView;
public:
DirectImageReference(ObjectPtr<class Device> owner, DirectImageReferenceCreationArgs args);
[[nodiscard]] vk::Format getFormat() override;
[[nodiscard]] vk::Extent2D getExtent() override;
ImageReferenceFrame getCurrentFrame() override;
};
struct AutoResizeImageReferenceCreationArgs
{
ObjectPtr<ImageReference> referenceImageRef;
ImageCreationArgs imageCreationArgs = {};
ImageViewCreationArgs imageViewCreationArgs = {};
};
class AutoResizeImageReference : public Object<AutoResizeImageReference, DirectImageReference>
{
private:
ObjectPtr<ImageReference> mReferenceImageRef;
ImageCreationArgs mImageCreationArgs;
ImageViewCreationArgs mImageViewCreationArgs;
public:
AutoResizeImageReference(ObjectPtr<class Device> owner, AutoResizeImageReferenceCreationArgs args);
[[nodiscard]] vk::Extent2D getExtent() override;
ImageReferenceFrame getCurrentFrame() override;
private:
void createImage();
};
} // namespace iwa
#endif // !defined(IWA_UTIL_RENDER_TARGET_HPP_INCLUDED)

View File

@ -0,0 +1,58 @@
#pragma once
#if !defined(KAZAN_NEXT_CHAIN_HPP_INCLUDED)
#define KAZAN_NEXT_CHAIN_HPP_INCLUDED
#include <bit>
#include <cstddef>
#include <cstring>
#include <vector>
#include <mijin/util/align.hpp>
#include "iwa/vkwrapper.hpp"
namespace iwa
{
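/**
 * Helper for assembling a Vulkan pNext chain: append() copies each extension
 * struct into one contiguous byte buffer, finalize() links the pNext pointers in
 * insertion order and returns the head of the chain (nullptr if nothing was
 * appended). Appending again after finalize() may reallocate the buffer and
 * invalidate previously returned pointers.
 */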
class NextChain
{
private:
std::vector<std::byte> data;
std::vector<std::size_t> offsets;
public:
template<typename TStruct>
void append(const TStruct& element) noexcept
{
const std::size_t oldSize = mijin::alignUp(data.size(), alignof(TStruct));
data.resize(oldSize + sizeof(TStruct));
std::memcpy(&data[oldSize], &element, sizeof(TStruct));
offsets.push_back(oldSize);
}
void* finalize() noexcept
{
auto itCur = offsets.begin();
if (itCur == offsets.end())
{
return nullptr;
}
auto itNext = std::next(itCur);
while (true)
{
vk::BaseOutStructure& curStruct = *std::bit_cast<vk::BaseOutStructure*>(&data[*itCur]);
if (itNext == offsets.end())
{
curStruct.pNext = nullptr;
break;
}
curStruct.pNext = std::bit_cast<vk::BaseOutStructure*>(&data[*itNext]);
itCur = itNext;
++itNext;
}
// first is always at offset 0
return data.data();
}
};
} // namespace iwa
#endif // !defined(KAZAN_NEXT_CHAIN_HPP_INCLUDED)

View File

@ -0,0 +1,17 @@
#pragma once
#if !defined(IWA_UTIL_REFLECT_GLSL_HPP_INCLUDED)
#define IWA_UTIL_REFLECT_GLSL_HPP_INCLUDED
#include <glslang/Public/ShaderLang.h>
#include "iwa/util/shader_meta.hpp"
namespace iwa
{
[[nodiscard]] ShaderMeta reflectShader(glslang::TShader& shader);
[[nodiscard]] ShaderMeta reflectProgram(glslang::TProgram& program);
[[nodiscard]] ShaderMeta reflectIntermediate(glslang::TIntermediate& intermediate, vk::ShaderStageFlagBits stage);
} // namespace iwa
#endif // !defined(IWA_UTIL_REFLECT_GLSL_HPP_INCLUDED)

View File

@ -0,0 +1,67 @@
#pragma once
#if !defined(IWA_UTIL_RENDER_LOOP_HPP_INCLUDED)
#define IWA_UTIL_RENDER_LOOP_HPP_INCLUDED
#include <unordered_set>
#include <mijin/util/bitflags.hpp>
#include "iwa/device.hpp"
#include "iwa/fence.hpp"
#include "iwa/object.hpp"
#include "iwa/swapchain.hpp"
#include "iwa/vkwrapper.hpp"
#include "iwa/util/image_reference.hpp"
#include "iwa/util/task_runner.hpp"
namespace iwa
{
struct RenderLoopCreationFlags : mijin::BitFlags<RenderLoopCreationFlags>
{
std::uint8_t advanceDeleteQueue : 1 = 1;
};
struct RenderLoopCreationArgs
{
ObjectPtr<CommandPool> commandPool;
RenderLoopCreationFlags flags;
unsigned parallelFrames = 3;
unsigned targetFps = 0;
};
struct RenderLoopRenderArgs
{
const CommandBuffer& cmdBuffer;
const unsigned frameIdx;
std::unordered_set<ObjectPtr<ImageReference>> usedImageReferences;
};
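/**
 * Coroutine-driven render loop base: keeps one command buffer and render-done
 * fence per parallel frame and lets derived classes implement c_init() and
 * c_render(), which the internal c_renderLoop() presumably drives once per frame.
 */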
class RenderLoop : public AbstractObject<RenderLoop, BaseObject, class Device>, public MixinTaskRunner<RenderLoop>
{
private:
struct Alternating
{
ObjectPtr<CommandBuffer> commandBuffer;
ObjectPtr<Fence> renderDoneFence;
};
std::vector<Alternating> mAlternating;
unsigned mFrameIdx = 0;
protected:
const bool mAdvanceDeleteQueue;
protected:
explicit RenderLoop(ObjectPtr<class Device> owner, RenderLoopCreationArgs args);
public:
void start() noexcept;
virtual mijin::Task<> c_init();
virtual mijin::Task<> c_render(RenderLoopRenderArgs& args) = 0;
[[nodiscard]] std::size_t getNumParallelFrames() const noexcept { return mAlternating.size(); }
mijin::SimpleTaskLoop& getTaskLoop() const noexcept;
private:
mijin::Task<> c_renderLoop();
};
} // namespace iwa
#endif // !defined(IWA_UTIL_RENDER_LOOP_HPP_INCLUDED)

View File

@ -0,0 +1,683 @@
#pragma once
#if !defined(IWA_UTIL_SHADER_META_HPP_INCLUDED)
#define IWA_UTIL_SHADER_META_HPP_INCLUDED
#include <fmt/format.h>
#include <magic_enum.hpp>
#include <mijin/detect.hpp>
#include <cstdint>
#include <limits>
#include <string>
#include <string_view>
#include <unordered_map>
#include <vector>
#include "iwa/descriptor_set.hpp"
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
#include "iwa/log.hpp"
#include "iwa/pipeline.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
inline constexpr unsigned UNSPECIFIED_INDEX = std::numeric_limits<unsigned>::max();
enum class ImageDim
{
ONE = VK_IMAGE_TYPE_1D,
TWO = VK_IMAGE_TYPE_2D,
THREE = VK_IMAGE_TYPE_3D,
CUBE
};
#if MIJIN_COMPILER == MIJIN_COMPILER_GCC || MIJIN_COMPILER == MIJIN_COMPILER_CLANG
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
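// Compact bitset of shader stages: addressable per stage via get()/set(),
// convertible to vk::ShaderStageFlags via toVulkan() and iterable over the set
// stages through ShaderTypeBitsIterator.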
union ShaderTypeBits
{
struct
{
bool compute : 1;
bool vertex : 1;
bool fragment : 1;
bool rayGeneration : 1;
bool rayClosestHit : 1;
bool rayAnyHit : 1;
bool rayMiss : 1;
bool rayIntersection : 1;
bool callable : 1;
bool task : 1;
bool mesh : 1;
bool tessellationControl : 1;
bool tesselationEvaluation : 1;
bool geometry : 1;
};
std::uint16_t bits = 0;
// static_assert(static_cast<int>(ShaderType::NUM_VALUES) < 8 * sizeof(bits));
[[nodiscard]] constexpr bool get(vk::ShaderStageFlagBits shaderType) const;
constexpr void set(vk::ShaderStageFlagBits shaderType, bool value);
constexpr ShaderTypeBits& operator |=(const ShaderTypeBits& other) {
bits |= other.bits;
return *this;
}
constexpr operator bool() const {
return bits != 0;
}
constexpr bool operator!() const {
return bits == 0;
}
[[nodiscard]] constexpr vk::ShaderStageFlags toVulkan() const
{
return vk::ShaderStageFlagBits{}
| (compute ? vk::ShaderStageFlagBits::eCompute : vk::ShaderStageFlagBits{})
| (vertex ? vk::ShaderStageFlagBits::eVertex : vk::ShaderStageFlagBits{})
| (fragment ? vk::ShaderStageFlagBits::eFragment : vk::ShaderStageFlagBits{})
| (rayGeneration ? vk::ShaderStageFlagBits::eRaygenKHR : vk::ShaderStageFlagBits{})
| (rayClosestHit ? vk::ShaderStageFlagBits::eClosestHitKHR : vk::ShaderStageFlagBits{})
| (rayAnyHit ? vk::ShaderStageFlagBits::eAnyHitKHR : vk::ShaderStageFlagBits{})
| (rayMiss ? vk::ShaderStageFlagBits::eMissKHR : vk::ShaderStageFlagBits{})
| (rayIntersection ? vk::ShaderStageFlagBits::eIntersectionKHR : vk::ShaderStageFlagBits{})
| (callable ? vk::ShaderStageFlagBits::eCallableKHR : vk::ShaderStageFlagBits{})
| (task ? vk::ShaderStageFlagBits::eTaskEXT : vk::ShaderStageFlagBits{})
| (mesh ? vk::ShaderStageFlagBits::eMeshEXT : vk::ShaderStageFlagBits{})
| (tessellationControl ? vk::ShaderStageFlagBits::eTessellationControl : vk::ShaderStageFlagBits{})
| (tesselationEvaluation ? vk::ShaderStageFlagBits::eTessellationEvaluation : vk::ShaderStageFlagBits{})
| (geometry ? vk::ShaderStageFlagBits::eGeometry : vk::ShaderStageFlagBits{});
}
[[nodiscard]] constexpr vk::ShaderStageFlagBits getFirst() const
{
if (compute) {
return vk::ShaderStageFlagBits::eCompute;
}
if (vertex) {
return vk::ShaderStageFlagBits::eVertex;
}
if (fragment) {
return vk::ShaderStageFlagBits::eFragment;
}
if (rayGeneration) {
return vk::ShaderStageFlagBits::eRaygenKHR;
}
if (rayClosestHit) {
return vk::ShaderStageFlagBits::eClosestHitKHR;
}
if (rayAnyHit) {
return vk::ShaderStageFlagBits::eAnyHitKHR;
}
if (rayMiss) {
return vk::ShaderStageFlagBits::eMissKHR;
}
if (rayIntersection) {
return vk::ShaderStageFlagBits::eIntersectionKHR;
}
if (callable) {
return vk::ShaderStageFlagBits::eCallableKHR;
}
if (task) {
return vk::ShaderStageFlagBits::eTaskEXT;
}
if (mesh) {
return vk::ShaderStageFlagBits::eMeshEXT;
}
if (tessellationControl) {
return vk::ShaderStageFlagBits::eTessellationControl;
}
if (tesselationEvaluation) {
return vk::ShaderStageFlagBits::eTessellationEvaluation;
}
if (geometry) {
return vk::ShaderStageFlagBits::eGeometry;
}
return vk::ShaderStageFlagBits();
}
[[nodiscard]] constexpr struct ShaderTypeBitsIterator begin() const noexcept;
[[nodiscard]] constexpr struct ShaderTypeBitsIterator end() const noexcept;
static constexpr ShaderTypeBits make(vk::ShaderStageFlagBits type) {
ShaderTypeBits bits;
bits.set(type, true);
return bits;
}
};
struct ShaderTypeBitsIterator
{
using value_type = vk::ShaderStageFlagBits;
ShaderTypeBits value;
unsigned pos = 16;
ShaderTypeBitsIterator() = default;
explicit constexpr ShaderTypeBitsIterator(ShaderTypeBits value_) noexcept : value(value_), pos(0)
{
for (; pos < 16; ++pos)
{
if (value.bits & (1 << pos))
{
break;
}
}
}
constexpr bool operator==(const ShaderTypeBitsIterator& other) const noexcept {
return pos == other.pos;
}
constexpr bool operator!=(const ShaderTypeBitsIterator& other) const noexcept {
return !(*this == other);
}
value_type operator*() const noexcept
{
MIJIN_ASSERT(pos < 16, "Attempt to dereference invalid iterator.");
return ShaderTypeBits{.bits = static_cast<std::uint16_t>(1 << pos)}.getFirst();
}
ShaderTypeBitsIterator& operator++() noexcept
{
++pos;
for (; pos < 16; ++pos)
{
if (value.bits & (1 << pos))
{
break;
}
}
return *this;
}
};
#if MIJIN_COMPILER == MIJIN_COMPILER_GCC || MIJIN_COMPILER == MIJIN_COMPILER_CLANG
#pragma GCC diagnostic pop
#endif
enum class ShaderVariableBaseType
{
NONE = 0,
SIMPLE = 1,
IMAGE = 2,
STRUCT = 3,
MATRIX = 4,
ACCELERATION_STRUCTURE = 5
};
struct ShaderVariableSimpleType
{
vk::Format format;
};
struct ShaderVariableImageType
{
ImageDim dimensions;
vk::Format format;
};
struct ShaderVariableStructMember;
struct ShaderVariableStructType
{
std::vector<ShaderVariableStructMember> members;
ShaderVariableStructType();
~ShaderVariableStructType();
};
enum class ShaderVariableMatrixType
{
UNDEFINED = 0,
MAT2 = 1,
MAT3 = 2,
MAT4 = 3
};
struct ShaderVariableType
{
ShaderVariableBaseType baseType = ShaderVariableBaseType::NONE;
ShaderVariableImageType image = {};
ShaderVariableSimpleType simple = {};
ShaderVariableStructType struct_ = {};
ShaderVariableMatrixType matrixType = ShaderVariableMatrixType::UNDEFINED;
unsigned arraySize = 1;
bool dynamicArraySize : 1 = false;
static ShaderVariableType fromVkFormat(vk::Format format)
{
return ShaderVariableType{
.baseType = ShaderVariableBaseType::SIMPLE,
.simple = {
.format = format
}
};
}
inline bool operator==(const ShaderVariableType& other) const;
inline bool operator!=(const ShaderVariableType& other) const {
return !(*this == other);
}
std::size_t calcHash(std::size_t appendTo = 0) const;
};
struct ShaderVariableStructMember
{
std::string name;
std::size_t offset;
ShaderVariableType type;
unsigned semantic = UNSPECIFIED_INDEX;
unsigned semanticIdx = 0;
// inline ~ShaderVariableStructMember();
};
struct ShaderVariableOffsetComparator
{
constexpr bool operator()(const ShaderVariableStructMember& first, const ShaderVariableStructMember& second) const
{
return first.offset < second.offset;
}
};
struct ShaderVariable
{
ShaderVariableType type;
vk::DescriptorType descriptorType;
unsigned binding = UNSPECIFIED_INDEX;
unsigned semantic = UNSPECIFIED_INDEX;
unsigned semanticIndex = 0;
std::string name;
void verifyCompatible(const ShaderVariable& other) const;
std::size_t calcHash(std::size_t appendTo = 0) const;
};
struct ShaderPushConstantBlock
{
ShaderVariableType type;
std::uint32_t offset = 0;
inline operator bool() const {
return type.baseType != ShaderVariableBaseType::NONE;
}
inline bool operator!() const {
return type.baseType == ShaderVariableBaseType::NONE;
}
};
struct ShaderVariableFindResult
{
unsigned setIndex = 0;
unsigned bindIndex = 0;
};
struct ShaderVariableSet
{
ShaderTypeBits usedInStages; // primarily for use in pipelines which may have multiple stages (in contrast to shaders and fragments)
unsigned setIndex = UNSPECIFIED_INDEX;
std::vector<ShaderVariable> variables;
[[nodiscard]] bool find(std::string_view varName, ShaderVariableFindResult& outResult) const noexcept;
[[nodiscard]] bool find(unsigned semantic, unsigned semanticIdx, ShaderVariableFindResult& outResult) const noexcept;
[[nodiscard]] bool find(unsigned semantic, ShaderVariableFindResult& outResult) const noexcept {
return find(semantic, 0, outResult);
}
[[nodiscard]] const ShaderVariable& getVariableAtBinding(unsigned bindingIdx) const;
[[nodiscard]] const ShaderVariable* getVariableAtBindingOpt(unsigned bindingIdx) const;
[[nodiscard]] const ShaderVariable* getVariableAtSemanticOpt(unsigned semantic, unsigned semanticIdx) const;
std::size_t calcHash(std::size_t appendTo = 0) const;
};
struct ShaderAttribute
{
vk::ShaderStageFlagBits stage;
ShaderVariableType type;
unsigned location = UNSPECIFIED_INDEX;
unsigned semantic = UNSPECIFIED_INDEX;
unsigned semanticIndex = 0;
std::string name;
};
struct ShaderSpecializationConstant
{
ShaderVariableType type;
unsigned id;
};
struct DescriptorSetMeta
{
vk::DescriptorSetLayoutCreateFlags flags = {};
std::vector<vk::DescriptorSetLayoutBinding> bindings;
std::vector<vk::DescriptorBindingFlags> bindingFlags;
std::vector<vk::DescriptorType> descriptorTypes;
[[nodiscard]] ObjectPtr<DescriptorSetLayout> createDescriptorSetLayout(Device& device) const;
};
struct PipelineAndDescriptorSetLayouts
{
std::vector<ObjectPtr<DescriptorSetLayout>> descriptorSetLayouts;
ObjectPtr<PipelineLayout> pipelineLayout;
[[nodiscard]] std::vector<ObjectPtr<DescriptorSet>> createDescriptorSets(DescriptorPool& pool) const;
[[nodiscard]] ObjectPtr<DescriptorSet> createDescriptorSet(DescriptorPool& pool, unsigned setIdx) const;
};
struct PipelineLayoutMeta
{
std::vector<DescriptorSetMeta> descriptorSets;
vk::PushConstantRange pushConstantRange;
[[nodiscard]] PipelineAndDescriptorSetLayouts createPipelineLayout(Device& device) const;
};
struct NamedVertexInput
{
struct Attribute
{
unsigned binding;
unsigned offset;
};
std::vector<vk::VertexInputBindingDescription> bindings;
std::unordered_map<std::string, Attribute> attributes;
};
struct GenerateDescriptorSetLayoutArgs
{
std::unordered_map<unsigned, std::uint32_t> descriptorCounts = {};
vk::DescriptorSetLayoutCreateFlags flags = {};
};
struct GeneratePipelineLayoutArgs
{
std::unordered_map<unsigned, GenerateDescriptorSetLayoutArgs> descriptorSets;
};
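/**
 * Aggregated shader reflection data (descriptor sets, input/output attributes,
 * push constant block, compute local size) used to generate vertex input
 * descriptions, descriptor set layouts and pipeline layouts.
 */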
struct ShaderMeta
{
static constexpr const int STRUCT_VERSION = 1;
std::vector<ShaderVariableSet> interfaceVariableSets;
std::vector<ShaderAttribute> inputAttributes;
std::vector<ShaderAttribute> outputAttributes;
ShaderTypeBits stages;
ShaderTypeBits pushConstantStages;
ShaderPushConstantBlock pushConstantBlock;
unsigned localSizeX = 0;
unsigned localSizeY = 0;
unsigned localSizeZ = 0;
private:
mutable std::size_t hash = 0;
public:
[[nodiscard]] inline ShaderVariableSet& getOrCreateInterfaceVariableSet(unsigned setIdx);
void extend(ShaderMeta other);
[[nodiscard]] bool findInterfaceVariable(std::string_view varName, ShaderVariableFindResult& outResult) const noexcept;
[[nodiscard]] bool findInterfaceVariable(unsigned semantic, unsigned semanticIdx, ShaderVariableFindResult& outResult) const noexcept;
[[nodiscard]] bool findInterfaceVariable(unsigned semantic, ShaderVariableFindResult& outResult) const noexcept {
return findInterfaceVariable(semantic, 0, outResult);
}
[[nodiscard]] const ShaderVariableSet& getInterfaceVariableSet(unsigned setIdx) const;
[[nodiscard]] const ShaderVariableSet* getInterfaceVariableSetOpt(unsigned setIdx) const;
[[nodiscard]] const ShaderVariableType& getInterfaceVariableType(unsigned setIdx, unsigned bindingIdx) const;
[[nodiscard]] inline const ShaderVariableType& getInterfaceVariableType(const ShaderVariableFindResult& findResult) const {
return getInterfaceVariableType(findResult.setIndex, findResult.bindIndex);
}
[[nodiscard]] VertexInput generateVertexInput(const NamedVertexInput& namedInput) const noexcept;
[[nodiscard]] VertexInput generateVertexInputFromLayout(const struct VertexLayout& layout) const noexcept;
[[nodiscard]] DescriptorSetMeta generateDescriptorSetLayout(const ShaderVariableSet& set, const GenerateDescriptorSetLayoutArgs& args = {}) const;
[[nodiscard]] inline DescriptorSetMeta generateDescriptorSetLayout(unsigned setIdx, const GenerateDescriptorSetLayoutArgs& args = {}) const {
return generateDescriptorSetLayout(getInterfaceVariableSet(setIdx), args);
}
[[nodiscard]] PipelineLayoutMeta generatePipelineLayout(const GeneratePipelineLayoutArgs& args = {}) const;
[[nodiscard]] bool empty() const;
std::size_t getHash() const;
void extendPushConstant(ShaderPushConstantBlock pushConstantBlock, ShaderTypeBits stages);
void addInputAttribute(ShaderAttribute attribute);
void addOutputAttribute(ShaderAttribute attribute);
};
// ShaderVariableType::~ShaderVariableType() // NOLINT clang-tidy just doesn't understand my genius
// {
// // defined here since
// }
// ShaderVariableStructMember::~ShaderVariableStructMember() {}
[[nodiscard]] unsigned calcShaderTypeSize(const ShaderVariableType& type, bool ignoreArraySize = false) noexcept;
constexpr bool ShaderTypeBits::get(vk::ShaderStageFlagBits shaderType) const
{
switch(shaderType)
{
case vk::ShaderStageFlagBits::eCompute:
return compute;
case vk::ShaderStageFlagBits::eVertex:
return vertex;
case vk::ShaderStageFlagBits::eFragment:
return fragment;
case vk::ShaderStageFlagBits::eRaygenKHR:
return rayGeneration;
case vk::ShaderStageFlagBits::eClosestHitKHR:
return rayClosestHit;
case vk::ShaderStageFlagBits::eAnyHitKHR:
return rayAnyHit;
case vk::ShaderStageFlagBits::eMissKHR:
return rayMiss;
case vk::ShaderStageFlagBits::eIntersectionKHR:
return rayIntersection;
case vk::ShaderStageFlagBits::eCallableKHR:
return callable;
case vk::ShaderStageFlagBits::eTaskEXT:
return task;
case vk::ShaderStageFlagBits::eMeshEXT:
return mesh;
case vk::ShaderStageFlagBits::eTessellationControl:
return tessellationControl;
case vk::ShaderStageFlagBits::eTessellationEvaluation:
return tesselationEvaluation;
case vk::ShaderStageFlagBits::eGeometry:
return geometry;
case vk::ShaderStageFlagBits::eAllGraphics:
case vk::ShaderStageFlagBits::eAll:
case vk::ShaderStageFlagBits::eSubpassShadingHUAWEI:
case vk::ShaderStageFlagBits::eClusterCullingHUAWEI:
break; // let it fail
}
logAndDie("Invalid shader type in ShaderTypeBits::get()");
}
constexpr void ShaderTypeBits::set(vk::ShaderStageFlagBits shaderType, bool value)
{
switch(shaderType)
{
case vk::ShaderStageFlagBits::eCompute:
compute = value;
return;
case vk::ShaderStageFlagBits::eVertex:
vertex = value;
return;
case vk::ShaderStageFlagBits::eFragment:
fragment = value;
return;
case vk::ShaderStageFlagBits::eRaygenKHR:
rayGeneration = value;
return;
case vk::ShaderStageFlagBits::eClosestHitKHR:
rayClosestHit = value;
return;
case vk::ShaderStageFlagBits::eAnyHitKHR:
rayAnyHit = value;
return;
case vk::ShaderStageFlagBits::eMissKHR:
rayMiss = value;
return;
case vk::ShaderStageFlagBits::eIntersectionKHR:
rayIntersection = value;
return;
case vk::ShaderStageFlagBits::eCallableKHR:
callable = value;
return;
case vk::ShaderStageFlagBits::eTaskEXT:
task = value;
return;
case vk::ShaderStageFlagBits::eMeshEXT:
mesh = value;
return;
case vk::ShaderStageFlagBits::eTessellationControl:
tessellationControl = value;
return;
case vk::ShaderStageFlagBits::eTessellationEvaluation:
tesselationEvaluation = value;
return;
case vk::ShaderStageFlagBits::eGeometry:
geometry = value;
return;
case vk::ShaderStageFlagBits::eAllGraphics:
case vk::ShaderStageFlagBits::eAll:
case vk::ShaderStageFlagBits::eSubpassShadingHUAWEI:
case vk::ShaderStageFlagBits::eClusterCullingHUAWEI:
break; // let it fail
}
logAndDie("Invalid shader type in ShaderTypeBits::set()");
}
constexpr ShaderTypeBitsIterator ShaderTypeBits::begin() const noexcept
{
return ShaderTypeBitsIterator(*this);
}
constexpr ShaderTypeBitsIterator ShaderTypeBits::end() const noexcept
{
return ShaderTypeBitsIterator();
}
inline bool ShaderVariableType::operator==(const ShaderVariableType& other) const
{
if (baseType != other.baseType) {
return false;
}
if (dynamicArraySize != other.dynamicArraySize) {
return false;
}
if (!dynamicArraySize && (arraySize != other.arraySize)) {
return false;
}
switch (baseType)
{
case ShaderVariableBaseType::NONE:
return true;
case ShaderVariableBaseType::SIMPLE:
return simple.format == other.simple.format;
case ShaderVariableBaseType::IMAGE:
return image.format == other.image.format
&& image.dimensions == other.image.dimensions;
case ShaderVariableBaseType::MATRIX:
return matrixType == other.matrixType;
case ShaderVariableBaseType::ACCELERATION_STRUCTURE:
return true;
case ShaderVariableBaseType::STRUCT:
if (struct_.members.size() != other.struct_.members.size()) {
return false;
}
assert(std::is_sorted(struct_.members.begin(), struct_.members.end(), ShaderVariableOffsetComparator()));
assert(std::is_sorted(other.struct_.members.begin(), other.struct_.members.end(), ShaderVariableOffsetComparator()));
for (std::size_t idx = 0; idx < struct_.members.size(); ++idx)
{
if (struct_.members[idx].offset != other.struct_.members[idx].offset) {
return false;
}
if (struct_.members[idx].type != other.struct_.members[idx].type) {
return false;
}
// name doesn't really matter, does it?
}
return true;
}
logAndDie("Unhandled base type in ShaderVariableType::operator==()!");
}
inline ShaderVariableSet& getOrCreateSet(std::vector<ShaderVariableSet>& sets, unsigned setIdx)
{
for (ShaderVariableSet& set : sets)
{
if (set.setIndex == setIdx) {
return set;
}
}
ShaderVariableSet& newSet = sets.emplace_back();
newSet.setIndex = setIdx;
return newSet;
}
inline ShaderVariableSet& ShaderMeta::getOrCreateInterfaceVariableSet(unsigned setIdx)
{
return getOrCreateSet(interfaceVariableSets, setIdx);
}
} // namespace iwa
template<>
struct fmt::formatter<iwa::ShaderVariableType>
{
constexpr auto parse(format_parse_context& ctx) -> decltype(ctx.begin())
{
auto it = ctx.begin();
auto end = ctx.end();
if (it != end && *it != '}') throw format_error("invalid format");
return it;
}
template<typename TContext>
auto format(const iwa::ShaderVariableType& varType, TContext& ctx) const -> decltype(ctx.out())
{
auto it = ctx.out();
it = fmt::format_to(it, "[");
switch (varType.baseType)
{
case iwa::ShaderVariableBaseType::NONE:
it = fmt::format_to(it, "<none>]");
return it;
case iwa::ShaderVariableBaseType::SIMPLE:
it = fmt::format_to(it, "{}", "<TODO!>"); // magic_enum::enum_name(varType.simple.format));
break;
case iwa::ShaderVariableBaseType::IMAGE:
it = fmt::format_to(it, "<img {} {}>", "<TODO!>", // magic_enum::enum_name(varType.image.dimensions),
"<TODO!>"); // magic_enum::enum_name(varType.image.format));
break;
case iwa::ShaderVariableBaseType::MATRIX:
it = fmt::format_to(it, "{}", magic_enum::enum_name(varType.matrixType));
break;
case iwa::ShaderVariableBaseType::ACCELERATION_STRUCTURE:
it = fmt::format_to(it, "acceleration structure");
break;
case iwa::ShaderVariableBaseType::STRUCT:
it = fmt::format_to(it, "<struct of");
for (const iwa::ShaderVariableStructMember& member: varType.struct_.members)
{
it = fmt::format_to(it, " {}(@{}) {}", member.name, member.offset, member.type);
}
it = fmt::format_to(it, ">");
break;
}
if (varType.dynamicArraySize)
{
it = fmt::format_to(it, "[]");
} else if (varType.arraySize > 0)
{
it = fmt::format_to(it, "[{}]", varType.arraySize);
}
it = fmt::format_to(it, "]");
return it;
}
};
#endif // !defined(IWA_UTIL_SHADER_META_HPP_INCLUDED)
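A minimal usage sketch of the reflection types above, assuming a populated ShaderMeta named meta plus an iwa::Device& device and an iwa::DescriptorPool& descriptorPool obtained elsewhere; the variable name "uMaterial" is purely illustrative:
// Sketch: turn reflected metadata into Vulkan layout objects.
iwa::PipelineLayoutMeta layoutMeta = meta.generatePipelineLayout();
iwa::PipelineAndDescriptorSetLayouts layouts = layoutMeta.createPipelineLayout(device);
std::vector<iwa::ObjectPtr<iwa::DescriptorSet>> sets = layouts.createDescriptorSets(descriptorPool);
// Sketch: locate a variable by name and inspect its reflected type.
iwa::ShaderVariableFindResult found;
if (meta.findInterfaceVariable("uMaterial", found))
{
    const iwa::ShaderVariableType& type = meta.getInterfaceVariableType(found);
    (void) type; // e.g. compare against the CPU-side struct layout
}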


@ -0,0 +1,60 @@
#pragma once
#if !defined(IWA_UTIL_TASK_RUNNER_HPP_INCLUDED)
#define IWA_UTIL_TASK_RUNNER_HPP_INCLUDED
#include <mijin/async/coroutine.hpp>
namespace iwa
{
template<typename TConcrete>
class MixinTaskRunner
{
private:
std::vector<mijin::TaskHandle> mTasks;
public:
MixinTaskRunner() noexcept;
~MixinTaskRunner() noexcept
{
for (const mijin::TaskHandle& handle : mTasks)
{
handle.cancel();
}
}
public:
template<typename TResult>
mijin::FuturePtr<TResult> addTask(mijin::TaskBase<TResult>&& task, mijin::TaskHandle* outHandle = nullptr) noexcept
{
mijin::TaskLoop& loop = static_cast<TConcrete*>(this)->getTaskLoop();
mijin::TaskHandle handle;
mijin::FuturePtr<TResult> result = loop.addTask(std::move(task), &handle);
if (outHandle != nullptr)
{
*outHandle = handle;
}
mTasks.push_back(std::move(handle));
return result;
}
};
template<typename TConcrete>
MixinTaskRunner<TConcrete>::MixinTaskRunner() noexcept
{
// addTask([&]() -> mijin::Task<>
// {
// while (true)
// {
// co_await mijin::c_suspend();
// auto newEnd = std::remove_if(mTasks.begin(), mTasks.end(), [](const mijin::TaskHandle& handle)
// {
// return !handle.isValid();
// });
// mTasks.erase(newEnd, mTasks.end());
// }
// }());
}
} // namespace iwa
#endif // !defined(IWA_UTIL_TASK_RUNNER_HPP_INCLUDED)
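A sketch of the CRTP contract: the concrete class only needs to expose getTaskLoop(), which addTask() calls through the derived type. The Renderer name and the externally supplied task loop are placeholders.
// Sketch: an owner that spawns coroutines tied to its own lifetime.
class Renderer : public iwa::MixinTaskRunner<Renderer>
{
public:
    explicit Renderer(mijin::TaskLoop& loop) noexcept : mLoop(loop) {}
    mijin::TaskLoop& getTaskLoop() noexcept { return mLoop; }
    void start()
    {
        // The handle is stored by the mixin and cancelled in its destructor.
        addTask(c_run());
    }
private:
    mijin::Task<> c_run() { co_return; }
    mijin::TaskLoop& mLoop;
};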


@ -0,0 +1,104 @@
#pragma once
#if !defined(IWA_UTIL_TEXTURE_ATLAS_HPP_INCLUDED)
#define IWA_UTIL_TEXTURE_ATLAS_HPP_INCLUDED
#include <glm/vec2.hpp>
#include <mijin/async/coroutine.hpp>
#include <mijin/async/signal.hpp>
#include <mijin/async/task_mutex.hpp>
#include <vector>
#include "iwa/image.hpp"
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
struct TextureAtlasCreationArgs
{
vk::Extent2D layerSize;
};
struct TextureSlotCreationArgs
{
vk::Rect2D usedSpace;
unsigned layer;
glm::vec2 uvOffset;
glm::vec2 uvScale;
};
struct AtlasedImageCreationArgs
{
vk::Extent2D size;
unsigned initialLayers = 1;
vk::Format format;
unsigned mipLevels = 1;
vk::ImageUsageFlags usage = vk::ImageUsageFlagBits::eSampled;
};
struct TextureAtlasLayer
{
std::vector<vk::Rect2D> freeSpaces;
};
class TextureSlot : public Object<TextureSlot, BaseObject, class TextureAtlas>
{
private:
vk::Rect2D mUsedSpace;
unsigned mLayer;
glm::vec2 mUvOffset;
glm::vec2 mUvScale;
public:
TextureSlot(ObjectPtr<class TextureAtlas> owner, const TextureSlotCreationArgs& args);
[[nodiscard]] const vk::Rect2D& getUsedSpace() const noexcept { return mUsedSpace; }
[[nodiscard]] unsigned getLayer() const noexcept { return mLayer; }
[[nodiscard]] const glm::vec2& getUvOffset() const noexcept { return mUvOffset; }
[[nodiscard]] const glm::vec2& getUvScale() const noexcept { return mUvScale; }
};
class TextureAtlas : public Object<TextureAtlas>
{
private:
std::vector<TextureAtlasLayer> mLayers;
vk::Extent2D mLayerSize;
public:
TextureAtlas(ObjectPtr<> owner, const TextureAtlasCreationArgs& args);
explicit TextureAtlas(const TextureAtlasCreationArgs& args) : TextureAtlas(nullptr, args) {}
[[nodiscard]] const vk::Extent2D& getLayerSize() const noexcept { return mLayerSize; }
[[nodiscard]] ObjectPtr<TextureSlot> allocateSlot(vk::Extent2D slotSize);
};
class AtlasedImage : public Object<AtlasedImage, BaseObject, Device>
{
private:
ObjectPtr<TextureAtlas> mAtlas;
ObjectPtr<Image> mImage;
ObjectPtr<ImageView> mImageView;
mutable mijin::TaskMutex mImageMutex;
vk::Format mFormat;
unsigned mMipLevels;
vk::ImageUsageFlags mUsage;
public:
AtlasedImage(ObjectPtr<Device> owner, const AtlasedImageCreationArgs& args);
[[nodiscard]] const ObjectPtr<Image>& getImage() const noexcept { return mImage; }
[[nodiscard]] const ObjectPtr<ImageView>& getImageView() const noexcept { return mImageView; }
mijin::Task<ObjectPtr<TextureSlot>> c_allocateSlot(vk::Extent2D slotSize);
mijin::Task<> c_upload(const TextureSlot& slot, const class Bitmap& bitmap) const noexcept;
mijin::Task<> c_upload(const TextureSlot& slot, const void* data, std::size_t bytes, const vk::Extent2D& bufferImageSize) const noexcept;
mijin::Task<> c_blit(const TextureSlot& slot, Image& srcImage) const noexcept;
mijin::Task<> c_blit(const TextureSlot& slot, const class Bitmap& bitmap) const noexcept;
mijin::Task<> c_copy(const TextureSlot& slot, Image& srcImage) const noexcept;
private:
ObjectPtr<Image> allocateImage(unsigned layers);
public:
mijin::Signal<> imageRecreated;
};
} // namespace iwa
#endif // !defined(IWA_UTIL_TEXTURE_ATLAS_HPP_INCLUDED)
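A coroutine sketch of the atlas flow declared above, assuming that co_await on a mijin::Task yields its result and that the atlased image and bitmap come from elsewhere:
// Sketch: reserve an atlas slot and upload a bitmap into it.
mijin::Task<> c_addToAtlas(iwa::AtlasedImage& atlasImage, const iwa::Bitmap& bitmap, vk::Extent2D slotSize)
{
    iwa::ObjectPtr<iwa::TextureSlot> slot = co_await atlasImage.c_allocateSlot(slotSize);
    co_await atlasImage.c_upload(*slot, bitmap);
    // The slot's UV transform maps local [0,1] coordinates into its atlas sub-rectangle.
    const glm::vec2 uvOffset = slot->getUvOffset();
    const glm::vec2 uvScale = slot->getUvScale();
    (void) uvOffset; (void) uvScale;
}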


@ -0,0 +1,13 @@
{% import 'enums.jinja' as enums -%}
// auto-generated using Jinja, do not edit!
#pragma once
#if !defined(IWA_UTIL_VERTEX_ATTRIBUTE_SEMANTIC_GEN_HPP_INCLUDED)
#define IWA_UTIL_VERTEX_ATTRIBUTE_SEMANTIC_GEN_HPP_INCLUDED
namespace iwa
{
{{ enums.cpp_enum('vertex_input_semantic') }}
}
#endif // !defined(IWA_UTIL_VERTEX_ATTRIBUTE_SEMANTIC_GEN_HPP_INCLUDED)


@ -0,0 +1,42 @@
#pragma once
#if !defined(IWA_UTIL_VERTEX_LAYOUT_HPP_INCLUDED)
#define IWA_UTIL_VERTEX_LAYOUT_HPP_INCLUDED
#include <mijin/container/optional.hpp>
#include "iwa/vkwrapper.hpp"
#include "iwa/util/vertex_attribute_semantic.gen.hpp"
#include "iwa/util/vkutil.hpp"
#define IWA_VERTEX_ATTRIB_IDX(struct_, member_, semantic_, idx_) \
iwa::VertexAttribute \
{ \
.semantic = iwa::VertexAttributeSemantic::semantic_, \
.semanticIdx = idx_, \
.offset = offsetof(struct_, member_), \
.format = iwa::vkMemberFormat(&struct_::member_) \
}
#define IWA_VERTEX_ATTRIB(struct_, member_, semantic_) IWA_VERTEX_ATTRIB_IDX(struct_, member_, semantic_, 0)
namespace iwa
{
struct VertexAttribute
{
VertexAttributeSemantic semantic = VertexAttributeSemantic::CUSTOM;
unsigned semanticIdx = 0;
unsigned offset = 0;
vk::Format format = vk::Format::eUndefined;
};
struct VertexLayout
{
std::vector<VertexAttribute> attributes;
unsigned stride = 0;
[[nodiscard]] mijin::Optional<VertexAttribute&> findAttribute(VertexAttributeSemantic semantic, unsigned semanticIdx = 0) noexcept;
[[nodiscard]] mijin::Optional<const VertexAttribute&> findAttribute(VertexAttributeSemantic semantic, unsigned semanticIdx = 0) const noexcept;
};
} // namespace iwa
#endif // !defined(IWA_UTIL_VERTEX_LAYOUT_HPP_INCLUDED)
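A sketch of describing a CPU-side vertex with the macro above; POSITION and TEXCOORD are assumed values of the generated VertexAttributeSemantic enum.
// Sketch: a vertex struct and its matching layout description.
struct MyVertex
{
    glm::vec3 position;
    glm::vec2 uv;
};
const iwa::VertexLayout MY_VERTEX_LAYOUT{
    .attributes = {
        IWA_VERTEX_ATTRIB(MyVertex, position, POSITION),
        IWA_VERTEX_ATTRIB(MyVertex, uv, TEXCOORD)
    },
    .stride = sizeof(MyVertex)
};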

include/iwa/util/vkutil.hpp Normal file

@ -0,0 +1,210 @@
#pragma once
#if !defined(IWA_UTIL_VKUTIL_HPP_INCLUDED)
#define IWA_UTIL_VKUTIL_HPP_INCLUDED
#include <cstdint>
#include <glm/vec2.hpp>
#include <glm/vec3.hpp>
#include <glm/vec4.hpp>
#include <magic_enum.hpp>
#include <mijin/util/align.hpp>
#include <mijin/util/traits.hpp>
#include "iwa/vkwrapper.hpp"
namespace iwa
{
inline constexpr std::array DEPTH_FORMATS = {
vk::Format::eD24UnormS8Uint,
vk::Format::eD32Sfloat,
vk::Format::eD32SfloatS8Uint,
vk::Format::eD16Unorm,
vk::Format::eD16UnormS8Uint,
vk::Format::eX8D24UnormPack32
};
inline constexpr std::array STENCIL_FORMATS = {
vk::Format::eS8Uint,
vk::Format::eD24UnormS8Uint,
vk::Format::eD32SfloatS8Uint,
vk::Format::eD16UnormS8Uint
};
namespace impl
{
template<typename T>
constexpr vk::Format getVkFormat() noexcept
{
if constexpr (std::is_same_v<T, std::uint8_t>) {
return vk::Format::eR8Uint;
}
else if constexpr (std::is_same_v<T, std::int8_t>) {
return vk::Format::eR8Sint;
}
else if constexpr (std::is_same_v<T, std::uint16_t>) {
return vk::Format::eR16Uint;
}
else if constexpr (std::is_same_v<T, std::int16_t>) {
return vk::Format::eR16Sint;
}
else if constexpr (std::is_same_v<T, std::uint32_t>) {
return vk::Format::eR32Uint;
}
else if constexpr (std::is_same_v<T, std::int32_t>) {
return vk::Format::eR32Sint;
}
else if constexpr (std::is_same_v<T, std::uint64_t>) {
return vk::Format::eR64Uint;
}
else if constexpr (std::is_same_v<T, std::int64_t>) {
return vk::Format::eR64Sint;
}
else if constexpr (std::is_same_v<T, float>) {
return vk::Format::eR32Sfloat;
}
else if constexpr (std::is_same_v<T, double>) {
return vk::Format::eR64Sfloat;
}
else if constexpr (std::is_same_v<T, glm::vec2>) {
return vk::Format::eR32G32Sfloat;
}
else if constexpr (std::is_same_v<T, glm::vec3>) {
return vk::Format::eR32G32B32Sfloat;
}
else if constexpr (std::is_same_v<T, glm::vec4>) {
return vk::Format::eR32G32B32A32Sfloat;
}
else if constexpr (std::is_same_v<T, glm::ivec2>) {
return vk::Format::eR32G32Sint;
}
else if constexpr (std::is_same_v<T, glm::ivec3>) {
return vk::Format::eR32G32B32Sint;
}
else if constexpr (std::is_same_v<T, glm::ivec4>) {
return vk::Format::eR32G32B32A32Sint;
}
else {
static_assert(mijin::always_false_v<T>, "No Vulkan format for that type.");
}
}
template<typename T>
constexpr vk::IndexType getVkIndexType() noexcept
{
if constexpr (std::is_same_v<T, std::uint8_t>) {
return vk::IndexType::eUint8EXT;
}
else if constexpr (std::is_same_v<T, std::uint16_t>) {
return vk::IndexType::eUint16;
}
else if constexpr (std::is_same_v<T, std::uint32_t>) {
return vk::IndexType::eUint32;
}
else {
static_assert(mijin::always_false_v<T>, "No Vulkan index type for that type.");
}
}
} // namespace impl
template<typename T>
static constexpr vk::Format vk_format_v = impl::getVkFormat<T>();
template<typename T>
static constexpr vk::IndexType vk_index_type_v = impl::getVkIndexType<T>();
template<typename T, typename U>
static constexpr vk::Format vkMemberFormat(U T::*) noexcept { return vk_format_v<U>; }
[[nodiscard]] unsigned vkFormatSize(vk::Format format) noexcept;
[[nodiscard]] unsigned vkIndexTypeSize(vk::IndexType indexType) noexcept;
[[nodiscard]] bool isDepthFormat(vk::Format format) noexcept;
[[nodiscard]] bool isStencilFormat(vk::Format format) noexcept; // NOLINT(readability-redundant-declaration) forward declared in image.hpp, but it wouldn't make sense to remove it from here
template<typename TEnum>
std::optional<TEnum> vkEnumFromStringOpt(std::string_view string) noexcept
{
std::string enumName;
enumName.reserve(string.size() + 1);
bool first = true;
for (const char chr : string)
{
if (first)
{
enumName.push_back(std::toupper(chr));
first = false;
}
else {
enumName.push_back(chr);
}
}
return vk::from_string<TEnum>(enumName);
}
template<typename TEnum>
TEnum vkEnumFromString(std::string_view string, const char* error = "Invalid enum value.")
{
const std::optional<TEnum> value = vkEnumFromStringOpt<TEnum>(string);
if (!value.has_value()) {
throw std::runtime_error(error);
}
return *value;
}
template<typename T>
[[nodiscard]] const T* findInNextChain(const void* pNext, vk::StructureType sType = T().sType) noexcept
{
while (pNext)
{
const vk::BaseInStructure* inStruct = static_cast<const vk::BaseInStructure*>(pNext);
if (inStruct->sType == sType)
{
return static_cast<const T*>(pNext);
}
pNext = inStruct->pNext;
}
return nullptr;
}
// TODO
// std::size_t calcVkStructHash(const void* structure, std::size_t appendTo = 0);
[[nodiscard]] vk::SampleCountFlagBits samplesToVk(unsigned samples) noexcept;
[[nodiscard]] vk::Format detectDepthBufferFormat(class Device& device, unsigned samples = 1) noexcept;
[[nodiscard]] std::vector<unsigned> detectSupportedSampleCounts(class Device& device) noexcept;
template<typename T>
vk::DeviceSize calcVkUniformStride(class Device& device)
{
vk::DeviceSize stride = mijin::alignUp(sizeof(T), alignof(T));
return mijin::alignUp(stride, mijin::delayEvaluation<T>(device).getDeviceInfo().properties.limits.minUniformBufferOffsetAlignment);
}
template<typename T>
vk::DeviceSize calcVkStorageBufferStride(class Device& device)
{
vk::DeviceSize stride = mijin::alignUp(sizeof(T), alignof(T));
return mijin::alignUp(stride, mijin::delayEvaluation<T>(device).getDeviceInfo().properties.limits.minStorageBufferOffsetAlignment);
}
[[nodiscard]] inline bool vkIsSrgbFormat(vk::Format format) noexcept
{
switch (format)
{
case vk::Format::eR8Srgb:
case vk::Format::eR8G8Srgb:
case vk::Format::eR8G8B8Srgb:
case vk::Format::eR8G8B8A8Srgb:
case vk::Format::eB8G8R8Srgb:
case vk::Format::eB8G8R8A8Srgb:
case vk::Format::eA8B8G8R8SrgbPack32:
// TODO: all the weird compressed formats, I don't need them yet
return true;
default:
return false;
}
}
} // namespace iwa
#endif // !defined(IWA_UTIL_VKUTIL_HPP_INCLUDED)
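Two small sketches for the helpers above; the "fifo" string is an assumed example of the lower-case spelling vkEnumFromString expects (it upper-cases the first character before matching):
// Compile-time format/index-type deduction for typed vertex and index data.
static_assert(iwa::vk_format_v<glm::vec2> == vk::Format::eR32G32Sfloat);
static_assert(iwa::vk_index_type_v<std::uint32_t> == vk::IndexType::eUint32);
// Parse a Vulkan enum from a config-style string, throwing on unknown values.
const vk::PresentModeKHR presentMode = iwa::vkEnumFromString<vk::PresentModeKHR>("fifo", "Unknown present mode.");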

include/iwa/vkwrapper.hpp Normal file

@ -0,0 +1,28 @@
#pragma once
#ifndef IWA_VULKAN_VKWRAPPER_HPP_INCLUDED
#define IWA_VULKAN_VKWRAPPER_HPP_INCLUDED
// disable what we don't need
#define VULKAN_HPP_NO_STRUCT_CONSTRUCTORS
#define VULKAN_HPP_NO_SMART_HANDLE
#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1
#include "vulkan/vulkan.hpp"
namespace iwa
{
template<typename THandle>
class MixinVulkanObject
{
protected:
THandle mHandle;
protected:
explicit MixinVulkanObject(THandle handle = VK_NULL_HANDLE) noexcept : mHandle(handle) {}
public:
[[nodiscard]] const THandle& getVkHandle() const noexcept { return mHandle; }
operator THandle() const noexcept { return mHandle; }
};
} // namespace iwa
#endif // IWA_VULKAN_VKWRAPPER_HPP_INCLUDED

include/iwa/window.hpp Normal file

@ -0,0 +1,86 @@
#pragma once
#if !defined(IWA_WINDOW_HPP_INCLUDED)
#define IWA_WINDOW_HPP_INCLUDED
#include <string>
#include <mijin/async/signal.hpp>
#include <mijin/container/optional.hpp>
#include <mijin/util/bitflags.hpp>
#include <SDL.h>
#include "iwa/input.hpp"
#include "iwa/object.hpp"
#include "iwa/vkwrapper.hpp"
namespace iwa
{
enum class MouseMode
{
NORMAL = 0,
CAPTURED = 1
};
struct WindowCreationFlags : mijin::BitFlags<WindowCreationFlags>
{
bool hidden : 1 = false;
bool resizable : 1 = true;
bool borderless : 1 = false;
bool alwayOnTop : 1 = false;
bool skipTaskbar : 1 = false;
};
struct WindowBorder
{
int left;
int right;
int top;
int bottom;
};
struct WindowCreationArgs
{
std::string title = "Iwa Window";
WindowCreationFlags flags;
int width = 1280;
int height = 720;
};
class Window : public Object<Window, BaseObject, class Instance>
{
private:
SDL_Window* mHandle = nullptr;
vk::SurfaceKHR mSurface = VK_NULL_HANDLE;
public:
Window(ObjectPtr<class Instance> owner, const WindowCreationArgs& args);
~Window() noexcept override;
[[nodiscard]] bool isVisible() const noexcept;
void setVisible(bool visible) noexcept;
[[nodiscard]] std::pair<int, int> getSize() const noexcept;
void setSize(int width, int height) noexcept;
[[nodiscard]] std::pair<int, int> getPosition() const noexcept;
void setPosition(int xPos, int yPos) noexcept;
[[nodiscard]] WindowBorder getWindowBorder() const noexcept;
[[nodiscard]] bool isFocused() const noexcept;
void focus() noexcept;
void setMouseMode(MouseMode mouseMode) noexcept;
void setModalFor(mijin::Optional<const Window&> parent) noexcept;
[[nodiscard]] SDL_Window* getSDLWindow() const noexcept { return mHandle; }
[[nodiscard]] vk::SurfaceKHR getVkSurface() const noexcept { return mSurface; }
mijin::Signal<const KeyEvent&> keyChanged;
mijin::Signal<const MouseButtonEvent&> mouseButtonChanged;
mijin::Signal<const MouseMoveEvent&> mouseMoved;
mijin::Signal<const MouseWheelEvent&> mouseScrolled;
mijin::Signal<const TextInputEvent&> textEntered;
mijin::Signal<> focusGained;
mijin::Signal<> focusLost;
mijin::Signal<> mouseEntered;
mijin::Signal<> mouseLeft;
mijin::Signal<> closeRequested;
};
} // namespace iwa
#endif // !defined(IWA_WINDOW_HPP_INCLUDED)
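A sketch of filling the creation args above; Instance::createWindow is used the same way in vulkan_application.cpp, and instance is assumed to be an ObjectPtr<Instance>:
// Sketch: a resizable window created hidden and shown once setup is done.
const iwa::WindowCreationArgs windowArgs{
    .title = "Iwa Demo",
    .flags = {.hidden = true, .resizable = true},
    .width = 1920,
    .height = 1080
};
iwa::ObjectPtr<iwa::Window> window = instance->createWindow(windowArgs);
window->setVisible(true);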

source/addon.cpp Normal file

@ -0,0 +1,26 @@
#include "iwa/addon.hpp"
#include <vector>
namespace iwa
{
namespace
{
std::vector<Addon*>& getAddonsVector() noexcept
{
static std::vector<Addon*> addons;
return addons;
}
}
Addon::Addon()
{
getAddonsVector().push_back(this);
}
std::span<Addon* const> getAddons() noexcept
{
return getAddonsVector();
}
} // namespace iwa
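A sketch of the registration pattern this relies on: constructing any Addon subclass appends it to the global list, so a namespace-scope instance (like gImguiAddon further down) is all that is needed. MyTracingAddon is a placeholder name.
// Sketch: a self-registering addon.
class MyTracingAddon final : public iwa::Addon
{
public:
    void init(const iwa::AddonInitArgs& args) override { (void) args; }
    void cleanup() override {}
};
namespace
{
MyTracingAddon gMyTracingAddon; // registered through Addon::Addon()
}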


@ -0,0 +1,20 @@
Import('env')
# Imgui
lib_imgui = env.Cook('imgui', backends = ['vulkan', 'sdl2'], git_ref = 'refs/tags/v1.90')
src_files = Split("""
fps_widget.cpp
""")
add_src_files = [env.File('addon.cpp')]
lib_iwa_imgui = env.UnityStaticLibrary(
target = env['LIB_DIR'] + '/iwa_imgui',
source = src_files,
add_source = add_src_files,
dependencies = [env['lib_iwa'], lib_imgui]
)
env['lib_iwa_imgui'] = lib_iwa_imgui
Return('env')


@ -0,0 +1,299 @@
#include "iwa/addons/imgui/addon.hpp"
#include <imgui.h>
#include <backends/imgui_impl_sdl2.h>
#include <backends/imgui_impl_vulkan.h>
namespace iwa
{
namespace
{
ImGuiAddon gImguiAddon;
PFN_vkVoidFunction imguiLoaderCallback(const char* functionName, void* userData)
{
return static_cast<Instance*>(userData)->getVkHandle().getProcAddr(functionName);
}
ImGuiKey keyToImGui(const KeyCode keyCode)
{
switch (keyCode)
{
//<editor-fold desc="Many cases">
case KeyCode::TAB: return ImGuiKey_Tab;
case KeyCode::LEFT: return ImGuiKey_LeftArrow;
case KeyCode::RIGHT: return ImGuiKey_RightArrow;
case KeyCode::UP: return ImGuiKey_UpArrow;
case KeyCode::DOWN: return ImGuiKey_DownArrow;
case KeyCode::PAGEUP: return ImGuiKey_PageUp;
case KeyCode::PAGEDOWN: return ImGuiKey_PageDown;
case KeyCode::HOME: return ImGuiKey_Home;
case KeyCode::END: return ImGuiKey_End;
case KeyCode::INSERT: return ImGuiKey_Insert;
case KeyCode::DELETE: return ImGuiKey_Delete;
case KeyCode::BACKSPACE: return ImGuiKey_Backspace;
case KeyCode::SPACE: return ImGuiKey_Space;
case KeyCode::RETURN: return ImGuiKey_Enter;
case KeyCode::ESCAPE: return ImGuiKey_Escape;
case KeyCode::QUOTE: return ImGuiKey_Apostrophe;
case KeyCode::COMMA: return ImGuiKey_Comma;
case KeyCode::MINUS: return ImGuiKey_Minus;
case KeyCode::PERIOD: return ImGuiKey_Period;
case KeyCode::SLASH: return ImGuiKey_Slash;
case KeyCode::SEMICOLON: return ImGuiKey_Semicolon;
case KeyCode::EQUALS: return ImGuiKey_Equal;
case KeyCode::LEFTBRACKET: return ImGuiKey_LeftBracket;
case KeyCode::BACKSLASH: return ImGuiKey_Backslash;
case KeyCode::RIGHTBRACKET: return ImGuiKey_RightBracket;
case KeyCode::BACKQUOTE: return ImGuiKey_GraveAccent;
case KeyCode::CAPSLOCK: return ImGuiKey_CapsLock;
case KeyCode::SCROLLLOCK: return ImGuiKey_ScrollLock;
case KeyCode::NUMLOCKCLEAR: return ImGuiKey_NumLock;
case KeyCode::PRINTSCREEN: return ImGuiKey_PrintScreen;
case KeyCode::PAUSE: return ImGuiKey_Pause;
case KeyCode::KP_0: return ImGuiKey_Keypad0;
case KeyCode::KP_1: return ImGuiKey_Keypad1;
case KeyCode::KP_2: return ImGuiKey_Keypad2;
case KeyCode::KP_3: return ImGuiKey_Keypad3;
case KeyCode::KP_4: return ImGuiKey_Keypad4;
case KeyCode::KP_5: return ImGuiKey_Keypad5;
case KeyCode::KP_6: return ImGuiKey_Keypad6;
case KeyCode::KP_7: return ImGuiKey_Keypad7;
case KeyCode::KP_8: return ImGuiKey_Keypad8;
case KeyCode::KP_9: return ImGuiKey_Keypad9;
case KeyCode::KP_PERIOD: return ImGuiKey_KeypadDecimal;
case KeyCode::KP_DIVIDE: return ImGuiKey_KeypadDivide;
case KeyCode::KP_MULTIPLY: return ImGuiKey_KeypadMultiply;
case KeyCode::KP_MINUS: return ImGuiKey_KeypadSubtract;
case KeyCode::KP_PLUS: return ImGuiKey_KeypadAdd;
case KeyCode::KP_ENTER: return ImGuiKey_KeypadEnter;
case KeyCode::KP_EQUALS: return ImGuiKey_KeypadEqual;
case KeyCode::LCTRL: return ImGuiKey_LeftCtrl;
case KeyCode::LSHIFT: return ImGuiKey_LeftShift;
case KeyCode::LALT: return ImGuiKey_LeftAlt;
case KeyCode::LGUI: return ImGuiKey_LeftSuper;
case KeyCode::RCTRL: return ImGuiKey_RightCtrl;
case KeyCode::RSHIFT: return ImGuiKey_RightShift;
case KeyCode::RALT: return ImGuiKey_RightAlt;
case KeyCode::RGUI: return ImGuiKey_RightSuper;
case KeyCode::APPLICATION: return ImGuiKey_Menu;
case KeyCode::_0: return ImGuiKey_0;
case KeyCode::_1: return ImGuiKey_1;
case KeyCode::_2: return ImGuiKey_2;
case KeyCode::_3: return ImGuiKey_3;
case KeyCode::_4: return ImGuiKey_4;
case KeyCode::_5: return ImGuiKey_5;
case KeyCode::_6: return ImGuiKey_6;
case KeyCode::_7: return ImGuiKey_7;
case KeyCode::_8: return ImGuiKey_8;
case KeyCode::_9: return ImGuiKey_9;
case KeyCode::A: return ImGuiKey_A;
case KeyCode::B: return ImGuiKey_B;
case KeyCode::C: return ImGuiKey_C;
case KeyCode::D: return ImGuiKey_D;
case KeyCode::E: return ImGuiKey_E;
case KeyCode::F: return ImGuiKey_F;
case KeyCode::G: return ImGuiKey_G;
case KeyCode::H: return ImGuiKey_H;
case KeyCode::I: return ImGuiKey_I;
case KeyCode::J: return ImGuiKey_J;
case KeyCode::K: return ImGuiKey_K;
case KeyCode::L: return ImGuiKey_L;
case KeyCode::M: return ImGuiKey_M;
case KeyCode::N: return ImGuiKey_N;
case KeyCode::O: return ImGuiKey_O;
case KeyCode::P: return ImGuiKey_P;
case KeyCode::Q: return ImGuiKey_Q;
case KeyCode::R: return ImGuiKey_R;
case KeyCode::S: return ImGuiKey_S;
case KeyCode::T: return ImGuiKey_T;
case KeyCode::U: return ImGuiKey_U;
case KeyCode::V: return ImGuiKey_V;
case KeyCode::W: return ImGuiKey_W;
case KeyCode::X: return ImGuiKey_X;
case KeyCode::Y: return ImGuiKey_Y;
case KeyCode::Z: return ImGuiKey_Z;
case KeyCode::F1: return ImGuiKey_F1;
case KeyCode::F2: return ImGuiKey_F2;
case KeyCode::F3: return ImGuiKey_F3;
case KeyCode::F4: return ImGuiKey_F4;
case KeyCode::F5: return ImGuiKey_F5;
case KeyCode::F6: return ImGuiKey_F6;
case KeyCode::F7: return ImGuiKey_F7;
case KeyCode::F8: return ImGuiKey_F8;
case KeyCode::F9: return ImGuiKey_F9;
case KeyCode::F10: return ImGuiKey_F10;
case KeyCode::F11: return ImGuiKey_F11;
case KeyCode::F12: return ImGuiKey_F12;
case KeyCode::F13: return ImGuiKey_F13;
case KeyCode::F14: return ImGuiKey_F14;
case KeyCode::F15: return ImGuiKey_F15;
case KeyCode::F16: return ImGuiKey_F16;
case KeyCode::F17: return ImGuiKey_F17;
case KeyCode::F18: return ImGuiKey_F18;
case KeyCode::F19: return ImGuiKey_F19;
case KeyCode::F20: return ImGuiKey_F20;
case KeyCode::F21: return ImGuiKey_F21;
case KeyCode::F22: return ImGuiKey_F22;
case KeyCode::F23: return ImGuiKey_F23;
case KeyCode::F24: return ImGuiKey_F24;
case KeyCode::AC_BACK: return ImGuiKey_AppBack;
case KeyCode::AC_FORWARD: return ImGuiKey_AppForward;
default: return ImGuiKey_None;
//</editor-fold>
}
}
ImGuiMouseButton mouseButtonToImGui(const MouseButton button)
{
switch (button)
{
case MouseButton::LEFT:
return ImGuiMouseButton_Left;
case MouseButton::RIGHT:
return ImGuiMouseButton_Right;
case MouseButton::MIDDLE:
return ImGuiMouseButton_Middle;
case MouseButton::EXTRA_1:
return ImGuiMouseButton_Middle + 1;
case MouseButton::EXTRA_2:
return ImGuiMouseButton_Middle + 2;
}
return -1;
}
}
void ImGuiAddon::init(const AddonInitArgs& args)
{
(void) args;
IMGUI_CHECKVERSION();
ImGui::CreateContext();
ImGui::StyleColorsDark();
}
void ImGuiAddon::cleanup()
{
ImGui::DestroyContext();
}
mijin::Task<> ImGuiAddon::c_createResources(const ImguiCreateResourcesArgs& args)
{
const unsigned QUEUED_FRAMES = 3; // TODO: is this okay?
const unsigned MAX_IMAGES = 256;
Device& device = *args.swapchain.getOwner();
mDescriptorPool = device.createChild<DescriptorPool>(DescriptorPoolCreationArgs{
.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
.maxSets = MAX_IMAGES,
.poolSizes = {
vk::DescriptorPoolSize{.type = vk::DescriptorType::eCombinedImageSampler, .descriptorCount = MAX_IMAGES}
}
});
registerObjectDestructionHandler(args.swapchain, [this, device = device.getPointer()]
{
// destroy the descriptor pool when the swapchain is destroyed so we don't keep the device alive
device->getVkHandle().waitIdle();
ImGui_ImplVulkan_Shutdown();
ImGui_ImplSDL2_Shutdown();
mDescriptorPool = nullptr;
mWidgets.clear();
});
if (!ImGui_ImplSDL2_InitForVulkan(args.swapchain.getWindow()->getSDLWindow())) {
throw std::runtime_error("Error initializing ImGui for SDL2.");
}
ImGui_ImplVulkan_InitInfo initInfo{
.Instance = device.getOwner()->getVkHandle(),
.PhysicalDevice = device.getVkPhysicalDevice(),
.Device = device.getVkHandle(),
.QueueFamily = device.getDeviceInfo().graphicsQueueFamily,
.Queue = device.getGraphicsQueue(),
.PipelineCache = VK_NULL_HANDLE,
.DescriptorPool = mDescriptorPool->getVkHandle(),
.Subpass = 0,
.MinImageCount = QUEUED_FRAMES,
.ImageCount = QUEUED_FRAMES,
.MSAASamples = VK_SAMPLE_COUNT_1_BIT,
.UseDynamicRendering = true,
.ColorAttachmentFormat = static_cast<VkFormat>(args.format),
.Allocator = nullptr,
.CheckVkResultFn = nullptr,
};
const bool success = ImGui_ImplVulkan_LoadFunctions(imguiLoaderCallback, device.getOwner())
&& ImGui_ImplVulkan_Init(&initInfo, VK_NULL_HANDLE);
if (!success) {
throw std::runtime_error("Error initializing ImGui for Vulkan.");
}
// setup input
args.swapchain.getWindow()->keyChanged.connect(*this, &ImGuiAddon::handleKeyChanged);
args.swapchain.getWindow()->mouseButtonChanged.connect(*this, &ImGuiAddon::handleMouseButtonChanged);
args.swapchain.getWindow()->mouseMoved.connect(*this, &ImGuiAddon::handleMouseMoved);
args.swapchain.getWindow()->mouseScrolled.connect(*this, &ImGuiAddon::handleMouseScrolled);
args.swapchain.getWindow()->textEntered.connect(*this, &ImGuiAddon::handleTextEntered);
// first frame
beginFrame();
co_return;
}
void ImGuiAddon::renderFrame(vk::CommandBuffer cmdBuffer)
{
for (const std::unique_ptr<ImGuiWidget>& widget : mWidgets) {
widget->draw();
}
ImGui::Render();
ImGui_ImplVulkan_RenderDrawData(ImGui::GetDrawData(), cmdBuffer);
beginFrame();
}
void ImGuiAddon::removeWidget(ImGuiWidget* widget)
{
auto it = std::ranges::find_if(mWidgets, [widget](const std::unique_ptr<ImGuiWidget>& widgetPtr)
{
return widgetPtr.get() == widget;
});
mWidgets.erase(it);
}
void ImGuiAddon::beginFrame() noexcept
{
ImGui_ImplVulkan_NewFrame();
ImGui_ImplSDL2_NewFrame();
ImGui::NewFrame();
}
void ImGuiAddon::handleKeyChanged(const KeyEvent& event)
{
ImGui::GetIO().AddKeyEvent(keyToImGui(event.keyCode), event.down);
}
void ImGuiAddon::handleMouseButtonChanged(const MouseButtonEvent& event)
{
ImGui::GetIO().AddMouseButtonEvent(mouseButtonToImGui(event.button), event.down);
}
void ImGuiAddon::handleMouseMoved(const MouseMoveEvent& event)
{
ImGui::GetIO().AddMousePosEvent(static_cast<float>(event.absoluteX), static_cast<float>(event.absoluteY));
}
void ImGuiAddon::handleMouseScrolled(const MouseWheelEvent& event)
{
ImGui::GetIO().AddMouseWheelEvent(static_cast<float>(event.relativeX), static_cast<float>(event.relativeY));
}
void ImGuiAddon::handleTextEntered(const TextInputEvent& event)
{
ImGui::GetIO().AddInputCharactersUTF8(event.text.c_str());
}
ImGuiAddon& ImGuiAddon::get() noexcept
{
return gImguiAddon;
}
} // namespace iwa
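A sketch of the per-frame call site, assuming c_createResources() already ran and the command buffer is recording with a color attachment matching the format passed there:
// Sketch: record the UI; renderFrame() also starts the next ImGui frame.
void recordUi(vk::CommandBuffer cmdBuffer)
{
    iwa::ImGuiAddon::get().renderFrame(cmdBuffer);
}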


@ -0,0 +1,21 @@
#include "iwa/addons/imgui/fps_widget.hpp"
#include <fmt/format.h>
#include <imgui.h>
namespace iwa
{
void ImGuiFpsWidget::draw()
{
mFpsCalculator.tickFrame();
const std::string fpsText = fmt::format("{}", static_cast<unsigned>(mFpsCalculator.getFps()));
const ImVec2 textSize = ImGui::CalcTextSize(fpsText.c_str());
ImGui::SetNextWindowPos(ImVec2(0, 0));
ImGui::SetNextWindowSize(ImVec2(textSize.x + 2 * ImGui::GetStyle().WindowPadding.x, 0));
ImGui::Begin("#fps", nullptr, ImGuiWindowFlags_NoDecoration | ImGuiWindowFlags_NoInputs);
ImGui::TextUnformatted(fpsText.c_str());
ImGui::End();
}
} // namespace iwa


@ -0,0 +1,37 @@
#include "iwa/app/vulkan_application.hpp"
#include <mijin/virtual_filesystem/filesystem.hpp>
#include <mijin/virtual_filesystem/relative.hpp>
namespace iwa
{
VulkanApplication::VulkanApplication(const ApplicationCreationArgs& args, ObjectPtr<> owner) : super_t(std::move(owner))
{
mInstance = Instance::create(args.instanceArgs);
mDevice = mInstance->createDevice(args.deviceArgs);
mMainWindow = mInstance->createWindow(args.mainWindowArgs);
SwapchainCreationArgs swapchainCreationArgs = args.mainWindowSwapchainArgs;
MIJIN_ASSERT(swapchainCreationArgs.window == nullptr, "Main window swapchain args shouldn't contain a window.");
swapchainCreationArgs.window = mMainWindow;
mMainWindowSwapchain = mDevice->createChild<Swapchain>(swapchainCreationArgs);
if (!args.assetPath.empty())
{
mInstance->getPrimaryFSAdapter().emplaceAdapter<mijin::RelativeFileSystemAdapter<mijin::OSFileSystemAdapter>>(
/* root = */ fs::current_path() / args.assetPath
);
}
}
int VulkanApplication::execute(int argc, char** argv) // NOLINT
{
(void) argc;
(void) argv;
mInstance->getMainTaskLoop().addTask(c_init());
mInstance->getMainTaskLoop().runUntilDone();
return 0;
}
} // namespace iwa
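A sketch of a concrete application on top of this class; that c_init() is a virtual coroutine hook is inferred from execute() above and is an assumption, as is default-constructing ApplicationCreationArgs.
// Sketch: minimal entry point (c_init override assumed to match the base declaration).
class DemoApp final : public iwa::VulkanApplication
{
public:
    using VulkanApplication::VulkanApplication;
protected:
    mijin::Task<> c_init() override
    {
        co_return; // create pipelines, load assets, kick off the render loop here
    }
};
int main(int argc, char** argv)
{
    DemoApp app(iwa::ApplicationCreationArgs{}, /* owner = */ nullptr);
    return app.execute(argc, argv);
}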

source/buffer.cpp Normal file

@ -0,0 +1,101 @@
#include "iwa/buffer.hpp"
#include "iwa/device.hpp"
namespace iwa
{
Buffer::Buffer(ObjectPtr<Device> owner, const BufferCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createBuffer(vk::BufferCreateInfo
{
.flags = args.flags,
.size = args.size,
.usage = args.usage,
.sharingMode = args.sharingMode,
.queueFamilyIndexCount = static_cast<std::uint32_t>(args.queueFamilyIndices.size()),
.pQueueFamilyIndices = args.queueFamilyIndices.data()
});
}
Buffer::~Buffer() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyBuffer);
}
void Buffer::allocateMemory(HostVisible hostVisible, HostCoherent hostCoherent)
{
const vk::MemoryRequirements memoryRequirements = getOwner()->getVkHandle().getBufferMemoryRequirements(mHandle);
const vk::MemoryPropertyFlags memoryFlags = hostVisible ?
(vk::MemoryPropertyFlagBits::eHostVisible | (hostCoherent ? vk::MemoryPropertyFlagBits::eHostCoherent : vk::MemoryPropertyFlags())) :
vk::MemoryPropertyFlagBits::eDeviceLocal;
const std::optional<std::uint32_t> memoryTypeIdx = findMemoryType(*getOwner(), memoryRequirements, memoryFlags);
if (!memoryTypeIdx.has_value())
{
throw std::runtime_error("Could not find a suitable memory type.");
}
ObjectPtr<DeviceMemory> memory = getOwner()->allocateDeviceMemory(
{
.allocationSize = memoryRequirements.size,
.memoryTypeIndex = memoryTypeIdx.value()
});
bindMemory(std::move(memory));
}
void Buffer::bindMemory(ObjectPtr<DeviceMemory> memory, vk::DeviceSize offset)
{
mMemory = std::move(memory);
getOwner()->getVkHandle().bindBufferMemory(mHandle, *mMemory, offset);
}
mijin::Task<> Buffer::c_fill(std::uint32_t data, std::size_t bytes, std::size_t byteOffset)
{
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
cmdBuffer.fillBuffer(mHandle, byteOffset, bytes, data);
co_await getOwner()->endScratchCommandBuffer(std::move(cmdBufferPtr));
}
mijin::Task<> Buffer::c_copyFrom(vk::Buffer srcBuffer, vk::BufferCopy region)
{
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
cmdBuffer.copyBuffer(srcBuffer, mHandle, region);
co_await getOwner()->endScratchCommandBuffer(std::move(cmdBufferPtr));
}
mijin::Task<> Buffer::c_upload(const void* data, std::size_t bytes, std::size_t byteOffset)
{
// assert(bytes == SIZE_REST || bytes + byteOffset <= byteSize);
// if (bytes == SIZE_REST) {
// bytes = byteSize - byteOffset;
// }
// create scratch buffer
vk::Device device = *getOwner();
ObjectPtr<Buffer> scratchBuffer = getOwner()->createChild<Buffer>(BufferCreationArgs{
.size = bytes,
.usage = vk::BufferUsageFlagBits::eTransferSrc
});
scratchBuffer->allocateMemory(HostVisible::YES);
// copy to scratch buffer
void* mapped = device.mapMemory(*scratchBuffer->getMemory(), 0, bytes);
std::memcpy(mapped, data, bytes);
device.unmapMemory(*scratchBuffer->getMemory());
// copy to actual buffer
co_await c_copyFrom(*scratchBuffer, vk::BufferCopy{
.srcOffset = 0,
.dstOffset = byteOffset,
.size = bytes
});
}
mijin::Task<> Buffer::c_upload(const mijin::TypelessBuffer& data, std::size_t byteOffset)
{
return c_upload(data.data(), data.byteSize(), byteOffset);
}
} // namespace iwa
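A coroutine sketch of the path implemented above (create, allocate device-local memory, upload through the internal staging copy); HostVisible::NO is an assumed counterpart of the HostVisible::YES used above.
// Sketch: a device-local vertex buffer filled from CPU data.
mijin::Task<iwa::ObjectPtr<iwa::Buffer>> c_createVertexBuffer(iwa::ObjectPtr<iwa::Device> device,
    const std::vector<float>& vertexData)
{
    const std::size_t bytes = vertexData.size() * sizeof(float);
    iwa::ObjectPtr<iwa::Buffer> buffer = device->createChild<iwa::Buffer>(iwa::BufferCreationArgs{
        .size = bytes,
        .usage = vk::BufferUsageFlagBits::eVertexBuffer | vk::BufferUsageFlagBits::eTransferDst
    });
    buffer->allocateMemory(iwa::HostVisible::NO); // device-local; c_upload() stages through a scratch buffer
    co_await buffer->c_upload(vertexData.data(), bytes, 0);
    co_return buffer;
}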

source/command.cpp Normal file

@ -0,0 +1,65 @@
#include "iwa/command.hpp"
#include "iwa/device.hpp"
namespace iwa
{
CommandPool::CommandPool(ObjectPtr<Device> owner, CommandPoolCreationArgs args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createCommandPool(vk::CommandPoolCreateInfo{
.flags = args.flags,
.queueFamilyIndex = args.queueFamilyIndex
});
}
CommandPool::~CommandPool() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyCommandPool);
}
ObjectPtr<CommandBuffer> CommandPool::allocateCommandBuffer(const CommandBufferAllocateArgs& args)
{
vk::CommandBuffer commandBuffer;
const vk::CommandBufferAllocateInfo allocateInfo
{
.commandPool = mHandle,
.level = args.level,
.commandBufferCount = 1
};
vk::resultCheck(getOwner()->getVkHandle().allocateCommandBuffers(&allocateInfo, &commandBuffer),
"vkAllocateCommandBuffers failed");
return createChild<CommandBuffer>(commandBuffer);
}
// std::vector<vk::CommandBuffer> CommandPool::allocateCommandBuffers(
// std::size_t count,
// const CommandBufferAllocateArgs& args) const noexcept
// {
// return getOwner()->getVkHandle().allocateCommandBuffers(vk::CommandBufferAllocateInfo{
// .commandPool = mHandle,
// .level = args.level,
// .commandBufferCount = static_cast<std::uint32_t>(count)
// });
// }
CommandBuffer::CommandBuffer(ObjectPtr<iwa::CommandPool> owner, vk::CommandBuffer handle)
: super_t(std::move(owner)), MixinVulkanObject(handle)
{
}
CommandBuffer::~CommandBuffer() noexcept
{
if (mHandle)
{
getOwner()->getOwner()->queueDelete([
poolHandle = getOwner()->getVkHandle(),
handle = mHandle,
device = getOwner()->getOwner()->getVkHandle()]
{
device.freeCommandBuffers(poolHandle, handle);
});
}
}
} // namespace iwa

source/descriptor_set.cpp Normal file

@ -0,0 +1,90 @@
#include "iwa/descriptor_set.hpp"
#include "iwa/device.hpp"
namespace iwa
{
DescriptorSetLayout::DescriptorSetLayout(ObjectPtr<Device> owner, const DescriptorSetLayoutCreationArgs& args)
: super_t(std::move(owner))
{
vk::DescriptorSetLayoutCreateInfo createInfo{
.flags = args.flags,
.bindingCount = static_cast<std::uint32_t>(args.bindings.size()),
.pBindings = args.bindings.data()
};
vk::DescriptorSetLayoutBindingFlagsCreateInfo flagsInfo;
if (!args.bindingFlags.empty())
{
MIJIN_ASSERT(args.bindings.size() == args.bindingFlags.size(), "Binding flags must be empty or same size as bindings.");
flagsInfo.bindingCount = static_cast<std::uint32_t>(args.bindingFlags.size());
flagsInfo.pBindingFlags = args.bindingFlags.data();
createInfo.pNext = &flagsInfo;
}
mHandle = getOwner()->getVkHandle().createDescriptorSetLayout(createInfo);
}
DescriptorSetLayout::~DescriptorSetLayout() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyDescriptorSetLayout)
}
DescriptorPool::DescriptorPool(ObjectPtr<Device> owner, const DescriptorPoolCreationArgs& args)
: super_t(std::move(owner)), mCanFree(args.flags & vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet)
{
mHandle = getOwner()->getVkHandle().createDescriptorPool(vk::DescriptorPoolCreateInfo
{
.flags = args.flags,
.maxSets = args.maxSets,
.poolSizeCount = static_cast<std::uint32_t>(args.poolSizes.size()),
.pPoolSizes = args.poolSizes.data()
});
}
DescriptorPool::~DescriptorPool() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyDescriptorPool);
}
ObjectPtr<DescriptorSet> DescriptorPool::allocateDescriptorSet(const DescriptorSetAllocateArgs& args)
{
vk::DescriptorSet descriptorSet;
vk::DescriptorSetAllocateInfo allocateInfo
{
.descriptorPool = mHandle,
.descriptorSetCount = 1,
.pSetLayouts = &args.layout->getVkHandle(),
};
vk::DescriptorSetVariableDescriptorCountAllocateInfo variableSetInfo;
if (args.variableDescriptorCount > 0)
{
variableSetInfo.descriptorSetCount = 1;
variableSetInfo.pDescriptorCounts = &args.variableDescriptorCount;
allocateInfo.pNext = &variableSetInfo;
}
vk::resultCheck(getOwner()->getVkHandle().allocateDescriptorSets(&allocateInfo, &descriptorSet),
"vkAllocateDescriptorSets failed");
return createChild<DescriptorSet>(descriptorSet);
}
DescriptorSet::DescriptorSet(ObjectPtr<DescriptorPool> owner, vk::DescriptorSet handle)
: super_t(std::move(owner)), MixinVulkanObject(handle)
{
}
DescriptorSet::~DescriptorSet() noexcept
{
if (mHandle && getOwner()->getCanFree())
{
getOwner()->getOwner()->queueDelete([
poolHandle = getOwner()->getVkHandle(),
handle = mHandle,
device = getOwner()->getOwner()]
{
device->getVkHandle().freeDescriptorSets(poolHandle, handle);
});
}
}
} // namespace iwa
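A sketch of the pool/set flow implemented above, assuming device is an ObjectPtr<Device> and layout an ObjectPtr<DescriptorSetLayout> created elsewhere:
// Sketch: a small freeable pool and one set allocated from it.
iwa::ObjectPtr<iwa::DescriptorPool> pool = device->createChild<iwa::DescriptorPool>(iwa::DescriptorPoolCreationArgs{
    .flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet,
    .maxSets = 16,
    .poolSizes = {
        vk::DescriptorPoolSize{.type = vk::DescriptorType::eUniformBuffer, .descriptorCount = 16}
    }
});
iwa::ObjectPtr<iwa::DescriptorSet> set = pool->allocateDescriptorSet(iwa::DescriptorSetAllocateArgs{
    .layout = layout
});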

source/device.cpp Normal file

@ -0,0 +1,381 @@
#include "iwa/device.hpp"
#include "iwa/log.hpp"
#include "iwa/instance.hpp"
namespace iwa
{
namespace
{
void buildDefaultDeviceExtensionList(std::vector<ExtensionInfo>& outExtensions) noexcept
{
outExtensions.push_back({.name = VK_KHR_SWAPCHAIN_EXTENSION_NAME, .required = true});
}
bool checkQueueFamilies(const PhysicalDeviceInfo& deviceInfo, [[maybe_unused]] int& score)
{
return deviceInfo.graphicsQueueFamily != std::numeric_limits<std::uint32_t>::max()
&& deviceInfo.computeQueueFamily != std::numeric_limits<std::uint32_t>::max();
}
bool checkDeviceExtensions(const std::vector<ExtensionInfo>& extensions, const PhysicalDeviceInfo& deviceInfo, int& score)
{
auto isExtensionSupported = [&deviceInfo](const char* extension)
{
for (const vk::ExtensionProperties& props : deviceInfo.extensions)
{
if (std::strncmp(props.extensionName, extension, VK_MAX_EXTENSION_NAME_SIZE) == 0)
{
return true;
}
}
return false;
};
for (const ExtensionInfo& extInfo : extensions)
{
if (isExtensionSupported(extInfo.name))
{
score += 10;
}
else if (extInfo.required)
{
logVerbose("Vulkan device {} not supported as it is missing required extension {}.", deviceInfo.properties.deviceName, extInfo.name);
return false;
}
}
return true;
}
int scorePhysicalDevice(
const PhysicalDeviceCriteria& deviceCriteria,
const std::vector<ExtensionInfo>& extensions,
const PhysicalDeviceInfo& deviceInfo)
{
int score = 0;
switch (deviceInfo.properties.deviceType)
{
case vk::PhysicalDeviceType::eDiscreteGpu:
score = 1000;
break;
case vk::PhysicalDeviceType::eIntegratedGpu:
score = 100;
break;
default: break;
}
#define CHECK_FEATURE_STRUCT(infoName, requiredName) \
for (auto feature : deviceCriteria.requiredName) \
{ \
if (!(deviceInfo.infoName.*feature)) \
{ \
return -1; \
} \
}
CHECK_FEATURE_STRUCT(features, requiredFeatures)
CHECK_FEATURE_STRUCT(vulkan11Features, requiredVulkan11Features)
CHECK_FEATURE_STRUCT(vulkan12Features, requiredVulkan12Features)
CHECK_FEATURE_STRUCT(vulkan13Features, requiredVulkan13Features)
CHECK_FEATURE_STRUCT(accelerationStructureFeatures, requiredAccelerationStructureFeatures)
CHECK_FEATURE_STRUCT(rayTracingPipelineFeatures, requiredRayTracingPipelineFeatures)
CHECK_FEATURE_STRUCT(meshShaderFeatures, requredMeshShaderFeatures)
#undef CHECK_FEATURE_STRUCT
if (!checkQueueFamilies(deviceInfo, score))
{
logVerbose("Vulkan device {} not supported as no suitable queue family configuration could be found.", deviceInfo.properties.deviceName);
return -1;
}
if (!checkDeviceExtensions(extensions, deviceInfo, score))
{
return -1;
}
return score;
}
std::size_t findBestPhysicalDevice(
PhysicalDeviceCriteria& deviceCriteria,
const std::vector<ExtensionInfo>& extensions,
const std::vector<PhysicalDeviceInfo>& deviceInfos)
{
int bestScore = -1;
std::size_t bestIdx = 0;
for (std::size_t idx = 0; idx < deviceInfos.size(); ++idx)
{
const int score = scorePhysicalDevice(deviceCriteria, extensions, deviceInfos[idx]);
if (score > bestScore)
{
bestScore = score;
bestIdx = idx;
}
}
if (bestScore < 0)
{
logAndDie("Could not find a suitable Vulkan device!");
}
return bestIdx;
}
std::vector<const char*> buildDeviceExtensionNameList(const PhysicalDeviceInfo& deviceInfo, std::vector<ExtensionInfo>& extensions)
{
auto isExtensionSupported = [&](const char* extension)
{
for (const vk::ExtensionProperties& props : deviceInfo.extensions)
{
if (std::strncmp(props.extensionName, extension, VK_MAX_EXTENSION_NAME_SIZE) == 0)
{
return true;
}
}
return false;
};
std::vector<const char*> enabledExtensions;
for (ExtensionInfo& extInfo : extensions)
{
const bool supported = isExtensionSupported(extInfo.name);
if (!supported && extInfo.required)
{
// this shouldn't be possible, we already checked when scoring the device
logAndDie("Required Vulkan device extension not supported: {}.", extInfo.name);
}
if (supported) {
enabledExtensions.push_back(extInfo.name);
}
extInfo.enabled = supported;
}
return enabledExtensions;
}
std::vector<vk::DeviceQueueCreateInfo> buildQueueCreateInfoList(const PhysicalDeviceInfo& deviceInfo)
{
static const float QUEUE_PRIORITY_ONE = 1.0f;
std::vector<vk::DeviceQueueCreateInfo> createInfos;
createInfos.emplace_back()
.setQueueFamilyIndex(deviceInfo.graphicsQueueFamily)
.setQueueCount(1)
.setPQueuePriorities(&QUEUE_PRIORITY_ONE);
return createInfos;
}
} // namespace
Device::Device(ObjectPtr<Instance> owner, DeviceCreationArgs args)
: super_t(std::move(owner)),
mExtensions(std::move(args.extensions))
{
if (!args.flags.noDefaultExtensions)
{
buildDefaultDeviceExtensionList(mExtensions);
}
const std::vector<PhysicalDeviceInfo>& physicalDevices = getOwner()->getPhysicalDevices();
const std::size_t physicalDeviceIdx = findBestPhysicalDevice(args.physicalDeviceCriteria, mExtensions, physicalDevices);
mDeviceInfo = &physicalDevices[physicalDeviceIdx];
const std::vector<const char*> enabledExtensions = buildDeviceExtensionNameList(*mDeviceInfo, mExtensions);
const std::vector<vk::DeviceQueueCreateInfo> queueCreateInfos = buildQueueCreateInfoList(*mDeviceInfo);
void* pNext = nullptr;
#define APPEND_FEATURE_STRUCT(type, name, requiredArray) \
type name{.pNext = pNext}; \
if (!args.physicalDeviceCriteria.requiredArray.empty()) \
{ \
for (auto feature : args.physicalDeviceCriteria.requiredArray) \
{ \
(name).*(feature) = VK_TRUE; \
} \
pNext = &(name); \
}
APPEND_FEATURE_STRUCT(vk::PhysicalDeviceVulkan11Features, vulkan11Features, requiredVulkan11Features)
APPEND_FEATURE_STRUCT(vk::PhysicalDeviceVulkan12Features, vulkan12Features, requiredVulkan12Features)
APPEND_FEATURE_STRUCT(vk::PhysicalDeviceVulkan13Features, vulkan13Features, requiredVulkan13Features)
APPEND_FEATURE_STRUCT(vk::PhysicalDeviceAccelerationStructureFeaturesKHR, accelerationStructureFeatures, requiredAccelerationStructureFeatures)
APPEND_FEATURE_STRUCT(vk::PhysicalDeviceRayTracingPipelineFeaturesKHR, rayTracingPipelineFeatures, requiredRayTracingPipelineFeatures)
APPEND_FEATURE_STRUCT(vk::PhysicalDeviceMeshShaderFeaturesEXT, meshShaderFeatures, requredMeshShaderFeatures)
#undef APPEND_FEATURE_STRUCT
vk::PhysicalDeviceFeatures enabledFeatures; // NOLINT(*-const-correctness) false positive
for (auto feature : args.physicalDeviceCriteria.requiredFeatures)
{
enabledFeatures.*feature = VK_TRUE;
}
const vk::DeviceCreateInfo deviceCreateInfo =
{
.pNext = pNext,
.queueCreateInfoCount = static_cast<std::uint32_t>(queueCreateInfos.size()),
.pQueueCreateInfos = queueCreateInfos.data(),
.enabledExtensionCount = static_cast<std::uint32_t>(enabledExtensions.size()),
.ppEnabledExtensionNames = enabledExtensions.data(),
.pEnabledFeatures = &enabledFeatures
};
mHandle = mDeviceInfo->device.createDevice(deviceCreateInfo);
if (args.flags.singleDevice)
{
VULKAN_HPP_DEFAULT_DISPATCHER.init(mHandle);
}
mGraphicsQueue = mHandle.getQueue(mDeviceInfo->graphicsQueueFamily, 0);
if (mDeviceInfo->graphicsQueueFamily == mDeviceInfo->computeQueueFamily)
{
mComputeQueue = mGraphicsQueue;
}
else
{
mComputeQueue = mHandle.getQueue(mDeviceInfo->computeQueueFamily, 0);
}
getOwner()->getMainTaskLoop().addTask(c_updateLoop(), &mUpdateLoopHandle);
getOwner()->deviceCreated.emit(*this);
}
Device::~Device() noexcept
{
mUpdateLoopHandle.cancel();
mScratchCommandPools.clear(); // hack 2: because of the hack below, the pools would otherwise be destroyed after the device, so clear them manually
if (mHandle)
{
mHandle.waitIdle();
mPendingScratchCmdBuffers.clear();
getOwner()->queueDelete([handle=mHandle]()
{
handle.destroy();
});
}
}
ScratchCommandPool::ScratchCommandPool(Device& device)
{
mCommandPool = device.createChild<CommandPool>(CommandPoolCreationArgs{
.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
.queueFamilyIndex = device.getDeviceInfo().graphicsQueueFamily
});
}
ObjectPtr<CommandBuffer> ScratchCommandPool::allocateCommandBuffer()
{
for (Buffer& buffer : mBuffers)
{
if (buffer.doneFuture->ready())
{
buffer.doneFuture = std::make_shared<mijin::Future<void>>();
buffer.cmdBuffer->getVkHandle().reset();
return buffer.cmdBuffer;
}
}
// nothing found, allocate a new one
return mBuffers.emplace_back(mCommandPool->allocateCommandBuffer(), std::make_shared<mijin::Future<void>>()).cmdBuffer;
}
mijin::FuturePtr<void> ScratchCommandPool::getFuture(const ObjectPtr<CommandBuffer>& cmdBuffer) noexcept
{
for (const Buffer& buffer : mBuffers)
{
if (buffer.cmdBuffer == cmdBuffer)
{
return buffer.doneFuture;
}
}
logAndDie("Someone passed an invalid cmdBuffer to getFuture()!");
}
ObjectPtr<DeviceMemory> Device::allocateDeviceMemory(const DeviceMemoryAllocationArgs& args)
{
return createChild<DeviceMemory>(args);
}
ObjectPtr<CommandBuffer> Device::beginScratchCommandBuffer()
{
ObjectPtr<CommandBuffer> cmdBuffer;
{
const std::shared_lock readLock(mScratchCommandPoolsMutex);
auto it = mScratchCommandPools.find(std::this_thread::get_id());
if (it != mScratchCommandPools.end())
{
cmdBuffer = it->second.allocateCommandBuffer();
}
}
if (cmdBuffer == nullptr)
{
// create the scratch command pool
const std::unique_lock writeLock(mScratchCommandPoolsMutex);
ScratchCommandPool& pool = mScratchCommandPools.emplace(std::this_thread::get_id(), ScratchCommandPool(*this)).first->second;
cmdBuffer = pool.allocateCommandBuffer();
// hack: an object should normally not contain pointers to its own children, as that leads to cyclic dependencies;
// decrease the reference count by 1 to mitigate that
decreaseReferenceCount();
}
const vk::CommandBufferBeginInfo beginInfo =
{
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit
};
cmdBuffer->getVkHandle().begin(beginInfo);
return cmdBuffer;
}
mijin::FuturePtr<void> Device::endScratchCommandBuffer(ObjectPtr<CommandBuffer> cmdBuffer)
{
mijin::FuturePtr<void> future;
{
const std::shared_lock readLock(mScratchCommandPoolsMutex);
future = mScratchCommandPools.at(std::this_thread::get_id()).getFuture(cmdBuffer);
}
cmdBuffer->getVkHandle().end();
getOwner()->runOnMainThread([cmdBuffer = std::move(cmdBuffer), self = getPointer(), future]() mutable
{
ObjectPtr<Fence> doneFence = Fence::create(self);
const vk::SubmitInfo submitInfo =
{
.commandBufferCount = 1,
.pCommandBuffers = &cmdBuffer->getVkHandle()
};
self->getGraphicsQueue().submit(submitInfo, *doneFence);
self->mPendingScratchCmdBuffers.push_back({
.cmdBuffer = std::move(cmdBuffer),
.doneFence = std::move(doneFence),
.future = std::move(future)
});
});
return future;
}
void Device::queueDelete(std::function<void()> deleter) noexcept
{
getOwner()->queueDelete(std::move(deleter));
}
mijin::Task<> Device::c_updateLoop() noexcept
{
while(!getOwner()->isQuitRequested())
{
for (auto it = mPendingScratchCmdBuffers.begin(); it != mPendingScratchCmdBuffers.end();)
{
if (it->doneFence->isDone())
{
it->future->set();
it = mPendingScratchCmdBuffers.erase(it);
} else
{
++it;
}
}
co_await mijin::c_suspend();
}
}
} // namespace iwa
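A sketch of the scratch command buffer API implemented above, mirroring its use in buffer.cpp: record one-shot work, then co_await the future returned by endScratchCommandBuffer().
// Sketch: one-off GPU work through the per-thread scratch command pool.
mijin::Task<> c_clearBuffer(iwa::ObjectPtr<iwa::Device> device, vk::Buffer target, vk::DeviceSize bytes)
{
    iwa::ObjectPtr<iwa::CommandBuffer> cmdBufferPtr = device->beginScratchCommandBuffer();
    const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
    cmdBuffer.fillBuffer(target, 0, bytes, 0u);
    // The future completes once c_updateLoop() sees the submission's fence signalled.
    co_await device->endScratchCommandBuffer(std::move(cmdBufferPtr));
}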

source/device_memory.cpp Normal file

@ -0,0 +1,39 @@
#include "iwa/device_memory.hpp"
#include "iwa/device.hpp"
namespace iwa
{
DeviceMemory::DeviceMemory(ObjectPtr<Device> owner, const DeviceMemoryAllocationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().allocateMemory(vk::MemoryAllocateInfo
{
.allocationSize = args.allocationSize,
.memoryTypeIndex = args.memoryTypeIndex
});
}
DeviceMemory::~DeviceMemory() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, freeMemory);
}
std::optional<std::uint32_t> findMemoryType(Device& device, const vk::MemoryRequirements& requirements, vk::MemoryPropertyFlags properties)
{
const vk::PhysicalDeviceMemoryProperties& memoryProperties = device.getDeviceInfo().memoryProperties;
for (std::uint32_t idx = 0; idx < memoryProperties.memoryTypeCount; ++idx)
{
if ((requirements.memoryTypeBits & (1 << idx)) == 0) {
continue; // not suitable for this buffer
}
if ((memoryProperties.memoryTypes[idx].propertyFlags & properties) != properties) {
continue; // does not fulfill required properties
}
return idx;
}
return std::nullopt;
}
} // namespace iwa

source/event.cpp Normal file

@ -0,0 +1,28 @@
#include "iwa/event.hpp"
#include "iwa/device.hpp"
namespace iwa
{
Event::Event(ObjectPtr<Device> owner, const EventCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createEvent(vk::EventCreateInfo
{
.flags = args.flags
});
}
Event::~Event() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyEvent);
}
mijin::Task<> Event::c_wait()
{
while (getOwner()->getVkHandle().getEventStatus(mHandle) != vk::Result::eEventSet)
{
co_await mijin::c_suspend();
}
}
} // namespace iwa

source/fence.cpp Normal file

@ -0,0 +1,38 @@
#include "iwa/fence.hpp"
#include "iwa/device.hpp"
namespace iwa
{
Fence::Fence(ObjectPtr<Device> owner, const FenceCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createFence(vk::FenceCreateInfo{
.flags = args.flags
});
}
Fence::~Fence() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyFence)
}
bool Fence::isDone() const
{
return getOwner()->getVkHandle().waitForFences(mHandle, VK_TRUE, 0) != vk::Result::eTimeout;
}
mijin::Task<> Fence::c_wait() const
{
while (getOwner()->getVkHandle().waitForFences(mHandle, VK_TRUE, 0) == vk::Result::eTimeout)
{
co_await mijin::c_suspend();
}
co_return;
}
void Fence::reset() const
{
getOwner()->getVkHandle().resetFences(mHandle);
}
} // namespace iwa

source/image.cpp Normal file

@ -0,0 +1,384 @@
#include "iwa/image.hpp"
#include <cmath>
#include "iwa/resource/bitmap.hpp"
#include "iwa/buffer.hpp"
#include "iwa/device.hpp"
#include "iwa/util/vkutil.hpp"
namespace iwa
{
Image::Image(ObjectPtr<Device> owner, ImageCreationArgs args) : super_t(std::move(owner)), mFlags(args.flags), mType(args.imageType),
mFormat(args.format), mTiling(args.tiling), mUsage(args.usage), mSize(args.extent), mArrayLayers(args.arrayLayers),
mMipLevels(clampMipLevels(args.mipLevels))
{
if (mMipLevels > 1) {
mUsage |= vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eTransferSrc;
}
mHandle = getOwner()->getVkHandle().createImage(vk::ImageCreateInfo{
.flags = args.flags,
.imageType = args.imageType,
.format = args.format,
.extent = args.extent,
.mipLevels = mMipLevels,
.arrayLayers = args.arrayLayers,
.samples = args.samples,
.tiling = args.tiling,
.usage = mUsage,
.sharingMode = args.sharingMode,
.queueFamilyIndexCount = static_cast<std::uint32_t>(args.queueFamilyIndices.size()),
.pQueueFamilyIndices = args.queueFamilyIndices.data(),
.initialLayout = args.initialLayout
});
}
Image::Image(ObjectPtr<Device> owner, ImageWrapArgs args)
: super_t(std::move(owner)), MixinVulkanObject(args.handle), mType(args.type), mFormat(args.format), mUsage(args.usage),
mSize(args.size), mMipLevels(args.mipLevels), mWrapped(true)
{
}
Image::~Image() noexcept
{
if (!mWrapped)
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyImage)
}
}
void Image::resetUsage(ResetLayout resetLayout) noexcept
{
lastUsageStages = vk::PipelineStageFlagBits::eTopOfPipe;
lastAccess = vk::AccessFlags();
if (resetLayout)
{
currentLayout = vk::ImageLayout::eUndefined;
}
}
void Image::allocateMemory()
{
const vk::MemoryRequirements memoryRequirements = getOwner()->getVkHandle().getImageMemoryRequirements(mHandle);
const vk::MemoryPropertyFlags memoryFlags = vk::MemoryPropertyFlagBits::eDeviceLocal;
const std::optional<std::uint32_t> memoryTypeIdx = findMemoryType(*getOwner(), memoryRequirements, memoryFlags);
if (!memoryTypeIdx.has_value())
{
throw std::runtime_error("Could not find a suitable memory type.");
}
ObjectPtr<DeviceMemory> memory = getOwner()->allocateDeviceMemory(
{
.allocationSize = memoryRequirements.size,
.memoryTypeIndex = memoryTypeIdx.value()
});
bindMemory(std::move(memory));
}
void Image::bindMemory(ObjectPtr<DeviceMemory> memory, vk::DeviceSize offset)
{
mMemory = std::move(memory);
getOwner()->getVkHandle().bindImageMemory(mHandle, *mMemory, offset);
}
void Image::applyTransition(vk::CommandBuffer cmdBuffer, const ImageTransition& transition)
{
assert(transition.layout != vk::ImageLayout::eUndefined);
if (transition.layout != currentLayout)
{
cmdBuffer.pipelineBarrier(
/* srcStageMask = */ lastUsageStages,
/* dstStageMask = */ transition.stages,
/* dependencyFlags = */ vk::DependencyFlags(),
/* memoryBarriers = */ {},
/* bufferMemoryBarriers = */ {},
/* imageMemoryBarriers = */ {
vk::ImageMemoryBarrier{
.srcAccessMask = lastAccess,
.dstAccessMask = transition.access,
.oldLayout = currentLayout,
.newLayout = transition.layout,
.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
.image = mHandle,
.subresourceRange = transition.subResourceRange
}
});
}
lastUsageStages = transition.stages;
lastAccess = transition.access;
currentLayout = transition.layout;
}
ObjectPtr<ImageView> Image::createImageView(const ImageViewCreationArgs& args)
{
return createChild<ImageView>(args);
}
mijin::Task<> Image::c_upload(const void* data, std::size_t bytes, vk::Extent3D bufferImageSize, vk::Offset3D imageOffset,
unsigned baseLayer, unsigned layerCount)
{
MIJIN_ASSERT(bytes >= static_cast<std::size_t>(bufferImageSize.width) * bufferImageSize.height * bufferImageSize.depth
* vkFormatSize(mFormat), "Buffer for image upload is too small!");
// TODO: optimize this whole process
// create scratch buffer
vk::Device device = *getOwner();
ObjectPtr<Buffer> scratchBuffer = getOwner()->createChild<Buffer>(BufferCreationArgs{
.size = bytes,
.usage = vk::BufferUsageFlagBits::eTransferSrc
});
scratchBuffer->allocateMemory(HostVisible::YES);
// copy to scratch buffer
void* mapped = device.mapMemory(*scratchBuffer->getMemory(), 0, bytes);
std::memcpy(mapped, data, bytes);
device.unmapMemory(*scratchBuffer->getMemory());
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
applyTransition(cmdBuffer, {
.stages = vk::PipelineStageFlagBits::eTransfer,
.layout = vk::ImageLayout::eTransferDstOptimal,
.access = vk::AccessFlagBits::eTransferWrite
});
// copy to actual buffer
cmdBuffer.copyBufferToImage(*scratchBuffer, mHandle, vk::ImageLayout::eTransferDstOptimal, vk::BufferImageCopy{
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = baseLayer,
.layerCount = layerCount
},
.imageOffset = {
.x = imageOffset.x,
.y = imageOffset.y,
.z = imageOffset.z
},
.imageExtent = {
.width = bufferImageSize.width,
.height = bufferImageSize.height,
.depth = bufferImageSize.depth
}
});
if (mMipLevels > 1)
{
generateMipMaps(cmdBuffer);
}
co_await getOwner()->endScratchCommandBuffer(std::move(cmdBufferPtr));
}
mijin::Task<> Image::c_doTransition(const ImageTransition& transition)
{
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
applyTransition(cmdBuffer, transition);
co_await getOwner()->endScratchCommandBuffer(std::move(cmdBufferPtr));
}
mijin::Task<> Image::c_upload(const Bitmap& bitmap, vk::Offset3D imageOffset, unsigned baseLayer, unsigned layerCount)
{
MIJIN_ASSERT(vkFormatSize(mFormat) == vkFormatSize(bitmap.getFormat()), "Bitmap format size doesn't match image format size.");
const vk::Extent3D bufferImageSize = {
.width = bitmap.getSize().width,
.height = bitmap.getSize().height,
.depth = 1
};
return c_upload(bitmap.getData().data(), bitmap.getData().size_bytes(), bufferImageSize, imageOffset, baseLayer, layerCount);
}
mijin::Task<> Image::c_blitFrom(Image& srcImage, std::vector<vk::ImageBlit> regions, vk::Filter filter)
{
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_WRITE);
srcImage.applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_READ);
cmdBuffer.blitImage(
/* srcImage = */ srcImage,
/* srcImageLayout = */ vk::ImageLayout::eTransferSrcOptimal,
/* dstImage = */ *this,
/* dstImageLayout = */ vk::ImageLayout::eTransferDstOptimal,
/* regions = */ regions,
/* filter = */ filter
);
co_await getOwner()->endScratchCommandBuffer(std::move(cmdBufferPtr));
}
mijin::Task<> Image::c_blitFrom(const Bitmap& bitmap, std::vector<vk::ImageBlit> regions, vk::Filter filter)
{
ObjectPtr<Image> scratchImage = co_await c_create(getOwner()->getPointer(), ImageFromBitmapArgs{
.bitmap = &bitmap,
.usage = vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eTransferSrc
});
co_await c_blitFrom(*scratchImage, std::move(regions), filter);
}
mijin::Task<> Image::c_copyFrom(Image& srcImage, std::vector<vk::ImageCopy> regions)
{
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
const vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_WRITE);
srcImage.applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_READ);
cmdBuffer.copyImage(
/* srcImage = */ srcImage,
/* srcImageLayout = */ vk::ImageLayout::eTransferSrcOptimal,
/* dstImage = */ *this,
/* dstImageLayout = */ vk::ImageLayout::eTransferDstOptimal,
/* regions = */ regions
);
co_await getOwner()->endScratchCommandBuffer(std::move(cmdBufferPtr));
}
std::uint32_t Image::clampMipLevels(std::uint32_t levels) const
{
if (levels <= 1) {
return 1;
}
const vk::ImageFormatProperties props = getOwner()->getVkPhysicalDevice().getImageFormatProperties(mFormat, mType, mTiling, mUsage, mFlags);
return std::min(levels, std::min(props.maxMipLevels, static_cast<std::uint32_t>(std::log2(std::max(mSize.width, mSize.height)))+1));
}
void Image::generateMipMaps(vk::CommandBuffer cmdBuffer)
{
std::vector<vk::ImageBlit> regions;
regions.resize(mMipLevels - 1);
applyTransition(cmdBuffer, {
.stages = vk::PipelineStageFlagBits::eTransfer,
.layout = vk::ImageLayout::eGeneral, // TODO: using transfer dst/src optimal would be better, but I don't want to deal with different layout in the mips
.access = vk::AccessFlagBits::eTransferWrite | vk::AccessFlagBits::eTransferRead
});
unsigned mipWidth = mSize.width / 2;
unsigned mipHeight = mSize.height / 2;
for (unsigned level = 1; level < mMipLevels; ++level)
{
vk::ImageBlit& region = regions[level - 1];
region.srcSubresource = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = 0,
.layerCount = 1
};
region.srcOffsets[0].x = 0;
region.srcOffsets[0].y = 0;
region.srcOffsets[0].z = 0;
region.srcOffsets[1].x = static_cast<std::int32_t>(mSize.width);
region.srcOffsets[1].y = static_cast<std::int32_t>(mSize.height);
region.srcOffsets[1].z = 1;
region.dstSubresource = {
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = level,
.baseArrayLayer = 0,
.layerCount = 1
};
region.dstOffsets[0].x = 0;
region.dstOffsets[0].y = 0;
region.dstOffsets[0].z = 0;
region.dstOffsets[1].x = static_cast<std::int32_t>(mipWidth);
region.dstOffsets[1].y = static_cast<std::int32_t>(mipHeight);
region.dstOffsets[1].z = 1;
if (mipWidth > 1) {
mipWidth /= 2;
}
if (mipHeight > 1) {
mipHeight /= 2;
}
}
cmdBuffer.blitImage(
/* srcImage = */ mHandle,
/* srcImageLayout = */ vk::ImageLayout::eGeneral,
/* dstImage = */ mHandle,
/* dstImageLayout = */ vk::ImageLayout::eGeneral,
/* regions = */ regions,
/* filter = */ vk::Filter::eLinear
);
}
mijin::Task<ObjectPtr<Image>> Image::c_create(ObjectPtr<Device> owner, ImageFromBitmapArgs args)
{
ObjectPtr<Image> image = owner->createChild<Image>(ImageCreationArgs{
.flags = args.flags,
.imageType = vk::ImageType::e2D,
.format = args.bitmap->getFormat(),
.extent = {
.width = args.bitmap->getSize().width,
.height = args.bitmap->getSize().height,
.depth = 1
},
.mipLevels = 1,
.arrayLayers = 1,
.samples = vk::SampleCountFlagBits::e1,
.tiling = args.tiling,
.usage = args.usage | vk::ImageUsageFlagBits::eTransferDst,
.sharingMode = args.sharingMode,
.queueFamilyIndices = std::move(args.queueFamilyIndices),
.initialLayout = args.initialLayout
});
image->allocateMemory();
co_await image->c_upload(*args.bitmap);
co_return image;
}
ImageView::ImageView(ObjectPtr<Image> owner, const ImageViewCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getOwner()->getVkHandle().createImageView(vk::ImageViewCreateInfo
{
.flags = args.flags,
.image = *getOwner(),
.viewType = args.viewType,
.format = args.format != vk::Format::eUndefined ? args.format : getOwner()->getFormat(),
.components = args.components,
.subresourceRange = args.subresourceRange
});
}
ImageView::~ImageView() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner()->getOwner(), mHandle, destroyImageView);
}
Sampler::Sampler(ObjectPtr<Device> owner, const SamplerCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createSampler(vk::SamplerCreateInfo
{
.flags = args.flags,
.magFilter = args.magFilter,
.minFilter = args.minFilter,
.mipmapMode = args.mipmapMode,
.addressModeU = args.addressModeU,
.addressModeV = args.addressModeV,
.addressModeW = args.addressModeW,
.mipLodBias = args.mipLodBias,
.anisotropyEnable = args.options.anisotropyEnable,
.maxAnisotropy = args.maxAnisotropy,
.compareEnable = args.options.compareEnable,
.compareOp = args.compareOp,
.minLod = args.minLod,
.maxLod = args.maxLod,
.borderColor = args.borderColor,
.unnormalizedCoordinates = args.options.unnormalizedCoordinates
});
}
Sampler::~Sampler() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroySampler);
}
} // namespace iwa
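A hypothetical sketch tying the pieces above together: Image::c_create() uploads a bitmap (adding eTransferDst itself) and createImageView() wraps the result. The ImageFromBitmapArgs and ImageViewCreationArgs field names are taken from this file; the eSampled usage and the helper are assumptions.
#include "iwa/device.hpp"
#include "iwa/image.hpp"

mijin::Task<iwa::ObjectPtr<iwa::ImageView>> c_makeTexture(iwa::ObjectPtr<iwa::Device> device, const iwa::Bitmap& bitmap)
{
    iwa::ObjectPtr<iwa::Image> image = co_await iwa::Image::c_create(device, {
        .bitmap = &bitmap,
        .usage = vk::ImageUsageFlagBits::eSampled
    });
    co_return image->createImageView({
        .viewType = vk::ImageViewType::e2D,
        .subresourceRange = {
            .aspectMask = vk::ImageAspectFlagBits::eColor,
            .levelCount = 1,
            .layerCount = 1
        }
    });
}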

37
source/input.cpp Normal file
View File

@ -0,0 +1,37 @@
#include "iwa/input.hpp"
#include "iwa/log.hpp"
namespace iwa
{
KeyState getKeyState(ScanCode scanCode) noexcept
{
const int index = static_cast<int>(scanCode);
int numKeys = 0;
const Uint8* keys = SDL_GetKeyboardState(&numKeys);
if (index >= numKeys) {
return {};
}
return {
.pressed = keys[index] > 0
};
}
void captureMouse() noexcept
{
SDL_CaptureMouse(SDL_TRUE);
}
void uncaptureMouse() noexcept
{
SDL_CaptureMouse(SDL_FALSE);
}
std::pair<int, int> getMouseScreenPosition() noexcept
{
std::pair<int, int> position;
SDL_GetGlobalMouseState(&position.first, &position.second);
return position;
}
} // namespace iwa
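A hypothetical per-frame poll built on the helpers above; getKeyState() and Instance::requestQuit() appear in this commit, while the ScanCode::ESCAPE enumerator name is a guess.
#include "iwa/input.hpp"
#include "iwa/instance.hpp"

void pollQuitKey(iwa::Instance& instance)
{
    if (iwa::getKeyState(iwa::ScanCode::ESCAPE).pressed) // enumerator name assumed
    {
        instance.requestQuit();
    }
}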

462
source/instance.cpp Normal file
View File

@ -0,0 +1,462 @@
#include "iwa/instance.hpp"
#include <cstring>
#include <limits>
#include <mijin/detect.hpp>
#include <mijin/debug/assert.hpp>
#include "iwa/addon.hpp"
#include "iwa/log.hpp"
#include "iwa/window.hpp"
#include <vulkan/vulkan_from_string.inl>
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE
namespace iwa
{
namespace
{
void buildDefaultInstanceExtensionList(std::vector<ExtensionInfo>& outExtensions) noexcept
{
#if !defined(KAZAN_RELEASE)
outExtensions.push_back({.name = VK_EXT_DEBUG_UTILS_EXTENSION_NAME, .required = false});
#endif
// if (!getEngineOptions().headless)
{
outExtensions.push_back({.name = VK_KHR_SURFACE_EXTENSION_NAME, .required = true});
#if MIJIN_TARGET_OS == MIJIN_OS_LINUX
// There are two possible APIs for X11 surfaces (SDL supports both)
// just try to enable both and hope SDL will be happy.
// Both are optional, in case one is not supported, but in the end
// one of them will be needed.
outExtensions.push_back({.name = "VK_KHR_xcb_surface", .required = false});
outExtensions.push_back({.name = "VK_KHR_xlib_surface", .required = false});
#elif MIJIN_TARGET_OS == MIJIN_OS_WINDOWS
outExtensions.push_back({.name = "VK_KHR_win32_surface", .required = true});
#endif
}
}
void buildDefaultInstanceLayerList(std::vector<LayerInfo>& outLayers) noexcept
{
#if !defined(KAZAN_RELEASE)
outLayers.push_back({.name = "VK_LAYER_KHRONOS_validation", .required = false});
#else
(void) outLayers;
#endif
}
std::vector<const char*> checkInstanceExtensions(std::vector<ExtensionInfo>& extensions)
{
std::vector<vk::ExtensionProperties> properties = vk::enumerateInstanceExtensionProperties(nullptr);
auto isExtensionSupported = [&properties](const char* extension)
{
for (const vk::ExtensionProperties& props : properties)
{
if (std::strncmp(props.extensionName, extension, VK_MAX_EXTENSION_NAME_SIZE) == 0)
{
return true;
}
}
return false;
};
std::vector<const char*> enabledExtensions;
for (ExtensionInfo& extInfo : extensions)
{
const bool supported = isExtensionSupported(extInfo.name);
if (!supported && extInfo.required)
{
logAndDie("Required Vulkan instance extension not supported: {}.", extInfo.name);
}
if (supported) {
enabledExtensions.push_back(extInfo.name);
}
extInfo.enabled = supported;
}
return enabledExtensions;
}
std::vector<const char*> checkInstanceLayers(std::vector<LayerInfo>& layers)
{
std::vector<const char*> enabledLayers;
#if !defined(KAZAN_RELEASE)
std::vector<vk::LayerProperties> properties = vk::enumerateInstanceLayerProperties();
auto isLayerSupported = [&properties](const char* layerName)
{
for (const vk::LayerProperties& props : properties)
{
if (std::strncmp(props.layerName, layerName, VK_MAX_EXTENSION_NAME_SIZE) == 0)
{
return true;
}
}
return false;
};
for (LayerInfo& layerInfo : layers)
{
const bool supported = isLayerSupported(layerInfo.name);
if (!supported && layerInfo.required)
{
logAndDie("Required Vulkan instance layer not supported: {}.", layerInfo.name);
}
if (supported) {
enabledLayers.push_back(layerInfo.name);
}
layerInfo.enabled = supported;
}
#endif
return enabledLayers;
}
vk::Bool32 VKAPI_PTR vulkanMessengerCallback(
VkDebugUtilsMessageSeverityFlagBitsEXT severity,
VkDebugUtilsMessageTypeFlagsEXT /* types */,
const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
void* /* pUserData */
)
{
switch (pCallbackData->messageIdNumber)
{
case 1303270965: // We use general layout to copy texture to mips. I know it's not optimal, but it's allowed and
// works for me ¯\_(ツ)_/¯
case 148949623: // The spec says that a) you can't create descriptor sets from layouts with the push-descriptor bit
// set and b) you have to bind a descriptor set when using said set layout in vkCmdPushDescriptorSetKHR.
// So you have to use a set, but can't create it... Something is off.
// Apart from that c) it also says that dstSet is ignored when using vkCmdPushDescriptorSetKHR, so
// maybe the validation layers are just wrong.
// a) https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkDescriptorSetAllocateInfo.html#VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308
// b) https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkWriteDescriptorSet.html#VUID-VkWriteDescriptorSet-dstSet-00320
// c) https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/vkCmdPushDescriptorSetKHR.html
return VK_FALSE;
}
logMsg("VK> {}", pCallbackData->pMessage);
if (/* g_breakOnVulkanError && */ severity == VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
{
if (const mijin::Result<mijin::Stacktrace> trace = mijin::captureStacktrace(0); trace.isSuccess())
{
logMsg("{}", *trace);
}
MIJIN_TRAP();
}
return VK_FALSE;
}
std::pair<std::uint32_t, std::uint32_t> detectQueueFamilies(vk::PhysicalDevice physicalDevice, vk::SurfaceKHR dummySurface)
{
std::vector<vk::QueueFamilyProperties> queueFamilyProperties = physicalDevice.getQueueFamilyProperties();
for (std::uint32_t idx = 0; idx < queueFamilyProperties.size(); ++idx)
{
const vk::QueueFamilyProperties& properties = queueFamilyProperties[idx];
const vk::Bool32 surfaceSupport = dummySurface ? physicalDevice.getSurfaceSupportKHR(idx, dummySurface) : VK_TRUE; // no dummy surface -> no surface support needed
if (surfaceSupport
&& (properties.queueFlags & vk::QueueFlagBits::eGraphics)
&& (properties.queueFlags & vk::QueueFlagBits::eCompute))
{
return std::make_pair(idx, idx);
}
}
return std::make_pair(std::numeric_limits<std::uint32_t>::max(), std::numeric_limits<std::uint32_t>::max());
}
PhysicalDeviceInfo getPhysicalDeviceInfo(vk::PhysicalDevice physicalDevice, vk::SurfaceKHR dummySurface)
{
PhysicalDeviceInfo deviceInfo;
deviceInfo.device = physicalDevice;
deviceInfo.memoryProperties = physicalDevice.getMemoryProperties();
deviceInfo.extensions = physicalDevice.enumerateDeviceExtensionProperties();
auto isExtensionSupported = [&](const char* extension)
{
for (const vk::ExtensionProperties& props : deviceInfo.extensions)
{
if (std::strncmp(props.extensionName, extension, VK_MAX_EXTENSION_NAME_SIZE) == 0) {
return true;
}
}
return false;
};
const bool rayTracingSupported = isExtensionSupported(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME)
&& isExtensionSupported(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME)
&& isExtensionSupported(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME);
const bool meshShadersSupported = isExtensionSupported(VK_EXT_MESH_SHADER_EXTENSION_NAME);
vk::PhysicalDeviceProperties2 deviceProperties = {
.pNext = &deviceInfo.rayTracingProperties
};
physicalDevice.getProperties2(&deviceProperties);
deviceInfo.properties = deviceProperties.properties;
#define ADD_TO_NEXT_CHAIN(struct_name) \
*ppNext = &(struct_name); ppNext = &(struct_name).pNext
vk::PhysicalDeviceFeatures2 deviceFeatures;
void** ppNext = &deviceFeatures.pNext;
ADD_TO_NEXT_CHAIN(deviceInfo.vulkan11Features);
ADD_TO_NEXT_CHAIN(deviceInfo.vulkan12Features);
ADD_TO_NEXT_CHAIN(deviceInfo.vulkan13Features);
if (rayTracingSupported)
{
ADD_TO_NEXT_CHAIN(deviceInfo.accelerationStructureFeatures);
ADD_TO_NEXT_CHAIN(deviceInfo.rayTracingPipelineFeatures);
}
if (meshShadersSupported) {
ADD_TO_NEXT_CHAIN(deviceInfo.meshShaderFeatures);
}
#undef ADD_TO_NEXT_CHAIN
physicalDevice.getFeatures2(&deviceFeatures);
deviceInfo.features = deviceFeatures.features;
// fill the renderer features
deviceInfo.availableFeatures.rayTracing = rayTracingSupported;
deviceInfo.availableFeatures.meshShaders = meshShadersSupported && deviceInfo.meshShaderFeatures.meshShader;
deviceInfo.surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR(dummySurface);
std::tie(deviceInfo.graphicsQueueFamily, deviceInfo.computeQueueFamily) = detectQueueFamilies(physicalDevice, dummySurface);
return deviceInfo;
}
std::vector<PhysicalDeviceInfo> getPhysicalDeviceInfos(Instance& instance)
{
const std::vector<vk::PhysicalDevice> devices = instance.getVkHandle().enumeratePhysicalDevices();
std::vector<PhysicalDeviceInfo> deviceInfos;
deviceInfos.reserve(devices.size());
const ObjectPtr<Window> dummyWindow = instance.createWindow({.flags={.hidden=true}});
const vk::SurfaceKHR dummySurface = dummyWindow->getVkSurface();
for (const vk::PhysicalDevice device : devices)
{
deviceInfos.push_back(getPhysicalDeviceInfo(device, dummySurface));
}
return deviceInfos;
}
} // namespace
Instance::Instance(InstanceCreationArgs args)
: super_t(nullptr),
mExtensions(std::move(args.extensions)),
mLayers(std::move(args.layers))
{
setMainThread();
const AddonInitArgs addonInitArgs = {
.instance = getPointer(),
.instanceCreationArgs = args
};
for (Addon* addon : getAddons())
{
addon->init(addonInitArgs);
}
VULKAN_HPP_DEFAULT_DISPATCHER.init();
if (!args.flags.noDefaultExtensions)
{
buildDefaultInstanceExtensionList(mExtensions);
}
if (!args.flags.noDefaultLayers)
{
buildDefaultInstanceLayerList(mLayers);
}
const std::vector<const char*> enabledLayerNames = checkInstanceLayers(mLayers);
const std::vector<const char*> enabledExtensionNames = checkInstanceExtensions(mExtensions);
const vk::InstanceCreateInfo createInfo =
{
.pApplicationInfo = &args.applicationInfo,
.enabledLayerCount = static_cast<std::uint32_t>(enabledLayerNames.size()),
.ppEnabledLayerNames = enabledLayerNames.data(),
.enabledExtensionCount = static_cast<std::uint32_t>(enabledExtensionNames.size()),
.ppEnabledExtensionNames = enabledExtensionNames.data()
};
mHandle = vk::createInstance(createInfo);
VULKAN_HPP_DEFAULT_DISPATCHER.init(mHandle);
// dump info about enabled extensions and layers
logMsg("Enabled Vulkan instance extensions: {}", enabledExtensionNames);
logMsg("Enabled Vulkan instance layers: {}", enabledLayerNames);
// also dump the actual instance version
const std::uint32_t apiVersion = vk::enumerateInstanceVersion();
logMsg("Vulkan instance version: {}.{}.{}.\n",
VK_API_VERSION_MAJOR(apiVersion),
VK_API_VERSION_MINOR(apiVersion),
VK_API_VERSION_PATCH(apiVersion)
);
if (isExtensionEnabled(VK_EXT_DEBUG_UTILS_EXTENSION_NAME))
{
// create debug messenger
const vk::DebugUtilsMessengerCreateInfoEXT dumCreateInfo =
{
.messageSeverity = vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | vk::DebugUtilsMessageSeverityFlagBitsEXT::eError,
.messageType = vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation,
.pfnUserCallback = &vulkanMessengerCallback
};
mDebugMessenger = mHandle.createDebugUtilsMessengerEXT(dumCreateInfo);
}
mPhysicalDevices = getPhysicalDeviceInfos(*this);
mWorkerTaskLoop.start(10);
}
Instance::~Instance() noexcept
{
for (Addon* addon : getAddons())
{
addon->cleanup();
}
while (!mDeleteQueue.empty())
{
runDeleters(true);
}
if (mDebugMessenger)
{
mHandle.destroyDebugUtilsMessengerEXT(mDebugMessenger);
}
mHandle.destroy();
}
bool Instance::isExtensionEnabled(const char* name) const noexcept
{
for (const ExtensionInfo& extInfo : mExtensions)
{
if (std::strcmp(extInfo.name, name) == 0)
{
return extInfo.enabled;
}
}
return false;
}
bool Instance::isLayerEnabled(const char* name) const noexcept
{
for (const LayerInfo& layerInfo : mLayers)
{
if (std::strcmp(layerInfo.name, name) == 0)
{
return layerInfo.enabled;
}
}
return false;
}
ObjectPtr<Window> Instance::createWindow(const WindowCreationArgs& args)
{
return createChild<Window>(args);
}
ObjectPtr<Device> Instance::createDevice(DeviceCreationArgs args)
{
return createChild<Device>(std::move(args));
}
void Instance::queueDelete(deleter_t deleter) noexcept
{
MIJIN_ASSERT(deleter, "Don't pass an empty function into queueDelete()!");
if (!deleter)
{
return;
}
runOnMainThread([deleter = std::move(deleter), this]() mutable
{
static const int DELETE_DELAY_FRAMES = 5; // TODO: ?
mDeleteQueue.push_back({
.deleter = std::move(deleter),
.remainingFrames = DELETE_DELAY_FRAMES
});
});
}
void Instance::tickDeleteQueue()
{
for (DeleteQueueEntry& entry : mDeleteQueue)
{
--entry.remainingFrames;
}
runDeleters();
}
void Instance::setMainThread()
{
mMainThread = std::this_thread::get_id();
}
void Instance::requestQuit() noexcept
{
if (mQuitRequested)
{
return;
}
mQuitRequested = true;
quitRequested.emit();
mMainTaskLoop.addTask([](Instance* instance) -> mijin::Task<>
{
for (int tick = 0; tick < 100; ++tick)
{
if (instance->getMainTaskLoop().getNumTasks() == 1)
{
// just us left, bye bye!
co_return;
}
co_await mijin::c_suspend();
}
const std::size_t activeTasks = instance->getMainTaskLoop().getActiveTasks() - 1; // subtract ourselves
if (activeTasks > 0)
{
logMsg("{} tasks did not finish when shutting down!", activeTasks - 1);
#if MIJIN_COROUTINE_ENABLE_DEBUG_INFO
const std::vector<mijin::TaskHandle> tasks = instance->getMainTaskLoop().getAllTasks();
for (auto [index, handle] : mijin::enumerate(tasks))
{
if (handle == mijin::getCurrentTask())
{
continue;
}
const mijin::Optional<mijin::Stacktrace> stack = handle.getCreationStack();
if (!stack.empty())
{
logMsg("Task {} creation stack:\n{}", index, *stack);
}
}
#endif // MIJIN_COROUTINE_ENABLE_DEBUG_INFO
MIJIN_TRAP();
}
else
{
const std::size_t totalTasks = instance->getMainTaskLoop().getNumTasks() - 1;
logMsg("{} tasks still waiting on program exit.", totalTasks);
}
instance->getMainTaskLoop().cancelAllTasks();
co_return;
}(this));
}
void Instance::runDeleters(bool runAll)
{
for (auto it = mDeleteQueue.begin(); it != mDeleteQueue.end();)
{
if (runAll || it->remainingFrames <= 0)
{
it->deleter();
it = mDeleteQueue.erase(it);
}
else
{
++it;
}
}
}
} // namespace iwa
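A hypothetical setup sketch for the constructor above; the InstanceCreationArgs fields match their use in this file, while the Instance::create() factory is an assumption mirroring Bitmap::create() and Font::create() elsewhere in this commit.
#include "iwa/instance.hpp"

iwa::ObjectPtr<iwa::Instance> makeInstance()
{
    iwa::InstanceCreationArgs args;
    args.applicationInfo.pApplicationName = "iwa-demo";
    args.applicationInfo.apiVersion = VK_API_VERSION_1_3;
    // Leaving flags.noDefaultExtensions / noDefaultLayers unset keeps the surface,
    // debug-utils and validation-layer defaults from buildDefaultInstance*List().
    args.extensions.push_back({.name = VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME, .required = false});
    return iwa::Instance::create(std::move(args)); // factory name assumed
}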

207
source/io/bitmap.cpp Normal file
View File

@ -0,0 +1,207 @@
#include "iwa/io/bitmap.hpp"
#include <algorithm>
#include <cctype>
#include <cstring>
#include <filesystem>
#include <mijin/detect.hpp>
#if MIJIN_COMPILER == MIJIN_COMPILER_GCC || MIJIN_COMPILER == MIJIN_COMPILER_CLANG
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#endif // MIJIN_COMPILER == MIJIN_COMPILER_GCC || MIJIN_COMPILER == MIJIN_COMPILER_CLANG
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STBI_FAILURE_USERMSG
#include <stb_image.h>
#include <stb_image_write.h>
#undef STB_IMAGE_IMPLEMENTATION
#undef STB_IMAGE_WRITE_IMPLEMENTATION
#if MIJIN_COMPILER == MIJIN_COMPILER_GCC || MIJIN_COMPILER == MIJIN_COMPILER_CLANG
#pragma GCC diagnostic pop
#endif // MIJIN_COMPILER == MIJIN_COMPILER_GCC || MIJIN_COMPILER == MIJIN_COMPILER_CLANG
#include "iwa/log.hpp"
namespace iwa
{
namespace
{
vk::Format formatByComponents(int components, ImageFormatType formatType)
{
switch (formatType)
{
case ImageFormatType::UNORM:
switch (components)
{
case 1:
return vk::Format::eR8Unorm;
case 2:
return vk::Format::eR8G8Unorm;
case 3:
return vk::Format::eR8G8B8Unorm;
case 4:
return vk::Format::eR8G8B8A8Unorm;
}
break;
case ImageFormatType::UINT:
switch (components)
{
case 1:
return vk::Format::eR8Uint;
case 2:
return vk::Format::eR8G8Uint;
case 3:
return vk::Format::eR8G8B8Uint;
case 4:
return vk::Format::eR8G8B8A8Uint;
}
break;
case ImageFormatType::SRGB:
switch (components)
{
case 1:
return vk::Format::eR8Srgb;
case 2:
return vk::Format::eR8G8Srgb;
case 3:
return vk::Format::eR8G8B8Srgb;
case 4:
return vk::Format::eR8G8B8A8Srgb;
}
break;
}
logAndDie("Could not detect image format.");
}
ObjectPtr<Bitmap> loadBitmapFromSTBI(stbi_uc* data, std::size_t width, std::size_t height, std::size_t /* components */,
const BitmapLoadOptions& options)
{
if (data == nullptr)
{
logAndDie("Could not load texture: {}", stbi_failure_reason());
}
const BitmapCreationArgs args =
{
.format = formatByComponents(/*components */ 4, options.formatType),
.size = {static_cast<unsigned>(width), static_cast<unsigned>(height)}
};
ObjectPtr<Bitmap> bitmap = Bitmap::create(args);
std::memcpy(bitmap->getData().data(), data, bitmap->getData().size());
stbi_image_free(data);
return bitmap;
}
int stbiReadCallback(void* user, char* data, int size)
{
mijin::Stream& stream = *static_cast<mijin::Stream*>(user);
std::size_t bytesRead = 0;
const mijin::StreamError error = stream.readRaw(data, size, /* partial = */ true, &bytesRead);
if (error != mijin::StreamError::SUCCESS)
{
// TODO: return what?
return 0;
}
return static_cast<int>(bytesRead);
}
void stbiSkipCallback(void* user, int bytes)
{
mijin::Stream& stream = *static_cast<mijin::Stream*>(user);
(void) stream.seek(bytes, mijin::SeekMode::RELATIVE);
}
int stbiEofCallback(void* user)
{
mijin::Stream& stream = *static_cast<mijin::Stream*>(user);
return stream.isAtEnd();
}
} // namespace
ObjectPtr<Bitmap> loadBitmap(const BitmapLoadOptions& options)
{
int width, height, components; // NOLINT(cppcoreguidelines-init-variables)
const stbi_io_callbacks callbacks{
.read = &stbiReadCallback,
.skip = &stbiSkipCallback,
.eof = &stbiEofCallback
};
stbi_uc* data = stbi_load_from_callbacks(
/* clbk = */ &callbacks,
/* user = */ options.stream,
/* x = */ &width,
/* y = */ &height,
/* channels_in_file = */ &components,
/* desired_channels = */ 4);
// NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) Yes, width isn't initialized if STB fails. No, that is not a problem
return loadBitmapFromSTBI(data, width, height, components, options);
}
void saveBitmap(const Bitmap& bitmap, const BitmapSaveOptions& options)
{
namespace fs = std::filesystem;
BitmapCodec codec = options.codec;
if (codec == BitmapCodec::NONE)
{
std::string extension = fs::path(options.fileName).extension().string();
std::ranges::transform(extension, extension.begin(), [](char chr)
{ return static_cast<char>(std::tolower(chr)); });
if (extension == ".png")
{
codec = BitmapCodec::PNG;
} else if (extension == ".jpg" || extension == ".jpeg")
{
codec = BitmapCodec::JPEG;
} else
{
logAndDie("Couldn't guess image file codec from file extension: {}.", extension);
}
}
int comp = 0;
switch (bitmap.getFormat())
{
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Srgb:
comp = 4;
break;
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Srgb:
comp = 3;
break;
default:
logAndDie("Cannot write this image format (yet).");
break;
}
switch (codec)
{
case BitmapCodec::NONE:
assert(0);
break;
case BitmapCodec::PNG:
stbi_write_png(
/* filename = */ options.fileName.c_str(),
/* w = */ static_cast<int>(bitmap.getSize().width),
/* h = */ static_cast<int>(bitmap.getSize().height),
/* comp = */ comp,
/* data = */ bitmap.getData().data(),
/* stride_in_bytes = */ comp * static_cast<int>(bitmap.getSize().width));
break;
case BitmapCodec::JPEG:
stbi_write_jpg(
/* filename = */ options.fileName.c_str(),
/* x = */ static_cast<int>(bitmap.getSize().width),
/* y = */ static_cast<int>(bitmap.getSize().height),
/* comp = */ comp,
/* data = */ bitmap.getData().data(),
/* quality = */ options.jpegQuality
);
break;
}
}
} // namespace iwa
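A hypothetical round trip through loadBitmap() and saveBitmap(); the option fields match their use above, and BitmapLoadOptions::stream being a mijin::Stream* is inferred from how it is handed to the stbi callbacks.
#include "iwa/io/bitmap.hpp"

iwa::ObjectPtr<iwa::Bitmap> reencodeAsPng(mijin::Stream& stream)
{
    iwa::BitmapLoadOptions loadOptions;
    loadOptions.stream = &stream;                        // assumed to be a mijin::Stream*
    loadOptions.formatType = iwa::ImageFormatType::SRGB; // 4 channels -> eR8G8B8A8Srgb above
    iwa::ObjectPtr<iwa::Bitmap> bitmap = iwa::loadBitmap(loadOptions);

    iwa::BitmapSaveOptions saveOptions;
    saveOptions.fileName = "out.png"; // codec left as NONE, so it is guessed from the extension
    iwa::saveBitmap(*bitmap, saveOptions);
    return bitmap;
}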

165
source/io/font.cpp Normal file
View File

@ -0,0 +1,165 @@
#include "iwa/io/font.hpp"
#include <mijin/util/iterators.hpp>
#define STB_RECT_PACK_IMPLEMENTATION
#define STB_TRUETYPE_IMPLEMENTATION
#include <stb_rect_pack.h>
#include <stb_truetype.h>
#undef STB_RECT_PACK_IMPLEMENTATION
#undef STB_TRUETYPE_IMPLEMENTATION
namespace iwa
{
namespace
{
std::vector<int> getRenderCodePoints()
{
std::vector<int> result;
for (int chr = 32; chr <= 255; ++chr) {
result.push_back(chr);
}
return result;
}
} // namespace
ObjectPtr<Font> loadFont(const FontLoadOptions& options)
{
mijin::TypelessBuffer fontData;
if (const mijin::StreamError error = options.stream->readRest(fontData); error != mijin::StreamError::SUCCESS) {
throw std::runtime_error("IO error reading font from stream.");
}
// init font data
stbtt_fontinfo fontInfo;
const int fontOffset = stbtt_GetFontOffsetForIndex(static_cast<const unsigned char*>(fontData.data()), 0);
stbtt_InitFont(&fontInfo, static_cast<const unsigned char*>(fontData.data()), fontOffset);
// prepare data for packing
std::vector<int> codepoints = getRenderCodePoints();
std::vector<stbtt_packedchar> chardata;
std::vector<stbrp_rect> rects;
chardata.resize(codepoints.size());
rects.resize(codepoints.size());
stbtt_pack_range range = {
.font_size = options.size,
.first_unicode_codepoint_in_range = 0,
.array_of_unicode_codepoints = codepoints.data(),
.num_chars = static_cast<int>(codepoints.size()),
.chardata_for_range = chardata.data()
};
// TODO: this is just a guess, maybe there is a better way to detect this
int oversample = 1;
if (options.size < 30) {
oversample = 4;
}
else if (options.size < 35) {
oversample = 3;
}
else if (options.size < 40) {
oversample = 2;
}
int textureSize = 64;
mijin::TypelessBuffer pixels;
while (textureSize <= 4096)
{
pixels.resize(textureSize * textureSize * sizeof(unsigned char)); // NOLINT
// reset char data
for (stbtt_packedchar& packedChar : chardata) {
packedChar.x0 = packedChar.y0 = packedChar.x1 = packedChar.y1 = 0;
}
// try packing with this size
stbtt_pack_context packContext;
stbtt_PackBegin(
/* spc = */ &packContext,
/* pixels = */ static_cast<unsigned char*>(pixels.data()),
/* width = */ textureSize,
/* height = */ textureSize,
/* stride_in_bytes = */ 0,
/* padding = */ 1,
/* alloc_context = */ nullptr
);
stbtt_PackSetOversampling(&packContext, oversample, oversample); // TODO: make adjustable for better font quality
const int nRects = stbtt_PackFontRangesGatherRects(&packContext, &fontInfo, &range, 1, rects.data());
stbtt_PackFontRangesPackRects(&packContext, rects.data(), nRects);
bool allPacked = true;
for (int rectIdx = 0; rectIdx < nRects; ++rectIdx)
{
if (!rects[rectIdx].was_packed) {
allPacked = false;
break;
}
}
if (!allPacked)
{
textureSize *= 2;
continue;
}
stbtt_PackFontRangesRenderIntoRects(
/* spc = */ &packContext,
/* info = */ &fontInfo,
/* ranges = */ &range,
/* num_ranges = */ 1,
/* rects = */ rects.data()
);
stbtt_PackEnd(&packContext);
break;
}
// now create a bitmap from pixels
ObjectPtr<Bitmap> bitmap = Bitmap::create(BitmapCreationArgs{
.format = vk::Format::eR8Unorm,
.size = {
.width = static_cast<std::uint32_t>(textureSize),
.height = static_cast<std::uint32_t>(textureSize)
},
.initialData = std::move(pixels)
});
std::unordered_map<char32_t, GlyphInfo> glyphMap;
glyphMap.reserve(chardata.size());
for (auto [codepoint, chrdata] : mijin::zip(codepoints, chardata))
{
glyphMap.emplace(static_cast<char32_t>(codepoint), GlyphInfo{
.uvPos0 = {
static_cast<float>(chrdata.x0) / static_cast<float>(textureSize),
static_cast<float>(chrdata.y0) / static_cast<float>(textureSize)
},
.uvPos1 = {
static_cast<float>(chrdata.x1) / static_cast<float>(textureSize),
static_cast<float>(chrdata.y1) / static_cast<float>(textureSize)
},
.xOffsetBefore = chrdata.xoff,
.xOffsetAfter = chrdata.xoff2,
.yOffsetBefore = chrdata.yoff,
.yOffsetAfter = chrdata.yoff2,
.xAdvance = chrdata.xadvance
});
}
const float scale = options.size > 0 ? stbtt_ScaleForPixelHeight(&fontInfo, options.size) : stbtt_ScaleForMappingEmToPixels(&fontInfo, -options.size);
int ascent = 0, descent = 0, lineGap = 0;
stbtt_GetFontVMetrics(&fontInfo, &ascent, &descent, &lineGap);
return Font::create(FontCreationArgs{
.bitmap = std::move(bitmap),
.glyphMap = std::move(glyphMap),
.metrics = {
.ascent = scale * static_cast<float>(ascent),
.descent = scale * static_cast<float>(descent),
.lineGap = scale * static_cast<float>(lineGap),
.sizeFactor = 1.f / static_cast<float>(oversample)
}
});
}
} // namespace iwa
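A hypothetical loader call; FontLoadOptions::stream being a mijin::Stream* is inferred from the readRest() call above, and a 24px size simply lands in the 4x oversampling bucket.
#include "iwa/io/font.hpp"

iwa::ObjectPtr<iwa::Font> loadUiFont(mijin::Stream& stream)
{
    iwa::FontLoadOptions options;
    options.stream = &stream; // assumed to be a mijin::Stream*
    options.size = 24.f;      // < 30, so the packer above uses 4x oversampling
    return iwa::loadFont(options);
}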

6
source/io/mesh.cpp Normal file
View File

@ -0,0 +1,6 @@
#include "iwa/io/mesh.hpp"
namespace iwa
{
} // namespace iwa

130
source/object.cpp Normal file
View File

@ -0,0 +1,130 @@
#include "iwa/object.hpp"
#if IWA_OBJECTPTR_TRACKING
#include <iostream>
#endif
#include <mutex>
#include <shared_mutex>
#include <unordered_map>
namespace iwa::impl
{
namespace
{
std::atomic<object_id_t> gNextObjectId;
std::unordered_map<object_id_t, BaseObject*> gAllObjects;
std::shared_mutex gAllObjectMutex;
std::unordered_map<object_id_t, object_destruction_handler_t> gDestructionHandlers;
std::shared_mutex gDestructionHandlersMutex;
#if IWA_OBJECTPTR_TRACKING
std::list<ObjectPtrAllocation> gObjectPtrAllocations;
std::mutex gObjectPtrAllocationsMutex;
struct ObjectPtrAllocationsChecker
{
~ObjectPtrAllocationsChecker()
{
const std::unique_lock lock(gObjectPtrAllocationsMutex);
if (!gObjectPtrAllocations.empty())
{
std::cerr << "ObjectPtrs pending deletion:\n";
for (const ObjectPtrAllocation& allocation : gObjectPtrAllocations)
{
std::cerr << typeid(*allocation.object).name() /* << "@" << allocation.stacktrace */ <<"\n";
#if IWA_OBJECTPTR_TRACKING > 1
if (allocation.stacktrace.isSuccess())
{
std::cerr << *allocation.stacktrace << "\n";
}
#endif
}
std::cerr.flush();
MIJIN_TRAP();
}
}
} gObjectPtrAllocationsChecker;
#endif // IWA_OBJECTPTR_TRACKING
} // namespace
#if IWA_OBJECTPTR_TRACKING
#if IWA_OBJECTPTR_TRACKING > 1
objectptr_allocation_handle_t trackObjectPtr(BaseObject* object, mijin::Result<mijin::Stacktrace>&& stacktrace) noexcept
{
const std::unique_lock lock(gObjectPtrAllocationsMutex);
gObjectPtrAllocations.emplace_back(object, std::move(stacktrace));
return std::prev(gObjectPtrAllocations.end());
}
#else
objectptr_allocation_handle_t trackObjectPtr(BaseObject* object) noexcept
{
const std::unique_lock lock(gObjectPtrAllocationsMutex);
gObjectPtrAllocations.emplace_back(object);
return std::prev(gObjectPtrAllocations.end());
}
#endif
void untrackObjectPtr(objectptr_allocation_handle_t handle) noexcept
{
const std::unique_lock lock(gObjectPtrAllocationsMutex);
gObjectPtrAllocations.erase(*handle); // NOLINT(bugprone-unchecked-optional-access)
}
#endif // IWA_OBJECTPTR_TRACKING
object_id_t nextObjectId() noexcept
{
return ++gNextObjectId;
}
void registerObject(BaseObject* object) noexcept
{
const std::unique_lock lock(gAllObjectMutex);
MIJIN_ASSERT(gAllObjects.find(object->getId()) == gAllObjects.end(), "Duplicate object id!");
gAllObjects.emplace(object->getId(), object);
}
void unregisterObject(BaseObject* object) noexcept
{
{
const std::unique_lock lock(gAllObjectMutex);
gAllObjects.erase(object->getId());
}
std::unordered_map<object_id_t, object_destruction_handler_t>::iterator itHandler;
{
const std::shared_lock readLock(gDestructionHandlersMutex);
itHandler = gDestructionHandlers.find(object->getId());
}
if (itHandler != gDestructionHandlers.end())
{
object_destruction_handler_t handler;
{
const std::unique_lock writeLock(gDestructionHandlersMutex);
handler = std::move(itHandler->second);
gDestructionHandlers.erase(itHandler);
}
handler();
}
}
ObjectPtr<BaseObject> getRegisteredObject(object_id_t objectId) noexcept
{
const std::shared_lock lock(gAllObjectMutex);
auto it = gAllObjects.find(objectId);
if (it != gAllObjects.end()) {
return it->second->getPointer();
}
return {};
}
} // namespace iwa::impl
namespace iwa
{
void registerObjectDestructionHandler(const BaseObject& object, object_destruction_handler_t handler) noexcept
{
const std::unique_lock writeLock(impl::gDestructionHandlersMutex);
impl::gDestructionHandlers.emplace(object.getId(), std::move(handler));
}
} // namespace iwa
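A hypothetical sketch of the destruction-handler hook above; the callback is invoked once, from unregisterObject(), right after the object leaves the registry.
#include "iwa/object.hpp"

void watchLifetime(const iwa::BaseObject& object)
{
    iwa::registerObjectDestructionHandler(object, []()
    {
        // Runs exactly once when the watched object is destroyed.
    });
}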

136
source/pipeline.cpp Normal file
View File

@ -0,0 +1,136 @@
#include "iwa/pipeline.hpp"
#include <mijin/io/stream.hpp>
#include <yaml-cpp/yaml.h>
#include "iwa/device.hpp"
#include "iwa/util/glsl_compiler.hpp"
#include "iwa/util/next_chain.hpp"
namespace iwa
{
PipelineLayout::PipelineLayout(ObjectPtr<Device> owner, const PipelineLayoutCreationArgs& args) : super_t(std::move(owner))
{
std::vector<vk::DescriptorSetLayout> setLayoutHandles;
setLayoutHandles.reserve(args.setLayouts.size());
for (const ObjectPtr<DescriptorSetLayout>& setLayout : args.setLayouts)
{
setLayoutHandles.push_back(setLayout->getVkHandle());
}
mHandle = getOwner()->getVkHandle().createPipelineLayout(vk::PipelineLayoutCreateInfo{
.flags = args.flags,
.setLayoutCount = static_cast<std::uint32_t>(setLayoutHandles.size()),
.pSetLayouts = setLayoutHandles.data(),
.pushConstantRangeCount = static_cast<std::uint32_t>(args.pushConstantRanges.size()),
.pPushConstantRanges = args.pushConstantRanges.data()
});
}
PipelineLayout::~PipelineLayout() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyPipelineLayout)
}
Pipeline::Pipeline(ObjectPtr<Device> owner) noexcept : super_t(std::move(owner))
{
}
Pipeline::~Pipeline() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyPipeline)
}
GraphicsPipeline::GraphicsPipeline(ObjectPtr<Device> owner, const GraphicsPipelineCreationArgs& args) noexcept
: super_t(std::move(owner))
{
std::vector<vk::PipelineShaderStageCreateInfo> vkStages;
for (const PipelineStage& stage : args.stages)
{
vk::PipelineShaderStageCreateInfo& vkStage = vkStages.emplace_back();
vkStage.stage = stage.stage;
vkStage.module = *stage.shader;
vkStage.pName = stage.name.c_str();
}
const vk::PipelineVertexInputStateCreateInfo vkVertexInput =
{
.vertexBindingDescriptionCount = static_cast<std::uint32_t>(args.vertexInput.bindings.size()),
.pVertexBindingDescriptions = args.vertexInput.bindings.data(),
.vertexAttributeDescriptionCount = static_cast<std::uint32_t>(args.vertexInput.attributes.size()),
.pVertexAttributeDescriptions = args.vertexInput.attributes.data()
};
const vk::PipelineColorBlendStateCreateInfo vkColorBlend =
{
.logicOpEnable = args.colorBlend.logicOp.has_value(),
.logicOp = args.colorBlend.logicOp.has_value() ? *args.colorBlend.logicOp : vk::LogicOp(),
.attachmentCount = static_cast<std::uint32_t>(args.colorBlend.attachements.size()),
.pAttachments = args.colorBlend.attachements.data()
};
const vk::PipelineDynamicStateCreateInfo vkDynamic =
{
.dynamicStateCount = static_cast<std::uint32_t>(args.dynamicState.size()),
.pDynamicStates = args.dynamicState.data()
};
NextChain nextChain;
if (args.renderingInfo.has_value())
{
const GraphicsPipelineRenderingInfo& rinfo = *args.renderingInfo;
nextChain.append(vk::PipelineRenderingCreateInfo{
.viewMask = rinfo.viewMask,
.colorAttachmentCount = static_cast<std::uint32_t>(rinfo.colorAttachmentFormats.size()),
.pColorAttachmentFormats = rinfo.colorAttachmentFormats.data(),
.depthAttachmentFormat = rinfo.depthFormat,
.stencilAttachmentFormat = rinfo.stencilFormat
});
}
const vk::GraphicsPipelineCreateInfo vkCreateInfo =
{
.pNext = nextChain.finalize(),
.stageCount = static_cast<std::uint32_t>(vkStages.size()),
.pStages = vkStages.data(),
.pVertexInputState = &vkVertexInput,
.pInputAssemblyState = &args.inputAssembly,
.pViewportState = &args.viewport,
.pRasterizationState = &args.rasterization,
.pMultisampleState = &args.multisample,
.pDepthStencilState = &args.depthStencil,
.pColorBlendState = &vkColorBlend,
.pDynamicState = &vkDynamic,
.layout = *args.layout,
.renderPass = args.renderPass ? *args.renderPass : vk::RenderPass(),
.subpass = args.subpass
};
const auto result = getOwner()->getVkHandle().createGraphicsPipeline(VK_NULL_HANDLE, vkCreateInfo);
MIJIN_ASSERT(result.result == vk::Result::eSuccess, "Graphics pipeline creation failed.");
mHandle = result.value;
}
ComputePipeline::ComputePipeline(ObjectPtr<Device> owner, const ComputePipelineCreationArgs& args) noexcept
: super_t(std::move(owner))
{
const vk::ComputePipelineCreateInfo vkCreateInfo{
.stage = {
.stage = vk::ShaderStageFlagBits::eCompute,
.module = *args.stage.shader,
.pName = args.stage.name.c_str()
},
.layout = *args.layout
};
const vk::ResultValue<vk::Pipeline> result = getOwner()->getVkHandle().createComputePipeline(VK_NULL_HANDLE, vkCreateInfo);
MIJIN_ASSERT(result.result == vk::Result::eSuccess, "Compute pipeline creation failed.");
mHandle = result.value;
}
RayTracingPipeline::RayTracingPipeline(ObjectPtr<Device> owner, const RayTracingPipelineCreationArgs& args) noexcept
: super_t(std::move(owner))
{
(void) args;
}
} // namespace iwa
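A hypothetical compute-pipeline setup; the PipelineStage and ComputePipelineCreationArgs field names are taken from the constructors above, and createChild<>() is assumed to be the same construction path other device objects use in this commit.
#include "iwa/device.hpp"
#include "iwa/pipeline.hpp"

iwa::ObjectPtr<iwa::ComputePipeline> makeComputePipeline(iwa::ObjectPtr<iwa::Device> device,
    iwa::ObjectPtr<iwa::ShaderModule> shader, iwa::ObjectPtr<iwa::PipelineLayout> layout)
{
    iwa::ComputePipelineCreationArgs args;
    args.stage.shader = std::move(shader);
    args.stage.name = "main"; // shader entry point
    args.layout = std::move(layout);
    return device->createChild<iwa::ComputePipeline>(std::move(args));
}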

70
source/render_pass.cpp Normal file
View File

@ -0,0 +1,70 @@
#include "iwa/render_pass.hpp"
#include "iwa/device.hpp"
#include "iwa/image.hpp"
namespace iwa
{
RenderPass::RenderPass(ObjectPtr<Device> owner, const RenderPassCreationArgs& args) : super_t(std::move(owner))
{
std::vector<vk::SubpassDescription> vkSubpasses;
vkSubpasses.reserve(args.subpasses.size());
for (const SubpassDescription& subpass : args.subpasses)
{
MIJIN_ASSERT(subpass.resolveAttachments.empty() || subpass.resolveAttachments.size() == subpass.colorAttachments.size(),
"Number of resolve attachments must be either 0 or the same as color attachments.");
vkSubpasses.push_back({
.flags = subpass.flags,
.pipelineBindPoint = subpass.pipelineBindPoint,
.inputAttachmentCount = static_cast<std::uint32_t>(subpass.inputAttachments.size()),
.pInputAttachments = subpass.inputAttachments.data(),
.colorAttachmentCount = static_cast<std::uint32_t>(subpass.colorAttachments.size()),
.pColorAttachments = subpass.colorAttachments.data(),
.pResolveAttachments = subpass.resolveAttachments.data(),
.pDepthStencilAttachment = subpass.depthStencilAttachment.has_value() ? &subpass.depthStencilAttachment.value() : nullptr,
.preserveAttachmentCount = static_cast<std::uint32_t>(subpass.preserveAttachments.size()),
.pPreserveAttachments = subpass.preserveAttachments.data()
});
}
mHandle = getOwner()->getVkHandle().createRenderPass(vk::RenderPassCreateInfo{
.flags = args.flags,
.attachmentCount = static_cast<std::uint32_t>(args.attachments.size()),
.pAttachments = args.attachments.data(),
.subpassCount = static_cast<std::uint32_t>(vkSubpasses.size()),
.pSubpasses = vkSubpasses.data(),
.dependencyCount = static_cast<std::uint32_t>(args.dependencies.size()),
.pDependencies = args.dependencies.data()
});
}
RenderPass::~RenderPass() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyRenderPass);
}
Framebuffer::Framebuffer(ObjectPtr<Device> owner, const FramebufferCreationArgs& args)
: super_t(std::move(owner)), mImageViews(args.attachments)
{
std::vector<vk::ImageView> vkImageViews;
vkImageViews.reserve(mImageViews.size());
for (const ObjectPtr<ImageView>& imageView : mImageViews)
{
vkImageViews.push_back(*imageView);
}
mHandle = getOwner()->getVkHandle().createFramebuffer(vk::FramebufferCreateInfo{
.flags = args.flags,
.renderPass = *args.renderPass,
.attachmentCount = static_cast<std::uint32_t>(vkImageViews.size()),
.pAttachments = vkImageViews.data(),
.width = args.width,
.height = args.height,
.layers = args.layers
});
}
Framebuffer::~Framebuffer() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyFramebuffer);
}
} // namespace iwa

305
source/resource/bitmap.cpp Normal file
View File

@ -0,0 +1,305 @@
#include "iwa/resource/bitmap.hpp"
#include <cmath>
#include "iwa/log.hpp"
#include "iwa/util/vkutil.hpp"
namespace iwa
{
enum class InterpolationMethod
{
NEAREST
};
class BitmapViewBase
{
protected:
Bitmap* mBitmap;
protected:
explicit inline BitmapViewBase(Bitmap* bitmap_) : mBitmap(bitmap_) {}
public:
BitmapViewBase(const BitmapViewBase&) = delete;
BitmapViewBase(BitmapViewBase&&) = default;
BitmapViewBase& operator=(const BitmapViewBase&) = delete;
BitmapViewBase& operator=(BitmapViewBase&&) = default;
virtual ~BitmapViewBase() = default; // just to make clang happy, should be optimised out again
[[nodiscard]] virtual glm::vec4 getPixel(unsigned x, unsigned y) const = 0;
[[nodiscard]] virtual glm::vec4 getPixelRelative(float x, float y, InterpolationMethod interpolation = InterpolationMethod::NEAREST) const = 0;
[[nodiscard]] virtual std::vector<glm::vec4> getPixels(unsigned x, unsigned y, unsigned width, unsigned height) const = 0;
virtual void fill(const glm::vec4& color) = 0;
virtual void copyChannels(const Bitmap& other, const std::vector<ChannelMapping>& mappings) = 0;
virtual void multiply(const glm::vec4& color) = 0;
};
template<typename TData, int numComponents, bool normalized = std::is_floating_point_v<TData>>
class BitmapView : public BitmapViewBase
{
private:
static constexpr std::size_t pixelSize = sizeof(TData) * numComponents;
public:
explicit inline BitmapView(Bitmap* bitmap_) : BitmapViewBase(bitmap_) {}
TData* getPixelRaw(unsigned x, unsigned y)
{
const std::size_t offset = x + y * mBitmap->getSize().width;
return reinterpret_cast<TData*>(&mBitmap->getData()[offset * pixelSize]);
}
[[nodiscard]] const TData* getPixelRaw(unsigned x, unsigned y) const
{
const std::size_t offset = x + y * mBitmap->getSize().width;
return reinterpret_cast<const TData*>(&mBitmap->getData()[offset * pixelSize]);
}
[[nodiscard]] TData convertChannel(float channel) const
{
if constexpr (normalized) {
return static_cast<TData>(channel);
} else {
return static_cast<TData>(255 * channel);
}
}
[[nodiscard]] float convertChannelBack(TData channel) const
{
if constexpr (normalized) {
return static_cast<float>(channel);
}
else {
return channel / 255.f;
}
}
[[nodiscard]] TData clampChannel(TData value)
{
if constexpr (normalized) {
return std::clamp(value, TData(0), TData(1));
}
else {
return std::clamp(value, TData(0), TData(255));
}
}
void convertColor(const glm::vec4& color, std::array<TData, numComponents>& outConverted) const
{
for (int comp = 0; comp < numComponents; ++comp)
{
outConverted[comp] = convertChannel(color[comp]);
}
}
[[nodiscard]] glm::vec4 convertColorBack(const TData* raw) const
{
glm::vec4 result;
for (int comp = 0; comp < numComponents; ++comp)
{
result[comp] = convertChannelBack(raw[comp]);
}
return result;
}
// overrides
[[nodiscard]] glm::vec4 getPixel(unsigned x, unsigned y) const override
{
const TData* pixelRaw = getPixelRaw(x, y);
return convertColorBack(pixelRaw);
}
[[nodiscard]] glm::vec4 getPixelRelative(float x, float y, InterpolationMethod interpolation = InterpolationMethod::NEAREST) const override
{
const vk::Extent2D size = mBitmap->getSize();
(void) interpolation; // TODO
const unsigned myX = std::clamp(static_cast<unsigned>(std::round(x * static_cast<float>(size.width))), 0u, size.width - 1);
const unsigned myY = std::clamp(static_cast<unsigned>(std::round(y * static_cast<float>(size.height))), 0u, size.height - 1);
return getPixel(myX, myY);
}
[[nodiscard]] std::vector<glm::vec4> getPixels(unsigned x, unsigned y, unsigned width, unsigned height) const override
{
std::vector<glm::vec4> pixels;
pixels.resize(static_cast<std::size_t>(width) * height);
for (unsigned ypos = y; ypos < y + height; ++ypos)
{
for (unsigned xpos = x; xpos < x + width; ++xpos)
{
pixels[(xpos - x) + (ypos - y) * width] = getPixel(xpos, ypos);
}
}
return pixels;
}
void fill(const glm::vec4& color) override
{
std::array<TData, numComponents> convertedColor; // NOLINT(cppcoreguidelines-pro-type-member-init)
convertColor(color, convertedColor);
for (unsigned y = 0; y < mBitmap->getSize().height; ++y)
{
for (unsigned x = 0; x < mBitmap->getSize().width; ++x)
{
TData* pixel = getPixelRaw(x, y);
for (int comp = 0; comp < numComponents; ++comp)
{
pixel[comp] = convertedColor[comp];
}
}
}
}
// TODO: maybe introduce a "dual-mView" class that includes both types at once?
void copyChannels(const Bitmap& other, const std::vector<ChannelMapping>& mappings) override
{
const vk::Extent2D size = mBitmap->getSize();
const std::vector<glm::vec4> otherPixels = other.getAllPixels();
if(otherPixels.size() == static_cast<std::size_t>(size.width) * size.height)
{
for (unsigned y = 0; y < size.height; ++y)
{
for (unsigned x = 0; x < size.width; ++x)
{
const glm::vec4 color = otherPixels[x + y * size.width];
for (const ChannelMapping& mapping : mappings)
{
assert(static_cast<int>(mapping.to) < numComponents);
TData* myPixel = getPixelRaw(x, y);
myPixel[static_cast<int>(mapping.to)] = convertChannel(color[static_cast<int>(mapping.from)]);
}
}
}
}
else
{
const vk::Extent2D otherSize = other.getSize();
const float factorX = static_cast<float>(otherSize.width) / static_cast<float>(size.width);
const float factorY = static_cast<float>(otherSize.height) / static_cast<float>(size.height);
for (unsigned y = 0; y < size.height; ++y)
{
for (unsigned x = 0; x < size.width; ++x)
{
const unsigned otherX = std::clamp(static_cast<unsigned>(std::round(factorX * static_cast<float>(x))), 0u, otherSize.width - 1);
const unsigned otherY = std::clamp(static_cast<unsigned>(std::round(factorY * static_cast<float>(y))), 0u, otherSize.height - 1);
const glm::vec4 color = otherPixels[otherX + otherY * otherSize.width];
for (const ChannelMapping& mapping : mappings)
{
assert(static_cast<int>(mapping.to) < numComponents);
TData* myPixel = getPixelRaw(x, y);
myPixel[static_cast<int>(mapping.to)] = convertChannel(color[static_cast<int>(mapping.from)]);
}
}
}
}
}
void multiply(const glm::vec4& color) override
{
for (unsigned y = 0; y < mBitmap->getSize().height; ++y)
{
for (unsigned x = 0; x < mBitmap->getSize().width; ++x)
{
TData* pixel = getPixelRaw(x, y);
for (int comp = 0; comp < numComponents; ++comp)
{
pixel[comp] = clampChannel(static_cast<TData>(pixel[comp] * color[comp]));
}
}
}
}
};
void Bitmap::createView()
{
switch(mFormat)
{
case vk::Format::eR8Uint:
case vk::Format::eR8Unorm:
case vk::Format::eR8Srgb:
mView = std::make_unique<BitmapView<std::uint8_t, 1>>(this);
break;
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Srgb:
mView = std::make_unique<BitmapView<std::uint8_t, 2>>(this);
break;
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Srgb:
mView = std::make_unique<BitmapView<std::uint8_t, 3>>(this);
break;
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Srgb:
mView = std::make_unique<BitmapView<std::uint8_t, 4>>(this);
break;
case vk::Format::eR32Sfloat:
mView = std::make_unique<BitmapView<float, 1>>(this);
break;
case vk::Format::eR32G32Sfloat:
mView = std::make_unique<BitmapView<float, 2>>(this);
break;
case vk::Format::eR32G32B32Sfloat:
mView = std::make_unique<BitmapView<float, 3>>(this);
break;
case vk::Format::eR32G32B32A32Sfloat:
mView = std::make_unique<BitmapView<float, 4>>(this);
break;
default:
logAndDie("Missing format for bitmap mView!");
}
}
//
// public functions
//
Bitmap::Bitmap(BitmapCreationArgs args, ObjectPtr<BaseObject> owner)
: super_t(std::move(owner)), mFormat(args.format), mSize(args.size)
{
const std::size_t dataSize = static_cast<std::size_t>(args.size.width) * args.size.height * vkFormatSize(args.format);
if (args.initialData.empty())
{
mData.resize(dataSize);
}
else
{
MIJIN_ASSERT(args.initialData->byteSize() == dataSize, "Bitmap initial data size is invalid.");
mData = std::move(*args.initialData);
}
createView();
}
Bitmap::~Bitmap() // NOLINT(modernize-use-equals-default)
{
// needs to be implemented for the mView to be deleted correctly
}
glm::vec4 Bitmap::getPixel(unsigned int x, unsigned int y) const
{
return mView->getPixel(x, y);
}
std::vector<glm::vec4> Bitmap::getPixels(unsigned x, unsigned y, unsigned width, unsigned height) const
{
return mView->getPixels(x, y, width, height);
}
void Bitmap::fill(const glm::vec4& color)
{
mView->fill(color);
}
void Bitmap::copyChannels(const Bitmap& other, const std::vector<ChannelMapping>& mappings)
{
mView->copyChannels(other, mappings);
}
void Bitmap::multiply(const glm::vec4& color)
{
mView->multiply(color);
}
} // namespace iwa
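A hypothetical sketch exercising the pixel-view machinery above; the BitmapCreationArgs fields match their use in io/bitmap.cpp and io/font.cpp.
#include "iwa/resource/bitmap.hpp"

iwa::ObjectPtr<iwa::Bitmap> makeTintMask()
{
    iwa::ObjectPtr<iwa::Bitmap> bitmap = iwa::Bitmap::create({
        .format = vk::Format::eR8G8B8A8Unorm, // selects BitmapView<std::uint8_t, 4>
        .size = {.width = 64, .height = 64}
    });
    bitmap->fill(glm::vec4(1.f));                      // every channel to 255
    bitmap->multiply(glm::vec4(1.f, 0.5f, 0.5f, 1.f)); // clamped per channel
    return bitmap;
}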

11
source/resource/font.cpp Normal file
View File

@ -0,0 +1,11 @@
#include "iwa/resource/font.hpp"
namespace iwa
{
Font::Font(FontCreationArgs args)
: mBitmap(std::move(args.bitmap)), mGlyphMap(std::move(args.glyphMap)), mMetrics(args.metrics)
{
}
} // namespace iwa

19
source/semaphore.cpp Normal file
View File

@ -0,0 +1,19 @@
#include "iwa/semaphore.hpp"
#include "iwa/device.hpp"
namespace iwa
{
Semaphore::Semaphore(ObjectPtr<Device> owner, const SemaphoreCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createSemaphore(vk::SemaphoreCreateInfo{
.flags = args.flags
});
}
Semaphore::~Semaphore() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroySemaphore)
}
} // namespace iwa

20
source/shader_module.cpp Normal file
View File

@ -0,0 +1,20 @@
#include "iwa/shader_module.hpp"
#include "iwa/device.hpp"
namespace iwa
{
ShaderModule::ShaderModule(ObjectPtr<Device> owner, const ShaderModuleCreationArgs& args) : super_t(std::move(owner))
{
mHandle = getOwner()->getVkHandle().createShaderModule(vk::ShaderModuleCreateInfo{
.codeSize = static_cast<std::uint32_t>(args.code.size_bytes()),
.pCode = args.code.data()
});
}
ShaderModule::~ShaderModule() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroyShaderModule)
}
} // namespace iwa

316
source/swapchain.cpp Normal file
View File

@ -0,0 +1,316 @@
#include "iwa/swapchain.hpp"
#include <mijin/debug/assert.hpp>
#include <SDL_vulkan.h>
#include "iwa/device.hpp"
#include "iwa/log.hpp"
namespace iwa
{
namespace
{
std::pair<vk::PresentModeKHR, std::uint32_t> detectPresentMode(const Device& device, const Window& window, const vk::SurfaceCapabilitiesKHR& surfaceCapabilities)
{
std::vector<vk::PresentModeKHR> presentModes = device.getVkPhysicalDevice().getSurfacePresentModesKHR(window.getVkSurface());
vk::PresentModeKHR presentMode = vk::PresentModeKHR::eImmediate;
std::uint32_t imageCount = 2;
if (std::ranges::find(presentModes, vk::PresentModeKHR::eMailbox) != presentModes.end())
{
presentMode = vk::PresentModeKHR::eMailbox;
imageCount = 3;
}
imageCount = std::max(imageCount, surfaceCapabilities.minImageCount);
if (surfaceCapabilities.maxImageCount > 0)
{
imageCount = std::min(imageCount, surfaceCapabilities.maxImageCount);
}
return std::make_pair(presentMode, imageCount);
}
vk::SurfaceFormatKHR detectImageFormat(const Device& device, const Window& window, const vk::ImageUsageFlags imageUsage)
{
std::vector<vk::SurfaceFormatKHR> surfaceFormats = device.getVkPhysicalDevice().getSurfaceFormatsKHR(window.getVkSurface());
int resultIdx = -1;
for (std::size_t idx = 0; idx < surfaceFormats.size(); ++idx)
{
const vk::SurfaceFormatKHR surfaceFormat = surfaceFormats[idx];
vk::ImageFormatProperties formatProperties;
try
{
formatProperties = device.getVkPhysicalDevice().getImageFormatProperties(
/* format = */ surfaceFormat.format,
/* type = */ vk::ImageType::e2D,
/* tiling = */ vk::ImageTiling::eOptimal,
/* usage = */ imageUsage
);
}
catch(vk::FormatNotSupportedError&)
{
continue; // not supported
}
if (resultIdx < 0)
{
resultIdx = static_cast<int>(idx);
}
else if (surfaceFormat.format == vk::Format::eB8G8R8A8Srgb
&& surfaceFormat.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear)
{
resultIdx = static_cast<int>(idx);
break;
}
}
if (resultIdx < 0)
{
logAndDie("Error creating Vulkan swapchain, no compatible image format found.");
}
return surfaceFormats[resultIdx];
}
vk::Extent2D detectImageExtent(const Window& window, const vk::SurfaceCapabilitiesKHR& surfaceCapabilities)
{
// default to current extent (chosen by the driver)
vk::Extent2D extent = surfaceCapabilities.currentExtent;
// alternatively ask SDL about it
if (extent.width == 0xFFFFFFFF && extent.height == 0xFFFFFFFF)
{
int width, height; // NOLINT(readability-isolate-declaration, cppcoreguidelines-init-variables)
SDL_Vulkan_GetDrawableSize(window.getSDLWindow(), &width, &height);
extent.width = std::clamp(static_cast<std::uint32_t>(width), surfaceCapabilities.minImageExtent.width, surfaceCapabilities.maxImageExtent.width);
extent.height = std::clamp(static_cast<std::uint32_t>(height), surfaceCapabilities.minImageExtent.height, surfaceCapabilities.maxImageExtent.height);
}
return extent;
}
} // namespace
Swapchain::Swapchain(ObjectPtr<Device> owner, SwapchainCreationArgs args)
: super_t(std::move(owner)), mWindow(std::move(args.window)), mImageUsage(args.imageUsage)
{
MIJIN_ASSERT(mWindow, "Invalid SwapchainCreationArgs: window cannot be NULL.");
mImageAvailableSemaphores.resize(args.parallelFrames);
recreate();
}
Swapchain::~Swapchain() noexcept
{
IWA_DELETE_DEVICE_OBJECT(getOwner(), mHandle, destroySwapchainKHR)
}
mijin::Task<> Swapchain::c_present(const PresentArgs& args)
{
while (!mWindow->isVisible()) {
co_await mijin::c_suspend();
}
const vk::PresentInfoKHR presentInfo =
{
.waitSemaphoreCount = static_cast<std::uint32_t>(args.waitSemaphores.size()),
.pWaitSemaphores = args.waitSemaphores.data(),
.swapchainCount = 1,
.pSwapchains = &mHandle,
.pImageIndices = &mCurrentImageIdx
};
vk::Result result; // NOLINT(cppcoreguidelines-init-variables)
try {
result = args.queue.presentKHR(presentInfo);
} catch(vk::OutOfDateKHRError&) {
result = vk::Result::eErrorOutOfDateKHR;
}
mCurrentImageIdx = INVALID_IMAGE_INDEX;
// next image, please
mCurrentFrameIdx = (mCurrentFrameIdx + 1) % static_cast<int>(getNumParallelFrames());
if (result == vk::Result::eSuccess) {
co_await c_acquireImage();
}
else {
recreate();
}
}
void Swapchain::recreate()
{
const vk::SurfaceCapabilitiesKHR surfaceCapabilities = getOwner()->getVkPhysicalDevice().getSurfaceCapabilitiesKHR(mWindow->getVkSurface());
const bool firstCreate = !mHandle;
// doing this here seems to fix the device loss during recreation
getOwner()->getVkHandle().waitIdle();
vk::SwapchainCreateInfoKHR createInfo;
// surface
createInfo.surface = mWindow->getVkSurface();
// old swapchain
createInfo.oldSwapchain = mHandle;
// some defaults
createInfo.imageSharingMode = vk::SharingMode::eExclusive;
createInfo.clipped = VK_TRUE;
createInfo.preTransform = surfaceCapabilities.currentTransform;
// queue families
createInfo.queueFamilyIndexCount = 1;
createInfo.pQueueFamilyIndices = &getOwner()->getDeviceInfo().graphicsQueueFamily;
// present mode and image count
std::tie(createInfo.presentMode, createInfo.minImageCount) = detectPresentMode(*getOwner(), *mWindow, surfaceCapabilities);
// image usage
createInfo.imageUsage = mImageUsage;
MIJIN_ASSERT((surfaceCapabilities.supportedUsageFlags & createInfo.imageUsage) == createInfo.imageUsage, "Invalid image usage flags when creating surface.");
// image format and color space
const vk::SurfaceFormatKHR surfaceFormat = detectImageFormat(*getOwner(), *mWindow, createInfo.imageUsage);
createInfo.imageFormat = surfaceFormat.format;
createInfo.imageColorSpace = surfaceFormat.colorSpace;
// image extent
createInfo.imageExtent = detectImageExtent(*mWindow, surfaceCapabilities);
createInfo.imageArrayLayers = 1;
// composite alpha
createInfo.compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque;
MIJIN_ASSERT((surfaceCapabilities.supportedCompositeAlpha & createInfo.compositeAlpha) == createInfo.compositeAlpha, "Invalid composite alpha when creating surface.");
mHandle = getOwner()->getVkHandle().createSwapchainKHR(createInfo);
mFormat = createInfo.imageFormat;
mExtent = createInfo.imageExtent;
//for (ObjectPtr<Image>& image : mImages)
//{
// image->unwrap();
// unregisterImage(&image);
//}
mImages.clear();
// retrieve swapchain images
const std::vector<vk::Image> imageHandles = getOwner()->getVkHandle().getSwapchainImagesKHR(mHandle);
mImages.reserve(imageHandles.size());
// and create image views
// destroyImageViews();
// imageViews.reserve(imageHandles.size());
for (const vk::Image image : imageHandles)
{
mImages.emplace_back(getOwner()->createChild<Image>(ImageWrapArgs{
.handle = image,
.type = vk::ImageType::e2D,
.format = createInfo.imageFormat,
.usage = createInfo.imageUsage,
.size = {
createInfo.imageExtent.width,
createInfo.imageExtent.height,
1
}
}));
// const ImageViewCreateInfo ivCreateInfo =
// {
// .nativeImage = image,
// .viewType = vk::ImageViewType::e2D,
// .format = createInfo.imageFormat,
// .components =
// {
// .r = vk::ComponentSwizzle::eIdentity,
// .g = vk::ComponentSwizzle::eIdentity,
// .b = vk::ComponentSwizzle::eIdentity,
// .a = vk::ComponentSwizzle::eIdentity
// },
// .subresourceRange =
// {
// .aspectMask = vk::ImageAspectFlagBits::eColor,
// .baseMipLevel = 0,
// .levelCount = 1,
// .baseArrayLayer = 0,
// .layerCount = 1
// }
// };
// imageViews.emplace_back().create(ivCreateInfo);
// images.back().setDebugName(fmt::format("Swapchain Image #{}", images.size() - 1));
// registerImage(&images.back());
}
// create "image available" semaphores
for (ObjectPtr<Semaphore>& semaphore : mImageAvailableSemaphores)
{
semaphore = getOwner()->createChild<Semaphore>();
}
if (!firstCreate)
{
getOwner()->getVkHandle().destroySwapchainKHR(createInfo.oldSwapchain);
}
// update size
// imageSize.width = createInfo.imageExtent.width;
// imageSize.height = createInfo.imageExtent.height;
// imageFormat = createInfo.imageFormat;
// already request the first frame
acquireImage();
recreated.emit();
}
void Swapchain::acquireImage()
{
MIJIN_ASSERT(mCurrentImageIdx == INVALID_IMAGE_INDEX, "Attempting to acquire a new image before presenting.");
try
{
const vk::ResultValue<std::uint32_t> result = getOwner()->getVkHandle().acquireNextImageKHR(mHandle, std::numeric_limits<std::uint64_t>::max(), *mImageAvailableSemaphores[mCurrentFrameIdx]);
if (result.result != vk::Result::eSuccess)
{
assert(result.result == vk::Result::eSuboptimalKHR);
recreate();
return;
}
mCurrentImageIdx = result.value;
mImages[mCurrentImageIdx]->resetUsage(ResetLayout::YES);
}
catch(vk::OutOfDateKHRError&) {
recreate();
}
}
mijin::Task<> Swapchain::c_acquireImage()
{
try
{
vk::ResultValue<std::uint32_t> result = vk::ResultValue<std::uint32_t>(vk::Result::eTimeout, 0);
while(true)
{
result = getOwner()->getVkHandle().acquireNextImageKHR(mHandle, 0, *mImageAvailableSemaphores[mCurrentFrameIdx]);
if (result.result != vk::Result::eTimeout && result.result != vk::Result::eNotReady) {
break;
}
co_await mijin::c_suspend();
}
if (result.result != vk::Result::eSuccess)
{
MIJIN_ASSERT(result.result == vk::Result::eSuboptimalKHR, "Error acquiring swapchain image.");
recreate();
co_return;
}
mCurrentImageIdx = result.value;
mImages[mCurrentImageIdx]->resetUsage(ResetLayout::YES);
}
catch(vk::OutOfDateKHRError&) {
recreate();
}
}
} // namespace iwa
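// Usage sketch (illustrative, not part of this commit): creating a swapchain for a
// window and presenting the current image from a coroutine. The field names of
// SwapchainCreationArgs and PresentArgs are taken from the code above; how the
// Device, Window and the renderDone semaphore are produced is assumed to happen
// elsewhere, and createChild<Swapchain>() follows the pattern used for the other
// device children in this commit.
static mijin::Task<> c_presentOnce(const iwa::ObjectPtr<iwa::Device>& device,
    iwa::ObjectPtr<iwa::Window> window, vk::Semaphore renderDone)
{
    iwa::ObjectPtr<iwa::Swapchain> swapchain = device->createChild<iwa::Swapchain>(iwa::SwapchainCreationArgs{
        .window = std::move(window),
        .imageUsage = vk::ImageUsageFlagBits::eColorAttachment,
        .parallelFrames = 2
    });
    // ... record and submit rendering into swapchain->getCurrentImage(), signalling renderDone ...
    co_await swapchain->c_present({
        .queue = device->getGraphicsQueue(),
        .waitSemaphores = {renderDone}
    });
}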

47
source/texture.cpp Normal file
View File

@ -0,0 +1,47 @@
#include "iwa/texture.hpp"
#include "iwa/device.hpp"
#include "iwa/resource/bitmap.hpp"
namespace iwa
{
Texture::Texture(TextureCreationArgs args) : super_t(nullptr), mImage(std::move(args.image)),
mImageView(mImage->createImageView(args.imageViewArgs)), mSampler(mImage->getOwner()->createChild<Sampler>(args.samplerArgs))
{
}
mijin::Task<ObjectPtr<Texture>> Texture::c_createSingleColor(const ObjectPtr<Device>& device, const SingleColorTextureArgs& args)
{
ObjectPtr<Bitmap> bitmap = Bitmap::create(BitmapCreationArgs{
.format = args.format,
.size = {.width = 1, .height = 1}
});
bitmap->fill(args.color);
co_return co_await c_createFromBitmap(device, {.bitmap = *bitmap});
}
mijin::Task<ObjectPtr<Texture>> Texture::c_createFromBitmap(const ObjectPtr<Device>& device, const TextureFromBitmapArgs& args)
{
ImageCreationArgs imageCreationArgs = args.imageArgs;
imageCreationArgs.format = args.bitmap.getFormat();
imageCreationArgs.extent.width = args.bitmap.getSize().width;
imageCreationArgs.extent.height = args.bitmap.getSize().height;
imageCreationArgs.extent.depth = 1;
imageCreationArgs.usage |= vk::ImageUsageFlagBits::eSampled | vk::ImageUsageFlagBits::eTransferDst;
ObjectPtr<Image> image = device->createChild<Image>(imageCreationArgs);
image->allocateMemory();
co_await image->c_upload(args.bitmap);
co_await image->c_doTransition({
.stages = vk::PipelineStageFlagBits::eFragmentShader,
.layout = vk::ImageLayout::eShaderReadOnlyOptimal,
.access = vk::AccessFlagBits::eShaderRead
});
co_return create(TextureCreationArgs{
.image = std::move(image),
.imageViewArgs = args.imageViewArgs,
.samplerArgs = args.samplerArgs
});
}
} // namespace iwa
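// Usage sketch (illustrative, not part of this commit): obtaining a 1x1 white
// fallback texture via c_createSingleColor() above. The vk::Format value is an
// assumption; .format and .color are the fields the coroutine reads.
static mijin::Task<iwa::ObjectPtr<iwa::Texture>> c_makeFallbackTexture(const iwa::ObjectPtr<iwa::Device>& device)
{
    co_return co_await iwa::Texture::c_createSingleColor(device, {
        .format = vk::Format::eR8G8B8A8Unorm,
        .color = glm::vec4(1.0F)
    });
}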

548
source/util/glsl_compiler.cpp Normal file
View File

@ -0,0 +1,548 @@
#include "iwa/util/glsl_compiler.hpp"
#include <filesystem>
#include <utility>
#include <glslang/Include/InfoSink.h>
#include <glslang/Public/ShaderLang.h>
#include <glslang/MachineIndependent/iomapper.h>
#include <glslang/MachineIndependent/localintermediate.h>
#include <glslang/Public/ResourceLimits.h>
#include <glslang/SPIRV/GlslangToSpv.h>
#include <yaml-cpp/yaml.h>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
#include "iwa/log.hpp"
#include "iwa/util/dir_stack_file_includer.hpp"
#include "iwa/util/reflect_glsl.hpp"
namespace fs = std::filesystem;
namespace iwa
{
namespace
{
class CustomFileIncluder : public impl::DirStackFileIncluder
{
private:
fs::path workingDir;
mijin::FileSystemAdapter& mFsAdapter;
public:
explicit CustomFileIncluder(mijin::FileSystemAdapter& fsAdapter) noexcept: mFsAdapter(fsAdapter)
{}
public:
void setWorkingDir(const fs::path& workingDir_)
{ workingDir = workingDir_; }
protected: // overrides
IncludeResult* readLocalPath(const char* headerName, const char* includerName, int depth) override
{
// Discard popped include directories, and
// initialize when at parse-time first level.
directoryStack.resize(depth + externalLocalDirectoryCount);
if (depth == 1)
{
directoryStack.back() = getDirectory(includerName);
}
// Find a directory that works, using a reverse search of the include stack.
for (auto it = directoryStack.rbegin(); it != directoryStack.rend(); ++it)
{
std::string path = *it + '/' + headerName;
std::replace(path.begin(), path.end(), '\\', '/');
std::unique_ptr<mijin::Stream> stream;
mijin::StreamError error = mijin::StreamError::UNKNOWN_ERROR;
if (workingDir != fs::path())
{
// try relative include first
error = mFsAdapter.open(workingDir / path, mijin::FileOpenMode::READ, stream);
}
if (error != mijin::StreamError::SUCCESS)
{
error = mFsAdapter.open(path, mijin::FileOpenMode::READ, stream);
}
if (error == mijin::StreamError::SUCCESS)
{
directoryStack.push_back(getDirectory(path));
includedFiles.insert(path);
return newCustomIncludeResult(path, *stream);
}
}
return nullptr;
}
// Do actual reading of the file, filling in a new include result.
IncludeResult* newCustomIncludeResult(const std::string& path, mijin::Stream& stream) const
{
(void) stream.seek(0, mijin::SeekMode::RELATIVE_TO_END);
const std::size_t length = stream.tell();
(void) stream.seek(0);
char* content = new tUserDataElement[length]; // NOLINT(cppcoreguidelines-owning-memory)
const mijin::StreamError error = stream.readRaw(content, length);
if (error != mijin::StreamError::SUCCESS)
{
logAndDie("Error reading include file.");
}
return new IncludeResult(path, content, length, content); // NOLINT(cppcoreguidelines-owning-memory)
}
};
class SemanticIoResolver : public glslang::TDefaultIoResolverBase
{
private:
const std::vector<GLSLSemanticMapping>& mMappings;
public:
SemanticIoResolver(const glslang::TIntermediate& intermediate, const std::vector<GLSLSemanticMapping>& mappings)
: glslang::TDefaultIoResolverBase(intermediate), mMappings(mappings) {}
bool validateBinding(EShLanguage /* stage */, glslang::TVarEntryInfo& /* ent */) override { return true; }
glslang::TResourceType getResourceType(const glslang::TType& type) override {
if (isImageType(type)) {
return glslang::EResImage;
}
if (isTextureType(type)) {
return glslang::EResTexture;
}
if (isSsboType(type)) {
return glslang::EResSsbo;
}
if (isSamplerType(type)) {
return glslang::EResSampler;
}
if (isUboType(type)) {
return glslang::EResUbo;
}
return glslang::EResCount;
}
int resolveBinding(EShLanguage stage, glslang::TVarEntryInfo& ent) override
{
const glslang::TType& type = ent.symbol->getType();
if (type.getQualifier().hasSemantic())
{
const unsigned semantic = type.getQualifier().layoutSemantic;
const unsigned semanticIdx = type.getQualifier().hasSemanticIndex() ? type.getQualifier().layoutSemanticIndex : 0;
auto it = std::ranges::find_if(mMappings, [&](const GLSLSemanticMapping& mapping)
{
return mapping.semantic == semantic && mapping.semanticIdx == semanticIdx;
});
if (it != mMappings.end()) {
return ent.newBinding = it->newBinding;
}
}
// default implementation
const int set = getLayoutSet(type);
// On OpenGL arrays of opaque types take a separate binding for each element
const int numBindings = referenceIntermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1;
const glslang::TResourceType resource = getResourceType(type);
if (resource < glslang::EResCount) {
if (type.getQualifier().hasBinding()) {
return ent.newBinding = reserveSlot(
set, getBaseBinding(stage, resource, set) + type.getQualifier().layoutBinding, numBindings);
}
if (ent.live && doAutoBindingMapping()) {
// find free slot, the caller did make sure it passes all vars with binding
// first and now all are passed that do not have a binding and needs one
return ent.newBinding = getFreeSlot(set, getBaseBinding(stage, resource, set), numBindings);
}
}
return ent.newBinding = -1;
}
int resolveSet(EShLanguage stage, glslang::TVarEntryInfo& ent) override
{
const glslang::TType& type = ent.symbol->getType();
if (type.getQualifier().hasSemantic())
{
const unsigned semantic = type.getQualifier().layoutSemantic;
const unsigned semanticIdx = type.getQualifier().hasSemanticIndex() ? type.getQualifier().layoutSemanticIndex : 0;
auto it = std::ranges::find_if(mMappings, [&](const GLSLSemanticMapping& mapping)
{
return mapping.semantic == semantic && mapping.semanticIdx == semanticIdx;
});
if (it == mMappings.end()) {
return glslang::TDefaultIoResolverBase::resolveSet(stage, ent);
}
return ent.newSet = it->newSet;
}
return glslang::TDefaultIoResolverBase::resolveSet(stage, ent);
}
void addStage(EShLanguage stage, glslang::TIntermediate& stageIntermediate) override
{
nextInputLocation = nextOutputLocation = 0;
glslang::TDefaultIoResolverBase::addStage(stage, stageIntermediate);
}
};
void initGlslang() noexcept
{
static bool inited = false;
if (inited)
{
return;
}
inited = true;
if (!glslang::InitializeProcess())
{
logAndDie("Error initializing Glslang.");
}
}
} // namespace
EShLanguage typeToGlslang(vk::ShaderStageFlagBits type) noexcept
{
switch (type)
{
case vk::ShaderStageFlagBits::eCompute:
return EShLangCompute;
case vk::ShaderStageFlagBits::eVertex:
return EShLangVertex;
case vk::ShaderStageFlagBits::eFragment:
return EShLangFragment;
case vk::ShaderStageFlagBits::eRaygenKHR:
return EShLangRayGen;
case vk::ShaderStageFlagBits::eClosestHitKHR:
return EShLangClosestHit;
case vk::ShaderStageFlagBits::eAnyHitKHR:
return EShLangAnyHit;
case vk::ShaderStageFlagBits::eMissKHR:
return EShLangMiss;
case vk::ShaderStageFlagBits::eIntersectionKHR:
return EShLangIntersect;
case vk::ShaderStageFlagBits::eCallableKHR:
return EShLangCallable;
case vk::ShaderStageFlagBits::eTaskEXT:
return EShLangTask;
case vk::ShaderStageFlagBits::eMeshEXT:
return EShLangMesh;
case vk::ShaderStageFlagBits::eTessellationControl:
return EShLangTessControl;
case vk::ShaderStageFlagBits::eTessellationEvaluation:
return EShLangTessEvaluation;
case vk::ShaderStageFlagBits::eGeometry:
return EShLangGeometry;
case vk::ShaderStageFlagBits::eAllGraphics:
case vk::ShaderStageFlagBits::eAll:
case vk::ShaderStageFlagBits::eSubpassShadingHUAWEI:
case vk::ShaderStageFlagBits::eClusterCullingHUAWEI:
break; // let it fail
}
logAndDie("Invalid value passed to typeToGlslang!");
}
ShaderSource ShaderSource::fromStream(mijin::Stream& stream, std::string fileName)
{
ShaderSource result = {
.fileName = std::move(fileName)
};
if (const mijin::StreamError error = stream.readAsString(result.code); error != mijin::StreamError::SUCCESS)
{
// TODO: custom exception type, for stacktrace and stuff
throw std::runtime_error("Error reading shader source.");
}
return result;
}
ShaderSource ShaderSource::fromFile(const mijin::PathReference& file)
{
std::unique_ptr<mijin::Stream> stream;
if (const mijin::StreamError error = file.open(mijin::FileOpenMode::READ, stream); error != mijin::StreamError::SUCCESS)
{
throw std::runtime_error("Error opening file for reading shader source.");
}
return fromStream(*stream, file.getPath().string());
}
ShaderSource ShaderSource::fromYaml(const YAML::Node& node, const mijin::PathReference& yamlFile)
{
if (node.Tag() == "!load")
{
return fromFile(yamlFile.getAdapter()->getPath(node.as<std::string>()));
}
const std::string& source = node["source"].as<std::string>();
std::string fileName;
if (const YAML::Node fileNameNode = node["fileName"]; fileNameNode && !fileNameNode.IsNull())
{
fileName = fileNameNode.as<std::string>();
}
return {
.code = source,
.fileName = std::move(fileName)
};
}
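// Illustrative examples of the two node shapes fromYaml() accepts (the shader
// source text and file names below are placeholders):
//
//   vertexShader: !load shaders/mesh.vert   # scalar path, opened via the YAML file's adapter
//
//   fragmentShader:
//     source: |
//       void main() {}
//     fileName: inline.frag                 # optional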
GLSLShader::GLSLShader(ObjectPtr<Instance> owner, GLSLShaderCreationArgs args) noexcept
: super_t(std::move(owner)), mType(args.type), mSources(std::move(args.sources)), mDefines(std::move(args.defines))
{
MIJIN_ASSERT(!mSources.empty(), "Cannot compile without sources.");
compile();
}
GLSLShader::~GLSLShader() noexcept = default;
std::unique_ptr<glslang::TShader> GLSLShader::releaseHandle()
{
if (mHandle == nullptr) {
compile();
}
return std::exchange(mHandle, nullptr);
}
ShaderMeta GLSLShader::getPartialMeta()
{
if (mHandle == nullptr) {
compile();
}
return reflectShader(*mHandle);
}
void GLSLShader::compile()
{
initGlslang();
const EShLanguage stage = typeToGlslang(mType);
std::unique_ptr<glslang::TShader> shader = std::make_unique<glslang::TShader>(stage); // NOLINT(cppcoreguidelines-owning-memory)
std::vector<const char*> sources;
std::vector<int> lengths;
std::vector<const char*> names;
sources.reserve(mSources.size() + 1);
lengths.reserve(mSources.size() + 1);
names.reserve(mSources.size() + 1);
std::string preamble = getOwner()->getInstanceExtension<GLSLCompilerSettings>().getCommonPreamble();
for (const std::string& define : mDefines) {
preamble.append(fmt::format("\n#define {}\n", define));
}
sources.push_back(preamble.c_str());
lengths.push_back(static_cast<int>(preamble.size()));
names.push_back("<preamble>");
for (const ShaderSource& source : mSources)
{
sources.push_back(source.code.c_str());
lengths.push_back(static_cast<int>(source.code.size()));
names.push_back(source.fileName.c_str());
}
shader->setStringsWithLengthsAndNames(sources.data(), lengths.data(), names.data(), static_cast<int>(sources.size()));
shader->setDebugInfo(true);
shader->setEnvInput(glslang::EShSourceGlsl, stage, glslang::EShClientVulkan, glslang::EShTargetVulkan_1_3);
shader->setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_3);
shader->setEnvTarget(glslang::EShTargetLanguage::EShTargetSpv, glslang::EShTargetSpv_1_6);
shader->setAutoMapLocations(true);
shader->setAutoMapBindings(true);
const EShMessages PREPROCESS_MESSAGES = static_cast<EShMessages>(EShMsgDefault
#if !defined(KAZAN_RELEASE)
| EShMsgDebugInfo
#endif
);
std::string completeCode;
CustomFileIncluder includer(getOwner()->getPrimaryFSAdapter()); // TODO: this type seems to be doing stupid things, investigate
const std::string sourceFileAbsStr = mSources[0].fileName; // just for you MSVC <3
if (!sourceFileAbsStr.empty()) {
includer.setWorkingDir(fs::path(sourceFileAbsStr).parent_path());
}
const bool couldPreprocess = shader->preprocess(
/* builtInResources = */ GetDefaultResources(),
/* defaultVersion = */ 450,
/* defaultProfile = */ ECoreProfile,
/* forceDefaultVersionAndProfile = */ false,
/* forwardCompatible = */ false,
/* message = */ PREPROCESS_MESSAGES,
/* outputString = */ &completeCode,
/* includer = */ includer
);
if (!couldPreprocess)
{
logMsg("GLSL preprocessing failed:\ninfo log:\n{}\ndebug log:\n{}",
shader->getInfoLog(), shader->getInfoDebugLog()
);
logAndDie("Error preprocessing shader.");
}
#if 0
ShaderPreprocessResult preprocessResult = preprocessShader(completeCode);
for (std::string& module : preprocessResult.importedModules) {
importedModules.insert(std::move(module));
}
for (std::string& option : preprocessResult.supportedOptions) {
supportedOptions.insert(std::move(option));
}
#endif
const char* newSource = completeCode.c_str();
#if defined(KAZAN_RELEASE)
shader->setStrings(&newSource, 1); // replace source with the preprocessed one
#else
const int newSourceLen = static_cast<int>(std::strlen(newSource));
const char* newSourceName = sourceFileAbsStr.c_str();
shader->setStringsWithLengthsAndNames(&newSource, &newSourceLen, &newSourceName, 1);
#endif
const EShMessages PARSE_MESSAGES = static_cast<EShMessages>(EShMsgDefault
#if !defined(KAZAN_RELEASE)
| EShMsgDebugInfo
#endif
);
const bool couldParse = shader->parse(
/* builtinResources = */ GetDefaultResources(),
/* defaultVersion = */ 450,
/* forwardCompatible = */ false,
/* messages = */ PARSE_MESSAGES
);
if (!couldParse)
{
logMsg("GLSL parsing failed:\ninfo log:\n{}\ndebug log:\n{}",
shader->getInfoLog(), shader->getInfoDebugLog()
);
logAndDie("Error parsing shader.");
}
mHandle = std::move(shader);
}
GLSLShaderProgram::GLSLShaderProgram(ObjectPtr<Device> owner, GLSLShaderProgramCreationArgs args)
: super_t(std::move(owner)), mLinkFlags(args.linkFlags)
{
MIJIN_ASSERT_FATAL(!args.shaders.empty(), "At least one shader per program is required!");
mHandle = std::make_unique<glslang::TProgram>();
for (const ObjectPtr<GLSLShader>& shader : args.shaders)
{
mShaderHandles.push_back(shader->releaseHandle());
mHandle->addShader(mShaderHandles.back().get());
}
const EShMessages linkMessages = static_cast<EShMessages>(EShMsgSpvRules | EShMsgVulkanRules
| (args.linkFlags.withDebugInfo ? EShMsgDebugInfo : EShMessages(0)));
if (!mHandle->link(linkMessages))
{
logAndDie("GLSL linking failed!\ninfo log:\n{}\ndebug log:\n{}",
mHandle->getInfoLog(), mHandle->getInfoDebugLog()
);
}
glslang::TIntermediate* referenceIntermediate = mHandle->getIntermediate(typeToGlslang(args.shaders[0]->getType()));
SemanticIoResolver ioResolver(*referenceIntermediate, args.semanticMappings);
if (!mHandle->mapIO(&ioResolver))
{
logAndDie("GLSL io mapping failed!\ninfo log:\n{}\ndebug log:\n{}",
mHandle->getInfoLog(), mHandle->getInfoDebugLog()
);
}
mMeta = reflectProgram(*mHandle);
}
std::vector<std::uint32_t> GLSLShaderProgram::generateSpirv(vk::ShaderStageFlagBits stage) const
{
const EShLanguage glslLang = typeToGlslang(stage);
glslang::SpvOptions options = {};
if (mLinkFlags.withDebugInfo)
{
options.generateDebugInfo = true;
options.stripDebugInfo = false;
options.disableOptimizer = true;
options.emitNonSemanticShaderDebugInfo = true;
options.emitNonSemanticShaderDebugSource = false; // TODO: this should be true, but makes GLSLang crash
}
else
{
options.generateDebugInfo = false;
options.stripDebugInfo = true;
options.disableOptimizer = false;
options.emitNonSemanticShaderDebugInfo = true; // TODO: this should be false, but that also crashes GLSLang ...
options.emitNonSemanticShaderDebugSource = false;
}
options.optimizeSize = false;
options.disassemble = false;
options.validate = true;
spv::SpvBuildLogger logger;
const glslang::TIntermediate* intermediate = mHandle->getIntermediate(glslLang);
if (intermediate == nullptr)
{
throw std::runtime_error("Attempting to generate SpirV from invalid shader stage.");
}
std::vector<std::uint32_t> spirv;
glslang::GlslangToSpv(*intermediate, spirv, &logger, &options);
const std::string messages = logger.getAllMessages();
if (!messages.empty())
{
logMsg("SpirV messages: {}", messages);
}
return spirv;
}
std::vector<PipelineStage> GLSLShaderProgram::generatePipelineStages() const
{
std::vector<PipelineStage> stages;
for (const vk::ShaderStageFlagBits stage : mMeta.stages)
{
const std::vector<std::uint32_t> spirv = generateSpirv(stage);
stages.push_back({
.shader = getOwner()->createChild<ShaderModule>(ShaderModuleCreationArgs{.code = spirv}),
.stage = stage
});
}
return stages;
}
GraphicsPipelineCreationArgs GLSLShaderProgram::prepareGraphicsPipeline(PrepareGraphicsPipelineArgs& args) const
{
args.pipelineLayoutMeta = mMeta.generatePipelineLayout(args.layoutArgs);
args.layouts = args.pipelineLayoutMeta.createPipelineLayout(*getOwner());
return {
.stages = generatePipelineStages(),
.vertexInput = mMeta.generateVertexInputFromLayout(args.vertexLayout),
.layout = args.layouts.pipelineLayout
};
}
ComputePipelineCreationArgs GLSLShaderProgram::prepareComputePipeline(PrepareComputePipelineArgs& args) const
{
args.pipelineLayoutMeta = mMeta.generatePipelineLayout(args.layoutArgs);
args.layouts = args.pipelineLayoutMeta.createPipelineLayout(*getOwner());
return {
.stage = generatePipelineStages()[0],
.layout = args.layouts.pipelineLayout
};
}
// GraphicsPipelineCreationArgs prepareGLSLGraphicsPipeline(const PrepareGLSLGraphicsPipelineArgs& args)
// {
// return {
// .stages =
// {
// PipelineStage{.shader = vertexShaderModule, .stage = vk::ShaderStageFlagBits::eVertex},
// PipelineStage{.shader = fragmentShaderModule, .stage = vk::ShaderStageFlagBits::eFragment}
// },
// .vertexInput = std::move(vertexInput),
// .inputAssembly =
// {
// .topology = vk::PrimitiveTopology::eTriangleStrip
// },
// .colorBlend =
// {
// .attachements = {DEFAULT_BLEND_ATTACHMENT}
// },
// .renderingInfo = GraphicsPipelineRenderingInfo{
// .colorAttachmentFormats = {mRenderTarget->getFormat()}
// },
// .layout = mPipelineLayout
// };
// }
} // namespace iwa
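// Usage sketch (illustrative, not part of this commit): compiling a single compute
// shader to SPIR-V through GLSLShader / GLSLShaderProgram above. Creating both via
// createChild<>() is an assumption carried over from the other objects in this
// commit; the GLSL source is a stand-in and relies on the default version (450)
// and the instance-wide preamble.
static std::vector<std::uint32_t> compileComputeSpirv(
    const iwa::ObjectPtr<iwa::Instance>& instance, const iwa::ObjectPtr<iwa::Device>& device)
{
    auto shader = instance->createChild<iwa::GLSLShader>(iwa::GLSLShaderCreationArgs{
        .type = vk::ShaderStageFlagBits::eCompute,
        .sources = {iwa::ShaderSource{.code = "layout(local_size_x = 64) in;\nvoid main() {}\n"}}
    });
    auto program = device->createChild<iwa::GLSLShaderProgram>(iwa::GLSLShaderProgramCreationArgs{
        .shaders = {shader}
    });
    return program->generateSpirv(vk::ShaderStageFlagBits::eCompute);
}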

33
source/util/growing_descriptor_pool.cpp Normal file
View File

@ -0,0 +1,33 @@
#include "iwa/util/growing_descriptor_pool.hpp"
#include <utility>
#include "iwa/device.hpp"
namespace iwa
{
GrowingDescriptorPool::GrowingDescriptorPool(ObjectPtr<Device> owner, GrowingDescriptorPoolCreationArgs args)
: super_t(std::move(owner)), mCreationArgs(std::move(args))
{
}
ObjectPtr<DescriptorSet> GrowingDescriptorPool::allocateDescriptorSet(const DescriptorSetAllocateArgs& args)
{
for (const ObjectPtr<DescriptorPool>& pool : mPools)
{
try
{
return pool->allocateDescriptorSet(args);
}
catch (vk::FragmentedPoolError&) {}
catch (vk::OutOfPoolMemoryError&) {}
// any other error shall be forwarded
}
// couldn't allocate in any of the existing pools, create a new one
mPools.push_back(getOwner()->createChild<DescriptorPool>(mCreationArgs));
return mPools.back()->allocateDescriptorSet(args); // raise any error that may occur
}
} // namespace iwa
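// Usage sketch (illustrative, not part of this commit): the growing pool is used
// like a regular DescriptorPool and transparently adds a new pool whenever one is
// exhausted or fragmented. The creation args are forwarded unchanged to every
// DescriptorPool it creates; their fields are not shown in this commit, so they
// are left defaulted here, and createChild<>() is an assumption.
static iwa::ObjectPtr<iwa::DescriptorSet> allocateFromGrowingPool(
    const iwa::ObjectPtr<iwa::Device>& device, const iwa::ObjectPtr<iwa::DescriptorSetLayout>& layout)
{
    auto pool = device->createChild<iwa::GrowingDescriptorPool>(iwa::GrowingDescriptorPoolCreationArgs{});
    return pool->allocateDescriptorSet({.layout = layout});
}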

147
source/util/image_reference.cpp Normal file
View File

@ -0,0 +1,147 @@
#include "iwa/util/image_reference.hpp"
#include <mijin/util/iterators.hpp>
#include "iwa/command.hpp"
#include "iwa/device.hpp"
namespace iwa
{
ImageReference::ImageReference(ObjectPtr<Device> owner) : super_t(std::move(owner))
{
}
void ImageReference::finalize(ImageReferenceFinalizeArgs& /* args */) {}
mijin::Task<> ImageReference::c_present()
{
co_return;
}
SwapchainImageReference::SwapchainImageReference(ObjectPtr<Device> owner, SwapchainImageReferenceCreationArgs args)
: super_t(std::move(owner)), mSwapchain(std::move(args.swapchain))
{
mPresentReadySemaphores.resize(mSwapchain->getNumParallelFrames());
for (ObjectPtr<Semaphore>& semaphore : mPresentReadySemaphores)
{
semaphore = getOwner()->createChild<Semaphore>();
}
createImageViews();
mSwapchain->recreated.connect([this]()
{
createImageViews();
});
}
vk::Format SwapchainImageReference::getFormat()
{
return mSwapchain->getFormat();
}
vk::Extent2D SwapchainImageReference::getExtent()
{
return mSwapchain->getExtent();
}
ImageReferenceFrame SwapchainImageReference::getCurrentFrame()
{
return ImageReferenceFrame{
.image = mSwapchain->getCurrentImage().getRaw(),
.imageView = mImageViews[mSwapchain->getCurrentImageIdx()].getRaw()
};
}
void SwapchainImageReference::finalize(ImageReferenceFinalizeArgs& args)
{
args.waitSemaphores.push_back(*mSwapchain->getCurrentAvailableSemaphore());
args.signalSemaphores.push_back(*mPresentReadySemaphores[mSwapchain->getCurrentFrameIdx()]);
mSwapchain->getCurrentImage()->applyTransition(args.cmdBuffer, ImageTransition{
.stages = vk::PipelineStageFlagBits::eBottomOfPipe,
.layout = vk::ImageLayout::ePresentSrcKHR,
.access = {}
});
}
mijin::Task<> SwapchainImageReference::c_present()
{
// and present
co_await mSwapchain->c_present({
.queue = getOwner()->getGraphicsQueue(),
.waitSemaphores = {mPresentReadySemaphores[mSwapchain->getCurrentFrameIdx()]->getVkHandle()}
});
}
void SwapchainImageReference::createImageViews()
{
mImageViews.resize(mSwapchain->getImages().size());
for (auto [image, imageView] : mijin::zip(mSwapchain->getImages(), mImageViews))
{
imageView = image->createImageView();
}
}
DirectImageReference::DirectImageReference(ObjectPtr<Device> owner, DirectImageReferenceCreationArgs args)
: super_t(std::move(owner)), mImage(std::move(args.image)), mImageView(std::move(args.imageView))
{
}
vk::Format DirectImageReference::getFormat()
{
return mImage->getFormat();
}
vk::Extent2D DirectImageReference::getExtent()
{
return {
.width = mImage->getSize().width,
.height = mImage->getSize().height
};
}
ImageReferenceFrame DirectImageReference::getCurrentFrame()
{
return ImageReferenceFrame{
.image = mImage.getRaw(),
.imageView = mImageView.getRaw()
};
}
AutoResizeImageReference::AutoResizeImageReference(ObjectPtr<Device> owner, AutoResizeImageReferenceCreationArgs args)
: super_t(std::move(owner), DirectImageReferenceCreationArgs{}), mReferenceImageRef(std::move(args.referenceImageRef)),
mImageCreationArgs(std::move(args.imageCreationArgs)), mImageViewCreationArgs(args.imageViewCreationArgs)
{
createImage();
}
vk::Extent2D AutoResizeImageReference::getExtent()
{
return mReferenceImageRef->getExtent();
}
ImageReferenceFrame AutoResizeImageReference::getCurrentFrame()
{
const vk::Extent2D extent = mReferenceImageRef->getExtent();
if (extent.width != mImage->getSize().width || extent.height != mImage->getSize().height) {
createImage();
}
return ImageReferenceFrame{
.image = mImage.getRaw(),
.imageView = mImageView.getRaw()
};
}
void AutoResizeImageReference::createImage()
{
const vk::Extent2D extent = mReferenceImageRef->getExtent();
mImageCreationArgs.extent.width = extent.width;
mImageCreationArgs.extent.height = extent.height;
mImageCreationArgs.extent.depth = 1;
mImage = getOwner()->createChild<Image>(mImageCreationArgs);
mImage->allocateMemory();
mImageView = mImage->createImageView(mImageViewCreationArgs);
}
} // namespace iwa
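// Usage sketch (illustrative, not part of this commit): wrapping a swapchain in an
// image reference so a render loop can treat its render target uniformly (see
// render_loop.cpp below, which finalizes and presents ImageReferences). The
// .swapchain field comes from the constructor above; createChild<>() is assumed.
static iwa::ObjectPtr<iwa::ImageReference> makeSwapchainTarget(
    const iwa::ObjectPtr<iwa::Device>& device, iwa::ObjectPtr<iwa::Swapchain> swapchain)
{
    return device->createChild<iwa::SwapchainImageReference>(iwa::SwapchainImageReferenceCreationArgs{
        .swapchain = std::move(swapchain)
    });
}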

587
source/util/reflect_glsl.cpp Normal file
View File

@ -0,0 +1,587 @@
#include "iwa/util/reflect_glsl.hpp"
#include <glslang/Include/InfoSink.h>
#include <glslang/Public/ShaderLang.h>
#include <glslang/MachineIndependent/localintermediate.h>
#include <glslang/Public/ResourceLimits.h>
namespace iwa
{
namespace
{
class MetaCollectingTraverser : public glslang::TIntermTraverser
{
private:
ShaderMeta& meta;
vk::ShaderStageFlagBits shaderType;
public:
inline MetaCollectingTraverser(ShaderMeta& meta_, vk::ShaderStageFlagBits shaderType_) : meta(meta_), shaderType(shaderType_)
{}
bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override;
bool visitUnary(glslang::TVisit, glslang::TIntermUnary* node) override;
bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override;
bool visitSelection(glslang::TVisit, glslang::TIntermSelection* node) override;
void visitConstantUnion(glslang::TIntermConstantUnion* node) override;
void visitSymbol(glslang::TIntermSymbol* node) override;
bool visitLoop(glslang::TVisit, glslang::TIntermLoop* node) override;
bool visitBranch(glslang::TVisit, glslang::TIntermBranch* node) override;
bool visitSwitch(glslang::TVisit, glslang::TIntermSwitch* node) override;
};
vk::Format convertGlslangBaseType(const glslang::TType& type)
{
switch (type.getBasicType())
{
case glslang::EbtInt:
return vk::Format::eR32Sint;
case glslang::EbtUint:
return vk::Format::eR32Uint;
case glslang::EbtFloat:
return vk::Format::eR32Sfloat;
case glslang::EbtDouble:
return vk::Format::eR64Sfloat;
default:
break;
}
logAndDie("Don't know how to convert Glslang basic type :*(");
}
vk::Format convertGlslangLayoutFormat(glslang::TLayoutFormat layoutFormat)
{
switch (layoutFormat)
{
case glslang::TLayoutFormat::ElfNone:
return vk::Format::eUndefined;
// Float image
case glslang::TLayoutFormat::ElfRgba32f:
return vk::Format::eR32G32B32A32Sfloat;
case glslang::TLayoutFormat::ElfRgba16f:
return vk::Format::eR16G16B16A16Sfloat;
case glslang::TLayoutFormat::ElfR32f:
return vk::Format::eR32Sfloat;
case glslang::TLayoutFormat::ElfRgba8:
return vk::Format::eR8G8B8A8Unorm;
case glslang::TLayoutFormat::ElfRgba8Snorm:
return vk::Format::eR8G8B8A8Snorm;
case glslang::TLayoutFormat::ElfRg32f:
return vk::Format::eR32G32Sfloat;
case glslang::TLayoutFormat::ElfRg16f:
return vk::Format::eR16G16Sfloat;
case glslang::TLayoutFormat::ElfR11fG11fB10f:
return vk::Format::eB10G11R11UfloatPack32; // TODO: ?
case glslang::TLayoutFormat::ElfR16f:
return vk::Format::eR16Sfloat;
case glslang::TLayoutFormat::ElfRgba16:
return vk::Format::eR16G16B16A16Unorm;
case glslang::TLayoutFormat::ElfRgb10A2:
return vk::Format::eA2R10G10B10SnormPack32; // TODO: ?
case glslang::TLayoutFormat::ElfRg16:
return vk::Format::eR16G16Unorm;
case glslang::TLayoutFormat::ElfRg8:
return vk::Format::eR8G8Unorm;
case glslang::TLayoutFormat::ElfR16:
return vk::Format::eR16Unorm;
case glslang::TLayoutFormat::ElfR8:
return vk::Format::eR8Unorm;
case glslang::TLayoutFormat::ElfRgba16Snorm:
return vk::Format::eR16G16B16A16Snorm;
case glslang::TLayoutFormat::ElfRg16Snorm:
return vk::Format::eR16G16Snorm;
case glslang::TLayoutFormat::ElfRg8Snorm:
return vk::Format::eR8G8Snorm;
case glslang::TLayoutFormat::ElfR16Snorm:
return vk::Format::eR16Snorm;
case glslang::TLayoutFormat::ElfR8Snorm:
return vk::Format::eR8Snorm;
// Int image
case glslang::TLayoutFormat::ElfRgba32i:
return vk::Format::eR32G32B32A32Sint;
case glslang::TLayoutFormat::ElfRgba16i:
return vk::Format::eR16G16B16A16Sint;
case glslang::TLayoutFormat::ElfRgba8i:
return vk::Format::eR8G8B8A8Sint;
case glslang::TLayoutFormat::ElfR32i:
return vk::Format::eR32Sint;
case glslang::TLayoutFormat::ElfRg32i:
return vk::Format::eR32G32Sint;
case glslang::TLayoutFormat::ElfRg16i:
return vk::Format::eR16G16Sint;
case glslang::TLayoutFormat::ElfRg8i:
return vk::Format::eR8G8Sint;
case glslang::TLayoutFormat::ElfR16i:
return vk::Format::eR16Sint;
case glslang::TLayoutFormat::ElfR8i:
return vk::Format::eR8Sint;
case glslang::TLayoutFormat::ElfR64i:
return vk::Format::eR64Sint;
// Uint image
case glslang::TLayoutFormat::ElfRgba32ui:
return vk::Format::eR32G32B32A32Uint;
case glslang::TLayoutFormat::ElfRgba16ui:
return vk::Format::eR16G16B16A16Uint;
case glslang::TLayoutFormat::ElfRgba8ui:
return vk::Format::eR8G8B8A8Uint;
case glslang::TLayoutFormat::ElfR32ui:
return vk::Format::eR32Uint;
case glslang::TLayoutFormat::ElfRg32ui:
return vk::Format::eR32G32Uint;
case glslang::TLayoutFormat::ElfRg16ui:
return vk::Format::eR16G16Uint;
case glslang::TLayoutFormat::ElfRgb10a2ui:
return vk::Format::eA2R10G10B10UintPack32;
case glslang::TLayoutFormat::ElfRg8ui:
return vk::Format::eR8G8Uint;
case glslang::TLayoutFormat::ElfR16ui:
return vk::Format::eR16Uint;
case glslang::TLayoutFormat::ElfR8ui:
return vk::Format::eR8Uint;
case glslang::TLayoutFormat::ElfR64ui:
return vk::Format::eR64Uint;
// other/unknown
case glslang::TLayoutFormat::ElfSize1x8:
case glslang::TLayoutFormat::ElfSize1x16:
case glslang::TLayoutFormat::ElfSize1x32:
case glslang::TLayoutFormat::ElfSize2x32:
case glslang::TLayoutFormat::ElfSize4x32:
case glslang::TLayoutFormat::ElfEsFloatGuard:
case glslang::TLayoutFormat::ElfFloatGuard:
case glslang::TLayoutFormat::ElfEsIntGuard:
case glslang::TLayoutFormat::ElfIntGuard:
case glslang::TLayoutFormat::ElfEsUintGuard:
case glslang::TLayoutFormat::ElfExtSizeGuard:
case glslang::TLayoutFormat::ElfCount:
break;
}
logAndDie("Unexpected format in convertGlslangLayoutFormat()."); // : {}", layoutFormat);
}
vk::Format convertGlslangVectorType(glslang::TBasicType basicType, int vectorSize)
{
switch (basicType)
{
case glslang::EbtFloat:
switch (vectorSize)
{
case 2:
return vk::Format::eR32G32Sfloat;
case 3:
return vk::Format::eR32G32B32Sfloat;
case 4:
return vk::Format::eR32G32B32A32Sfloat;
default:
break;
}
break;
case glslang::EbtDouble:
switch (vectorSize)
{
case 2:
return vk::Format::eR64G64Sfloat;
case 3:
return vk::Format::eR64G64B64Sfloat;
case 4:
return vk::Format::eR64G64B64A64Sfloat;
default:
break;
}
break;
case glslang::EbtInt:
switch (vectorSize)
{
case 2:
return vk::Format::eR32G32Sint;
case 3:
return vk::Format::eR32G32B32Sint;
case 4:
return vk::Format::eR32G32B32A32Sint;
default:
break;
}
break;
case glslang::EbtUint:
switch (vectorSize)
{
case 2:
return vk::Format::eR32G32Uint;
case 3:
return vk::Format::eR32G32B32Uint;
case 4:
return vk::Format::eR32G32B32A32Uint;
default:
break;
}
break;
case glslang::EbtBool: // NOLINT(bugprone-branch-clone) TODO: ???
break;
default:
break;
}
logAndDie("Don't know how to convert Glslang vector type :(");
}
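// e.g. vec2 -> eR32G32Sfloat, vec3 -> eR32G32B32Sfloat, ivec4 -> eR32G32B32A32Sint,
// uvec2 -> eR32G32Uint; bool vectors have no direct vk::Format and abort above.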
vk::Format convertGlslangVectorType(const glslang::TType& type)
{
assert(type.isVector());
return convertGlslangVectorType(type.getBasicType(), type.getVectorSize());
}
ShaderVariableMatrixType convertGlslangMatrixType(const glslang::TType& type)
{
assert(type.isMatrix());
assert(type.getMatrixCols() == type.getMatrixRows()); // only square matrices are supported so far
switch (type.getMatrixCols())
{
case 2:
return ShaderVariableMatrixType::MAT2;
case 3:
return ShaderVariableMatrixType::MAT3;
case 4:
return ShaderVariableMatrixType::MAT4;
default:
break;
}
logAndDie("Don't know how to convert Glslang matrix type -.-");
}
ImageDim convertGlslangSamplerDim(glslang::TSamplerDim dim)
{
switch (dim)
{
case glslang::TSamplerDim::Esd1D:
return ImageDim::ONE;
case glslang::TSamplerDim::Esd2D:
return ImageDim::TWO;
case glslang::TSamplerDim::Esd3D:
return ImageDim::THREE;
case glslang::TSamplerDim::EsdCube:
return ImageDim::CUBE;
default:
break;
}
logAndDie("Don't know how to convert Glslang sampler dimensions ...");
}
ShaderVariableType convertGlslangType(const glslang::TType& type)
{
ShaderVariableType result;
if (type.isVector())
{
result.baseType = ShaderVariableBaseType::SIMPLE;
result.simple.format = convertGlslangVectorType(type);
} else if (type.isMatrix())
{
result.baseType = ShaderVariableBaseType::MATRIX;
result.matrixType = convertGlslangMatrixType(type);
} else if (type.isStruct())
{
const std::size_t numMembers = type.getStruct()->size();
result.baseType = ShaderVariableBaseType::STRUCT;
result.struct_.members.reserve(numMembers);
std::size_t currentOffset = 0;
for (const glslang::TTypeLoc& typeLoc: *type.getStruct())
{
ShaderVariableStructMember& member = result.struct_.members.emplace_back();
member.name = typeLoc.type->getFieldName();
member.type = convertGlslangType(*typeLoc.type);
member.offset = currentOffset;
if (typeLoc.type->getQualifier().hasSemantic())
{
member.semantic = typeLoc.type->getQualifier().layoutSemantic;
}
if (typeLoc.type->getQualifier().hasSemanticIndex())
{
member.semanticIdx = typeLoc.type->getQualifier().layoutSemanticIndex;
}
currentOffset = member.offset + calcShaderTypeSize(member.type); // TODO: padding
}
} else if (type.getBasicType() == glslang::EbtSampler)
{
const glslang::TSampler& sampler = type.getSampler();
result.baseType = ShaderVariableBaseType::IMAGE;
result.image.dimensions = convertGlslangSamplerDim(sampler.dim);
result.image.format = convertGlslangLayoutFormat(type.getQualifier().layoutFormat);
} else
{
result.baseType = ShaderVariableBaseType::SIMPLE;
result.simple.format = convertGlslangBaseType(type);
}
if (type.isArray())
{
if (type.isArrayVariablyIndexed())
{
result.arraySize = 0;
result.dynamicArraySize = true;
} else
{
assert(type.getArraySizes()->getNumDims() == 1); // don't support multi dimensional arrays yet
result.arraySize = type.getOuterArraySize();
}
}
return result;
}
vk::DescriptorType getGlslangDescriptorType(const glslang::TType& type)
{
if (type.getBasicType() == glslang::EbtSampler)
{
if (type.getSampler().combined)
{
return vk::DescriptorType::eCombinedImageSampler;
}
if (type.getSampler().isImage())
{
return vk::DescriptorType::eStorageImage;
}
} else if (type.isStruct())
{
if (type.getQualifier().isUniform())
{
return vk::DescriptorType::eUniformBuffer;
}
return vk::DescriptorType::eStorageBuffer;
}
logAndDie("No idea what to do with this type :/");
}
bool MetaCollectingTraverser::visitBinary(glslang::TVisit, glslang::TIntermBinary* node)
{
(void) node;
return false;
}
bool MetaCollectingTraverser::visitUnary(glslang::TVisit, glslang::TIntermUnary* node)
{
(void) node;
return false;
}
bool MetaCollectingTraverser::visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node)
{
switch (node->getOp())
{
case glslang::EOpSequence:
return true;
case glslang::EOpFunction:
break;
case glslang::EOpLinkerObjects:
return true;
default:
break;
}
return false;
}
bool MetaCollectingTraverser::visitSelection(glslang::TVisit, glslang::TIntermSelection* node)
{
(void) node;
return false;
}
void MetaCollectingTraverser::visitConstantUnion(glslang::TIntermConstantUnion* node)
{
(void) node;
}
void MetaCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node)
{
const bool isLinkerObject = getParentNode()
&& getParentNode()->getAsAggregate()
&& getParentNode()->getAsAggregate()->getOp() == glslang::EOpLinkerObjects;
if (isLinkerObject)
{
if (node->getQualifier().builtIn)
{
return;
}
if (node->getQualifier().isUniformOrBuffer())
{
if (node->getQualifier().isPushConstant())
{
ShaderPushConstantBlock pushConstantBlock;
pushConstantBlock.type = convertGlslangType(node->getType());
assert(pushConstantBlock.type.baseType == ShaderVariableBaseType::STRUCT);
meta.extendPushConstant(pushConstantBlock, ShaderTypeBits::make(shaderType));
return;
}
const unsigned setIdx = node->getQualifier().hasSet() ? node->getQualifier().layoutSet : UNSPECIFIED_INDEX;
const unsigned binding = node->getQualifier().hasBinding() ? node->getQualifier().layoutBinding : UNSPECIFIED_INDEX;
ShaderVariableSet& set = meta.getOrCreateInterfaceVariableSet(setIdx);
assert(setIdx == UNSPECIFIED_INDEX || !set.getVariableAtBindingOpt(binding)); // multiple bindings at the same index?
set.usedInStages.set(shaderType, true);
ShaderVariable& var = set.variables.emplace_back();
var.binding = binding;
var.name = node->getName();
if (node->getQualifier().hasSemantic())
{
var.semantic = node->getQualifier().layoutSemantic;
}
if (node->getQualifier().hasSemanticIndex())
{
var.semanticIndex = node->getQualifier().layoutSemanticIndex;
}
// uniform blocks are identified by the name of their type
if (var.name.empty() || var.name.starts_with("anon@"))
{
const glslang::TString& typeName = node->getType().getTypeName();
if (!typeName.empty())
{
var.name = typeName;
}
}
var.descriptorType = getGlslangDescriptorType(node->getType());
var.type = convertGlslangType(node->getType());
}
else if (node->getQualifier().storage == glslang::EvqVaryingIn)
{
ShaderAttribute attribute;
attribute.stage = shaderType;
attribute.type = convertGlslangType(node->getType());
attribute.location = node->getQualifier().hasLocation() ? node->getQualifier().layoutLocation : UNSPECIFIED_INDEX;
attribute.name = node->getName();
if (node->getQualifier().hasSemantic())
{
attribute.semantic = node->getQualifier().layoutSemantic;
}
if (node->getQualifier().hasSemanticIndex())
{
attribute.semanticIndex = node->getQualifier().layoutSemanticIndex;
}
meta.addInputAttribute(std::move(attribute));
}
else if (node->getQualifier().storage == glslang::EvqVaryingOut)
{
ShaderAttribute attribute;
attribute.stage = shaderType;
attribute.type = convertGlslangType(node->getType());
attribute.location = node->getQualifier().hasLocation() ? node->getQualifier().layoutLocation : UNSPECIFIED_INDEX;
attribute.name = node->getName();
if (node->getQualifier().hasSemantic())
{
attribute.semantic = node->getQualifier().layoutSemantic;
}
if (node->getQualifier().hasSemanticIndex())
{
attribute.semanticIndex = node->getQualifier().layoutSemanticIndex;
}
meta.addOutputAttribute(std::move(attribute));
}
}
}
bool MetaCollectingTraverser::visitLoop(glslang::TVisit, glslang::TIntermLoop* node)
{
(void) node;
return false;
}
bool MetaCollectingTraverser::visitBranch(glslang::TVisit, glslang::TIntermBranch* node)
{
(void) node;
return false;
}
bool MetaCollectingTraverser::visitSwitch(glslang::TVisit, glslang::TIntermSwitch* node)
{
(void) node;
return false;
}
vk::ShaderStageFlagBits shaderStageFromGlslang(EShLanguage language)
{
switch (language)
{
case EShLangVertex:
return vk::ShaderStageFlagBits::eVertex;
case EShLangTessControl:
return vk::ShaderStageFlagBits::eTessellationControl;
case EShLangTessEvaluation:
return vk::ShaderStageFlagBits::eTessellationEvaluation;
case EShLangGeometry:
return vk::ShaderStageFlagBits::eGeometry;
case EShLangFragment:
return vk::ShaderStageFlagBits::eFragment;
case EShLangCompute:
return vk::ShaderStageFlagBits::eCompute;
case EShLangRayGen:
return vk::ShaderStageFlagBits::eRaygenKHR;
case EShLangIntersect:
return vk::ShaderStageFlagBits::eIntersectionKHR;
case EShLangAnyHit:
return vk::ShaderStageFlagBits::eAnyHitKHR;
case EShLangClosestHit:
return vk::ShaderStageFlagBits::eClosestHitKHR;
case EShLangMiss:
return vk::ShaderStageFlagBits::eMissKHR;
case EShLangCallable:
return vk::ShaderStageFlagBits::eCallableKHR;
case EShLangTask:
return vk::ShaderStageFlagBits::eTaskEXT;
case EShLangMesh:
return vk::ShaderStageFlagBits::eMeshEXT;
case EShLangCount:
break; // fall through
}
logAndDie("Invalid value passed to shaderStageFromGlslang!");
}
} // namespace
ShaderMeta reflectShader(glslang::TShader& shader)
{
return reflectIntermediate(*shader.getIntermediate(), shaderStageFromGlslang(shader.getStage()));
}
ShaderMeta reflectProgram(glslang::TProgram& program)
{
ShaderMeta result;
for (int stage = 0; stage < EShLangCount; ++stage)
{
glslang::TIntermediate* intermediate = program.getIntermediate(static_cast<EShLanguage>(stage));
if (intermediate == nullptr) {
continue;
}
result.extend(reflectIntermediate(*intermediate, shaderStageFromGlslang(static_cast<EShLanguage>(stage))));
}
return result;
}
ShaderMeta reflectIntermediate(glslang::TIntermediate& intermediate, vk::ShaderStageFlagBits stage)
{
ShaderMeta meta;
MetaCollectingTraverser traverser(meta, stage);
intermediate.getTreeRoot()->traverse(&traverser);
meta.stages.set(stage, true);
if (stage == vk::ShaderStageFlagBits::eCompute)
{
meta.localSizeX = static_cast<unsigned>(intermediate.getLocalSize(0));
meta.localSizeY = static_cast<unsigned>(intermediate.getLocalSize(1));
meta.localSizeZ = static_cast<unsigned>(intermediate.getLocalSize(2));
}
return meta;
}
} // namespace iwa

128
source/util/render_loop.cpp Normal file
View File

@ -0,0 +1,128 @@
#include "iwa/util/render_loop.hpp"
#include <mijin/async/task_mutex.hpp>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
namespace iwa
{
namespace
{
// BIG BIG TODO: This is a dumb workaround for sharing images (e.g. the UI image) between multiple renderers.
// The reason is that the layout change mechanism doesn't work if multiple command buffers (that are executed
// sequentially) are recorded in parallel.
// A possible fix could be to move the state tracking mechanism to the renderer and generate the barriers
// before submitting.
mijin::TaskMutex gRenderMutex;
} // namespace
RenderLoop::RenderLoop(ObjectPtr<Device> owner, RenderLoopCreationArgs args)
: super_t(std::move(owner)), mAdvanceDeleteQueue(args.flags.advanceDeleteQueue)
{
mAlternating.resize(args.parallelFrames);
ObjectPtr<CommandPool> commandPool = std::move(args.commandPool);
if (!commandPool)
{
commandPool = getOwner()->createChild<CommandPool>(CommandPoolCreationArgs{
.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
.queueFamilyIndex = getOwner()->getDeviceInfo().graphicsQueueFamily
});
}
for (Alternating& alt : mAlternating)
{
alt.commandBuffer = commandPool->allocateCommandBuffer();
alt.renderDoneFence = getOwner()->createChild<Fence>(FenceCreationArgs{.flags = vk::FenceCreateFlagBits::eSignaled});
}
}
void RenderLoop::start() noexcept
{
addTask(c_renderLoop());
}
mijin::Task<> RenderLoop::c_init()
{
co_return;
}
mijin::SimpleTaskLoop& RenderLoop::getTaskLoop() const noexcept
{
return getOwner()->getOwner()->getMainTaskLoop();
}
mijin::Task<> RenderLoop::c_renderLoop()
{
co_await c_init();
while (!getOwner()->getOwner()->isQuitRequested())
{
Alternating& alt = mAlternating.at(mFrameIdx);
// wait for the command buffer to be ready
co_await alt.renderDoneFence->c_wait();
// reset the fence
alt.renderDoneFence->reset();
vk::CommandBuffer cmdBuffer = alt.commandBuffer->getVkHandle();
cmdBuffer.begin(vk::CommandBufferBeginInfo{
.flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit
});
// record the commands
RenderLoopRenderArgs renderArgs = {
.cmdBuffer = *alt.commandBuffer,
.frameIdx = mFrameIdx
};
{ // gRenderMutex lock
const mijin::TaskMutexLock lock = co_await gRenderMutex.c_lock();
co_await c_render(renderArgs);
std::vector<vk::Semaphore> waitSemaphores;
std::vector<vk::Semaphore> signalSemaphores;
ImageReferenceFinalizeArgs finalizeArgs{
.cmdBuffer = *alt.commandBuffer,
.waitSemaphores = waitSemaphores,
.signalSemaphores = signalSemaphores
};
for (const ObjectPtr<ImageReference>& imageRef: renderArgs.usedImageReferences)
{
imageRef->finalize(finalizeArgs);
}
cmdBuffer.end();
// submit them
const vk::PipelineStageFlags waitStage = vk::PipelineStageFlagBits::eFragmentShader;
getOwner()->getGraphicsQueue().submit(vk::SubmitInfo{
.waitSemaphoreCount = static_cast<std::uint32_t>(waitSemaphores.size()),
.pWaitSemaphores = waitSemaphores.data(),
.pWaitDstStageMask = &waitStage,
.commandBufferCount = 1,
.pCommandBuffers = &cmdBuffer,
.signalSemaphoreCount = static_cast<std::uint32_t>(signalSemaphores.size()),
.pSignalSemaphores = signalSemaphores.data()
}, *alt.renderDoneFence);
} // gRenderMutex lock
// finally present
for (const ObjectPtr<ImageReference>& imageRef : renderArgs.usedImageReferences)
{
co_await imageRef->c_present();
}
// tick deleters
// TODO: what if there are multiple render loops?
if (mAdvanceDeleteQueue)
{
getOwner()->getOwner()->tickDeleteQueue();
}
mFrameIdx = (mFrameIdx + 1) % mAlternating.size();
}
co_return;
}
} // namespace iwa
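// Usage sketch (illustrative, not part of this commit): a minimal RenderLoop
// subclass. c_render() being a protected virtual coroutine taking
// RenderLoopRenderArgs& is an assumption inferred from the call in c_renderLoop()
// above; recording no commands and merely registering the target image reference
// is enough for the base loop to finalize it (barriers + semaphores) and present.
class PresentOnlyLoop final : public iwa::RenderLoop
{
public:
    PresentOnlyLoop(iwa::ObjectPtr<iwa::Device> owner, iwa::RenderLoopCreationArgs args,
        iwa::ObjectPtr<iwa::ImageReference> target)
        : iwa::RenderLoop(std::move(owner), std::move(args)), mTarget(std::move(target)) {}
protected:
    mijin::Task<> c_render(iwa::RenderLoopRenderArgs& args) override
    {
        args.usedImageReferences.push_back(mTarget);
        co_return;
    }
private:
    iwa::ObjectPtr<iwa::ImageReference> mTarget;
};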

676
source/util/shader_meta.cpp Normal file
View File

@ -0,0 +1,676 @@
#include "iwa/util/shader_meta.hpp"
#include "iwa/log.hpp"
#include "kazan/resource/mesh.hpp"
#include "iwa/util/glsl_compiler.hpp"
#include "iwa/util/vkutil.hpp"
namespace
{
template<typename T>
inline std::size_t calcCrcSizeAppend(T, std::size_t) noexcept
{
MIJIN_TRAP(); // TODO
return 0;
}
} // namespace
namespace iwa
{
namespace
{
vk::ShaderStageFlags typeBitsToVkStages(ShaderTypeBits bits)
{
vk::ShaderStageFlags flags = {};
if (bits.compute)
{
flags |= vk::ShaderStageFlagBits::eCompute;
}
if (bits.vertex)
{
flags |= vk::ShaderStageFlagBits::eVertex;
}
if (bits.fragment)
{
flags |= vk::ShaderStageFlagBits::eFragment;
}
if (bits.rayGeneration)
{
flags |= vk::ShaderStageFlagBits::eRaygenKHR;
}
if (bits.rayClosestHit)
{
flags |= vk::ShaderStageFlagBits::eClosestHitKHR;
}
if (bits.rayAnyHit)
{
flags |= vk::ShaderStageFlagBits::eAnyHitKHR;
}
if (bits.rayMiss)
{
flags |= vk::ShaderStageFlagBits::eMissKHR;
}
if (bits.rayIntersection)
{
flags |= vk::ShaderStageFlagBits::eIntersectionKHR;
}
if (bits.callable)
{
flags |= vk::ShaderStageFlagBits::eCallableKHR;
}
return flags;
}
void addShaderAttribute(std::vector<ShaderAttribute>& attributes, ShaderAttribute&& attribute)
{
bool doInsert = true;
for (const ShaderAttribute& myAttribute: attributes)
{
if (myAttribute.stage == attribute.stage && myAttribute.location == attribute.location && myAttribute.location != UNSPECIFIED_INDEX)
{
// same location, type must be the same
if (myAttribute.type != attribute.type)
{
logAndDie(
"Attempting to merge incompatible shader metas, attributes {} and {} are incompatible. {} != {}",
myAttribute.name, attribute.name, myAttribute.type, attribute.type);
}
doInsert = false; // member already exists, don't insert
continue;
}
}
if (!doInsert)
{
return;
}
auto it = attributes.begin();
for (; it != attributes.end(); ++it)
{
if (static_cast<unsigned>(it->stage) > static_cast<unsigned>(attribute.stage)
|| (it->stage == attribute.stage && it->location > attribute.location))
{
break; // insert here
}
}
attributes.insert(it, std::move(attribute));
}
} // namespace
ShaderVariableStructType::ShaderVariableStructType() {} // NOLINT(modernize-use-equals-default)
ShaderVariableStructType::~ShaderVariableStructType() {} // NOLINT(modernize-use-equals-default)
void ShaderMeta::extendPushConstant(ShaderPushConstantBlock pushConstantBlock_, ShaderTypeBits stages)
{
if (pushConstantBlock_.type.baseType == ShaderVariableBaseType::NONE) {
return;
}
if (pushConstantBlock.type.baseType == ShaderVariableBaseType::NONE)
{
pushConstantBlock = std::move(pushConstantBlock_);
pushConstantStages = stages;
return;
}
// now comes the actual merging
assert(pushConstantBlock.type.baseType == ShaderVariableBaseType::STRUCT);
assert(pushConstantBlock_.type.baseType == ShaderVariableBaseType::STRUCT);
assert(stages);
for (ShaderVariableStructMember& member : pushConstantBlock_.type.struct_.members)
{
bool doInsert = true;
for (const ShaderVariableStructMember& myMember : pushConstantBlock.type.struct_.members)
{
if (myMember.offset == member.offset)
{
// same offset, type must be the same
if (myMember.type != member.type)
{
logAndDie("Attempting to merge incompatible push constant blocks, members {} and {} are incompatible. {} != {}",
myMember.name, member.name, myMember.type, member.type);
}
doInsert = false; // member already exists, don't insert
continue;
}
// otherwise check for overlaps
if ((myMember.offset < member.offset && myMember.offset + calcShaderTypeSize(myMember.type) > member.offset)
|| (myMember.offset > member.offset && myMember.offset < member.offset + calcShaderTypeSize(member.type)))
{
logAndDie("Attempting to merge incompatible push constant blocks, members {} and {} are overlapping.",
myMember.name, member.name);
}
}
if (!doInsert) {
continue;
}
auto it = pushConstantBlock.type.struct_.members.begin();
for (; it != pushConstantBlock.type.struct_.members.end(); ++it)
{
if (it->offset > member.offset) {
break; // insert here
}
}
pushConstantBlock.type.struct_.members.insert(it, std::move(member));
}
pushConstantStages |= stages;
}
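// Example (illustrative): merging a vertex-stage block { mat4 viewProj @ offset 0 }
// with a fragment-stage block { mat4 viewProj @ 0, vec4 tint @ 64 } yields a single
// block { viewProj @ 0, tint @ 64 } with pushConstantStages covering both stages;
// a member declared at offset 32 in either stage would overlap viewProj (64 bytes)
// and trigger logAndDie() above.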
void ShaderMeta::addInputAttribute(ShaderAttribute attribute)
{
addShaderAttribute(inputAttributes, std::move(attribute));
}
void ShaderMeta::addOutputAttribute(ShaderAttribute attribute)
{
addShaderAttribute(outputAttributes, std::move(attribute));
}
ObjectPtr<DescriptorSetLayout> DescriptorSetMeta::createDescriptorSetLayout(Device& device) const
{
assert(bindings.size() == bindingFlags.size());
return device.createChild<DescriptorSetLayout>(DescriptorSetLayoutCreationArgs{
.bindings = bindings,
.bindingFlags = bindingFlags,
.flags = flags,
});
}
std::vector<ObjectPtr<DescriptorSet>> PipelineAndDescriptorSetLayouts::createDescriptorSets(DescriptorPool& pool) const
{
std::vector<ObjectPtr<DescriptorSet>> result;
result.reserve(descriptorSetLayouts.size());
for (const ObjectPtr<DescriptorSetLayout>& layout : descriptorSetLayouts)
{
result.push_back(pool.allocateDescriptorSet({
.layout = layout
}));
}
return result;
}
ObjectPtr<DescriptorSet> PipelineAndDescriptorSetLayouts::createDescriptorSet(DescriptorPool& pool, unsigned setIdx) const
{
MIJIN_ASSERT(setIdx < descriptorSetLayouts.size(), "Invalid set index.");
return pool.allocateDescriptorSet({
.layout = descriptorSetLayouts[setIdx]
});
}
PipelineAndDescriptorSetLayouts PipelineLayoutMeta::createPipelineLayout(Device& device) const
{
std::vector<ObjectPtr<DescriptorSetLayout>> descSetLayouts;
descSetLayouts.reserve(descriptorSets.size());
for (const DescriptorSetMeta& dslMeta : descriptorSets)
{
descSetLayouts.push_back(dslMeta.createDescriptorSetLayout(device));
}
std::vector<vk::PushConstantRange> pushConstantRanges;
if (pushConstantRange.stageFlags)
{
pushConstantRanges.push_back(pushConstantRange);
}
ObjectPtr<PipelineLayout> pipelineLayout = device.createChild<PipelineLayout>(PipelineLayoutCreationArgs{
.setLayouts = descSetLayouts,
.pushConstantRanges = std::move(pushConstantRanges)
});
return
{
.descriptorSetLayouts = std::move(descSetLayouts),
.pipelineLayout = std::move(pipelineLayout)
};
}
void ShaderVariable::verifyCompatible(const ShaderVariable& other) const
{
std::vector<std::string> errors;
if (other.binding != binding) {
errors.push_back(fmt::format("Variable bindings do not match: {} != {}.", binding, other.binding)); // NOLINT
}
if (other.descriptorType != descriptorType) {
errors.push_back(fmt::format("Descriptor types do not match: {} != {}.",
magic_enum::enum_name(descriptorType),
magic_enum::enum_name(other.descriptorType)));
}
if (other.name != name) {
logMsg("Warning: shader variable names do not match, variable will only be referrable to by one of them! ({} != {})",
name, other.name);
}
if (other.type != type) {
errors.push_back(fmt::format("Variable types do not match: {} != {}.", type, other.type));
}
if (errors.empty()) {
return;
}
logMsg("Error(s) verifying shader variable compatibility:");
for (const std::string& error : errors) {
logMsg(error);
}
std::abort();
}
std::size_t ShaderVariable::calcHash(std::size_t appendTo) const
{
(void) appendTo;
MIJIN_TRAP(); // TODO
return 0;
#if 0
std::size_t hash = appendTo;
hash = type.calcHash(hash);
hash = calcCrcSizeAppend(descriptorType, hash);
hash = calcCrcSizeAppend(binding, hash);
hash = calcCrcSizeAppend(name, hash);
return hash;
#endif
}
#if 0
ShaderSource ShaderSource::fromFile(std::string fileName, std::string name)
{
(void) fileName;
(void) name;
MIJIN_TRAP(); // TODO
return {};
std::string code = readFileText(fileName);
return {
.code = std::move(code),
.fileName = std::move(fileName),
#if !defined(KAZAN_RELEASE)
.name = std::move(name)
#endif
};
}
#endif
bool ShaderVariableSet::find(std::string_view varName, ShaderVariableFindResult& outResult) const noexcept
{
for (const ShaderVariable& var : variables)
{
if (var.name == varName)
{
outResult.setIndex = setIndex;
outResult.bindIndex = var.binding;
return true;
}
}
return false;
}
bool ShaderVariableSet::find(unsigned semantic, unsigned semanticIdx, ShaderVariableFindResult& outResult) const noexcept
{
for (const ShaderVariable& var : variables)
{
if (var.semantic == semantic && var.semanticIndex == semanticIdx)
{
outResult.setIndex = setIndex;
outResult.bindIndex = var.binding;
return true;
}
}
return false;
}
const ShaderVariable& ShaderVariableSet::getVariableAtBinding(unsigned bindingIdx) const
{
for (const ShaderVariable& var : variables)
{
if (var.binding == bindingIdx)
{
return var;
}
}
logAndDie("Could not find shader variable with binding {}!", bindingIdx);
}
const ShaderVariable* ShaderVariableSet::getVariableAtBindingOpt(unsigned bindingIdx) const
{
for (const ShaderVariable& var : variables)
{
if (var.binding == bindingIdx)
{
return &var;
}
}
return nullptr;
}
const ShaderVariable* ShaderVariableSet::getVariableAtSemanticOpt(unsigned semantic, unsigned semanticIdx) const
{
for (const ShaderVariable& var : variables)
{
if (var.semantic == semantic && var.semanticIndex == semanticIdx)
{
return &var;
}
}
return nullptr;
}
std::size_t ShaderVariableSet::calcHash(std::size_t appendTo) const
{
std::size_t hash = appendTo;
for (const ShaderVariable& var : variables) {
hash = var.calcHash(hash);
}
return hash;
}
void ShaderMeta::extend(ShaderMeta other)
{
for (ShaderVariableSet& set : other.interfaceVariableSets)
{
ShaderVariableSet& mySet = getOrCreateInterfaceVariableSet(set.setIndex);
mySet.usedInStages.bits |= set.usedInStages.bits;
for (ShaderVariable& variable : set.variables)
{
const ShaderVariable* myVariable = nullptr;
if (variable.binding != UNSPECIFIED_INDEX)
{
myVariable = mySet.getVariableAtBindingOpt(variable.binding);
}
else if (variable.semantic != UNSPECIFIED_INDEX)
{
myVariable = mySet.getVariableAtSemanticOpt(variable.semantic, variable.semanticIndex);
}
if (myVariable)
{
myVariable->verifyCompatible(variable);
continue;
}
mySet.variables.push_back(std::move(variable));
}
}
for (ShaderAttribute& attribute : other.inputAttributes)
{
addInputAttribute(std::move(attribute));
}
for (ShaderAttribute& attribute : other.outputAttributes)
{
addOutputAttribute(std::move(attribute));
}
extendPushConstant(other.pushConstantBlock, other.pushConstantStages);
stages |= other.stages;
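// local (workgroup) size: adopt the other meta's size if ours is unset, otherwise a non-zero size from the other meta must match ours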
if (localSizeX == 0 && localSizeY == 0 && localSizeZ == 0)
{
localSizeX = other.localSizeX;
localSizeY = other.localSizeY;
localSizeZ = other.localSizeZ;
}
else if ((other.localSizeX != 0 || other.localSizeY != 0 || other.localSizeZ != 0) &&
(localSizeX != other.localSizeX || localSizeY != other.localSizeY || localSizeZ != other.localSizeZ))
{
logAndDie("Error merging shader metas, conflicting local size!");
}
hash = 0;
}
bool ShaderMeta::findInterfaceVariable(std::string_view varName, ShaderVariableFindResult& outResult) const noexcept
{
for (const ShaderVariableSet& set : interfaceVariableSets)
{
if (set.find(varName, outResult)) {
return true;
}
}
return false;
}
bool ShaderMeta::findInterfaceVariable(unsigned semantic, unsigned semanticIdx, ShaderVariableFindResult& outResult) const noexcept
{
for (const ShaderVariableSet& set : interfaceVariableSets)
{
if (set.find(semantic, semanticIdx, outResult)) {
return true;
}
}
return false;
}
const ShaderVariableSet& ShaderMeta::getInterfaceVariableSet(unsigned setIdx) const
{
const ShaderVariableSet* variableSet = getInterfaceVariableSetOpt(setIdx);
MIJIN_ASSERT(variableSet != nullptr, "Could not find interface variable set.");
return *variableSet;
}
const ShaderVariableSet* ShaderMeta::getInterfaceVariableSetOpt(unsigned setIdx) const
{
for (const ShaderVariableSet& set : interfaceVariableSets)
{
if (set.setIndex == setIdx) {
return &set;
}
}
return nullptr;
}
const ShaderVariableType& ShaderMeta::getInterfaceVariableType(unsigned setIdx, unsigned bindingIdx) const
{
return getInterfaceVariableSet(setIdx).getVariableAtBinding(bindingIdx).type;
}
VertexInput ShaderMeta::generateVertexInput(const NamedVertexInput& namedInput) const noexcept
{
VertexInput result{
.bindings = namedInput.bindings
};
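// match every vertex-stage input attribute to the caller-provided attribute description by name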
for (const ShaderAttribute& attribute : inputAttributes)
{
if (attribute.stage != vk::ShaderStageFlagBits::eVertex) {
continue;
}
MIJIN_ASSERT_FATAL(attribute.type.baseType == ShaderVariableBaseType::SIMPLE, "Vertex shader input must be a simple type.");
auto itAttribute = namedInput.attributes.find(attribute.name);
MIJIN_ASSERT_FATAL(itAttribute != namedInput.attributes.end(), "Missing attribute in input.");
result.attributes.push_back(vk::VertexInputAttributeDescription{
.location = attribute.location,
.binding = itAttribute->second.binding,
.format = attribute.type.simple.format,
.offset = itAttribute->second.offset
});
}
return result;
}
VertexInput ShaderMeta::generateVertexInputFromLayout(const VertexLayout& layout) const noexcept
{
VertexInput result{
.bindings = {
vk::VertexInputBindingDescription{
.binding = 0,
.stride = layout.stride,
.inputRate = vk::VertexInputRate::eVertex
}
}
};
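// match vertex-stage input attributes to the layout by semantic and semantic index; attributes without a semantic are skipped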
for (const ShaderAttribute& attribute : inputAttributes)
{
if (attribute.stage != vk::ShaderStageFlagBits::eVertex) {
continue;
}
if (attribute.semantic == UNSPECIFIED_INDEX) {
continue;
}
MIJIN_ASSERT_FATAL(attribute.type.baseType == ShaderVariableBaseType::SIMPLE, "Vertex shader input must be a simple type.");
auto itAttribute = std::ranges::find_if(layout.attributes, [&attribute](const VertexAttribute& attrib) {
return static_cast<unsigned>(attrib.semantic) == attribute.semantic && attrib.semanticIdx == attribute.semanticIndex;
});
MIJIN_ASSERT_FATAL(itAttribute != layout.attributes.end(), "Missing attribute in vertex layout.");
result.attributes.push_back(vk::VertexInputAttributeDescription{
.location = attribute.location,
.binding = 0,
.format = attribute.type.simple.format,
.offset = itAttribute->offset
});
}
return result;
}
DescriptorSetMeta ShaderMeta::generateDescriptorSetLayout(const ShaderVariableSet& set, const GenerateDescriptorSetLayoutArgs& args) const
{
DescriptorSetMeta setInfo{
.flags = args.flags
};
for (const ShaderVariable& var : set.variables)
{
auto itVar = std::ranges::find_if(setInfo.bindings, [&](const vk::DescriptorSetLayoutBinding& binding) {
return binding.binding == var.binding;
});
assert(itVar == setInfo.bindings.end()); // should have been merged!
if (itVar != setInfo.bindings.end())
{
itVar->stageFlags |= typeBitsToVkStages(set.usedInStages);
continue; // TODO: verify the bindings are compatible
}
vk::DescriptorSetLayoutBinding& binding = setInfo.bindings.emplace_back();
vk::DescriptorBindingFlags& flags = setInfo.bindingFlags.emplace_back();
binding.binding = var.binding;
binding.descriptorType = var.descriptorType;
binding.descriptorCount = 1;
binding.stageFlags = typeBitsToVkStages(set.usedInStages);
// support for dynamically sized descriptors
auto itCounts = args.descriptorCounts.find(var.binding);
if (itCounts != args.descriptorCounts.end() && itCounts->second > 0)
{
binding.descriptorCount = itCounts->second;
flags |= vk::DescriptorBindingFlagBits::ePartiallyBound;
}
if (setInfo.descriptorTypes.size() <= var.binding) {
setInfo.descriptorTypes.resize(var.binding + 1);
}
setInfo.descriptorTypes[var.binding] = var.descriptorType;
}
return setInfo;
}
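// Typical usage of generatePipelineLayout() (sketch; 'device', 'vertMeta' and 'fragMeta' are placeholder names):
//   ShaderMeta meta = vertMeta;
//   meta.extend(fragMeta);
//   PipelineLayoutMeta layoutMeta = meta.generatePipelineLayout({});
//   PipelineAndDescriptorSetLayouts layouts = layoutMeta.createPipelineLayout(device);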
PipelineLayoutMeta ShaderMeta::generatePipelineLayout(const GeneratePipelineLayoutArgs& args) const
{
static const std::vector<std::uint32_t> NO_DESCRIPTOR_COUNTS = {};
static const GenerateDescriptorSetLayoutArgs NO_DESCRIPTOR_SET_ARGS = {};
PipelineLayoutMeta result;
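// build one DescriptorSetMeta per used set index; unused indices stay default-initialized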
for (const ShaderVariableSet& set : interfaceVariableSets)
{
if (set.setIndex >= result.descriptorSets.size()) {
result.descriptorSets.resize(set.setIndex + 1);
}
auto itSet = args.descriptorSets.find(set.setIndex);
const GenerateDescriptorSetLayoutArgs setArgs =
itSet != args.descriptorSets.end()
? itSet->second
: NO_DESCRIPTOR_SET_ARGS;
result.descriptorSets[set.setIndex] = generateDescriptorSetLayout(set, setArgs);
}
if (pushConstantBlock.type.baseType != ShaderVariableBaseType::NONE)
{
assert(pushConstantStages);
result.pushConstantRange.stageFlags = typeBitsToVkStages(pushConstantStages);
result.pushConstantRange.size = pushConstantBlock.offset + calcShaderTypeSize(pushConstantBlock.type);
}
return result;
}
bool ShaderMeta::empty() const
{
static_assert(ShaderMeta::STRUCT_VERSION == 1, "Update me");
return interfaceVariableSets.empty()
&& inputAttributes.empty()
&& outputAttributes.empty()
&& pushConstantStages == ShaderTypeBits()
&& pushConstantBlock.type.baseType == ShaderVariableBaseType::NONE
&& localSizeX == 0
&& localSizeY == 0
&& localSizeZ == 0;
}
std::size_t ShaderMeta::getHash() const
{
if (hash == 0)
{
hash = 1; // TODO
MIJIN_TRAP();
#if 0
for (const ShaderVariableSet& variableSet : interfaceVariableSets) {
hash = variableSet.calcHash(hash);
}
hash = calcCrcSizeAppend(pushConstantStages.bits, hash);
hash = pushConstantBlock.type.calcHash(hash);
hash = calcCrcSizeAppend(pushConstantBlock.offset, hash);
hash = calcCrcSizeAppend(localSizeX, hash);
hash = calcCrcSizeAppend(localSizeY, hash);
hash = calcCrcSizeAppend(localSizeZ, hash);
#endif
}
return hash;
}
unsigned calcShaderTypeSize(const ShaderVariableType& type, bool ignoreArraySize) noexcept
{
unsigned size = 0;
switch (type.baseType)
{
case ShaderVariableBaseType::SIMPLE:
size = vkFormatSize(type.simple.format);
break;
case ShaderVariableBaseType::MATRIX:
switch (type.matrixType)
{
case ShaderVariableMatrixType::MAT2:
size = 16;
break;
case ShaderVariableMatrixType::MAT3:
size = 36;
break;
case ShaderVariableMatrixType::MAT4:
size = 64;
break;
default:
logAndDie("Lol, what's this?");
}
break;
case ShaderVariableBaseType::STRUCT:
assert(!type.struct_.members.empty());
size = static_cast<unsigned>(type.struct_.members.back().offset + calcShaderTypeSize(type.struct_.members.back().type));
break;
default:
logAndDie("How would I know?");
}
if (!ignoreArraySize) {
size *= type.arraySize;
}
return size;
}
} // namespace iwa

402
source/util/texture_atlas.cpp Normal file

@ -0,0 +1,402 @@
#include "iwa/util/texture_atlas.hpp"
#include <bit>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
#include "iwa/resource/bitmap.hpp"
namespace iwa
{
TextureSlot::TextureSlot(ObjectPtr<TextureAtlas> owner, const TextureSlotCreationArgs& args)
: super_t(std::move(owner)), mUsedSpace(args.usedSpace), mLayer(args.layer), mUvOffset(args.uvOffset), mUvScale(args.uvScale)
{
}
TextureAtlas::TextureAtlas(ObjectPtr<> owner, const TextureAtlasCreationArgs& args)
: super_t(std::move(owner)), mLayerSize(args.layerSize)
{
// start with a single layer with one free space that takes up the entire layer
mLayers.push_back({
.freeSpaces = {
vk::Rect2D{
.offset = { .x = 0, .y = 0 },
.extent = args.layerSize
}
}
});
}
ObjectPtr<TextureSlot> TextureAtlas::allocateSlot(vk::Extent2D slotSize)
{
// round sizes up to the next power of two
// TODO: check if this actually improves the results
const vk::Extent2D size = {
.width = std::bit_ceil(slotSize.width),
.height = std::bit_ceil(slotSize.height)
};
// check if it can even fit
if (size.width > mLayerSize.width || size.height > mLayerSize.height) {
throw std::runtime_error("Cannot allocate texture slot, size too big.");
}
// find the best fit (minimize product of "wasted" space)
unsigned lowestWasteSum = std::numeric_limits<unsigned>::max();
unsigned lowestWasteProduct = std::numeric_limits<unsigned>::max();
std::vector<TextureAtlasLayer>::iterator foundLayer = mLayers.end();
std::vector<vk::Rect2D>::iterator foundSpace;
for (auto itLayer = mLayers.begin(); itLayer != mLayers.end(); ++itLayer)
{
for (auto itSpace = itLayer->freeSpaces.begin(); itSpace != itLayer->freeSpaces.end(); ++itSpace)
{
if (itSpace->extent.width < size.width || itSpace->extent.height < size.height) {
continue;
}
const unsigned wasteWidth = itSpace->extent.width - size.width;
const unsigned wasteHeight = itSpace->extent.height - size.height;
const unsigned wasteProduct = wasteWidth * wasteHeight;
if (wasteProduct <= lowestWasteProduct)
{
const unsigned wasteSum = wasteWidth + wasteHeight;
if (wasteProduct < lowestWasteProduct || wasteSum < lowestWasteSum)
{
lowestWasteSum = wasteSum;
lowestWasteProduct = wasteProduct;
foundLayer = itLayer;
foundSpace = itSpace;
}
}
} // for (itLayer->freeSpaces)
} // for (mLayers)
// if no space was found, make space
if (foundLayer == mLayers.end())
{
mLayers.resize(mLayers.size() + 1);
mLayers.back().freeSpaces.push_back({
.offset = { .x = 0, .y = 0},
.extent = mLayerSize
});
foundLayer = std::prev(mLayers.end());
foundSpace = foundLayer->freeSpaces.begin();
}
// save in case the iterator gets invalidated
const vk::Rect2D space = *foundSpace;
// remove it
foundLayer->freeSpaces.erase(foundSpace);
// now split the space, if necessary
const bool splitX = space.extent.width > size.width;
const bool splitY = space.extent.height > size.height;
if (splitX)
{
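// free strip to the right of the allocated region (same height as the allocation)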
foundLayer->freeSpaces.push_back({
.offset = {
.x = static_cast<std::int32_t>(space.offset.x + size.width),
.y = space.offset.y
},
.extent = {
.width = space.extent.width - size.width,
.height = size.height
}
});
}
if (splitY)
{
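// free strip below the allocated region (same width as the allocation)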
foundLayer->freeSpaces.push_back({
.offset = {
.x = space.offset.x,
.y = static_cast<std::int32_t>(space.offset.y + size.height)
},
.extent = {
.width = size.width,
.height = space.extent.height - size.height
}
});
}
if (splitX && splitY)
{
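// remaining free block in the bottom-right corner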
foundLayer->freeSpaces.push_back({
.offset = {
.x = static_cast<std::int32_t>(space.offset.x + size.width),
.y = static_cast<std::int32_t>(space.offset.y + size.height)
},
.extent = {
.width = space.extent.width - size.width,
.height = space.extent.height - size.height
}
});
}
// return the result
return createChild<TextureSlot>(TextureSlotCreationArgs{
.usedSpace = {
.offset = space.offset,
.extent = slotSize
},
.layer = static_cast<unsigned>(std::distance(mLayers.begin(), foundLayer)),
.uvOffset = {
static_cast<float>(space.offset.x) / static_cast<float>(mLayerSize.width),
static_cast<float>(space.offset.y) / static_cast<float>(mLayerSize.height)
},
.uvScale = {
static_cast<float>(slotSize.width) / static_cast<float>(mLayerSize.width),
static_cast<float>(slotSize.height) / static_cast<float>(mLayerSize.height)
}
});
}
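// Typical AtlasedImage usage (sketch; 'atlasedImage' and 'bitmap' are placeholder names, inside a coroutine):
//   ObjectPtr<TextureSlot> slot = co_await atlasedImage->c_allocateSlot({.width = 64, .height = 64});
//   co_await atlasedImage->c_upload(*slot, bitmap);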
AtlasedImage::AtlasedImage(ObjectPtr<Device> owner, const AtlasedImageCreationArgs& args)
: super_t(std::move(owner)), mAtlas(TextureAtlas::create(TextureAtlasCreationArgs{.layerSize = args.size})),
mFormat(args.format), mMipLevels(args.mipLevels), mUsage(args.usage | vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst)
{
mImage = allocateImage(args.initialLayers);
mImageView = mImage->createImageView({
.viewType = vk::ImageViewType::e2DArray
});
}
mijin::Task<ObjectPtr<TextureSlot>> AtlasedImage::c_allocateSlot(vk::Extent2D slotSize)
{
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
ObjectPtr<TextureSlot> slot = mAtlas->allocateSlot(slotSize);
if (slot->getLayer() >= mImage->getArrayLayers())
{
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
// image is too small, resize it
// this includes a complete copy of the existing image
ObjectPtr<Image> newImage = allocateImage(slot->getLayer() + 1);
ObjectPtr<CommandBuffer> cmdBufferPtr = getOwner()->beginScratchCommandBuffer();
vk::CommandBuffer cmdBuffer = *cmdBufferPtr;
mImage->applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_READ);
newImage->applyTransition(cmdBuffer, IMAGE_TRANSITION_TRANSFER_WRITE);
// copy ALL the mip levels
std::vector<vk::ImageCopy> regions;
regions.reserve(mImage->getMipLevels());
for (unsigned level = 0; level < mImage->getMipLevels(); ++level)
{
const vk::ImageSubresourceLayers copySubresource{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = level,
.baseArrayLayer = 0,
.layerCount = mImage->getArrayLayers()
};
regions.push_back({
.srcSubresource = copySubresource,
.srcOffset = {.x = 0, .y = 0, .z = 0},
.dstSubresource = copySubresource,
.dstOffset = {.x = 0, .y = 0, .z = 0},
.extent = {
.width = mAtlas->getLayerSize().width,
.height = mAtlas->getLayerSize().height,
.depth = 1
}
});
}
cmdBuffer.copyImage(
/* srcImage = */ *mImage,
/* srcImageLayout = */ vk::ImageLayout::eTransferSrcOptimal,
/* dstImage = */ *newImage,
/* dstImageLayout = */ vk::ImageLayout::eTransferDstOptimal,
/* regions = */ regions
);
co_await getOwner()->endScratchCommandBuffer(cmdBufferPtr);
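// swap in the enlarged image, recreate the view and notify listeners via imageRecreated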
mImage = std::move(newImage);
mImageView = mImage->createImageView({
.viewType = vk::ImageViewType::e2DArray
});
imageRecreated.emit();
}
co_return slot;
}
mijin::Task<> AtlasedImage::c_upload(const TextureSlot& slot, const Bitmap& bitmap) const noexcept
{
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= bitmap.getSize().width
&& slot.getUsedSpace().extent.height >= bitmap.getSize().height, "Can't upload image, invalid size.");
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
co_await mImage->c_upload(
/* bitmap = */ bitmap,
/* imageOffset = */ {
.x = slot.getUsedSpace().offset.x,
.y = slot.getUsedSpace().offset.y,
.z = 0
},
/* baseLayer = */ slot.getLayer()
);
}
mijin::Task<> AtlasedImage::c_upload(const TextureSlot& slot, const void* data, std::size_t bytes, const vk::Extent2D& bufferImageSize) const noexcept
{
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= bufferImageSize.width
&& slot.getUsedSpace().extent.height >= bufferImageSize.height, "Can't upload image, invalid size.");
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
co_await mImage->c_upload(
/* data = */ data,
/* bytes = */ bytes,
/* bufferImageSize = */ {
.width = bufferImageSize.width,
.height = bufferImageSize.height,
.depth = 1
},
/* imageOffset = */ {
.x = slot.getUsedSpace().offset.x,
.y = slot.getUsedSpace().offset.y,
.z = 0
},
/* baseLayer = */ slot.getLayer()
);
}
mijin::Task<> AtlasedImage::c_blit(const TextureSlot& slot, Image& srcImage) const noexcept
{
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= srcImage.getSize().width
&& slot.getUsedSpace().extent.height >= srcImage.getSize().height
&& srcImage.getSize().depth == 1, "Can't upload image, invalid size.");
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
co_await mImage->c_blitFrom(
/* srcImage = */ srcImage,
/* regions = */ {
vk::ImageBlit{
.srcSubresource = DEFAULT_SUBRESOURCE_LAYERS,
.srcOffsets = std::array{
vk::Offset3D{
.x = 0, .y = 0, .z = 0
},
vk::Offset3D{
.x = static_cast<std::int32_t>(srcImage.getSize().width),
.y = static_cast<std::int32_t>(srcImage.getSize().height),
.z = 1
}
},
.dstSubresource = vk::ImageSubresourceLayers{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = slot.getLayer(),
.layerCount = 1
},
.dstOffsets = std::array{
vk::Offset3D{
.x = slot.getUsedSpace().offset.x,
.y = slot.getUsedSpace().offset.y,
.z = 0
},
vk::Offset3D{
.x = slot.getUsedSpace().offset.x + static_cast<std::int32_t>(slot.getUsedSpace().extent.width),
.y = slot.getUsedSpace().offset.y + static_cast<std::int32_t>(slot.getUsedSpace().extent.height),
.z = 1
}
}
}
}
);
}
mijin::Task<> AtlasedImage::c_blit(const TextureSlot& slot, const Bitmap& bitmap) const noexcept
{
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= bitmap.getSize().width
&& slot.getUsedSpace().extent.height >= bitmap.getSize().height, "Can't upload image, invalid size.");
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
co_await mImage->c_blitFrom(
/* bitmap = */ bitmap,
/* regions = */ {
vk::ImageBlit{
.srcSubresource = DEFAULT_SUBRESOURCE_LAYERS,
.srcOffsets = std::array{
vk::Offset3D{
.x = 0, .y = 0, .z = 0
},
vk::Offset3D{
.x = static_cast<std::int32_t>(bitmap.getSize().width),
.y = static_cast<std::int32_t>(bitmap.getSize().height),
.z = 1
}
},
.dstSubresource = vk::ImageSubresourceLayers{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = slot.getLayer(),
.layerCount = 1
},
.dstOffsets = std::array{
vk::Offset3D{
.x = slot.getUsedSpace().offset.x,
.y = slot.getUsedSpace().offset.y,
.z = 0
},
vk::Offset3D{
.x = slot.getUsedSpace().offset.x + static_cast<std::int32_t>(slot.getUsedSpace().extent.width),
.y = slot.getUsedSpace().offset.y + static_cast<std::int32_t>(slot.getUsedSpace().extent.height),
.z = 1
}
}
}
}
);
}
mijin::Task<> AtlasedImage::c_copy(const TextureSlot& slot, Image& srcImage) const noexcept
{
IWA_CORO_ENSURE_MAIN_THREAD(*getOwner()->getOwner());
MIJIN_ASSERT(slot.getUsedSpace().extent.width >= srcImage.getSize().width
&& slot.getUsedSpace().extent.height >= srcImage.getSize().height
&& srcImage.getSize().depth == 1, "Can't upload image, invalid size.");
const mijin::TaskMutexLock lock = co_await mImageMutex.c_lock();
co_await mImage->c_copyFrom(
/* srcImage = */ srcImage,
/* regions = */ {
vk::ImageCopy{
.srcSubresource = DEFAULT_SUBRESOURCE_LAYERS,
.srcOffset = {
.x = 0, .y = 0, .z = 0
},
.dstSubresource = vk::ImageSubresourceLayers{
.aspectMask = vk::ImageAspectFlagBits::eColor,
.mipLevel = 0,
.baseArrayLayer = slot.getLayer(),
.layerCount = 1
},
.dstOffset = {
.x = slot.getUsedSpace().offset.x,
.y = slot.getUsedSpace().offset.y,
.z = 0
},
.extent = srcImage.getSize()
}
}
);
}
ObjectPtr<Image> AtlasedImage::allocateImage(unsigned layers)
{
ObjectPtr<Image> image = getOwner()->createChild<Image>(ImageCreationArgs{
.format = mFormat,
.extent = {
.width = mAtlas->getLayerSize().width,
.height = mAtlas->getLayerSize().height,
.depth = 1
},
.mipLevels = mMipLevels,
.arrayLayers = layers,
.usage = mUsage
});
image->allocateMemory();
return image;
}
} // namespace iwa

29
source/util/vertex_layout.cpp Normal file

@ -0,0 +1,29 @@
#include "iwa/util/vertex_layout.hpp"
namespace iwa
{
mijin::Optional<VertexAttribute&> VertexLayout::findAttribute(VertexAttributeSemantic semantic, unsigned semanticIdx) noexcept
{
for (VertexAttribute& attribute : attributes)
{
if (attribute.semantic == semantic && attribute.semanticIdx == semanticIdx)
{
return attribute;
}
}
return mijin::NULL_OPTIONAL;
}
mijin::Optional<const VertexAttribute&> VertexLayout::findAttribute(VertexAttributeSemantic semantic, unsigned semanticIdx) const noexcept
{
for (const VertexAttribute& attribute : attributes)
{
if (attribute.semantic == semantic && attribute.semanticIdx == semanticIdx)
{
return attribute;
}
}
return mijin::NULL_OPTIONAL;
}
} // namespace iwa

275
source/util/vkutil.cpp Normal file

@ -0,0 +1,275 @@
#include "iwa/util/vkutil.hpp"
#include "iwa/device.hpp"
#include "iwa/log.hpp"
namespace iwa
{
unsigned vkFormatSize(vk::Format format) noexcept
{
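// tightly packed byte size of a single element of the given format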
switch (format)
{
// 8 bit integer
case vk::Format::eR8Uint:
case vk::Format::eR8Sint:
case vk::Format::eR8Unorm:
case vk::Format::eR8Srgb:
return 1;
case vk::Format::eR8G8Uint:
case vk::Format::eR8G8Sint:
case vk::Format::eR8G8Unorm:
case vk::Format::eR8G8Srgb:
return 2;
case vk::Format::eR8G8B8Uint:
case vk::Format::eR8G8B8Sint:
case vk::Format::eR8G8B8Unorm:
case vk::Format::eR8G8B8Srgb:
return 3;
case vk::Format::eR8G8B8A8Uint:
case vk::Format::eR8G8B8A8Sint:
case vk::Format::eR8G8B8A8Unorm:
case vk::Format::eR8G8B8A8Srgb:
return 4;
// 16 bit integer
case vk::Format::eR16Uint:
case vk::Format::eR16Sint:
case vk::Format::eR16Unorm:
return 2;
case vk::Format::eR16G16Uint:
case vk::Format::eR16G16Sint:
case vk::Format::eR16G16Unorm:
return 4;
case vk::Format::eR16G16B16Uint:
case vk::Format::eR16G16B16Sint:
case vk::Format::eR16G16B16Unorm:
return 6;
case vk::Format::eR16G16B16A16Uint:
case vk::Format::eR16G16B16A16Sint:
case vk::Format::eR16G16B16A16Unorm:
return 8;
// 32 bit integer
case vk::Format::eR32Uint:
case vk::Format::eR32Sint:
return 4;
case vk::Format::eR32G32Uint:
case vk::Format::eR32G32Sint:
return 8;
case vk::Format::eR32G32B32Uint:
case vk::Format::eR32G32B32Sint:
return 12;
case vk::Format::eR32G32B32A32Uint:
case vk::Format::eR32G32B32A32Sint:
return 16;
// 64 bit integer
case vk::Format::eR64Uint:
case vk::Format::eR64Sint:
return 8;
case vk::Format::eR64G64Uint:
case vk::Format::eR64G64Sint:
return 16;
case vk::Format::eR64G64B64Uint:
case vk::Format::eR64G64B64Sint:
return 24;
case vk::Format::eR64G64B64A64Uint:
case vk::Format::eR64G64B64A64Sint:
return 32;
// 16 bit float
case vk::Format::eR16Sfloat:
return 2;
case vk::Format::eR16G16Sfloat:
return 4;
case vk::Format::eR16G16B16Sfloat:
return 6;
case vk::Format::eR16G16B16A16Sfloat:
return 8;
// 32 bit float
case vk::Format::eR32Sfloat:
return 4;
case vk::Format::eR32G32Sfloat:
return 8;
case vk::Format::eR32G32B32Sfloat:
return 12;
case vk::Format::eR32G32B32A32Sfloat:
return 16;
// 64 bit float
case vk::Format::eR64Sfloat:
return 8;
case vk::Format::eR64G64Sfloat:
return 16;
case vk::Format::eR64G64B64Sfloat:
return 24;
case vk::Format::eR64G64B64A64Sfloat:
return 32;
default:
logAndDie("I've never seen this format :(");
}
}
unsigned vkIndexTypeSize(vk::IndexType indexType) noexcept
{
switch (indexType)
{
case vk::IndexType::eNoneKHR:
return 0;
case vk::IndexType::eUint8EXT:
return 1;
case vk::IndexType::eUint16:
return 2;
case vk::IndexType::eUint32:
return 4;
default:
logAndDie("What is this sorcery?");
}
}
bool isDepthFormat(vk::Format format) noexcept
{
for (const vk::Format depthFormat : DEPTH_FORMATS) {
if (format == depthFormat) {
return true;
}
}
return false;
}
bool isStencilFormat(vk::Format format) noexcept
{
for (const vk::Format stencilFormat : STENCIL_FORMATS) {
if (format == stencilFormat) {
return true;
}
}
return false;
}
#if 0
std::string formatVkVariant(const VkVariantMWN& variant)
{
switch (variant.type)
{
case VK_VARIANT_TYPE_UNKNOWN_MWN:
return "???";
case VK_VARIANT_TYPE_NONE_MWN:
return "<none>";
case VK_VARIANT_TYPE_BOOL_MWN:
return variant.uintValue ? "true" : "false";
case VK_VARIANT_TYPE_UINT8_MWN:
case VK_VARIANT_TYPE_UINT16_MWN:
case VK_VARIANT_TYPE_UINT32_MWN:
case VK_VARIANT_TYPE_UINT64_MWN:
return std::to_string(variant.uintValue);
case VK_VARIANT_TYPE_INT8_MWN:
case VK_VARIANT_TYPE_INT16_MWN:
case VK_VARIANT_TYPE_INT32_MWN:
case VK_VARIANT_TYPE_INT64_MWN:
return std::to_string(variant.intValue);
case VK_VARIANT_TYPE_FLOAT_MWN:
case VK_VARIANT_TYPE_DOUBLE_MWN:
return std::to_string(variant.doubleValue);
case VK_VARIANT_TYPE_STRING_MWN:
return fmt::format("\"{}\"", variant.stringValue);
case VK_VARIANT_TYPE_VOID_POINTER_MWN:
return fmt::format("{}", fmt::ptr(variant.voidPointerValue)); // TODO: this doesnt make sense, store the original pointer!
case VK_VARIANT_TYPE_POINTER_MWN:
return fmt::format("{}", fmt::ptr(variant.pointerValue)); // TODO: this doesnt make sense, store the original pointer!
case VK_VARIANT_TYPE_ARRAY_MWN:
return fmt::format("<array of {}>", variant.arrayValue.numElements);
case VK_VARIANT_TYPE_IN_STRUCTURE_MWN:
return "<in struct>";
case VK_VARIANT_TYPE_OUT_STRUCTURE_MWN:
return "<out struct>";
case VK_VARIANT_TYPE_OBJECT_MWN:
return "<handle>";
default:
assert(0);
return "???";
}
}
#endif
#if 0
std::size_t calcVkStructHash(const void* structure, std::size_t appendTo)
{
if (structure == nullptr) {
return appendTo;
}
const vk::BaseInStructure* inStruct = static_cast<const vk::BaseInStructure*>(structure);
std::size_t hash = appendTo;
switch (inStruct->sType)
{
case vk::StructureType::eDescriptorSetLayoutBindingFlagsCreateInfo: {
const auto& flagsInfo = *static_cast<const vk::DescriptorSetLayoutBindingFlagsCreateInfo*>(structure);
for (std::uint32_t bindingIdx = 0; bindingIdx < flagsInfo.bindingCount; ++bindingIdx) {
hash = calcCrcSizeAppend(flagsInfo.pBindingFlags[bindingIdx], hash);
}
}
break;
default:
assert(false); // missing struct here, bad
break;
}
return calcVkStructHash(inStruct->pNext, hash);
}
#endif
vk::SampleCountFlagBits samplesToVk(unsigned samples) noexcept
{
switch (samples)
{
case 1:
return vk::SampleCountFlagBits::e1;
case 2:
return vk::SampleCountFlagBits::e2;
case 4:
return vk::SampleCountFlagBits::e4;
case 8:
return vk::SampleCountFlagBits::e8;
case 16:
return vk::SampleCountFlagBits::e16;
case 32:
return vk::SampleCountFlagBits::e32;
case 64:
return vk::SampleCountFlagBits::e64;
default:
logAndDie("Invalid sample count: {}.", samples);
}
}
vk::Format detectDepthBufferFormat(Device& device, unsigned samples) noexcept
{
const vk::SampleCountFlagBits sampleCount = samplesToVk(samples);
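// probe DEPTH_FORMATS in preference order and return the first one usable as an optimal-tiling depth attachment with the requested sample count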
for (const vk::Format depthFormat : DEPTH_FORMATS)
{
try
{
const vk::ImageFormatProperties props = device.getVkPhysicalDevice().getImageFormatProperties(depthFormat, vk::ImageType::e2D, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eDepthStencilAttachment);
if (props.sampleCounts & sampleCount) {
return depthFormat;
}
}
catch(vk::FormatNotSupportedError&)
{
continue; // not supported
}
}
return vk::Format::eUndefined;
}
std::vector<unsigned> detectSupportedSampleCounts(Device& device) noexcept
{
std::vector<unsigned> result = {1};
for (const unsigned samples : {2, 4, 8, 16, 32, 64})
{
if (detectDepthBufferFormat(device, samples) != vk::Format::eUndefined) {
result.push_back(samples);
}
}
return result;
}
} // namespace iwa

319
source/window.cpp Normal file

@ -0,0 +1,319 @@
#include "iwa/window.hpp"
#include <SDL_vulkan.h>
#include "iwa/log.hpp"
#include "iwa/instance.hpp"
namespace iwa
{
inline constexpr const char* WINDOW_DATA_NAME = "iwa_window";
namespace
{
Window* gLastEventWindow = nullptr;
Window* getWindowFromEvent(Uint32 windowID) noexcept
{
SDL_Window* sdlWindow = SDL_GetWindowFromID(windowID);
Window* iwaWindow = static_cast<Window*>(SDL_GetWindowData(sdlWindow, WINDOW_DATA_NAME));
if (iwaWindow != nullptr)
{
gLastEventWindow = iwaWindow;
}
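// if the window ID cannot be resolved, fall back to the last window that received an event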
return gLastEventWindow;
}
void handleWindowEvent(const SDL_Event& event)
{
Window* iwaWindow = getWindowFromEvent(event.window.windowID);
if (iwaWindow == nullptr) {
return;
}
switch (event.window.event)
{
case SDL_WINDOWEVENT_FOCUS_GAINED:
iwaWindow->focusGained.emit();
break;
case SDL_WINDOWEVENT_FOCUS_LOST:
iwaWindow->focusLost.emit();
break;
case SDL_WINDOWEVENT_ENTER:
iwaWindow->mouseEntered.emit();
break;
case SDL_WINDOWEVENT_LEAVE:
iwaWindow->mouseLeft.emit();
break;
case SDL_WINDOWEVENT_CLOSE:
iwaWindow->closeRequested.emit();
break;
}
}
void handleKeyEvent(const SDL_Event& sdlEvent)
{
Window* iwaWindow = getWindowFromEvent(sdlEvent.key.windowID);
if (iwaWindow == nullptr) {
return;
}
const KeyEvent event = {
.keyCode = static_cast<KeyCode>(sdlEvent.key.keysym.sym),
.scanCode = static_cast<ScanCode>(sdlEvent.key.keysym.scancode),
.modifiers = {
.leftShift = (sdlEvent.key.keysym.mod & KMOD_LSHIFT) != 0,
.rightShift = (sdlEvent.key.keysym.mod & KMOD_RSHIFT) != 0,
.leftCtrl = (sdlEvent.key.keysym.mod & KMOD_LCTRL) != 0,
.rightCtrl = (sdlEvent.key.keysym.mod & KMOD_RCTRL) != 0,
.leftAlt = (sdlEvent.key.keysym.mod & KMOD_LALT) != 0,
.rightAlt = (sdlEvent.key.keysym.mod & KMOD_RALT) != 0,
.leftMeta = (sdlEvent.key.keysym.mod & KMOD_LGUI) != 0,
.rightMeta = (sdlEvent.key.keysym.mod & KMOD_RGUI) != 0,
},
.down = sdlEvent.type == SDL_KEYDOWN,
.repeat = sdlEvent.key.repeat > 0
};
iwaWindow->keyChanged.emit(event);
}
void handleMouseMotion(const SDL_Event& sdlEvent)
{
Window* iwaWindow = getWindowFromEvent(sdlEvent.motion.windowID);
if (iwaWindow == nullptr) {
return;
}
const MouseMoveEvent event = {
.relativeX = sdlEvent.motion.xrel,
.relativeY = sdlEvent.motion.yrel,
.absoluteX = sdlEvent.motion.x,
.absoluteY = sdlEvent.motion.y,
.warped = false // TODO?
};
iwaWindow->mouseMoved.emit(event);
}
void handleMouseButtonEvent(const SDL_Event& sdlEvent)
{
Window* iwaWindow = getWindowFromEvent(sdlEvent.button.windowID);
if (iwaWindow == nullptr) {
return;
}
const MouseButtonEvent event = {
.button = static_cast<MouseButton>(sdlEvent.button.button),
.clicks = sdlEvent.button.clicks,
.down = sdlEvent.type == SDL_MOUSEBUTTONDOWN
};
iwaWindow->mouseButtonChanged.emit(event);
}
void handleMouseWheelEvent(const SDL_Event& sdlEvent)
{
Window* iwaWindow = getWindowFromEvent(sdlEvent.wheel.windowID);
if (iwaWindow == nullptr) {
return;
}
const MouseWheelEvent event = {
.relativeX = sdlEvent.wheel.x,
.relativeY = sdlEvent.wheel.y
};
iwaWindow->mouseScrolled.emit(event);
}
void handleTextInputEvent(const SDL_Event& sdlEvent)
{
Window* iwaWindow = getWindowFromEvent(sdlEvent.text.windowID);
if (iwaWindow == nullptr) {
return;
}
const TextInputEvent event = {
.text = sdlEvent.text.text
};
iwaWindow->textEntered.emit(event);
}
void handleEvent(const SDL_Event& event)
{
switch (event.type)
{
case SDL_WINDOWEVENT:
handleWindowEvent(event);
break;
case SDL_KEYDOWN:
case SDL_KEYUP:
handleKeyEvent(event);
break;
case SDL_MOUSEMOTION:
handleMouseMotion(event);
break;
case SDL_MOUSEBUTTONDOWN:
case SDL_MOUSEBUTTONUP:
handleMouseButtonEvent(event);
break;
case SDL_MOUSEWHEEL:
handleMouseWheelEvent(event);
break;
case SDL_TEXTINPUT:
handleTextInputEvent(event);
break;
}
}
mijin::Task<> c_sdlLoop(ObjectPtr<Instance> instance)
{
while (!instance->isQuitRequested())
{
SDL_Event event;
while (SDL_PollEvent(&event))
{
handleEvent(event);
}
co_await mijin::c_suspend();
}
// SDL_Quit();
co_return;
}
void initSDL(Instance& instance)
{
static bool sdlInited = false;
if (sdlInited) {
return;
}
sdlInited = true;
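// initialize only the SDL core here; SDL_CreateWindow brings up the video subsystem on demand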
if (SDL_Init(0) != 0)
{
logAndDie("Error initializing SDL: {}.", SDL_GetError());
}
SDL_SetHint(SDL_HINT_MOUSE_AUTO_CAPTURE, "0");
instance.getMainTaskLoop().addTask(c_sdlLoop(instance.getPointer()));
}
}
Window::Window(ObjectPtr<Instance> owner, const WindowCreationArgs& args) : super_t(std::move(owner))
{
initSDL(*getOwner());
const Uint32 flags = SDL_WINDOW_VULKAN
| (args.flags.hidden ? SDL_WINDOW_HIDDEN : 0)
| (args.flags.resizable ? SDL_WINDOW_RESIZABLE : 0)
| (args.flags.borderless ? SDL_WINDOW_BORDERLESS : 0)
| (args.flags.alwayOnTop ? SDL_WINDOW_ALWAYS_ON_TOP : 0)
| (args.flags.skipTaskbar ? SDL_WINDOW_UTILITY : 0);
mHandle = SDL_CreateWindow(
/* title = */ args.title.c_str(),
/* x = */ SDL_WINDOWPOS_CENTERED,
/* y = */ SDL_WINDOWPOS_CENTERED,
/* w = */ args.width,
/* h = */ args.height,
/* flags = */ flags
);
if (mHandle == nullptr)
{
logAndDie("Error creating SDL window: {}.", SDL_GetError());
}
SDL_SetWindowData(mHandle, WINDOW_DATA_NAME, this);
VkSurfaceKHR surface = VK_NULL_HANDLE;
if (!SDL_Vulkan_CreateSurface(mHandle, getOwner()->getVkHandle(), &surface))
{
logAndDie("Error creating Vulkan surface for SDL window: {}", SDL_GetError());
}
mSurface = surface;
getOwner()->windowCreated.emit(*this);
}
Window::~Window() noexcept
{
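// defer destruction of the Vulkan surface and the SDL window through the instance's deletion queue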
getOwner()->queueDelete([surface=mSurface, handle=mHandle, instance=getOwner()]
{
if (surface)
{
instance->getVkHandle().destroySurfaceKHR(surface);
}
if (handle)
{
SDL_DestroyWindow(handle);
}
});
}
bool Window::isVisible() const noexcept
{
return (SDL_GetWindowFlags(mHandle) & SDL_WINDOW_HIDDEN) == 0;
}
void Window::setVisible(bool visible) noexcept
{
if (visible) {
SDL_ShowWindow(mHandle);
}
else {
SDL_HideWindow(mHandle);
}
}
std::pair<int, int> Window::getSize() const noexcept
{
std::pair<int, int> size;
SDL_GetWindowSize(mHandle, &size.first, &size.second);
return size;
}
void Window::setSize(int width, int height) noexcept
{
SDL_SetWindowSize(mHandle, std::max(width, 1), std::max(height, 1));
}
std::pair<int, int> Window::getPosition() const noexcept
{
std::pair<int, int> position;
SDL_GetWindowPosition(mHandle, &position.first, &position.second);
return position;
}
void Window::setPosition(int xPos, int yPos) noexcept
{
SDL_SetWindowPosition(mHandle, xPos, yPos);
}
WindowBorder Window::getWindowBorder() const noexcept
{
WindowBorder windowBorder;
SDL_GetWindowBordersSize(mHandle, &windowBorder.top, &windowBorder.left, &windowBorder.bottom, &windowBorder.right);
return windowBorder;
}
bool Window::isFocused() const noexcept
{
return (SDL_GetWindowFlags(mHandle) & SDL_WINDOW_INPUT_FOCUS) != 0;
}
void Window::focus() noexcept
{
SDL_RaiseWindow(mHandle);
}
void Window::setMouseMode(MouseMode mouseMode) noexcept
{
switch (mouseMode)
{
case MouseMode::NORMAL:
SDL_SetRelativeMouseMode(SDL_FALSE);
SDL_SetWindowMouseGrab(mHandle, SDL_FALSE);
break;
case MouseMode::CAPTURED:
SDL_SetRelativeMouseMode(SDL_TRUE);
SDL_SetWindowMouseGrab(mHandle, SDL_TRUE);
break;
}
}
void Window::setModalFor(mijin::Optional<const Window&> parent) noexcept
{
SDL_SetWindowModalFor(mHandle, parent.empty() ? nullptr : parent->getSDLWindow());
}
} // namespace iwa