iwa/source/util/render_loop.cpp

#include "iwa/util/render_loop.hpp"
#include <mijin/async/task_mutex.hpp>
#include "iwa/device.hpp"
#include "iwa/instance.hpp"
namespace iwa
{
namespace
{
// BIG BIG TODO: This is a dumb workaround for sharing images (e.g. the UI image) between multiple renderers.
// The reason is that the layout change mechanism doesn't work if multiple command buffers (that are executed
// sequentially) are recorded in parallel.
// A possible fix could be to move the state tracking mechanism to the renderer and generate the barriers
// before submitting.
mijin::TaskMutex gRenderMutex;
} // namespace

RenderLoop::RenderLoop(ObjectPtr<Device> owner, RenderLoopCreationArgs args)
    : super_t(std::move(owner)), mAdvanceDeleteQueue(args.flags.advanceDeleteQueue)
{
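    // keep one command buffer and one render-done fence per frame in flight (args.parallelFrames of them)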
    mAlternating.resize(args.parallelFrames);

    // if the caller did not supply a command pool, create one on the graphics queue family
    // with resettable command buffers
    ObjectPtr<CommandPool> commandPool = std::move(args.commandPool);
    if (!commandPool)
    {
        commandPool = getOwner()->createChild<CommandPool>(CommandPoolCreationArgs{
            .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer,
            .queueFamilyIndex = getOwner()->getDeviceInfo().graphicsQueueFamily
        });
    }

    // the fences start signaled so the first wait in the render loop returns immediately
    for (Alternating& alt : mAlternating)
    {
        alt.commandBuffer = commandPool->allocateCommandBuffer();
        alt.renderDoneFence = getOwner()->createChild<Fence>(FenceCreationArgs{.flags = vk::FenceCreateFlagBits::eSignaled});
    }
}

void RenderLoop::start() noexcept
{
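    // schedule the render loop coroutine; it runs until the owning instance requests quitting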
    addTask(c_renderLoop());
}

mijin::Task<> RenderLoop::c_init()
{
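    // nothing to initialize here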
    co_return;
}

mijin::SimpleTaskLoop& RenderLoop::getTaskLoop() const noexcept
{
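    // the loop runs on the main task loop of the device's owning instance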
    return getOwner()->getOwner()->getMainTaskLoop();
}

mijin::Task<> RenderLoop::c_renderLoop()
{
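    // run one-time initialization, then render frames until a quit is requested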
    co_await c_init();
    while (!getOwner()->getOwner()->isQuitRequested())
    {
        Alternating& alt = mAlternating.at(mFrameIdx);

        // wait until the previous submission that used this frame's resources has finished
        co_await alt.renderDoneFence->c_wait();

        // reset the fence so this frame's submit can signal it again
        alt.renderDoneFence->reset();

        vk::CommandBuffer cmdBuffer = alt.commandBuffer->getVkHandle();
        cmdBuffer.begin(vk::CommandBufferBeginInfo{
            .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit
        });

        // record the commands
        RenderLoopRenderArgs renderArgs = {
            .cmdBuffer = *alt.commandBuffer,
            .frameIdx = mFrameIdx
        };
        { // gRenderMutex lock
            const mijin::TaskMutexLock lock = co_await gRenderMutex.c_lock();
            co_await c_render(renderArgs);

            // each used image reference finalizes itself against the command buffer and can append
            // semaphores for the submit to wait on / signal
            std::vector<vk::Semaphore> waitSemaphores;
            std::vector<vk::Semaphore> signalSemaphores;
            ImageReferenceFinalizeArgs finalizeArgs{
                .cmdBuffer = *alt.commandBuffer,
                .waitSemaphores = waitSemaphores,
                .signalSemaphores = signalSemaphores
            };
            for (const ObjectPtr<ImageReference>& imageRef : renderArgs.usedImageReferences)
            {
                imageRef->finalize(finalizeArgs);
            }
            cmdBuffer.end();
            // submit them; Vulkan expects one wait stage entry per wait semaphore
            const std::vector<vk::PipelineStageFlags> waitStages(waitSemaphores.size(), vk::PipelineStageFlagBits::eFragmentShader);
            getOwner()->getGraphicsQueue().submit(vk::SubmitInfo{
                .waitSemaphoreCount = static_cast<std::uint32_t>(waitSemaphores.size()),
                .pWaitSemaphores = waitSemaphores.data(),
                .pWaitDstStageMask = waitStages.data(),
                .commandBufferCount = 1,
                .pCommandBuffers = &cmdBuffer,
                .signalSemaphoreCount = static_cast<std::uint32_t>(signalSemaphores.size()),
                .pSignalSemaphores = signalSemaphores.data()
            }, *alt.renderDoneFence);
        } // gRenderMutex lock

        // finally present
        for (const ObjectPtr<ImageReference>& imageRef : renderArgs.usedImageReferences)
        {
            co_await imageRef->c_present();
        }

        // tick deleters
        // TODO: what if there are multiple render loops?
        if (mAdvanceDeleteQueue)
        {
            getOwner()->getOwner()->tickDeleteQueue();
        }

        // advance to the next set of per-frame resources
        mFrameIdx = (mFrameIdx + 1) % mAlternating.size();
    }
    co_return;
}
} // namespace iwa