#pragma once

#if !defined(SDL_GPU_TEST_PRIVATE_SDL_GPU_TEST_SDLPP_GPU_HPP_INCLUDED)
#define SDL_GPU_TEST_PRIVATE_SDL_GPU_TEST_SDLPP_GPU_HPP_INCLUDED 1

#include "./common.hpp"

namespace sdlpp
{
static_assert(sizeof(SDL_bool) == sizeof(bool)); // we assume this in the whole file...

//
// enums
//
enum class GPUVertexInputRate
{
    VERTEX = SDL_GPU_VERTEXINPUTRATE_VERTEX,
    INSTANCE = SDL_GPU_VERTEXINPUTRATE_INSTANCE
};

enum class GPUVertexElementFormat
{
    INT = SDL_GPU_VERTEXELEMENTFORMAT_INT,
    INT2 = SDL_GPU_VERTEXELEMENTFORMAT_INT2,
    INT3 = SDL_GPU_VERTEXELEMENTFORMAT_INT3,
    INT4 = SDL_GPU_VERTEXELEMENTFORMAT_INT4,
    UINT = SDL_GPU_VERTEXELEMENTFORMAT_UINT,
    UINT2 = SDL_GPU_VERTEXELEMENTFORMAT_UINT2,
    UINT3 = SDL_GPU_VERTEXELEMENTFORMAT_UINT3,
    UINT4 = SDL_GPU_VERTEXELEMENTFORMAT_UINT4,
    FLOAT = SDL_GPU_VERTEXELEMENTFORMAT_FLOAT,
    FLOAT2 = SDL_GPU_VERTEXELEMENTFORMAT_FLOAT2,
    FLOAT3 = SDL_GPU_VERTEXELEMENTFORMAT_FLOAT3,
    FLOAT4 = SDL_GPU_VERTEXELEMENTFORMAT_FLOAT4,
    BYTE2 = SDL_GPU_VERTEXELEMENTFORMAT_BYTE2,
    BYTE4 = SDL_GPU_VERTEXELEMENTFORMAT_BYTE4,
    UBYTE2 = SDL_GPU_VERTEXELEMENTFORMAT_UBYTE2,
    UBYTE4 = SDL_GPU_VERTEXELEMENTFORMAT_UBYTE4,
    BYTE2_NORM = SDL_GPU_VERTEXELEMENTFORMAT_BYTE2_NORM,
    BYTE4_NORM = SDL_GPU_VERTEXELEMENTFORMAT_BYTE4_NORM,
    UBYTE2_NORM = SDL_GPU_VERTEXELEMENTFORMAT_UBYTE2_NORM,
    UBYTE4_NORM = SDL_GPU_VERTEXELEMENTFORMAT_UBYTE4_NORM,
    SHORT2 = SDL_GPU_VERTEXELEMENTFORMAT_SHORT2,
    SHORT4 = SDL_GPU_VERTEXELEMENTFORMAT_SHORT4,
    USHORT2 = SDL_GPU_VERTEXELEMENTFORMAT_USHORT2,
    USHORT4 = SDL_GPU_VERTEXELEMENTFORMAT_USHORT4,
    SHORT2_NORM = SDL_GPU_VERTEXELEMENTFORMAT_SHORT2_NORM,
    SHORT4_NORM = SDL_GPU_VERTEXELEMENTFORMAT_SHORT4_NORM,
    USHORT2_NORM = SDL_GPU_VERTEXELEMENTFORMAT_USHORT2_NORM,
    USHORT4_NORM = SDL_GPU_VERTEXELEMENTFORMAT_USHORT4_NORM,
    HALF2 = SDL_GPU_VERTEXELEMENTFORMAT_HALF2,
    HALF4 = SDL_GPU_VERTEXELEMENTFORMAT_HALF4
};

enum class GPUPrimitiveType
{
    POINTLIST = SDL_GPU_PRIMITIVETYPE_POINTLIST,
    LINELIST = SDL_GPU_PRIMITIVETYPE_LINELIST,
    LINESTRIP = SDL_GPU_PRIMITIVETYPE_LINESTRIP,
    TRIANGLELIST = SDL_GPU_PRIMITIVETYPE_TRIANGLELIST,
    TRIANGLESTRIP = SDL_GPU_PRIMITIVETYPE_TRIANGLESTRIP
};

enum class GPUFillMode
{
    FILL = SDL_GPU_FILLMODE_FILL,
    LINE = SDL_GPU_FILLMODE_LINE
};

enum class GPUCullMode
{
    NONE = SDL_GPU_CULLMODE_NONE,
    FRONT = SDL_GPU_CULLMODE_FRONT,
    BACK = SDL_GPU_CULLMODE_BACK
};

enum class GPUFrontFace
{
    COUNTER_CLOCKWISE = SDL_GPU_FRONTFACE_COUNTER_CLOCKWISE,
    CLOCKWISE = SDL_GPU_FRONTFACE_CLOCKWISE
};

enum class GPUSampleCount
{
    ONE = SDL_GPU_SAMPLECOUNT_1,
    TWO = SDL_GPU_SAMPLECOUNT_2,
    FOUR = SDL_GPU_SAMPLECOUNT_4,
    EIGHT = SDL_GPU_SAMPLECOUNT_8
};

enum class GPUCompareOp
{
    NEVER = SDL_GPU_COMPAREOP_NEVER,
    LESS = SDL_GPU_COMPAREOP_LESS,
    EQUAL = SDL_GPU_COMPAREOP_EQUAL,
    LESS_OR_EQUAL = SDL_GPU_COMPAREOP_LESS_OR_EQUAL,
    GREATER = SDL_GPU_COMPAREOP_GREATER,
    NOT_EQUAL = SDL_GPU_COMPAREOP_NOT_EQUAL,
    GREATER_OR_EQUAL = SDL_GPU_COMPAREOP_GREATER_OR_EQUAL,
    ALWAYS = SDL_GPU_COMPAREOP_ALWAYS
};

enum class GPUStencilOp
{
    KEEP = SDL_GPU_STENCILOP_KEEP,
    ZERO = SDL_GPU_STENCILOP_ZERO,
    REPLACE = SDL_GPU_STENCILOP_REPLACE,
    INCREMENT_AND_CLAMP = SDL_GPU_STENCILOP_INCREMENT_AND_CLAMP,
    DECREMENT_AND_CLAMP = SDL_GPU_STENCILOP_DECREMENT_AND_CLAMP,
    INVERT = SDL_GPU_STENCILOP_INVERT,
    INCREMENT_AND_WRAP = SDL_GPU_STENCILOP_INCREMENT_AND_WRAP,
    DECREMENT_AND_WRAP = SDL_GPU_STENCILOP_DECREMENT_AND_WRAP
};

enum class GPUBlendFactor
{
    ZERO = SDL_GPU_BLENDFACTOR_ZERO,
    ONE = SDL_GPU_BLENDFACTOR_ONE,
    SRC_COLOR = SDL_GPU_BLENDFACTOR_SRC_COLOR,
    ONE_MINUS_SRC_COLOR = SDL_GPU_BLENDFACTOR_ONE_MINUS_SRC_COLOR,
    DST_COLOR = SDL_GPU_BLENDFACTOR_DST_COLOR,
    ONE_MINUS_DST_COLOR = SDL_GPU_BLENDFACTOR_ONE_MINUS_DST_COLOR,
    SRC_ALPHA = SDL_GPU_BLENDFACTOR_SRC_ALPHA,
    ONE_MINUS_SRC_ALPHA = SDL_GPU_BLENDFACTOR_ONE_MINUS_SRC_ALPHA,
    DST_ALPHA = SDL_GPU_BLENDFACTOR_DST_ALPHA,
    ONE_MINUS_DST_ALPHA = SDL_GPU_BLENDFACTOR_ONE_MINUS_DST_ALPHA,
    CONSTANT_COLOR = SDL_GPU_BLENDFACTOR_CONSTANT_COLOR,
    ONE_MINUS_CONSTANT_COLOR = SDL_GPU_BLENDFACTOR_ONE_MINUS_CONSTANT_COLOR,
    SRC_ALPHA_SATURATE = SDL_GPU_BLENDFACTOR_SRC_ALPHA_SATURATE
};

enum class GPUBlendOp
{
    ADD = SDL_GPU_BLENDOP_ADD,
    SUBTRACT = SDL_GPU_BLENDOP_SUBTRACT,
    REVERSE_SUBTRACT = SDL_GPU_BLENDOP_REVERSE_SUBTRACT,
    MIN = SDL_GPU_BLENDOP_MIN,
    MAX = SDL_GPU_BLENDOP_MAX
};

enum class GPUTextureType
{
    TWOD = SDL_GPU_TEXTURETYPE_2D,
    TWOD_ARRAY = SDL_GPU_TEXTURETYPE_2D_ARRAY,
    THREED = SDL_GPU_TEXTURETYPE_3D,
    CUBE = SDL_GPU_TEXTURETYPE_CUBE
};

enum class GPUTextureFormat
{
    INVALID = SDL_GPU_TEXTUREFORMAT_INVALID,
    A8_UNORM = SDL_GPU_TEXTUREFORMAT_A8_UNORM,
    R8_UNORM = SDL_GPU_TEXTUREFORMAT_R8_UNORM,
    R8G8_UNORM = SDL_GPU_TEXTUREFORMAT_R8G8_UNORM,
    R8G8B8A8_UNORM = SDL_GPU_TEXTUREFORMAT_R8G8B8A8_UNORM,
    R16_UNORM = SDL_GPU_TEXTUREFORMAT_R16_UNORM,
    R16G16_UNORM = SDL_GPU_TEXTUREFORMAT_R16G16_UNORM,
    R16G16B16A16_UNORM = SDL_GPU_TEXTUREFORMAT_R16G16B16A16_UNORM,
    R10G10B10A2_UNORM = SDL_GPU_TEXTUREFORMAT_R10G10B10A2_UNORM,
    B5G6R5_UNORM = SDL_GPU_TEXTUREFORMAT_B5G6R5_UNORM,
    B5G5R5A1_UNORM = SDL_GPU_TEXTUREFORMAT_B5G5R5A1_UNORM,
    B4G4R4A4_UNORM = SDL_GPU_TEXTUREFORMAT_B4G4R4A4_UNORM,
    B8G8R8A8_UNORM = SDL_GPU_TEXTUREFORMAT_B8G8R8A8_UNORM,
    BC1_RGBA_UNORM = SDL_GPU_TEXTUREFORMAT_BC1_RGBA_UNORM,
    BC2_RGBA_UNORM = SDL_GPU_TEXTUREFORMAT_BC2_RGBA_UNORM,
    BC3_RGBA_UNORM = SDL_GPU_TEXTUREFORMAT_BC3_RGBA_UNORM,
    BC4_R_UNORM = SDL_GPU_TEXTUREFORMAT_BC4_R_UNORM,
    BC5_RG_UNORM = SDL_GPU_TEXTUREFORMAT_BC5_RG_UNORM,
    BC7_RGBA_UNORM = SDL_GPU_TEXTUREFORMAT_BC7_RGBA_UNORM,
    BC6H_RGB_FLOAT = SDL_GPU_TEXTUREFORMAT_BC6H_RGB_FLOAT,
    BC6H_RGB_UFLOAT = SDL_GPU_TEXTUREFORMAT_BC6H_RGB_UFLOAT,
    R8_SNORM = SDL_GPU_TEXTUREFORMAT_R8_SNORM,
    R8G8_SNORM = SDL_GPU_TEXTUREFORMAT_R8G8_SNORM,
    R8G8B8A8_SNORM = SDL_GPU_TEXTUREFORMAT_R8G8B8A8_SNORM,
    R16_SNORM = SDL_GPU_TEXTUREFORMAT_R16_SNORM,
    R16G16_SNORM = SDL_GPU_TEXTUREFORMAT_R16G16_SNORM,
    R16G16B16A16_SNORM = SDL_GPU_TEXTUREFORMAT_R16G16B16A16_SNORM,
    R16_FLOAT = SDL_GPU_TEXTUREFORMAT_R16_FLOAT,
    R16G16_FLOAT = SDL_GPU_TEXTUREFORMAT_R16G16_FLOAT,
    R16G16B16A16_FLOAT = SDL_GPU_TEXTUREFORMAT_R16G16B16A16_FLOAT,
    R32_FLOAT = SDL_GPU_TEXTUREFORMAT_R32_FLOAT,
    R32G32_FLOAT = SDL_GPU_TEXTUREFORMAT_R32G32_FLOAT,
    R32G32B32A32_FLOAT = SDL_GPU_TEXTUREFORMAT_R32G32B32A32_FLOAT,
    R11G11B10_UFLOAT = SDL_GPU_TEXTUREFORMAT_R11G11B10_UFLOAT,
    R8_UINT = SDL_GPU_TEXTUREFORMAT_R8_UINT,
    R8G8_UINT = SDL_GPU_TEXTUREFORMAT_R8G8_UINT,
    R8G8B8A8_UINT = SDL_GPU_TEXTUREFORMAT_R8G8B8A8_UINT,
    R16_UINT = SDL_GPU_TEXTUREFORMAT_R16_UINT,
    R16G16_UINT = SDL_GPU_TEXTUREFORMAT_R16G16_UINT,
    R16G16B16A16_UINT = SDL_GPU_TEXTUREFORMAT_R16G16B16A16_UINT,
    R8_INT = SDL_GPU_TEXTUREFORMAT_R8_INT,
    R8G8_INT = SDL_GPU_TEXTUREFORMAT_R8G8_INT,
    R8G8B8A8_INT = SDL_GPU_TEXTUREFORMAT_R8G8B8A8_INT,
    R16_INT = SDL_GPU_TEXTUREFORMAT_R16_INT,
    R16G16_INT = SDL_GPU_TEXTUREFORMAT_R16G16_INT,
    R16G16B16A16_INT = SDL_GPU_TEXTUREFORMAT_R16G16B16A16_INT,
    R8G8B8A8_UNORM_SRGB = SDL_GPU_TEXTUREFORMAT_R8G8B8A8_UNORM_SRGB,
    B8G8R8A8_UNORM_SRGB = SDL_GPU_TEXTUREFORMAT_B8G8R8A8_UNORM_SRGB,
    BC1_RGBA_UNORM_SRGB = SDL_GPU_TEXTUREFORMAT_BC1_RGBA_UNORM_SRGB,
    BC2_RGBA_UNORM_SRGB = SDL_GPU_TEXTUREFORMAT_BC2_RGBA_UNORM_SRGB,
    BC3_RGBA_UNORM_SRGB = SDL_GPU_TEXTUREFORMAT_BC3_RGBA_UNORM_SRGB,
    BC7_RGBA_UNORM_SRGB = SDL_GPU_TEXTUREFORMAT_BC7_RGBA_UNORM_SRGB,
    D16_UNORM = SDL_GPU_TEXTUREFORMAT_D16_UNORM,
    D24_UNORM = SDL_GPU_TEXTUREFORMAT_D24_UNORM,
    D32_FLOAT = SDL_GPU_TEXTUREFORMAT_D32_FLOAT,
    D24_UNORM_S8_UINT = SDL_GPU_TEXTUREFORMAT_D24_UNORM_S8_UINT,
    D32_FLOAT_S8_UINT = SDL_GPU_TEXTUREFORMAT_D32_FLOAT_S8_UINT
};

enum class GPUFilter
{
    NEAREST = SDL_GPU_FILTER_NEAREST,
    LINEAR = SDL_GPU_FILTER_LINEAR
};

enum class GPUSamplerMipmapMode
{
    NEAREST = SDL_GPU_SAMPLERMIPMAPMODE_NEAREST,
    LINEAR = SDL_GPU_SAMPLERMIPMAPMODE_LINEAR
};

enum class GPUSamplerAddressMode
{
    REPEAT = SDL_GPU_SAMPLERADDRESSMODE_REPEAT,
    MIRRORED_REPEAT = SDL_GPU_SAMPLERADDRESSMODE_MIRRORED_REPEAT,
    CLAMP_TO_EDGE = SDL_GPU_SAMPLERADDRESSMODE_CLAMP_TO_EDGE
};

enum class GPUShaderFormat
{
    PRIVATE = SDL_GPU_SHADERFORMAT_PRIVATE,
    SPIRV = SDL_GPU_SHADERFORMAT_SPIRV,
    DXBC = SDL_GPU_SHADERFORMAT_DXBC,
    DXIL = SDL_GPU_SHADERFORMAT_DXIL,
    MSL = SDL_GPU_SHADERFORMAT_MSL,
    METALLIB = SDL_GPU_SHADERFORMAT_METALLIB
};

enum class GPUShaderStage
{
    VERTEX = SDL_GPU_SHADERSTAGE_VERTEX,
    FRAGMENT = SDL_GPU_SHADERSTAGE_FRAGMENT
};

enum class GPULoadOp
{
    LOAD = SDL_GPU_LOADOP_LOAD,
    CLEAR = SDL_GPU_LOADOP_CLEAR,
    DONT_CARE = SDL_GPU_LOADOP_DONT_CARE
};

enum class GPUStoreOp
{
    STORE = SDL_GPU_STOREOP_STORE,
    DONT_CARE = SDL_GPU_STOREOP_DONT_CARE
};

enum class GPUTransferBufferUsage
{
    UPLOAD = SDL_GPU_TRANSFERBUFFERUSAGE_UPLOAD,
    DOWNLOAD = SDL_GPU_TRANSFERBUFFERUSAGE_DOWNLOAD
};

enum class GPUSwapchainComposition
{
    SDR = SDL_GPU_SWAPCHAINCOMPOSITION_SDR,
    SDR_LINEAR = SDL_GPU_SWAPCHAINCOMPOSITION_SDR_LINEAR,
    HDR_EXTENDED_LINEAR = SDL_GPU_SWAPCHAINCOMPOSITION_HDR_EXTENDED_LINEAR,
    HDR10_ST2048 = SDL_GPU_SWAPCHAINCOMPOSITION_HDR10_ST2048
};

enum class GPUPresentMode
{
    VSYNC = SDL_GPU_PRESENTMODE_VSYNC,
    IMMEDIATE = SDL_GPU_PRESENTMODE_IMMEDIATE,
    MAILBOX = SDL_GPU_PRESENTMODE_MAILBOX
};

//
// bitflags
//
struct GPUShaderFormatFlags : mijin::BitFlags<GPUShaderFormatFlags>
{
    bool private_ : 1 = false;
    bool spirv : 1 = false;
    bool dxbc : 1 = false;
    bool dxil : 1 = false;
    bool msl : 1 = false;
    bool metallib : 1 = false;

    constexpr operator SDL_GPUShaderFormat() const noexcept
    {
        return std::bit_cast<std::uint8_t>(*this);
    }
};

struct GPUColorComponentFlags : mijin::BitFlags<GPUColorComponentFlags>
{
    bool r : 1 = false;
    bool g : 1 = false;
    bool b : 1 = false;
    bool a : 1 = false;
};

struct GPUBufferUsageFlags : mijin::BitFlags<GPUBufferUsageFlags>
{
    bool vertex : 1 = false;
    bool index : 1 = false;
    bool indirect : 1 = false;
    bool graphicsStorageRead : 1 = false;
    bool computeStorageRead : 1 = false;
    bool computeStorageWrite : 1 = false;

    explicit operator SDL_GPUBufferUsageFlags() const noexcept
    {
        return std::bit_cast<std::uint8_t>(*this);
    }
};

struct GPUTextureUsageFlags : mijin::BitFlags<GPUTextureUsageFlags>
{
    bool sampler : 1 = false;
    bool colorTarget : 1 = false;
    bool depthStencilTarget : 1 = false;
    bool graphicsStorageRead : 1 = false;
    bool computeStorageRead : 1 = false;
    bool computeStorageWrite : 1 = false;

    explicit operator SDL_GPUTextureUsageFlags() const noexcept
    {
        return std::bit_cast<std::uint8_t>(*this);
    }
};

//
// structs
//
struct GPUVertexBinding
{
    Uint32 index;
    Uint32 pitch;
    GPUVertexInputRate inputRate = GPUVertexInputRate::VERTEX;
    Uint32 instanceStepRate;
};
static_assert(sizeof(GPUVertexBinding) == sizeof(SDL_GPUVertexBinding)
    && alignof(GPUVertexBinding) == alignof(SDL_GPUVertexBinding));

struct GPUVertexAttribute
{
    Uint32 location;
    Uint32 bindingIndex;
    GPUVertexElementFormat format;
    Uint32 offset;
};
static_assert(sizeof(GPUVertexAttribute) == sizeof(SDL_GPUVertexAttribute)
    && alignof(GPUVertexAttribute) == alignof(SDL_GPUVertexAttribute));

struct GPUVertexInputState
{
    std::span<const GPUVertexBinding> vertexBindings;
    std::span<const GPUVertexAttribute> vertexAttributes;
};

struct GPURasterizerState
{
    GPUFillMode fillMode = GPUFillMode::FILL;
    GPUCullMode cullMode = GPUCullMode::NONE;
    GPUFrontFace frontFace = GPUFrontFace::COUNTER_CLOCKWISE;
    bool enableDepthBias = false;
    float depthBiasConstantFactor;
    float depthBiasClamp;
    float depthBiasSlopeFactor;
};

struct GPUMultisampleState
{
    GPUSampleCount sampleCount = GPUSampleCount::ONE;
    Uint32 sampleMask = 0xFFFFFFFF;
};

struct GPUStencilOpState
{
    GPUStencilOp failOp;
    GPUStencilOp passOp;
    GPUStencilOp depthFailOp;
    GPUCompareOp compareOp;

    explicit operator SDL_GPUStencilOpState() const noexcept
    {
        return {
            .fail_op = static_cast<SDL_GPUStencilOp>(failOp),
            .pass_op = static_cast<SDL_GPUStencilOp>(passOp),
            .depth_fail_op = static_cast<SDL_GPUStencilOp>(depthFailOp),
            .compare_op = static_cast<SDL_GPUCompareOp>(compareOp),
        };
    }
};

struct GPUDepthStencilState
{
    bool enableDepthTest = false;
    bool enableDepthWrite = false;
    bool enableStencilTest = false;
    GPUCompareOp compareOp = GPUCompareOp::LESS_OR_EQUAL;
    GPUStencilOpState backStencilState;
    GPUStencilOpState frontStencilState;
    Uint8 compareMask;
    Uint8 writeMask;
};

struct GPUColorTargetBlendState
{
    bool enableBlend = false;
    GPUBlendFactor srcColorBlendfactor;
    GPUBlendFactor dstColorBlendfactor;
    GPUBlendOp colorBlendOp;
    GPUBlendFactor srcAlphaBlendfactor;
    GPUBlendFactor dstAlphaBlendfactor;
    GPUBlendOp alphaBlendOp;
    GPUColorComponentFlags colorWriteMask = {.r = true, .g = true, .b = true, .a = true};
};

struct GPUColorTargetDescription
{
    GPUTextureFormat format = GPUTextureFormat::INVALID;
    GPUColorTargetBlendState blendState;
};

static_assert(sizeof(GPUColorTargetDescription) == sizeof(SDL_GPUColorTargetDescription)
    && alignof(GPUColorTargetDescription) == alignof(SDL_GPUColorTargetDescription));

struct GPUGraphicsPipelineTargetInfo
{
    std::span<const GPUColorTargetDescription> colorTargetDescriptions;
    bool hasDepthStencilTarget = false;
    GPUTextureFormat depthStencilFormat = GPUTextureFormat::INVALID;

    explicit operator SDL_GPUGraphicsPipelineTargetInfo() const noexcept
    {
        return {
            .color_target_descriptions = std::bit_cast<const SDL_GPUColorTargetDescription*>(colorTargetDescriptions.data()),
            .num_color_targets = static_cast<Uint32>(colorTargetDescriptions.size()),
            .has_depth_stencil_target = hasDepthStencilTarget,
            .depth_stencil_format = static_cast<SDL_GPUTextureFormat>(depthStencilFormat)
        };
    }
};

using FColor = SDL_FColor;

struct GPUColorTargetInfo
{
    SDL_GPUTexture* texture = nullptr;
    Uint32 mipLevel = 0;
    Uint32 layerOrDepthPlane = 0;
    FColor clearColor = {.r = 0, .g = 0, .b = 0, .a = 1};
    GPULoadOp loadOp = GPULoadOp::LOAD;
    GPUStoreOp storeOp = GPUStoreOp::STORE;
    bool cycle = false;
};
static_assert(sizeof(GPUColorTargetInfo) == sizeof(SDL_GPUColorTargetInfo)
    && alignof(GPUColorTargetInfo) == alignof(SDL_GPUColorTargetInfo));

struct GPUDepthStencilTargetInfo
{
    SDL_GPUTexture* texture = nullptr;
    float clearDepth = 0.f;
    GPULoadOp loadOp = GPULoadOp::LOAD;
    GPUStoreOp storeOp = GPUStoreOp::STORE;
    GPULoadOp stencilLoadOp = GPULoadOp::LOAD;
    GPUStoreOp stencilStoreOp = GPUStoreOp::STORE;
    bool cycle = false;
    Uint8 clearStencil;
};
static_assert(sizeof(GPUDepthStencilTargetInfo) == sizeof(SDL_GPUDepthStencilTargetInfo)
    && alignof(GPUDepthStencilTargetInfo) == alignof(SDL_GPUDepthStencilTargetInfo));

struct GPUTransferBufferLocation
{
    SDL_GPUTransferBuffer* transferBuffer;
    Uint32 offset = 0;
};
static_assert(sizeof(GPUTransferBufferLocation) == sizeof(SDL_GPUTransferBufferLocation));

struct GPUBufferRegion
{
    SDL_GPUBuffer* buffer;
    Uint32 offset = 0;
    Uint32 size;
};
static_assert(sizeof(GPUBufferRegion) == sizeof(SDL_GPUBufferRegion));

struct GPUTextureTransferInfo
{
    SDL_GPUTransferBuffer* transferBuffer;
    Uint32 offset = 0;
    Uint32 pixelsPerRow;
    Uint32 rowsPerLayer;
};
static_assert(sizeof(GPUTextureTransferInfo) == sizeof(SDL_GPUTextureTransferInfo));

struct GPUTextureRegion
{
    SDL_GPUTexture* texture;
    Uint32 mipLevel = 0;
    Uint32 layer = 0;
    Uint32 x = 0;
    Uint32 y = 0;
    Uint32 z = 0;
    Uint32 width;
    Uint32 height = 1;
    Uint32 depth = 1;
};
static_assert(sizeof(GPUTextureRegion) == sizeof(SDL_GPUTextureRegion));

using GPUBufferBinding = SDL_GPUBufferBinding;

using GPUTextureSamplerBinding = SDL_GPUTextureSamplerBinding;

//
// classes
//

struct GPUDrawPrimitivesArgs
{
    Uint32 numVertices = 0;
    Uint32 numInstances = 1;
    Uint32 firstVertex = 0;
    Uint32 firstInstance = 0;
};

class GPURenderPass : public Base<SDL_GPURenderPass, GPURenderPass>
{
public:
    GPURenderPass() noexcept = default;
    GPURenderPass(const GPURenderPass&) = delete;
    GPURenderPass(GPURenderPass&& other) noexcept : Base(std::move(other)) {}

    GPURenderPass& operator=(const GPURenderPass&) = delete;
    GPURenderPass& operator=(GPURenderPass&& other) noexcept
    {
        Base::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPURenderPass& other) const noexcept = default;

    void end() noexcept
    {
        SDL_EndGPURenderPass(mHandle);
        mHandle = nullptr;
    }

    void destroy() noexcept
    {
        MIJIN_ASSERT(mHandle == nullptr, "Renderpass has not been ended.");
    }

    void bindGraphicsPipeline(SDL_GPUGraphicsPipeline* pipeline) const noexcept
    {
        SDL_BindGPUGraphicsPipeline(mHandle, pipeline);
    }

    void bindVertexBuffers(std::span<const GPUBufferBinding> bindings, Uint32 firstBinding = 0) const noexcept
    {
        SDL_BindGPUVertexBuffers(
            /* render_pass = */ mHandle,
            /* first_binding = */ firstBinding,
            /* bindings = */ bindings.data(),
            /* num_bindings = */ static_cast<Uint32>(bindings.size())
        );
    }

    void bindVertexBuffer(const GPUBufferBinding& binding, Uint32 firstBinding = 0) const noexcept
    {
        bindVertexBuffers({&binding, 1}, firstBinding);
    }

    void bindFragmentSamplers(std::span<const GPUTextureSamplerBinding> textureSamplerBindings, Uint32 firstSlot = 0) const noexcept
    {
        SDL_BindGPUFragmentSamplers(
            /* render_pass = */ mHandle,
            /* first_slot = */ firstSlot,
            /* texture_sampler_bindings = */ textureSamplerBindings.data(),
            /* num_bindings = */ static_cast<Uint32>(textureSamplerBindings.size())
        );
    }

    void bindFragmentSampler(const GPUTextureSamplerBinding& binding, Uint32 firstSlot = 0) const noexcept
    {
        bindFragmentSamplers({&binding, 1}, firstSlot);
    }

    void drawPrimitives(const GPUDrawPrimitivesArgs& args) const noexcept
    {
        SDL_DrawGPUPrimitives(mHandle, args.numVertices, args.numInstances, args.firstVertex, args.firstInstance);
    }

    friend class GPUCommandBuffer;
};

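// Usage sketch (illustrative only, not part of the API): recording a render pass.
// Assumes `cmd` is a GPUCommandBuffer; `colorTarget`, `pipeline` (a raw SDL_GPUGraphicsPipeline*),
// `vertexBinding` and `samplerBinding` are placeholders created elsewhere.
//
//     sdlpp::GPURenderPass pass = cmd.beginRenderPass({.colorTargetInfos = {&colorTarget, 1}});
//     pass.bindGraphicsPipeline(pipeline);
//     pass.bindVertexBuffer(vertexBinding);
//     pass.bindFragmentSampler(samplerBinding);
//     pass.drawPrimitives({.numVertices = 3});
//     pass.end(); // must be called before the pass object is destroyed
//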
class GPUCopyPass : public Base<SDL_GPUCopyPass, GPUCopyPass>
{
public:
    GPUCopyPass() noexcept = default;
    GPUCopyPass(const GPUCopyPass&) = delete;
    GPUCopyPass(GPUCopyPass&& other) noexcept : Base(std::move(other)) {}

    GPUCopyPass& operator=(const GPUCopyPass&) = delete;
    GPUCopyPass& operator=(GPUCopyPass&& other) noexcept
    {
        Base::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUCopyPass& other) const noexcept = default;

    void end() noexcept
    {
        SDL_EndGPUCopyPass(mHandle);
        mHandle = nullptr;
    }

    void destroy() noexcept
    {
        MIJIN_ASSERT(mHandle == nullptr, "Copypass has not been ended.");
    }

    void uploadToGPUBuffer(const GPUTransferBufferLocation& source, const GPUBufferRegion& destination, bool cycle = false)
    {
        SDL_UploadToGPUBuffer(
            /* copy_pass = */ mHandle,
            /* source = */ std::bit_cast<const SDL_GPUTransferBufferLocation*>(&source),
            /* destination = */ std::bit_cast<const SDL_GPUBufferRegion*>(&destination),
            /* cycle = */ cycle
        );
    }

    void uploadToGPUTexture(const GPUTextureTransferInfo& source, const GPUTextureRegion& destination, bool cycle = false)
    {
        SDL_UploadToGPUTexture(
            /* copy_pass = */ mHandle,
            /* source = */ std::bit_cast<const SDL_GPUTextureTransferInfo*>(&source),
            /* destination = */ std::bit_cast<const SDL_GPUTextureRegion*>(&destination),
            /* cycle = */ cycle
        );
    }

    friend class GPUCommandBuffer;
};

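// Usage sketch (illustrative only): uploading vertex data through a copy pass.
// Assumes `cmd` is a GPUCommandBuffer; `transferBuffer`, `vertexBuffer` and `dataSize`
// are placeholders (raw SDL handles and a byte count from elsewhere).
//
//     sdlpp::GPUCopyPass copy = cmd.beginCopyPass();
//     copy.uploadToGPUBuffer({.transferBuffer = transferBuffer},
//                            {.buffer = vertexBuffer, .size = dataSize});
//     copy.end();
//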
struct GPUTextureCreateArgs
{
    GPUTextureType type = GPUTextureType::TWOD;
    GPUTextureFormat format = GPUTextureFormat::R8G8B8A8_UNORM;
    GPUTextureUsageFlags usage;
    Uint32 width = 1;
    Uint32 height = 1;
    Uint32 layerCountOrDepth = 1;
    Uint32 numLevels = 1;
    GPUSampleCount sampleCount = GPUSampleCount::ONE;
};

class GPUTexture : public BaseWithDevice<SDL_GPUTexture, GPUTexture>
{
public:
    GPUTexture() noexcept = default;
    GPUTexture(const GPUTexture&) = delete;
    GPUTexture(GPUTexture&& other) noexcept : BaseWithDevice(std::move(other)) {}

    GPUTexture& operator=(const GPUTexture&) = delete;
    GPUTexture& operator=(GPUTexture&& other) noexcept
    {
        BaseWithDevice::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUTexture& other) const noexcept = default;

    void create(SDL_GPUDevice* device, const GPUTextureCreateArgs& args)
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUTexture has already been created.");
        const SDL_GPUTextureCreateInfo createInfo = {
            .type = static_cast<SDL_GPUTextureType>(args.type),
            .format = static_cast<SDL_GPUTextureFormat>(args.format),
            .usage = static_cast<SDL_GPUTextureUsageFlags>(args.usage),
            .width = args.width,
            .height = args.height,
            .layer_count_or_depth = args.layerCountOrDepth,
            .num_levels = args.numLevels,
            .sample_count = static_cast<SDL_GPUSampleCount>(args.sampleCount)
        };
        mHandle = SDL_CreateGPUTexture(device, &createInfo);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
        mDevice = device;
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            // if this is not manually created (e.g. a swapchain image), device will be nullptr
            if (mDevice != nullptr)
            {
                SDL_ReleaseGPUTexture(mDevice, mHandle);
            }
            mHandle = nullptr;
            mDevice = nullptr;
        }
    }

    friend class GPUCommandBuffer;
};

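// Usage sketch (illustrative only): creating a sampled 2D texture.
// `device` stands for an SDL_GPUDevice* obtained elsewhere; the sizes are placeholders.
//
//     sdlpp::GPUTexture texture;
//     texture.create(device, {
//         .usage = {.sampler = true},
//         .width = 256,
//         .height = 256
//     });
//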
struct GPUSamplerCreateArgs
{
    GPUFilter minFilter = GPUFilter::NEAREST;
    GPUFilter magFilter = GPUFilter::LINEAR;
    GPUSamplerMipmapMode mipmapMode = GPUSamplerMipmapMode::LINEAR;
    GPUSamplerAddressMode addressModeU = GPUSamplerAddressMode::REPEAT;
    GPUSamplerAddressMode addressModeV = GPUSamplerAddressMode::REPEAT;
    GPUSamplerAddressMode addressModeW = GPUSamplerAddressMode::REPEAT;
    float mipLodBias = 0.f;
    float maxAnisotropy = 1.f;
    bool enableAnisotropy = false;
    bool enableCompare = false;
    GPUCompareOp compareOp;
    float minLod = 0.f;
    float maxLod = 1.f;
};

class GPUSampler : public BaseWithDevice<SDL_GPUSampler, GPUSampler>
{
public:
    GPUSampler() noexcept = default;
    GPUSampler(const GPUSampler&) = delete;
    GPUSampler(GPUSampler&& other) noexcept : BaseWithDevice(std::move(other)) {}

    GPUSampler& operator=(const GPUSampler&) = delete;
    GPUSampler& operator=(GPUSampler&& other) noexcept
    {
        BaseWithDevice::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUSampler& other) const noexcept = default;

    void create(SDL_GPUDevice* device, const GPUSamplerCreateArgs& args)
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUSampler has already been created.");
        const SDL_GPUSamplerCreateInfo createInfo = {
            .min_filter = static_cast<SDL_GPUFilter>(args.minFilter),
            .mag_filter = static_cast<SDL_GPUFilter>(args.magFilter),
            .mipmap_mode = static_cast<SDL_GPUSamplerMipmapMode>(args.mipmapMode),
            .address_mode_u = static_cast<SDL_GPUSamplerAddressMode>(args.addressModeU),
            .address_mode_v = static_cast<SDL_GPUSamplerAddressMode>(args.addressModeV),
            .address_mode_w = static_cast<SDL_GPUSamplerAddressMode>(args.addressModeW),
            .mip_lod_bias = args.mipLodBias,
            .max_anisotropy = args.maxAnisotropy,
            .enable_anisotropy = args.enableAnisotropy,
            .enable_compare = args.enableCompare,
            .compare_op = static_cast<SDL_GPUCompareOp>(args.compareOp),
            .min_lod = args.minLod,
            .max_lod = args.maxLod
        };
        mHandle = SDL_CreateGPUSampler(device, &createInfo);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
        mDevice = device;
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            SDL_ReleaseGPUSampler(mDevice, mHandle);
            mHandle = nullptr;
            mDevice = nullptr;
        }
    }
};

struct GPUBeginRenderPassArgs
{
    std::span<const GPUColorTargetInfo> colorTargetInfos;
    std::optional<GPUDepthStencilTargetInfo> depthStencilTargetInfo;
};

class GPUCommandBuffer : public Base<SDL_GPUCommandBuffer, GPUCommandBuffer>
{
public:
    GPUCommandBuffer() noexcept = default;
    GPUCommandBuffer(const GPUCommandBuffer&) = delete;
    GPUCommandBuffer(GPUCommandBuffer&& other) noexcept : Base(std::move(other)) {}

    GPUCommandBuffer& operator=(const GPUCommandBuffer&) = delete;
    GPUCommandBuffer& operator=(GPUCommandBuffer&& other) noexcept
    {
        Base::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUCommandBuffer& other) const noexcept = default;

    void submit() noexcept
    {
        SDL_SubmitGPUCommandBuffer(mHandle);
        mHandle = nullptr;
    }

    void destroy() noexcept
    {
        MIJIN_ASSERT(mHandle == nullptr, "Command buffer has not been submitted.");
    }

    [[nodiscard]]
    GPURenderPass beginRenderPass(const GPUBeginRenderPassArgs& args) const noexcept
    {
        GPURenderPass renderPass;
        renderPass.mHandle = SDL_BeginGPURenderPass(
            /* command_buffer = */ mHandle,
            /* color_target_infos = */ std::bit_cast<const SDL_GPUColorTargetInfo*>(args.colorTargetInfos.data()),
            /* num_color_targets = */ static_cast<Uint32>(args.colorTargetInfos.size()),
            /* depth_stencil_target_info = */ args.depthStencilTargetInfo.has_value() ? std::bit_cast<const SDL_GPUDepthStencilTargetInfo*>(&*args.depthStencilTargetInfo) : nullptr
        );
        return renderPass;
    }

    [[nodiscard]]
    GPUCopyPass beginCopyPass() const noexcept
    {
        GPUCopyPass copyPass;
        copyPass.mHandle = SDL_BeginGPUCopyPass(mHandle);
        return copyPass;
    }

    [[nodiscard]]
    GPUTexture acquireSwapchainTexture(SDL_Window* window, Uint32& outWidth, Uint32& outHeight) noexcept
    {
        GPUTexture texture;
        texture.mHandle = SDL_AcquireGPUSwapchainTexture(mHandle, window, &outWidth, &outHeight);
        return texture;
    }

    template<typename TData>
    void pushFragmentUniformData(Uint32 slotIndex, std::span<const TData> data) const noexcept
    {
        SDL_PushGPUFragmentUniformData(
            /* command_buffer = */ mHandle,
            /* slot_index = */ slotIndex,
            /* data = */ data.data(),
            /* length = */ data.size_bytes()
        );
    }

    friend class GPUDevice;
};

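// Usage sketch (illustrative only): a typical frame.
// `device` is a created GPUDevice, `window` a claimed SDL_Window*, and `colorTarget` a
// GPUColorTargetInfo that references the swapchain texture's raw handle; error handling is omitted.
//
//     sdlpp::GPUCommandBuffer cmd = device.acquireCommandBuffer();
//     Uint32 width = 0, height = 0;
//     sdlpp::GPUTexture swapchain = cmd.acquireSwapchainTexture(window, width, height);
//     sdlpp::GPURenderPass pass = cmd.beginRenderPass({.colorTargetInfos = {&colorTarget, 1}});
//     // ... bind pipeline / buffers and draw ...
//     pass.end();
//     cmd.submit();
//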
struct GPUDeviceCreateArgs
{
    GPUShaderFormatFlags formatFlags = {};
    bool debugMode = false;
    const char* name = nullptr;
};

class GPUDevice : public Base<SDL_GPUDevice, GPUDevice>
{
public:
    GPUDevice() noexcept = default;
    GPUDevice(const GPUDevice&) = delete;
    GPUDevice(GPUDevice&& other) noexcept : Base(std::move(other)) {}

    GPUDevice& operator=(const GPUDevice&) = delete;
    GPUDevice& operator=(GPUDevice&& other) noexcept
    {
        Base::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUDevice& other) const noexcept = default;

    void create(const GPUDeviceCreateArgs& args = {})
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUDevice has already been created.");
        mHandle = SDL_CreateGPUDevice(args.formatFlags, args.debugMode, args.name);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            SDL_DestroyGPUDevice(mHandle);
            mHandle = nullptr;
        }
    }

    void claimWindow(SDL_Window* window) const
    {
        if (!SDL_ClaimWindowForGPUDevice(mHandle, window))
        {
            throw SDLError();
        }
    }

    [[nodiscard]]
    bool windowSupportsSwapchainComposition(SDL_Window* window, GPUSwapchainComposition swapchainComposition) const noexcept
    {
        return SDL_WindowSupportsGPUSwapchainComposition(mHandle, window, static_cast<SDL_GPUSwapchainComposition>(swapchainComposition));
    }

    [[nodiscard]]
    bool windowSupportsPresentMode(SDL_Window* window, GPUPresentMode presentMode) const noexcept
    {
        return SDL_WindowSupportsGPUPresentMode(mHandle, window, static_cast<SDL_GPUPresentMode>(presentMode));
    }

    void setSwapchainParameters(SDL_Window* window, GPUSwapchainComposition swapchainComposition,
        GPUPresentMode presentMode) const
    {
        if (!SDL_SetGPUSwapchainParameters(mHandle, window, static_cast<SDL_GPUSwapchainComposition>(swapchainComposition),
            static_cast<SDL_GPUPresentMode>(presentMode)))
        {
            throw SDLError();
        }
    }

    [[nodiscard]]
    GPUTextureFormat getSwapchainTextureFormat(SDL_Window* window) const
    {
        return static_cast<GPUTextureFormat>(SDL_GetGPUSwapchainTextureFormat(mHandle, window));
    }

    [[nodiscard]]
    GPUCommandBuffer acquireCommandBuffer() const noexcept
    {
        GPUCommandBuffer cmdBuffer;
        cmdBuffer.mHandle = SDL_AcquireGPUCommandBuffer(mHandle);
        return cmdBuffer;
    }
};

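// Usage sketch (illustrative only): creating a device and attaching it to a window.
// `window` stands for an SDL_Window* created elsewhere.
//
//     sdlpp::GPUDevice device;
//     device.create({.formatFlags = {.spirv = true}, .debugMode = true});
//     device.claimWindow(window);
//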
struct GPUGraphicsPipelineCreateArgs
{
    SDL_GPUShader* vertexShader;
    SDL_GPUShader* fragmentShader;
    GPUVertexInputState vertexInputState;
    GPUPrimitiveType primitiveType = GPUPrimitiveType::TRIANGLELIST;
    GPURasterizerState rasterizerState;
    GPUMultisampleState multisampleState;
    GPUDepthStencilState depthStencilState;
    GPUGraphicsPipelineTargetInfo targetInfo;
};

class GPUGraphicsPipeline : public BaseWithDevice<SDL_GPUGraphicsPipeline, GPUGraphicsPipeline>
{
public:
    GPUGraphicsPipeline() noexcept = default;
    GPUGraphicsPipeline(const GPUGraphicsPipeline&) = delete;
    GPUGraphicsPipeline(GPUGraphicsPipeline&& other) noexcept : BaseWithDevice(std::move(other)) {}

    GPUGraphicsPipeline& operator=(const GPUGraphicsPipeline&) = delete;
    GPUGraphicsPipeline& operator=(GPUGraphicsPipeline&& other) noexcept
    {
        BaseWithDevice::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUGraphicsPipeline& other) const noexcept = default;

    void create(SDL_GPUDevice* device, const GPUGraphicsPipelineCreateArgs& args)
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUGraphicsPipeline has already been created.");
        const SDL_GPUGraphicsPipelineCreateInfo createInfo =
        {
            .vertex_shader = args.vertexShader,
            .fragment_shader = args.fragmentShader,
            .vertex_input_state = {
                .vertex_bindings = std::bit_cast<SDL_GPUVertexBinding*>(args.vertexInputState.vertexBindings.data()),
                .num_vertex_bindings = static_cast<Uint32>(args.vertexInputState.vertexBindings.size()),
                .vertex_attributes = std::bit_cast<SDL_GPUVertexAttribute*>(args.vertexInputState.vertexAttributes.data()),
                .num_vertex_attributes = static_cast<Uint32>(args.vertexInputState.vertexAttributes.size())
            },
            .primitive_type = static_cast<SDL_GPUPrimitiveType>(args.primitiveType),
            .rasterizer_state = {
                .fill_mode = static_cast<SDL_GPUFillMode>(args.rasterizerState.fillMode),
                .cull_mode = static_cast<SDL_GPUCullMode>(args.rasterizerState.cullMode),
                .front_face = static_cast<SDL_GPUFrontFace>(args.rasterizerState.frontFace),
                .enable_depth_bias = args.rasterizerState.enableDepthBias,
                .depth_bias_constant_factor = args.rasterizerState.depthBiasConstantFactor,
                .depth_bias_clamp = args.rasterizerState.depthBiasClamp,
                .depth_bias_slope_factor = args.rasterizerState.depthBiasSlopeFactor
            },
            .multisample_state = {
                .sample_count = static_cast<SDL_GPUSampleCount>(args.multisampleState.sampleCount),
                .sample_mask = args.multisampleState.sampleMask
            },
            .depth_stencil_state = {
                .enable_depth_test = args.depthStencilState.enableDepthTest,
                .enable_depth_write = args.depthStencilState.enableDepthWrite,
                .enable_stencil_test = args.depthStencilState.enableStencilTest,
                .compare_op = static_cast<SDL_GPUCompareOp>(args.depthStencilState.compareOp),
                .back_stencil_state = static_cast<SDL_GPUStencilOpState>(args.depthStencilState.backStencilState),
                .front_stencil_state = static_cast<SDL_GPUStencilOpState>(args.depthStencilState.frontStencilState),
                .compare_mask = args.depthStencilState.compareMask,
                .write_mask = args.depthStencilState.writeMask
            },
            .target_info = static_cast<SDL_GPUGraphicsPipelineTargetInfo>(args.targetInfo),
            .props = 0
        };
        mHandle = SDL_CreateGPUGraphicsPipeline(device, &createInfo);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
        mDevice = device;
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            SDL_ReleaseGPUGraphicsPipeline(mDevice, mHandle);
            mDevice = nullptr;
            mHandle = nullptr;
        }
    }
};

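// Usage sketch (illustrative only): building a pipeline that renders to the swapchain format.
// `device` stands for the raw SDL_GPUDevice*; `swapchainFormat`, `vertexShader` and
// `fragmentShader` are placeholders created elsewhere.
//
//     const sdlpp::GPUColorTargetDescription colorTarget = {.format = swapchainFormat};
//     sdlpp::GPUGraphicsPipeline pipeline;
//     pipeline.create(device, {
//         .vertexShader = vertexShader,
//         .fragmentShader = fragmentShader,
//         .targetInfo = {.colorTargetDescriptions = {&colorTarget, 1}}
//     });
//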
struct GPUShaderCreateArgs
{
    std::span<const Uint8> code;
    std::string entrypoint = "main";
    GPUShaderFormat format;
    GPUShaderStage stage;
    Uint32 numSamplers = 0;
    Uint32 numStorageTextures = 0;
    Uint32 numStorageBuffers = 0;
    Uint32 numUniformBuffers = 0;
};

class GPUShader : public BaseWithDevice<SDL_GPUShader, GPUShader>
{
public:
    GPUShader() noexcept = default;
    GPUShader(const GPUShader&) = delete;
    GPUShader(GPUShader&& other) noexcept : BaseWithDevice(std::move(other)) {}

    GPUShader& operator=(const GPUShader&) = delete;
    GPUShader& operator=(GPUShader&& other) noexcept
    {
        BaseWithDevice::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUShader& other) const noexcept = default;

    void create(SDL_GPUDevice* device, const GPUShaderCreateArgs& args)
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUShader has already been created.");
        const SDL_GPUShaderCreateInfo createInfo =
        {
            .code_size = args.code.size(),
            .code = args.code.data(),
            .entrypoint = args.entrypoint.c_str(),
            .format = static_cast<SDL_GPUShaderFormat>(args.format),
            .stage = static_cast<SDL_GPUShaderStage>(args.stage),
            .num_samplers = args.numSamplers,
            .num_storage_textures = args.numStorageTextures,
            .num_storage_buffers = args.numStorageBuffers,
            .num_uniform_buffers = args.numUniformBuffers
        };
        mHandle = SDL_CreateGPUShader(device, &createInfo);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
        mDevice = device;
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            SDL_ReleaseGPUShader(mDevice, mHandle);
            mHandle = nullptr;
            mDevice = nullptr;
        }
    }
};

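// Usage sketch (illustrative only): creating a vertex shader from SPIR-V bytecode.
// `device` stands for the raw SDL_GPUDevice* and `spirvBytes` for a std::span<const Uint8>
// holding the compiled shader; both are placeholders.
//
//     sdlpp::GPUShader vertexShader;
//     vertexShader.create(device, {
//         .code = spirvBytes,
//         .format = sdlpp::GPUShaderFormat::SPIRV,
//         .stage = sdlpp::GPUShaderStage::VERTEX,
//         .numUniformBuffers = 1
//     });
//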
struct GPUBufferCreateArgs
{
    GPUBufferUsageFlags usage;
    Uint32 size;
};

class GPUBuffer : public BaseWithDevice<SDL_GPUBuffer, GPUBuffer>
{
public:
    GPUBuffer() noexcept = default;
    GPUBuffer(const GPUBuffer&) = delete;
    GPUBuffer(GPUBuffer&& other) noexcept : BaseWithDevice(std::move(other)) {}

    GPUBuffer& operator=(const GPUBuffer&) = delete;
    GPUBuffer& operator=(GPUBuffer&& other) noexcept
    {
        BaseWithDevice::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUBuffer& other) const noexcept = default;

    void create(SDL_GPUDevice* device, const GPUBufferCreateArgs& args)
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUBuffer has already been created.");
        const SDL_GPUBufferCreateInfo createInfo = {
            .usage = static_cast<SDL_GPUBufferUsageFlags>(args.usage),
            .size = args.size,
            .props = 0
        };
        mHandle = SDL_CreateGPUBuffer(device, &createInfo);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
        mDevice = device;
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            SDL_ReleaseGPUBuffer(mDevice, mHandle);
            mHandle = nullptr;
            mDevice = nullptr;
        }
    }
};

struct GPUTransferBufferCreateArgs
{
    GPUTransferBufferUsage usage;
    Uint32 size;
};

class GPUTransferBuffer : public BaseWithDevice<SDL_GPUTransferBuffer, GPUTransferBuffer>
{
public:
    GPUTransferBuffer() noexcept = default;
    GPUTransferBuffer(const GPUTransferBuffer&) = delete;
    GPUTransferBuffer(GPUTransferBuffer&& other) noexcept : BaseWithDevice(std::move(other)) {}

    GPUTransferBuffer& operator=(const GPUTransferBuffer&) = delete;
    GPUTransferBuffer& operator=(GPUTransferBuffer&& other) noexcept
    {
        BaseWithDevice::operator=(std::move(other));
        return *this;
    }
    auto operator<=>(const GPUTransferBuffer& other) const noexcept = default;

    void create(SDL_GPUDevice* device, const GPUTransferBufferCreateArgs& args)
    {
        MIJIN_ASSERT(mHandle == nullptr, "GPUTransferBuffer has already been created.");
        const SDL_GPUTransferBufferCreateInfo createInfo = {
            .usage = static_cast<SDL_GPUTransferBufferUsage>(args.usage),
            .size = args.size,
            .props = 0
        };
        mHandle = SDL_CreateGPUTransferBuffer(device, &createInfo);
        if (mHandle == nullptr)
        {
            throw SDLError();
        }
        mDevice = device;
    }

    void destroy() noexcept
    {
        if (mHandle != nullptr)
        {
            SDL_ReleaseGPUTransferBuffer(mDevice, mHandle);
            mHandle = nullptr;
            mDevice = nullptr;
        }
    }

    void* map(bool cycle = false) noexcept
    {
        return SDL_MapGPUTransferBuffer(mDevice, mHandle, cycle);
    }

    void unmap() noexcept
    {
        SDL_UnmapGPUTransferBuffer(mDevice, mHandle);
    }
};
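
// Usage sketch (illustrative only): staging data through a transfer buffer.
// `device` stands for the raw SDL_GPUDevice*; `vertices` and `dataSize` are placeholder
// CPU-side data and its byte count.
//
//     sdlpp::GPUTransferBuffer staging;
//     staging.create(device, {.usage = sdlpp::GPUTransferBufferUsage::UPLOAD, .size = dataSize});
//     std::memcpy(staging.map(), vertices, dataSize);
//     staging.unmap();
//     // then copy into a GPUBuffer inside a copy pass (see GPUCopyPass above)
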
} // namespace sdlpp

#endif // !defined(SDL_GPU_TEST_PRIVATE_SDL_GPU_TEST_SDLPP_GPU_HPP_INCLUDED)