c/render: Add simple sub-allocation code

Jakob Bornecrantz 2023-10-04 14:24:06 +01:00
parent efc06dac0f
commit 00891b3452
3 changed files with 204 additions and 0 deletions


@@ -122,6 +122,7 @@ if(XRT_HAVE_VULKAN)
render/render_interface.h
render/render_resources.c
render/render_shaders.c
render/render_sub_alloc.c
render/render_util.c
)
# The aux_vk library needs to be public to include Vulkan.


@@ -40,6 +40,17 @@ extern "C" {
*
*/
/*!
* The value `minUniformBufferOffsetAlignment` is defined by the Vulkan spec as
* having a max value of 256. Use this value to safely figure out sizes and
* alignment of UBO sub-allocations. It is also the max for `nonCoherentAtomSize`,
* which is what we need to align UBOs to if we ever need to do flushing.
*
* https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPhysicalDeviceLimits.html
* https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#limits-minmax
*/
#define RENDER_ALWAYS_SAFE_UBO_ALIGNMENT (256)
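As a worked example of what this constant implies for the sub-allocator below (illustration only, not part of the header): a 64-byte UBO still consumes a full 256-byte slice, so a 16 KiB buffer holds at most 16384 / 256 = 64 sub-allocations per frame.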
/*!
* Max number of layers for the layer squasher; it can be different from
* @ref COMP_MAX_LAYERS since the render module is separate from the compositor.
@@ -228,6 +239,94 @@ VkResult
render_buffer_write(struct vk_bundle *vk, struct render_buffer *buffer, void *data, VkDeviceSize size);
/*
*
* Sub-alloc.
*
*/
/*!
* Per frame sub-allocation into a buffer, used to reduce the number of UBO
* objects we need to create. There is no way to free a sub-allocation; freeing
* happens implicitly at the end of the frame when the
* @ref render_sub_alloc_tracker is zeroed out.
*
* @see render_sub_alloc_tracker
*/
struct render_sub_alloc
{
/*!
* The buffer this is allocated from; it is the caller's responsibility
* to keep it alive for as long as the sub-allocation is used.
*/
VkBuffer buffer;
//! Size of sub-allocation.
VkDeviceSize size;
//! Offset into buffer.
VkDeviceSize offset;
};
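The three fields map directly onto a Vulkan descriptor update. A minimal sketch of that (hypothetical caller code, not part of this commit; `rsa`, `descriptor_set` and binding 0 are assumptions, and the usual vk_bundle function pointers are assumed):

	// Describe this sub-allocation's slice of the buffer as a UBO.
	VkDescriptorBufferInfo buffer_info = {
	    .buffer = rsa.buffer, // VkBuffer owned by the caller
	    .offset = rsa.offset, // aligned offset into that buffer
	    .range = rsa.size,    // size of this sub-allocation
	};

	VkWriteDescriptorSet write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .dstSet = descriptor_set, // hypothetical, created elsewhere
	    .dstBinding = 0,          // hypothetical binding index
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	    .pBufferInfo = &buffer_info,
	};

	vk->vkUpdateDescriptorSets(vk->device, 1, &write, 0, NULL);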
/*!
* A per frame tracker of sub-allocations out of a buffer, used to reduce the
* number of UBO objects we need to create. This code is designed with one
* constraint in mind: a sub-allocation lives for exactly one frame, staying
* valid for the whole frame and being discarded at the end of it. This removes
* the need to free individual sub-allocations, or even to track them beyond
* filling in the UBO data and descriptor sets.
*
* @see render_sub_alloc
*/
struct render_sub_alloc_tracker
{
/*!
* The buffer to allocate from; it is the caller's responsibility to keep
* it alive for as long as the sub-allocations are in use.
*/
VkBuffer buffer;
//! Start of memory, if the buffer was mapped when initialised.
void *mapped;
//! Total size of buffer.
VkDeviceSize total_size;
//! Currently used memory.
VkDeviceSize used;
};
/*!
* Init a @ref render_sub_alloc_tracker struct from a @ref render_buffer; the
* caller is responsible for keeping @p buffer alive while the sub-allocator
* is being used.
*/
void
render_sub_alloc_tracker_init(struct render_sub_alloc_tracker *rsat, struct render_buffer *buffer);
/*!
* Allocate enough memory (respecting UBO alignment constraints) for @p size
* bytes, returning a pointer to the mapped memory, or NULL if the underlying
* buffer isn't mapped.
*/
XRT_CHECK_RESULT VkResult
render_sub_alloc_ubo_alloc_and_get_ptr(struct vk_bundle *vk,
struct render_sub_alloc_tracker *rsat,
VkDeviceSize size,
void **out_ptr,
struct render_sub_alloc *out_rsa);
/*!
* Allocate enough memory (respecting UBO alignment constraints) to hold the
* data at @p ptr, and copy that data into the buffer using the CPU.
*/
XRT_CHECK_RESULT VkResult
render_sub_alloc_ubo_alloc_and_write(struct vk_bundle *vk,
struct render_sub_alloc_tracker *rsat,
const void *ptr,
VkDeviceSize size,
struct render_sub_alloc *out_rsa);
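A minimal per-frame usage sketch of the two functions above (hypothetical caller code, not part of this commit; `ubo_buffer` is an already created and mapped @ref render_buffer, and `struct my_ubo_data` is a caller-defined UBO layout):

	// Once at the start of the frame, reset the tracker over the buffer.
	struct render_sub_alloc_tracker rsat;
	render_sub_alloc_tracker_init(&rsat, &ubo_buffer);

	// For each draw, sub-allocate a slice and write the UBO data into it.
	struct my_ubo_data data = { /* per-draw values */ };
	struct render_sub_alloc rsa;

	VkResult ret = render_sub_alloc_ubo_alloc_and_write(vk, &rsat, &data, sizeof(data), &rsa);
	if (ret != VK_SUCCESS) {
		return ret; // out of space in the buffer, or the buffer isn't mapped
	}

	// rsa now identifies this draw's aligned slice of ubo_buffer and can be
	// used to fill in a VkDescriptorBufferInfo; nothing is ever freed, the
	// tracker is simply re-initialised at the start of the next frame.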
/*
*
* Resources


@@ -0,0 +1,104 @@
// Copyright 2019-2023, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Sub allocation functions.
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_render
*/
#include "vk/vk_mini_helpers.h"
#include "render/render_interface.h"
// Round a size up to the next multiple of a power-of-two alignment.
static VkDeviceSize
align_padding_pot(VkDeviceSize size, VkDeviceSize alignment)
{
return (size + alignment - 1) & ~(alignment - 1);
}
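A quick worked example of the rounding this helper performs (illustration only, not part of the file):

	assert(align_padding_pot(100, 256) == 256); // (100 + 255) & ~255
	assert(align_padding_pot(256, 256) == 256); // already aligned, unchanged
	assert(align_padding_pot(257, 256) == 512); // spills into the next slice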
/*
*
* 'Exported' functions.
*
*/
void
render_sub_alloc_tracker_init(struct render_sub_alloc_tracker *rsat, struct render_buffer *buffer)
{
rsat->buffer = buffer->buffer;
rsat->used = 0;
rsat->total_size = buffer->size;
rsat->mapped = buffer->mapped;
}
XRT_CHECK_RESULT VkResult
render_sub_alloc_ubo_alloc_and_get_ptr(struct vk_bundle *vk,
struct render_sub_alloc_tracker *rsat,
VkDeviceSize size,
void **out_ptr,
struct render_sub_alloc *out_rsa)
{
assert(rsat->total_size >= rsat->used);
VkDeviceSize space_left = rsat->total_size - rsat->used;
if (space_left < size) {
VK_ERROR(vk, "Can not fit %u in left %u of total %u", (uint32_t)size, (uint32_t)space_left,
(uint32_t)rsat->total_size);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
// Align the new used value so the next sub-allocation stays aligned from the start of the memory.
VkDeviceSize padded_used = align_padding_pot(rsat->used + size, RENDER_ALWAYS_SAFE_UBO_ALIGNMENT);
// Save the current used as offset.
VkDeviceSize offset = rsat->used;
// Ensure used never gets larger than total_size.
if (padded_used > rsat->total_size) {
rsat->used = rsat->total_size;
} else {
rsat->used = padded_used;
}
void *ptr = rsat->mapped == NULL ? NULL : (void *)((uint8_t *)rsat->mapped + offset);
/*
* All done.
*/
*out_ptr = ptr;
*out_rsa = (struct render_sub_alloc){
.buffer = rsat->buffer,
.size = size,
.offset = offset,
};
return VK_SUCCESS;
}
XRT_CHECK_RESULT VkResult
render_sub_alloc_ubo_alloc_and_write(struct vk_bundle *vk,
struct render_sub_alloc_tracker *rsat,
const void *src,
VkDeviceSize size,
struct render_sub_alloc *out_rsa)
{
VkResult ret;
if (rsat->mapped == NULL) {
VK_ERROR(vk, "Sub allocation not mapped");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
void *dst;
ret = render_sub_alloc_ubo_alloc_and_get_ptr(vk, rsat, size, &dst, out_rsa);
VK_CHK_AND_RET(ret, "render_sub_alloc_ubo_alloc_and_get_ptr");
memcpy(dst, src, size);
return VK_SUCCESS;
}
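For callers that prefer to fill the UBO contents in place instead of copying a finished struct, a sketch using the get-ptr variant (hypothetical caller code; `rsat`, `vk` and `struct my_ubo_data` as in the earlier sketch):

	struct render_sub_alloc rsa;
	void *ptr = NULL;

	VkResult ret = render_sub_alloc_ubo_alloc_and_get_ptr( //
	    vk, &rsat, sizeof(struct my_ubo_data), &ptr, &rsa);
	if (ret != VK_SUCCESS) {
		return ret;
	}

	if (ptr != NULL) {
		// The buffer is host mapped, write the UBO contents directly in place.
		struct my_ubo_data *data = ptr;
		data->some_field = 42; // hypothetical field, for illustration only
	}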