diff --git a/src/xrt/compositor/render/render_gfx.c b/src/xrt/compositor/render/render_gfx.c
index cba006762..eebeea531 100644
--- a/src/xrt/compositor/render/render_gfx.c
+++ b/src/xrt/compositor/render/render_gfx.c
@@ -534,6 +534,9 @@ render_gfx_init(struct render_gfx *rr, struct render_resources *r)
 	                               &rr->views[1].mesh.descriptor_set); // descriptor_set
 	VK_CHK_WITH_RET(ret, "vk_create_descriptor_set", false);
 
+	// Used to sub-allocate UBOs from, restarted from scratch each frame.
+	render_sub_alloc_tracker_init(&rr->ubo_tracker, &r->gfx.shared_ubo);
+
 	return true;
 }
 
diff --git a/src/xrt/compositor/render/render_interface.h b/src/xrt/compositor/render/render_interface.h
index c3e42057c..dbc96d2d2 100644
--- a/src/xrt/compositor/render/render_interface.h
+++ b/src/xrt/compositor/render/render_interface.h
@@ -386,6 +386,18 @@ struct render_resources
 		VkSampler clamp_to_border_black;
 	} samplers;
 
+	struct
+	{
+		/*!
+		 * Shared UBO buffer that we sub-allocate out of, so that
+		 * there are fewer buffers for the kernel to validate at
+		 * command submission time.
+		 *
+		 * https://registry.khronos.org/vulkan/site/guide/latest/memory_allocation.html
+		 */
+		struct render_buffer shared_ubo;
+	} gfx;
+
 	struct
 	{
 		//! The binding index for the source texture.
@@ -782,6 +794,9 @@ struct render_gfx
 	//! Resources that we are based on.
 	struct render_resources *r;
 
+	//! Shared buffer that we sub-allocate UBOs from.
+	struct render_sub_alloc_tracker ubo_tracker;
+
 	//! The current target we are rendering too, can change during command building.
 	struct render_gfx_target_resources *rtr;
 
diff --git a/src/xrt/compositor/render/render_resources.c b/src/xrt/compositor/render/render_resources.c
index 5c61ef9aa..8c2120917 100644
--- a/src/xrt/compositor/render/render_resources.c
+++ b/src/xrt/compositor/render/render_resources.c
@@ -656,6 +656,45 @@ render_resources_init(struct render_resources *r,
 	VK_CHK_WITH_RET(ret, "vkAllocateCommandBuffers", false);
 
 
+	/*
+	 * Gfx.
+	 */
+
+	{
+		VkBufferUsageFlags usage_flags = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+		VkMemoryPropertyFlags memory_property_flags = //
+		    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |    //
+		    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;      //
+
+		uint32_t num_buffers = 0;
+
+		// Number of layer shader runs (views), number of layers, two UBOs per layer.
+		num_buffers += RENDER_MAX_LAYER_RUNS * RENDER_MAX_LAYERS * 2;
+
+		// Two mesh distortion runs with one UBO each.
+		num_buffers += 2;
+
+		// We currently use the alignment as the max UBO size.
+		static_assert(sizeof(struct render_gfx_mesh_ubo_data) <= RENDER_ALWAYS_SAFE_UBO_ALIGNMENT, "MAX");
+
+		// Calculate size.
+		VkDeviceSize size = num_buffers * RENDER_ALWAYS_SAFE_UBO_ALIGNMENT;
+
+		ret = render_buffer_init(      //
+		    vk,                        // vk_bundle
+		    &r->gfx.shared_ubo,        // buffer
+		    usage_flags,               // usage_flags
+		    memory_property_flags,     // memory_property_flags
+		    size);                     // size
+		VK_CHK_WITH_RET(ret, "render_buffer_init", false);
+
+		ret = render_buffer_map( //
+		    vk,                  // vk_bundle
+		    &r->gfx.shared_ubo); // buffer
+		VK_CHK_WITH_RET(ret, "render_buffer_map", false);
+	}
+
+
 	/*
 	 * Mesh static.
 	 */
@@ -961,6 +1000,9 @@ render_resources_close(struct render_resources *r)
 	D(ImageView, r->mock.color.image_view);
 	D(Image, r->mock.color.image);
 	DF(Memory, r->mock.color.memory);
+
+	render_buffer_close(vk, &r->gfx.shared_ubo);
+
 	D(DescriptorSetLayout, r->mesh.descriptor_set_layout);
 	D(PipelineLayout, r->mesh.pipeline_layout);
 	D(PipelineCache, r->pipeline_cache);
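
For context on the scheme above: the tracker initialised in `render_gfx_init()` hands out fixed-size, aligned slices of the single persistently mapped `shared_ubo`, and is reset at the start of every frame. Below is a minimal standalone sketch of that bump-allocation pattern, assuming a 256-byte alignment; the `sub_alloc_*` names, the struct layout and `SAFE_UBO_ALIGNMENT` are illustrative stand-ins, not the actual `render_sub_alloc_tracker` API.

```c
/*
 * Minimal sketch of the per-frame UBO sub-allocation pattern, for
 * illustration only: names and constants here are hypothetical and do
 * not match the real render_sub_alloc_tracker API.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SAFE_UBO_ALIGNMENT 256 // assumed stand-in for RENDER_ALWAYS_SAFE_UBO_ALIGNMENT

struct sub_alloc_tracker
{
	uint8_t *mapped;   // CPU pointer to the host-visible, host-coherent buffer.
	size_t total_size; // Total size of the shared UBO buffer.
	size_t used;       // Bump offset, reset to zero at the start of each frame.
};

void
sub_alloc_tracker_init(struct sub_alloc_tracker *t, void *mapped, size_t total_size)
{
	t->mapped = (uint8_t *)mapped;
	t->total_size = total_size;
	t->used = 0;
}

/*
 * Copies @size bytes of @data into the next aligned slot of the shared
 * buffer and returns the offset it was placed at, to be used when
 * binding the descriptor for this draw.
 */
size_t
sub_alloc_ubo(struct sub_alloc_tracker *t, const void *data, size_t size)
{
	// Mirrors the static_assert in the diff: one aligned slot per UBO.
	assert(size <= SAFE_UBO_ALIGNMENT);
	assert(t->used + SAFE_UBO_ALIGNMENT <= t->total_size);

	size_t offset = t->used;
	memcpy(t->mapped + offset, data, size);

	// Advance by the full alignment so the next offset stays UBO-aligned.
	t->used += SAFE_UBO_ALIGNMENT;

	return offset;
}
```

With offsets handed out this way, all per-draw UBO data for a frame lives in one VkBuffer and each draw only varies the offset it binds at (for example via a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC descriptor), which is what keeps the number of distinct buffers the kernel has to validate per submission small, as the comment in render_interface.h explains.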