c/render: Refactor out distortion rendering

This commit is contained in:
Jakob Bornecrantz 2020-10-05 23:30:31 +01:00
parent c5741af98a
commit 54739aff8d
11 changed files with 1882 additions and 1254 deletions

View file

@ -13,8 +13,6 @@ set(CLIENT_SOURCE_FILES)
set(MAIN_SOURCE_FILES
main/comp_compositor.c
main/comp_compositor.h
main/comp_distortion.c
main/comp_distortion.h
main/comp_documentation.h
main/comp_renderer.c
main/comp_renderer.h
@ -29,6 +27,10 @@ set(MAIN_SOURCE_FILES
main/comp_layer.c
main/comp_layer_renderer.h
main/comp_layer_renderer.c
render/comp_buffer.c
render/comp_render.h
render/comp_rendering.c
render/comp_resources.c
)
###

View file

@ -81,6 +81,8 @@ compositor_destroy(struct xrt_compositor *xc)
c->r = NULL;
}
comp_resources_close(c, &c->nr);
// As long as vk_bundle is valid it's safe to call this function.
comp_shaders_close(&c->vk, &c->shaders);
@ -1146,6 +1148,10 @@ compositor_init_shaders(struct comp_compositor *c)
static bool
compositor_init_renderer(struct comp_compositor *c)
{
if (!comp_resources_init(c, &c->nr)) {
return false;
}
c->r = comp_renderer_create(c);
return c->r != NULL;
}

View file

@ -21,6 +21,7 @@
#include "main/comp_settings.h"
#include "main/comp_window.h"
#include "main/comp_renderer.h"
#include "render/comp_render.h"
#ifdef __cplusplus
@ -237,6 +238,9 @@ struct comp_compositor
//! Thread object for safely destroying swapchain.
struct u_threading_stack destroy_swapchains;
} threading;
struct comp_resources nr;
};

View file

@ -1,726 +0,0 @@
// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Distortion shader code.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_main
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "main/comp_settings.h"
#include "main/comp_compositor.h"
#include "comp_distortion.h"
/*
*
* Pre declare functions.
*
*/
static void
comp_distortion_update_uniform_buffer_warp(struct comp_distortion *d,
struct comp_compositor *c);
static void
comp_distortion_init_buffers(struct comp_distortion *d,
struct comp_compositor *c);
XRT_MAYBE_UNUSED static void
comp_distortion_update_descriptor_sets(struct comp_distortion *d,
VkSampler samplers[2],
VkImageView views[2],
bool flip_y);
static void
comp_distortion_init_descriptor_set_layout(struct comp_distortion *d);
static void
comp_distortion_init_pipeline_layout(struct comp_distortion *d);
static void
comp_distortion_init_pipeline(struct comp_distortion *d,
struct comp_shaders *s,
VkRenderPass render_pass,
VkPipelineCache pipeline_cache);
static VkWriteDescriptorSet
comp_distortion_get_uniform_write_descriptor_set(
struct comp_distortion *d,
uint32_t binding,
VkDescriptorSet descriptor_set,
struct comp_uniform_buffer *ubo_handle);
static VkWriteDescriptorSet
comp_distortion_get_image_write_descriptor_set(
VkDescriptorSet descriptor_set,
VkDescriptorImageInfo *descriptor_position,
uint32_t binding);
static void
comp_distortion_init_descriptor_sets(struct comp_distortion *d,
VkDescriptorPool descriptor_pool);
/*
*
* Buffer functions.
*
*/
static void
_buffer_destroy(struct vk_bundle *vk, struct comp_uniform_buffer *buffer)
{
if (buffer->buffer != VK_NULL_HANDLE) {
vk->vkDestroyBuffer(buffer->device, buffer->buffer, NULL);
}
if (buffer->memory != VK_NULL_HANDLE) {
vk->vkFreeMemory(buffer->device, buffer->memory, NULL);
}
}
/*
 * Map the buffer's backing memory into host address space; the host pointer
 * is stored in buffer->mapped. Returns the VkResult from vkMapMemory.
 */
static VkResult
_buffer_map(struct vk_bundle *vk,
            struct comp_uniform_buffer *buffer,
            VkDeviceSize size,
            VkDeviceSize offset)
{
	VkResult ret = vk->vkMapMemory(vk->device, buffer->memory, offset,
	                               size, 0, &buffer->mapped);

	return ret;
}
/*
 * Unmap the buffer's memory if it is currently mapped; clears buffer->mapped
 * so a second call is a harmless no-op.
 */
static void
_buffer_unmap(struct vk_bundle *vk, struct comp_uniform_buffer *buffer)
{
	if (buffer->mapped == NULL) {
		return;
	}

	vk->vkUnmapMemory(vk->device, buffer->memory);
	buffer->mapped = NULL;
}
/*
 * Fill in the buffer's default VkDescriptorBufferInfo covering the range
 * [offset, offset + size). The vk parameter is unused but kept for symmetry
 * with the other _buffer_* helpers.
 */
static void
_buffer_setup_descriptor(struct vk_bundle *vk,
                         struct comp_uniform_buffer *buffer,
                         VkDeviceSize size,
                         VkDeviceSize offset)
{
	buffer->descriptor = (VkDescriptorBufferInfo){
	    .buffer = buffer->buffer,
	    .offset = offset,
	    .range = size,
	};
}
/*
*
* Functions.
*
*/
/*!
 * Initialize the distortion renderer from the mesh data in @p parts.
 *
 * Copies the distortion mesh description into @p d, creates the uniform,
 * vertex and index buffers, the descriptor set layout, pipeline layout and
 * graphics pipeline, and finally allocates the per-eye descriptor sets.
 * The order of the init calls below matters: buffers must exist before the
 * warp UBO update, and the layouts before the pipeline.
 */
void
comp_distortion_init(struct comp_distortion *d,
                     struct comp_compositor *c,
                     VkRenderPass render_pass,
                     VkPipelineCache pipeline_cache,
                     struct xrt_hmd_parts *parts,
                     VkDescriptorPool descriptor_pool)
{
	d->vk = &c->vk;

	//! @todo Add support for 1 channel as well; only 3 UV channels are
	//! accepted here.
	assert(parts->distortion.mesh.vertices == NULL ||
	       parts->distortion.mesh.num_uv_channels == 3);
	assert(parts->distortion.mesh.indices == NULL ||
	       parts->distortion.mesh.total_num_indices != 0);
	assert(parts->distortion.mesh.indices == NULL ||
	       parts->distortion.mesh.num_indices[0] != 0);
	assert(parts->distortion.mesh.indices == NULL ||
	       parts->distortion.mesh.num_indices[1] != 0);

	// Copy the mesh description; the pointers still belong to parts.
	d->mesh.vertices = parts->distortion.mesh.vertices;
	d->mesh.stride = parts->distortion.mesh.stride;
	d->mesh.num_vertices = parts->distortion.mesh.num_vertices;
	d->mesh.indices = parts->distortion.mesh.indices;
	d->mesh.total_num_indices = parts->distortion.mesh.total_num_indices;
	d->mesh.num_indices[0] = parts->distortion.mesh.num_indices[0];
	d->mesh.num_indices[1] = parts->distortion.mesh.num_indices[1];
	d->mesh.offset_indices[0] = parts->distortion.mesh.offset_indices[0];
	d->mesh.offset_indices[1] = parts->distortion.mesh.offset_indices[1];

	d->ubo_vp_data[0].flip_y = false;
	d->ubo_vp_data[1].flip_y = false;
	// Draw wireframe instead of filled polygons when debugging.
	d->quirk_draw_lines = c->settings.debug.wireframe;

	// binding indices used in mesh.vert & mesh.frag shaders.
	d->render_texture_target_binding = 0;
	d->ubo_viewport_binding = 1;

	comp_distortion_init_buffers(d, c);
	comp_distortion_update_uniform_buffer_warp(d, c);
	comp_distortion_init_descriptor_set_layout(d);
	comp_distortion_init_pipeline_layout(d);
	comp_distortion_init_pipeline(d, &c->shaders, render_pass,
	                              pipeline_cache);
	comp_distortion_init_descriptor_sets(d, descriptor_pool);
}
/*!
 * Destroy all Vulkan objects owned by @p d, then free @p d itself.
 */
void
comp_distortion_destroy(struct comp_distortion *d)
{
	struct vk_bundle *vk = d->vk;

	/*
	 * This makes sure that any pending command buffer has completed and all
	 * resources referred by it can now be manipulated. This make sure that
	 * validation doesn't complain. This is done during destroy so isn't
	 * time critical.
	 */
	vk->vkDeviceWaitIdle(vk->device);

	vk->vkDestroyDescriptorSetLayout(vk->device, d->descriptor_set_layout,
	                                 NULL);

	_buffer_destroy(vk, &d->vbo_handle);
	_buffer_destroy(vk, &d->index_handle);
	_buffer_destroy(vk, &d->ubo_viewport_handles[0]);
	_buffer_destroy(vk, &d->ubo_viewport_handles[1]);

	vk->vkDestroyPipeline(vk->device, d->pipeline, NULL);
	vk->vkDestroyPipelineLayout(vk->device, d->pipeline_layout, NULL);

	// Note: this also frees the struct itself.
	free(d);
}
/*!
 * Create the distortion graphics pipeline.
 *
 * The pipeline uses the mesh vertex/fragment shaders from @p s, dynamic
 * viewport/scissor state, and is configured from the mesh copied at init
 * time: indexed meshes render as triangle strips, non-indexed as triangle
 * lists, and the wireframe quirk switches the polygon mode to lines.
 */
static void
comp_distortion_init_pipeline(struct comp_distortion *d,
                              struct comp_shaders *s,
                              VkRenderPass render_pass,
                              VkPipelineCache pipeline_cache)
{
	struct vk_bundle *vk = d->vk;
	VkResult ret;

	// Filled polygons normally, lines when the wireframe debug quirk is on.
	VkPolygonMode polygonMode = VK_POLYGON_MODE_FILL;
	if (d->quirk_draw_lines) {
		polygonMode = VK_POLYGON_MODE_LINE;
	}

	// Indexed meshes are drawn as strips, non-indexed as plain triangles.
	VkPrimitiveTopology topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
	if (d->mesh.total_num_indices > 0) {
		topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
	}

	VkPipelineInputAssemblyStateCreateInfo input_assembly_state = {
	    .sType =
	        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
	    .topology = topology,
	    .primitiveRestartEnable = VK_FALSE,
	};

	VkPipelineRasterizationStateCreateInfo rasterization_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
	    .depthClampEnable = VK_FALSE,
	    .rasterizerDiscardEnable = VK_FALSE,
	    .polygonMode = polygonMode,
	    .cullMode = VK_CULL_MODE_BACK_BIT,
	    .frontFace = VK_FRONT_FACE_CLOCKWISE,
	    .lineWidth = 1.0f,
	};

	// No blending; write all color channels.
	VkPipelineColorBlendAttachmentState blend_attachment_state = {
	    .blendEnable = VK_FALSE,
	    .colorWriteMask = 0xf,
	};

	VkPipelineColorBlendStateCreateInfo color_blend_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
	    .attachmentCount = 1,
	    .pAttachments = &blend_attachment_state,
	};

	VkPipelineDepthStencilStateCreateInfo depth_stencil_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
	    .depthTestEnable = VK_TRUE,
	    .depthWriteEnable = VK_TRUE,
	    .depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL,
	    .front =
	        {
	            .compareOp = VK_COMPARE_OP_ALWAYS,
	        },
	    .back =
	        {
	            .compareOp = VK_COMPARE_OP_ALWAYS,
	        },
	};

	// Counts only; actual viewport/scissor are dynamic state (set below).
	VkPipelineViewportStateCreateInfo viewport_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
	    .viewportCount = 1,
	    .scissorCount = 1,
	};

	VkPipelineMultisampleStateCreateInfo multisample_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
	    .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT};

	VkDynamicState dynamic_states[] = {
	    VK_DYNAMIC_STATE_VIEWPORT,
	    VK_DYNAMIC_STATE_SCISSOR,
	};

	VkPipelineDynamicStateCreateInfo dynamic_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
	    .dynamicStateCount = 2,
	    .pDynamicStates = dynamic_states,
	};

	VkVertexInputBindingDescription vertex_input_binding_description;
	VkVertexInputAttributeDescription
	    vertex_input_attribute_descriptions[2];

	/*
	 * By default, we will generate positions and UVs for the full screen
	 * quad from the gl_VertexIndex
	 */
	VkPipelineVertexInputStateCreateInfo vertex_input_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
	};

	// clang-format off
	// Attribute 0: position (vec4 at offset 0), attribute 1: UVs
	// (vec4 at offset 16), both from the same vertex binding.
	vertex_input_attribute_descriptions[0].binding = d->render_texture_target_binding;
	vertex_input_attribute_descriptions[0].location = 0;
	vertex_input_attribute_descriptions[0].format = VK_FORMAT_R32G32B32A32_SFLOAT;
	vertex_input_attribute_descriptions[0].offset = 0;

	vertex_input_attribute_descriptions[1].binding = d->render_texture_target_binding;
	vertex_input_attribute_descriptions[1].location = 1;
	vertex_input_attribute_descriptions[1].format = VK_FORMAT_R32G32B32A32_SFLOAT;
	vertex_input_attribute_descriptions[1].offset = 16;

	vertex_input_binding_description.binding = d->render_texture_target_binding;
	vertex_input_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
	vertex_input_binding_description.stride = d->mesh.stride;

	vertex_input_state.vertexAttributeDescriptionCount = 2;
	vertex_input_state.pVertexAttributeDescriptions = vertex_input_attribute_descriptions;
	vertex_input_state.vertexBindingDescriptionCount = 1;
	vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description;
	// clang-format on

	VkPipelineShaderStageCreateInfo shader_stages[2] = {
	    {
	        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
	        .stage = VK_SHADER_STAGE_VERTEX_BIT,
	        .module = s->mesh_vert,
	        .pName = "main",
	    },
	    {
	        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
	        .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
	        .module = s->mesh_frag,
	        .pName = "main",
	    },
	};

	VkGraphicsPipelineCreateInfo pipeline_info = {
	    .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
	    .stageCount = ARRAY_SIZE(shader_stages),
	    .pStages = shader_stages,
	    .pVertexInputState = &vertex_input_state,
	    .pInputAssemblyState = &input_assembly_state,
	    .pViewportState = &viewport_state,
	    .pRasterizationState = &rasterization_state,
	    .pMultisampleState = &multisample_state,
	    .pDepthStencilState = &depth_stencil_state,
	    .pColorBlendState = &color_blend_state,
	    .pDynamicState = &dynamic_state,
	    .layout = d->pipeline_layout,
	    .renderPass = render_pass,
	    .basePipelineHandle = VK_NULL_HANDLE,
	    .basePipelineIndex = -1,
	};

	ret = vk->vkCreateGraphicsPipelines(vk->device, pipeline_cache, 1,
	                                    &pipeline_info, NULL, &d->pipeline);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(d->vk, "vkCreateGraphicsPipelines failed %u!", ret);
	}
}
/*
 * Build a VkWriteDescriptorSet that points the given binding of a descriptor
 * set at the uniform buffer's default descriptor. The returned struct holds
 * a pointer into @p ubo_handle, so it must be consumed while that stays live.
 */
static VkWriteDescriptorSet
comp_distortion_get_uniform_write_descriptor_set(
    struct comp_distortion *d,
    uint32_t binding,
    VkDescriptorSet descriptor_set,
    struct comp_uniform_buffer *ubo_handle)
{
	VkWriteDescriptorSet write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .dstSet = descriptor_set,
	    .dstBinding = binding,
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	    .pBufferInfo = &ubo_handle->descriptor,
	};

	return write;
}
/*
 * Build a VkWriteDescriptorSet for a combined image sampler binding. The
 * returned struct keeps the @p descriptor_position pointer, so the caller's
 * VkDescriptorImageInfo must outlive its use.
 */
static VkWriteDescriptorSet
comp_distortion_get_image_write_descriptor_set(
    VkDescriptorSet descriptor_set,
    VkDescriptorImageInfo *descriptor_position,
    uint32_t binding)
{
	VkWriteDescriptorSet write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .dstSet = descriptor_set,
	    .dstBinding = binding,
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	    .pImageInfo = descriptor_position,
	};

	return write;
}
/*
 * Allocate one descriptor set per eye from @p descriptor_pool, using the
 * layout created in comp_distortion_init_descriptor_set_layout(). Failures
 * are logged but not fatal here.
 */
static void
comp_distortion_init_descriptor_sets(struct comp_distortion *d,
                                     VkDescriptorPool descriptor_pool)
{
	struct vk_bundle *vk = d->vk;
	VkResult ret;

	VkDescriptorSetAllocateInfo alloc_info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
	    .descriptorPool = descriptor_pool,
	    .descriptorSetCount = 1,
	    .pSetLayouts = &d->descriptor_set_layout,
	};

	for (uint32_t i = 0; i < 2; i++) {
		// Use the vk local consistently instead of mixing in d->vk.
		ret = vk->vkAllocateDescriptorSets(vk->device, &alloc_info,
		                                   &d->descriptor_sets[i]);
		if (ret != VK_SUCCESS) {
			VK_DEBUG(vk, "vkAllocateDescriptorSets failed %u",
			         ret);
		}
	}
}
/*!
 * Update one eye's descriptor set to sample from a new image, and update
 * that eye's flip flag.
 *
 * Writes both bindings (render texture target and viewport UBO) via
 * vkUpdateDescriptorSets, then pushes the new flip_y value through the
 * persistently mapped viewport UBO.
 */
void
comp_distortion_update_descriptor_set(struct comp_distortion *d,
                                      VkSampler sampler,
                                      VkImageView view,
                                      uint32_t eye,
                                      bool flip_y)
{
	struct vk_bundle *vk = d->vk;

	VkDescriptorImageInfo image_info = {
	    .sampler = sampler,
	    .imageView = view,
	    .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
	};

	// image_info is a local; safe because the writes are consumed by
	// vkUpdateDescriptorSets before this function returns.
	VkWriteDescriptorSet write_descriptor_sets[] = {
	    // Binding 0 : Render texture target
	    comp_distortion_get_image_write_descriptor_set(
	        d->descriptor_sets[eye], &image_info,
	        d->render_texture_target_binding),
	    // Binding 1 : Viewport UBO for this eye.
	    comp_distortion_get_uniform_write_descriptor_set(
	        d, d->ubo_viewport_binding, d->descriptor_sets[eye],
	        &d->ubo_viewport_handles[eye]),
	};

	vk->vkUpdateDescriptorSets(vk->device,
	                           ARRAY_SIZE(write_descriptor_sets),
	                           write_descriptor_sets, 0, NULL);

	// The viewport UBO stays mapped, so just memcpy the new data through.
	d->ubo_vp_data[eye].flip_y = flip_y;
	memcpy(d->ubo_viewport_handles[eye].mapped, &d->ubo_vp_data[eye],
	       sizeof(d->ubo_vp_data[eye]));
}
/*
 * Convenience wrapper: update both eyes' descriptor sets with the given
 * samplers and views, sharing one flip_y flag.
 */
static void
comp_distortion_update_descriptor_sets(struct comp_distortion *d,
                                       VkSampler samplers[2],
                                       VkImageView views[2],
                                       bool flip_y)
{
	uint32_t eye = 0;
	while (eye < 2) {
		comp_distortion_update_descriptor_set(d, samplers[eye],
		                                      views[eye], eye, flip_y);
		eye++;
	}
}
static void
comp_distortion_init_descriptor_set_layout(struct comp_distortion *d)
{
struct vk_bundle *vk = d->vk;
VkResult ret;
VkDescriptorSetLayoutBinding set_layout_bindings[2] = {
{
.binding = d->render_texture_target_binding,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
},
{
.binding = d->ubo_viewport_binding,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = 1,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
},
};
VkDescriptorSetLayoutCreateInfo set_layout_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = ARRAY_SIZE(set_layout_bindings),
.pBindings = set_layout_bindings,
};
ret = vk->vkCreateDescriptorSetLayout(d->vk->device, &set_layout_info,
NULL, &d->descriptor_set_layout);
if (ret != VK_SUCCESS) {
VK_DEBUG(d->vk, "vkCreateDescriptorSetLayout failed %u", ret);
}
}
/*
 * Create the pipeline layout from the single descriptor set layout; no push
 * constants are used.
 */
static void
comp_distortion_init_pipeline_layout(struct comp_distortion *d)
{
	struct vk_bundle *vk = d->vk;
	VkResult ret;

	VkPipelineLayoutCreateInfo pipeline_layout_info = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
	    .setLayoutCount = 1,
	    .pSetLayouts = &d->descriptor_set_layout,
	};

	// Use the vk local consistently instead of mixing in d->vk.
	ret = vk->vkCreatePipelineLayout(vk->device, &pipeline_layout_info,
	                                 NULL, &d->pipeline_layout);
	if (ret != VK_SUCCESS) {
		// Include the result code, like every other error path here.
		VK_DEBUG(vk, "vkCreatePipelineLayout failed %u", ret);
	}
}
/*!
 * Record the distortion draw for one eye into @p command_buffer.
 *
 * Binds the per-eye descriptor set, the distortion pipeline and the vertex
 * buffer, then issues an indexed draw over this eye's index range when the
 * mesh has indices, or a plain draw of all vertices otherwise.
 */
void
comp_distortion_draw_mesh(struct comp_distortion *d,
                          VkCommandBuffer command_buffer,
                          int eye)
{
	struct vk_bundle *vk = d->vk;

	vk->vkCmdBindDescriptorSets(
	    command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, d->pipeline_layout,
	    0, 1, &d->descriptor_sets[eye], 0, NULL);
	vk->vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
	                      d->pipeline);

	VkDeviceSize offsets[] = {0};
	vk->vkCmdBindVertexBuffers(command_buffer, 0, 1,
	                           &(d->vbo_handle.buffer), offsets);

	if (d->mesh.total_num_indices > 0) {
		vk->vkCmdBindIndexBuffer(command_buffer, d->index_handle.buffer,
		                         0, VK_INDEX_TYPE_UINT32);

		// Both eyes share one index buffer; each eye draws its own
		// [offset, offset + count) slice.
		vk->vkCmdDrawIndexed(command_buffer, d->mesh.num_indices[eye],
		                     1, d->mesh.offset_indices[eye], 0, 0);
	} else {
		vk->vkCmdDraw(command_buffer, d->mesh.num_vertices, 1, 0, 0);
	}
}
// Update fragment shader hmd warp uniform block
static void
comp_distortion_update_uniform_buffer_warp(struct comp_distortion *d,
                                           struct comp_compositor *c)
{
	/*
	 * Common vertex shader stuff.
	 */

	// clang-format off
	// Copy each eye's rotation from the HMD views into the per-eye UBO
	// data, then write through the persistently mapped viewport UBOs.
	d->ubo_vp_data[0].rot = c->xdev->hmd->views[0].rot;
	d->ubo_vp_data[1].rot = c->xdev->hmd->views[1].rot;

	memcpy(d->ubo_viewport_handles[0].mapped, &d->ubo_vp_data[0], sizeof(d->ubo_vp_data[0]));
	memcpy(d->ubo_viewport_handles[1].mapped, &d->ubo_vp_data[1], sizeof(d->ubo_vp_data[1]));
	// clang-format on
}
/*!
 * Create a VkBuffer with backing device memory, optionally uploading @p data.
 *
 * On success fills in the buffer handle, memory, bookkeeping fields and a
 * default descriptor covering the whole buffer. On failure any partially
 * created Vulkan objects are destroyed and the handles are reset to
 * VK_NULL_HANDLE, so a later _buffer_destroy() will not double-free them.
 */
static VkResult
_create_buffer(struct vk_bundle *vk,
               VkBufferUsageFlags usage_flags,
               VkMemoryPropertyFlags memory_property_flags,
               struct comp_uniform_buffer *buffer,
               VkDeviceSize size,
               void *data)
{
	buffer->device = vk->device;

	VkResult ret;

	// Create the buffer handle.
	VkBufferCreateInfo buffer_info = {
	    .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
	    .size = size,
	    .usage = usage_flags,
	};
	ret =
	    vk->vkCreateBuffer(vk->device, &buffer_info, NULL, &buffer->buffer);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to create buffer!");
		return ret;
	}

	// Create the memory backing up the buffer handle.
	VkMemoryRequirements mem_reqs;
	vk->vkGetBufferMemoryRequirements(vk->device, buffer->buffer,
	                                  &mem_reqs);

	// Find a memory type index that fits the properties of the buffer.
	uint32_t memory_type_index = 0;
	vk_get_memory_type(vk, mem_reqs.memoryTypeBits, memory_property_flags,
	                   &memory_type_index);

	VkMemoryAllocateInfo mem_alloc = {
	    .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
	    .allocationSize = mem_reqs.size,
	    .memoryTypeIndex = memory_type_index,
	};
	ret =
	    vk->vkAllocateMemory(vk->device, &mem_alloc, NULL, &buffer->memory);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to allocate memory!");
		goto err_buffer;
	}

	buffer->alignment = mem_reqs.alignment;
	buffer->size = mem_alloc.allocationSize;
	buffer->usageFlags = usage_flags;
	buffer->memoryPropertyFlags = memory_property_flags;

	// If a pointer to the buffer data has been passed, map the
	// buffer and copy over the data.
	if (data != NULL) {
		ret = _buffer_map(vk, buffer, VK_WHOLE_SIZE, 0);
		if (ret != VK_SUCCESS) {
			VK_DEBUG(vk, "Failed to map buffer!");
			goto err_memory;
		}
		memcpy(buffer->mapped, data, size);
		_buffer_unmap(vk, buffer);
	}

	// Initialize a default descriptor that covers the whole buffer size.
	_buffer_setup_descriptor(vk, buffer, VK_WHOLE_SIZE, 0);

	// Attach the memory to the buffer object.
	ret = vk->vkBindBufferMemory(vk->device, buffer->buffer, buffer->memory,
	                             0);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to bind buffer to memory!");
		goto err_memory;
	}

	return VK_SUCCESS;

err_memory:
	vk->vkFreeMemory(vk->device, buffer->memory, NULL);
	// Clear the handle so _buffer_destroy() can't free it again.
	buffer->memory = VK_NULL_HANDLE;
err_buffer:
	vk->vkDestroyBuffer(vk->device, buffer->buffer, NULL);
	// Clear the handle so _buffer_destroy() can't destroy it again.
	buffer->buffer = VK_NULL_HANDLE;

	return ret;
}
/*
 * Create and map the per-eye viewport UBOs, and — when the mesh provides
 * them — the vertex and index buffers (uploaded from the mesh data and left
 * mapped). All buffers are host-visible and host-coherent. Returns early
 * when the mesh has no vertices or no indices.
 */
static void
comp_distortion_init_buffers(struct comp_distortion *d,
                             struct comp_compositor *c)
{
	struct vk_bundle *vk = &c->vk;

	VkMemoryPropertyFlags memory_property_flags = 0;
	VkBufferUsageFlags ubo_usage_flags = 0;
	VkBufferUsageFlags vbo_usage_flags = 0;
	VkBufferUsageFlags index_usage_flags = 0;
	VkResult ret;

	// Using the same flags for all ubos and vbos uniform buffers.
	ubo_usage_flags |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
	vbo_usage_flags |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
	index_usage_flags |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
	memory_property_flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
	memory_property_flags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;

	// Distortion ubo and vbo sizes.
	VkDeviceSize vbo_size = d->mesh.stride * d->mesh.num_vertices;
	VkDeviceSize index_size = sizeof(int) * d->mesh.total_num_indices;

	// vp ubo[0]
	ret = _create_buffer(vk, ubo_usage_flags, memory_property_flags,
	                     &d->ubo_viewport_handles[0],
	                     sizeof(d->ubo_vp_data[0]), NULL);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to create vp ubo buffer[0]!");
	}
	ret = _buffer_map(vk, &d->ubo_viewport_handles[0], VK_WHOLE_SIZE, 0);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to map vp ubo buffer[0]!");
	}

	// vp ubo[1]
	ret = _create_buffer(vk, ubo_usage_flags, memory_property_flags,
	                     &d->ubo_viewport_handles[1],
	                     sizeof(d->ubo_vp_data[1]), NULL);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to create vp ubo buffer[1]!");
	}
	ret = _buffer_map(vk, &d->ubo_viewport_handles[1], VK_WHOLE_SIZE, 0);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to map vp ubo buffer[1]!");
	}

	// Don't create vbo if size is zero.
	if (vbo_size == 0) {
		return;
	}

	ret = _create_buffer(vk, vbo_usage_flags, memory_property_flags,
	                     &d->vbo_handle, vbo_size, d->mesh.vertices);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to create mesh vbo buffer!");
	}
	ret = _buffer_map(vk, &d->vbo_handle, vbo_size, 0);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to map mesh vbo buffer!");
	}

	// Don't create the index buffer if there are no indices.
	if (index_size == 0) {
		return;
	}

	ret = _create_buffer(vk, index_usage_flags, memory_property_flags,
	                     &d->index_handle, index_size, d->mesh.indices);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "Failed to create mesh index buffer!");
	}
	ret = _buffer_map(vk, &d->index_handle, index_size, 0);
	if (ret != VK_SUCCESS) {
		// Was a copy-paste of the vbo message; this is the index map.
		VK_DEBUG(vk, "Failed to map mesh index buffer!");
	}
}

View file

@ -1,136 +0,0 @@
// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Distortion shader code header.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_main
*/
#pragma once
#include "main/comp_settings.h"
#include "main/comp_compositor.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
*
* Structs
*
*/
/*!
 * Helper buffer for a single uniform buffer.
 *
 * Bundles a VkBuffer with its backing memory, a default descriptor covering
 * the whole buffer, and mapping bookkeeping.
 *
 * @ingroup comp_main
 */
struct comp_uniform_buffer
{
	//! Device the buffer was created on, used for destruction.
	VkDevice device;
	//! The buffer handle itself.
	VkBuffer buffer;
	//! Device memory backing the buffer.
	VkDeviceMemory memory;
	//! Default descriptor, set up to cover the whole buffer.
	VkDescriptorBufferInfo descriptor;
	//! Size of the memory allocation.
	VkDeviceSize size;
	//! Alignment requirement reported for the buffer.
	VkDeviceSize alignment;
	//! Host pointer while the memory is mapped, NULL otherwise.
	void *mapped;
	//! Usage flags the buffer was created with.
	VkBufferUsageFlags usageFlags;
	//! Memory property flags the memory was allocated with.
	VkMemoryPropertyFlags memoryPropertyFlags;
};
/*!
 * Helper struct that encapsulate a distortion rendering code.
 *
 * Owns the mesh buffers, per-eye uniform data, pipeline and descriptor
 * objects used to apply mesh-based distortion.
 *
 * @ingroup comp_main
 */
struct comp_distortion
{
	// Holds all of the needed common Vulkan things.
	struct vk_bundle *vk;

	//! Vertex buffer holding the distortion mesh vertices.
	struct comp_uniform_buffer vbo_handle;
	//! Index buffer shared by both eyes.
	struct comp_uniform_buffer index_handle;
	//! Per-eye viewport UBOs, kept persistently mapped.
	struct comp_uniform_buffer ubo_viewport_handles[2];

	//! Shader binding index of the viewport UBO.
	uint32_t ubo_viewport_binding;

	//! Copy of the distortion mesh description from xrt_hmd_parts.
	struct
	{
		float *vertices;
		int *indices;
		size_t stride;
		size_t num_vertices;
		size_t num_indices[2];
		size_t offset_indices[2];
		size_t total_num_indices;
	} mesh;

	//! Per-eye data written through the mapped viewport UBOs.
	struct
	{
		struct xrt_matrix_2x2 rot;
		bool flip_y;
	} ubo_vp_data[2];

	VkPipelineLayout pipeline_layout;
	VkPipeline pipeline;

	VkDescriptorSetLayout descriptor_set_layout;
	//! One descriptor set per eye.
	VkDescriptorSet descriptor_sets[2];

	//! Shader binding index of the render texture target sampler.
	uint32_t render_texture_target_binding;

	//! When set, render the mesh as lines (wireframe debug).
	bool quirk_draw_lines;
};
/*
*
* Functions.
*
*/
/*!
* Init a distortion, pass in the distortion so it can be embedded in a struct.
*
* @ingroup comp_main
*/
void
comp_distortion_init(struct comp_distortion *d,
struct comp_compositor *c,
VkRenderPass render_pass,
VkPipelineCache pipeline_cache,
struct xrt_hmd_parts *parts,
VkDescriptorPool descriptor_pool);
/*!
 * Destroy all fields and free the distortion struct itself.
 *
 * @ingroup comp_main
 */
void
comp_distortion_destroy(struct comp_distortion *d);
/*!
* Update the descriptor set to a new image.
*
* @ingroup comp_main
*/
void
comp_distortion_update_descriptor_set(struct comp_distortion *d,
VkSampler sampler,
VkImageView view,
uint32_t eye,
bool flip_y);
void
comp_distortion_draw_mesh(struct comp_distortion *d,
VkCommandBuffer command_buffer,
int eye);
#ifdef __cplusplus
}
#endif

View file

@ -15,7 +15,6 @@
#include "util/u_misc.h"
#include "util/u_distortion_mesh.h"
#include "main/comp_distortion.h"
#include "main/comp_layer_renderer.h"
#include "math/m_api.h"
@ -42,9 +41,6 @@ struct comp_renderer
uint32_t current_buffer;
VkQueue queue;
VkRenderPass render_pass;
VkDescriptorPool descriptor_pool;
VkPipelineCache pipeline_cache;
struct
{
@ -52,14 +48,12 @@ struct comp_renderer
VkSemaphore render_complete;
} semaphores;
VkCommandBuffer *cmd_buffers;
VkFramebuffer *frame_buffers;
struct comp_rendering *rrs;
VkFence *fences;
uint32_t num_buffers;
struct comp_compositor *c;
struct comp_settings *settings;
struct comp_distortion *distortion;
struct comp_layer_renderer *lr;
};
@ -81,30 +75,13 @@ static void
renderer_submit_queue(struct comp_renderer *r);
static void
renderer_build_command_buffers(struct comp_renderer *r);
renderer_build_renderings(struct comp_renderer *r);
static void
renderer_build_command_buffer(struct comp_renderer *r,
VkCommandBuffer command_buffer,
VkFramebuffer framebuffer);
renderer_allocate_renderings(struct comp_renderer *r);
static void
renderer_init_descriptor_pool(struct comp_renderer *r);
static void
renderer_create_frame_buffer(struct comp_renderer *r,
VkFramebuffer *frame_buffer,
uint32_t num_attachements,
VkImageView *attachments);
static void
renderer_allocate_command_buffers(struct comp_renderer *r);
static void
renderer_destroy_command_buffers(struct comp_renderer *r);
static void
renderer_create_pipeline_cache(struct comp_renderer *r);
renderer_close_renderings(struct comp_renderer *r);
static void
renderer_init_semaphores(struct comp_renderer *r);
@ -112,12 +89,6 @@ renderer_init_semaphores(struct comp_renderer *r);
static void
renderer_resize(struct comp_renderer *r);
static void
renderer_create_frame_buffers(struct comp_renderer *r);
static void
renderer_create_render_pass(struct comp_renderer *r);
static void
renderer_acquire_swapchain_image(struct comp_renderer *r);
@ -166,15 +137,10 @@ renderer_create(struct comp_renderer *r, struct comp_compositor *c)
r->current_buffer = 0;
r->queue = VK_NULL_HANDLE;
r->render_pass = VK_NULL_HANDLE;
r->descriptor_pool = VK_NULL_HANDLE;
r->pipeline_cache = VK_NULL_HANDLE;
r->semaphores.present_complete = VK_NULL_HANDLE;
r->semaphores.render_complete = VK_NULL_HANDLE;
r->distortion = NULL;
r->cmd_buffers = NULL;
r->frame_buffers = NULL;
r->rrs = NULL;
}
static void
@ -202,7 +168,7 @@ renderer_submit_queue(struct comp_renderer *r)
.pWaitSemaphores = &r->semaphores.present_complete,
.pWaitDstStageMask = stage_flags,
.commandBufferCount = 1,
.pCommandBuffers = &r->cmd_buffers[r->current_buffer],
.pCommandBuffers = &r->rrs[r->current_buffer].cmd,
.signalSemaphoreCount = 1,
.pSignalSemaphores = &r->semaphores.render_complete,
};
@ -215,151 +181,106 @@ renderer_submit_queue(struct comp_renderer *r)
}
static void
renderer_build_command_buffers(struct comp_renderer *r)
renderer_build_rendering(struct comp_renderer *r,
struct comp_rendering *rr,
uint32_t index)
{
for (uint32_t i = 0; i < r->num_buffers; ++i)
renderer_build_command_buffer(r, r->cmd_buffers[i],
r->frame_buffers[i]);
}
static void
renderer_set_viewport_scissor(float scale_x,
float scale_y,
VkViewport *v,
VkRect2D *s,
struct xrt_view *view)
{
s->offset.x = (int32_t)(view->viewport.x_pixels * scale_x);
s->offset.y = (int32_t)(view->viewport.y_pixels * scale_y);
s->extent.width = (uint32_t)(view->viewport.w_pixels * scale_x);
s->extent.height = (uint32_t)(view->viewport.h_pixels * scale_y);
v->x = s->offset.x;
v->y = s->offset.y;
v->width = s->extent.width;
v->height = s->extent.height;
}
static void
renderer_build_command_buffer(struct comp_renderer *r,
VkCommandBuffer command_buffer,
VkFramebuffer framebuffer)
{
struct vk_bundle *vk = &r->c->vk;
VkResult ret;
VkClearValue clear_color = {
.color = {.float32 = {0.0f, 0.0f, 0.0f, 0.0f}}};
VkCommandBufferBeginInfo command_buffer_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
};
ret = vk->vkBeginCommandBuffer(command_buffer, &command_buffer_info);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkBeginCommandBuffer: %s",
vk_result_string(ret));
return;
}
VkRenderPassBeginInfo render_pass_begin_info = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.renderPass = r->render_pass,
.framebuffer = framebuffer,
.renderArea =
{
.offset =
{
.x = 0,
.y = 0,
},
.extent =
{
.width = r->c->current.width,
.height = r->c->current.height,
},
},
.clearValueCount = 1,
.pClearValues = &clear_color,
};
vk->vkCmdBeginRenderPass(command_buffer, &render_pass_begin_info,
VK_SUBPASS_CONTENTS_INLINE);
struct comp_compositor *c = r->c;
struct comp_target_data data;
data.format = r->c->window->swapchain.surface_format.format;
data.is_external = true;
data.width = r->c->current.width;
data.height = r->c->current.height;
// clang-format off
float scale_x = (float)r->c->current.width /
(float)r->c->xdev->hmd->screens[0].w_pixels;
float scale_y = (float)r->c->current.height /
(float)r->c->xdev->hmd->screens[0].h_pixels;
float scale_x = (float)r->c->current.width / (float)r->c->xdev->hmd->screens[0].w_pixels;
float scale_y = (float)r->c->current.height / (float)r->c->xdev->hmd->screens[0].h_pixels;
// clang-format on
VkViewport viewport = {
.x = 0,
.y = 0,
.width = 0,
.height = 0,
.minDepth = 0.0f,
.maxDepth = 1.0f,
struct xrt_view *l_v = &r->c->xdev->hmd->views[0];
struct comp_viewport_data l_viewport_data = {
.x = (uint32_t)(l_v->viewport.x_pixels * scale_x),
.y = (uint32_t)(l_v->viewport.y_pixels * scale_y),
.w = (uint32_t)(l_v->viewport.w_pixels * scale_x),
.h = (uint32_t)(l_v->viewport.h_pixels * scale_y),
};
struct comp_mesh_ubo_data l_data = {
.rot = l_v->rot,
.flip_y = false,
};
VkRect2D scissor = {
.offset = {.x = 0, .y = 0},
.extent = {.width = 0, .height = 0},
struct xrt_view *r_v = &r->c->xdev->hmd->views[1];
struct comp_viewport_data r_viewport_data = {
.x = (uint32_t)(r_v->viewport.x_pixels * scale_x),
.y = (uint32_t)(r_v->viewport.y_pixels * scale_y),
.w = (uint32_t)(r_v->viewport.w_pixels * scale_x),
.h = (uint32_t)(r_v->viewport.h_pixels * scale_y),
};
struct comp_mesh_ubo_data r_data = {
.rot = r_v->rot,
.flip_y = false,
};
renderer_set_viewport_scissor(scale_x, scale_y, &viewport, &scissor,
&r->c->xdev->hmd->views[0]);
vk->vkCmdSetViewport(command_buffer, 0, 1, &viewport);
vk->vkCmdSetScissor(command_buffer, 0, 1, &scissor);
/*
* Init
*/
comp_rendering_init(c, &c->nr, rr);
comp_draw_begin_target_single(
rr, //
r->c->window->swapchain.buffers[index].view, //
&data); //
// Mesh distortion
comp_distortion_draw_mesh(r->distortion, command_buffer, 0);
renderer_set_viewport_scissor(scale_x, scale_y, &viewport, &scissor,
&r->c->xdev->hmd->views[1]);
vk->vkCmdSetViewport(command_buffer, 0, 1, &viewport);
vk->vkCmdSetScissor(command_buffer, 0, 1, &scissor);
comp_distortion_draw_mesh(r->distortion, command_buffer, 1);
/*
* Viewport one
*/
vk->vkCmdEndRenderPass(command_buffer);
comp_draw_begin_view(rr, //
0, // target_index
0, // view_index
&l_viewport_data); // viewport_data
ret = vk->vkEndCommandBuffer(command_buffer);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkEndCommandBuffer: %s",
vk_result_string(ret));
return;
}
comp_draw_distortion(rr, //
r->lr->framebuffers[0].sampler, //
r->lr->framebuffers[0].view, //
&l_data); //
comp_draw_end_view(rr);
/*
* Viewport two
*/
comp_draw_begin_view(rr, //
0, // target_index
1, // view_index
&r_viewport_data); // viewport_data
comp_draw_distortion(rr, //
r->lr->framebuffers[1].sampler, //
r->lr->framebuffers[1].view, //
&r_data); //
comp_draw_end_view(rr);
/*
* End
*/
comp_draw_end_target(rr);
}
static void
renderer_init_descriptor_pool(struct comp_renderer *r)
renderer_build_renderings(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
VkResult ret;
VkDescriptorPoolSize pool_sizes[2] = {
{
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.descriptorCount = 4,
},
{
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.descriptorCount = 2,
},
};
VkDescriptorPoolCreateInfo descriptor_pool_info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
.maxSets = 2,
.poolSizeCount = ARRAY_SIZE(pool_sizes),
.pPoolSizes = pool_sizes,
};
ret = vk->vkCreateDescriptorPool(vk->device, &descriptor_pool_info,
NULL, &r->descriptor_pool);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkCreateDescriptorPool: %s",
vk_result_string(ret));
for (uint32_t i = 0; i < r->num_buffers; ++i) {
renderer_build_rendering(r, &r->rrs[i], i);
}
}
@ -431,28 +352,11 @@ renderer_init(struct comp_renderer *r)
vk->vkGetDeviceQueue(vk->device, r->c->vk.queue_family_index, 0,
&r->queue);
renderer_init_semaphores(r);
renderer_create_pipeline_cache(r);
renderer_create_render_pass(r);
assert(r->c->window->swapchain.image_count > 0);
r->num_buffers = r->c->window->swapchain.image_count;
renderer_create_fences(r);
renderer_create_frame_buffers(r);
renderer_allocate_command_buffers(r);
renderer_init_descriptor_pool(r);
r->distortion = U_TYPED_CALLOC(struct comp_distortion);
bool has_meshuv = (r->c->xdev->hmd->distortion.models &
XRT_DISTORTION_MODEL_MESHUV) != 0;
assert(has_meshuv);
comp_distortion_init(r->distortion, r->c, r->render_pass,
r->pipeline_cache, r->c->xdev->hmd,
r->descriptor_pool);
VkExtent2D extent = {
.width = r->c->xdev->hmd->screens[0].w_pixels,
@ -462,13 +366,8 @@ renderer_init(struct comp_renderer *r)
r->lr = comp_layer_renderer_create(vk, &r->c->shaders, extent,
VK_FORMAT_B8G8R8A8_SRGB);
for (uint32_t i = 0; i < 2; i++) {
comp_distortion_update_descriptor_set(
r->distortion, r->lr->framebuffers[i].sampler,
r->lr->framebuffers[i].view, i, false);
}
renderer_build_command_buffers(r);
renderer_allocate_renderings(r);
renderer_build_renderings(r);
}
VkImageView
@ -624,38 +523,8 @@ comp_renderer_draw(struct comp_renderer *r)
}
static void
renderer_create_frame_buffer(struct comp_renderer *r,
VkFramebuffer *frame_buffer,
uint32_t num_attachements,
VkImageView *attachments)
renderer_allocate_renderings(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
VkResult ret;
VkFramebufferCreateInfo frame_buffer_info = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.renderPass = r->render_pass,
.attachmentCount = num_attachements,
.pAttachments = attachments,
.width = r->c->current.width,
.height = r->c->current.height,
.layers = 1,
};
ret = vk->vkCreateFramebuffer(vk->device, &frame_buffer_info, NULL,
frame_buffer);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkCreateFramebuffer: %s",
vk_result_string(ret));
}
}
static void
renderer_allocate_command_buffers(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
VkResult ret;
if (r->num_buffers == 0) {
COMP_ERROR(r->c, "Requested 0 command buffers.");
return;
@ -663,50 +532,22 @@ renderer_allocate_command_buffers(struct comp_renderer *r)
COMP_DEBUG(r->c, "Allocating %d Command Buffers.", r->num_buffers);
if (r->cmd_buffers != NULL)
free(r->cmd_buffers);
r->cmd_buffers = U_TYPED_ARRAY_CALLOC(VkCommandBuffer, r->num_buffers);
VkCommandBufferAllocateInfo cmd_buffer_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
.commandPool = vk->cmd_pool,
.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
.commandBufferCount = r->num_buffers,
};
ret = vk->vkAllocateCommandBuffers(vk->device, &cmd_buffer_info,
r->cmd_buffers);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkCreateFramebuffer: %s",
vk_result_string(ret));
if (r->rrs != NULL) {
free(r->rrs);
}
r->rrs = U_TYPED_ARRAY_CALLOC(struct comp_rendering, r->num_buffers);
}
static void
renderer_destroy_command_buffers(struct comp_renderer *r)
renderer_close_renderings(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
vk->vkFreeCommandBuffers(vk->device, vk->cmd_pool, r->num_buffers,
r->cmd_buffers);
}
static void
renderer_create_pipeline_cache(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
VkResult ret;
VkPipelineCacheCreateInfo pipeline_cache_info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
};
ret = vk->vkCreatePipelineCache(vk->device, &pipeline_cache_info, NULL,
&r->pipeline_cache);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkCreatePipelineCache: %s",
vk_result_string(ret));
for (uint32_t i = 0; i < r->num_buffers; i++) {
comp_rendering_close(&r->rrs[i]);
}
free(r->rrs);
r->rrs = NULL;
}
static void
@ -752,99 +593,12 @@ renderer_resize(struct comp_renderer *r)
r->settings->color_space,
r->settings->present_mode);
for (uint32_t i = 0; i < r->num_buffers; i++)
vk->vkDestroyFramebuffer(vk->device, r->frame_buffers[i], NULL);
renderer_destroy_command_buffers(r);
renderer_close_renderings(r);
r->num_buffers = r->c->window->swapchain.image_count;
renderer_create_frame_buffers(r);
renderer_allocate_command_buffers(r);
renderer_build_command_buffers(r);
}
static void
renderer_create_frame_buffers(struct comp_renderer *r)
{
if (r->frame_buffers != NULL)
free(r->frame_buffers);
r->frame_buffers = U_TYPED_ARRAY_CALLOC(VkFramebuffer, r->num_buffers);
for (uint32_t i = 0; i < r->num_buffers; i++) {
VkImageView attachments[1] = {
r->c->window->swapchain.buffers[i].view,
};
renderer_create_frame_buffer(r, &r->frame_buffers[i],
ARRAY_SIZE(attachments),
attachments);
}
}
static void
renderer_create_render_pass(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
VkResult ret;
VkAttachmentDescription attachments[1] = {
(VkAttachmentDescription){
.format = r->c->window->swapchain.surface_format.format,
.samples = VK_SAMPLE_COUNT_1_BIT,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
},
};
VkAttachmentReference color_reference = {
.attachment = 0,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
VkSubpassDescription subpass_description = {
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.inputAttachmentCount = 0,
.pInputAttachments = NULL,
.colorAttachmentCount = 1,
.pColorAttachments = &color_reference,
.pResolveAttachments = NULL,
.pDepthStencilAttachment = NULL,
.preserveAttachmentCount = 0,
.pPreserveAttachments = NULL,
};
VkSubpassDependency dependencies[1] = {
(VkSubpassDependency){
.srcSubpass = VK_SUBPASS_EXTERNAL,
.dstSubpass = 0,
.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
.srcAccessMask = 0,
.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
},
};
VkRenderPassCreateInfo render_pass_info = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.attachmentCount = ARRAY_SIZE(attachments),
.pAttachments = attachments,
.subpassCount = 1,
.pSubpasses = &subpass_description,
.dependencyCount = ARRAY_SIZE(dependencies),
.pDependencies = dependencies,
};
ret = vk->vkCreateRenderPass(vk->device, &render_pass_info, NULL,
&r->render_pass);
if (ret != VK_SUCCESS) {
COMP_ERROR(r->c, "vkCreateRenderPass: %s",
vk_result_string(ret));
}
renderer_allocate_renderings(r);
renderer_build_renderings(r);
}
static void
@ -896,54 +650,19 @@ renderer_destroy(struct comp_renderer *r)
{
struct vk_bundle *vk = &r->c->vk;
// Distortion
if (r->distortion != NULL) {
comp_distortion_destroy(r->distortion);
r->distortion = NULL;
}
// Discriptor pool
if (r->descriptor_pool != VK_NULL_HANDLE) {
vk->vkDestroyDescriptorPool(vk->device, r->descriptor_pool,
NULL);
r->descriptor_pool = VK_NULL_HANDLE;
}
// Fences
for (uint32_t i = 0; i < r->num_buffers; i++)
vk->vkDestroyFence(vk->device, r->fences[i], NULL);
free(r->fences);
// Command buffers
renderer_destroy_command_buffers(r);
if (r->cmd_buffers != NULL)
free(r->cmd_buffers);
// Render pass
if (r->render_pass != VK_NULL_HANDLE) {
vk->vkDestroyRenderPass(vk->device, r->render_pass, NULL);
r->render_pass = VK_NULL_HANDLE;
renderer_close_renderings(r);
if (r->rrs != NULL) {
free(r->rrs);
}
// Frame buffers
for (uint32_t i = 0; i < r->num_buffers; i++) {
if (r->frame_buffers[i] != VK_NULL_HANDLE) {
vk->vkDestroyFramebuffer(vk->device,
r->frame_buffers[i], NULL);
r->frame_buffers[i] = VK_NULL_HANDLE;
}
}
if (r->frame_buffers != NULL)
free(r->frame_buffers);
r->frame_buffers = NULL;
r->num_buffers = 0;
// Pipeline cache
if (r->pipeline_cache != VK_NULL_HANDLE) {
vk->vkDestroyPipelineCache(vk->device, r->pipeline_cache, NULL);
r->pipeline_cache = VK_NULL_HANDLE;
}
// Semaphores
if (r->semaphores.present_complete != VK_NULL_HANDLE) {
vk->vkDestroySemaphore(vk->device,

View file

@ -15,8 +15,6 @@ compositor_srcs = [
'client/comp_vk_glue.c',
'main/comp_compositor.c',
'main/comp_compositor.h',
'main/comp_distortion.c',
'main/comp_distortion.h',
'main/comp_documentation.h',
'main/comp_renderer.c',
'main/comp_renderer.h',
@ -29,6 +27,10 @@ compositor_srcs = [
'main/comp_window.h',
'main/comp_layer_renderer.c',
'main/comp_layer.c',
'render/comp_buffer.c',
'render/comp_render.h',
'render/comp_rendering.c',
'render/comp_resources.c',
]
compile_args = []

View file

@ -0,0 +1,222 @@
// Copyright 2019-2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Buffer functions.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_main
*/
#include "main/comp_compositor.h"
#include "render/comp_render.h"
#include <stdio.h>
/*
*
* Common helpers.
*
*/
/*!
 * Create a VkBuffer, allocate device memory satisfying its requirements and
 * bind the two together.
 *
 * On failure the partially created objects are destroyed (goto cleanup) and
 * the failing VkResult is returned; the out parameters are written only on
 * success.
 */
static VkResult
create_buffer(struct vk_bundle *vk,
              VkBufferUsageFlags usage_flags,
              VkMemoryPropertyFlags memory_property_flags,
              VkDeviceSize size,
              VkBuffer *out_buffer,
              VkDeviceMemory *out_memory,
              VkDeviceSize *out_alignment,
              VkDeviceSize *out_allocation_size)
{
	VkResult ret;

	// Create the buffer handle.
	VkBufferCreateInfo buffer_info = {
	    .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
	    .size = size,
	    .usage = usage_flags,
	};
	VkBuffer buffer = VK_NULL_HANDLE;
	ret = vk->vkCreateBuffer(vk->device,   //
	                         &buffer_info, //
	                         NULL,         //
	                         &buffer);     //
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkCreateBuffer failed: '%s'",
		         vk_result_string(ret));
		return ret;
	}

	// Create the memory backing up the buffer handle.
	VkMemoryRequirements mem_reqs;
	vk->vkGetBufferMemoryRequirements(vk->device, //
	                                  buffer,     //
	                                  &mem_reqs); //

	// Find a memory type index that fits the properties of the buffer.
	// NOTE(review): vk_get_memory_type's result is not checked here —
	// TODO confirm it cannot fail for the property flags used by callers.
	uint32_t memory_type_index = 0;
	vk_get_memory_type(vk,                     //
	                   mem_reqs.memoryTypeBits, //
	                   memory_property_flags,  //
	                   &memory_type_index);    //

	// Allocate the full size the implementation requires, which may be
	// larger than the requested buffer size.
	VkMemoryAllocateInfo mem_alloc = {
	    .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
	    .allocationSize = mem_reqs.size,
	    .memoryTypeIndex = memory_type_index,
	};
	VkDeviceMemory memory = VK_NULL_HANDLE;
	ret = vk->vkAllocateMemory(vk->device, //
	                           &mem_alloc, //
	                           NULL,       //
	                           &memory);   //
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkAllocateMemory failed: '%s'",
		         vk_result_string(ret));
		goto err_buffer;
	}

	// Attach the memory to the buffer object
	ret = vk->vkBindBufferMemory(vk->device, //
	                             buffer,     // buffer
	                             memory,     // memory
	                             0);         // memoryOffset
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkBindBufferMemory failed: '%s'",
		         vk_result_string(ret));
		goto err_memory;
	}

	*out_memory = memory;
	*out_buffer = buffer;
	*out_alignment = mem_reqs.alignment;
	*out_allocation_size = mem_alloc.allocationSize;

	return VK_SUCCESS;

err_memory:
	vk->vkFreeMemory(vk->device, memory, NULL);

err_buffer:
	vk->vkDestroyBuffer(vk->device, buffer, NULL);

	return ret;
}
/*
*
* 'Exported' functions.
*
*/
VkResult
comp_buffer_init(struct vk_bundle *vk,
                 struct comp_buffer *buffer,
                 VkBufferUsageFlags usage_flags,
                 VkMemoryPropertyFlags memory_property_flags,
                 VkDeviceSize size)
{
	/*
	 * Thin wrapper over create_buffer: forward all of the creation
	 * arguments and scatter the results straight into the struct.
	 */
	VkResult ret = create_buffer(vk,
	                             usage_flags,
	                             memory_property_flags,
	                             size,
	                             &buffer->buffer,
	                             &buffer->memory,
	                             &buffer->alignment,
	                             &buffer->allocation_size);

	return ret;
}
void
comp_buffer_close(struct vk_bundle *vk, struct comp_buffer *buffer)
{
if (buffer->buffer != VK_NULL_HANDLE) {
vk->vkDestroyBuffer(vk->device, buffer->buffer, NULL);
}
if (buffer->memory != VK_NULL_HANDLE) {
vk->vkFreeMemory(vk->device, buffer->memory, NULL);
}
U_ZERO(buffer);
}
VkResult
comp_buffer_map(struct vk_bundle *vk, struct comp_buffer *buffer)
{
	// Map the entire allocation; the CPU pointer lands in buffer->mapped.
	return vk->vkMapMemory(vk->device, buffer->memory, 0, VK_WHOLE_SIZE,
	                       0, &buffer->mapped);
}
void
comp_buffer_unmap(struct vk_bundle *vk, struct comp_buffer *buffer)
{
	// Nothing to do when there is no live mapping.
	if (buffer->mapped == NULL) {
		return;
	}

	vk->vkUnmapMemory(vk->device, buffer->memory);
	buffer->mapped = NULL;
}
/*!
 * Map the buffer (if it is not already mapped) and copy @p size bytes of
 * @p data into it. The buffer is intentionally left mapped afterwards.
 *
 * Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when @p size exceeds the backing
 * allocation, or the error from comp_buffer_map on mapping failure.
 */
VkResult
comp_buffer_map_and_write(struct vk_bundle *vk,
                          struct comp_buffer *buffer,
                          void *data,
                          VkDeviceSize size)
{
	VkResult ret;

	// Guard against writing past the backing allocation.
	// (Message fixed: was "more the buffer size".)
	if (size > buffer->allocation_size) {
		VK_ERROR(vk, "Trying to write more than the buffer size!");
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}

	// Map on demand; stays mapped for subsequent writes.
	if (buffer->mapped == NULL) {
		ret = comp_buffer_map(vk, buffer);
		if (ret != VK_SUCCESS) {
			return ret;
		}
	}

	memcpy(buffer->mapped, data, size);

	return VK_SUCCESS;
}
/*!
 * Copy @p size bytes of @p data into the buffer, mapping it temporarily if
 * it was not already mapped — the pre-existing mapping state is preserved.
 *
 * Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when @p size exceeds the backing
 * allocation, or the error from comp_buffer_map on mapping failure.
 */
VkResult
comp_buffer_write(struct vk_bundle *vk,
                  struct comp_buffer *buffer,
                  void *data,
                  VkDeviceSize size)
{
	// Guard against writing past the backing allocation.
	// (Message fixed: was "more the buffer size".)
	if (size > buffer->allocation_size) {
		VK_ERROR(vk, "Trying to write more than the buffer size!");
		return VK_ERROR_OUT_OF_DEVICE_MEMORY;
	}

	bool mapped = buffer->mapped != NULL;
	if (!mapped) {
		VkResult ret = comp_buffer_map(vk, buffer);
		if (ret != VK_SUCCESS) {
			return ret;
		}
	}

	memcpy(buffer->mapped, data, size);

	// Only unmap if we did the mapping.
	if (!mapped) {
		comp_buffer_unmap(vk, buffer);
	}

	return VK_SUCCESS;
}

View file

@ -0,0 +1,355 @@
// Copyright 2019-2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief The NEW compositor rendering code header.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_main
*/
#pragma once
#include "xrt/xrt_compiler.h"
#include "xrt/xrt_defines.h"
#include "vk/vk_helpers.h"
#ifdef __cplusplus
extern "C" {
#endif
struct comp_compositor;
struct comp_swapchain_image;
/*!
* @ingroup comp_main
* @{
*/
/*
*
* Buffer
*
*/
/*!
 * Helper struct holding a buffer and its backing memory.
 */
struct comp_buffer
{
	//! Backing memory.
	VkDeviceMemory memory;
	//! Buffer.
	VkBuffer buffer;
	//! Size requested for the buffer.
	VkDeviceSize size;
	//! Size of the memory allocation (may exceed the requested size).
	VkDeviceSize allocation_size;
	//! Alignment of the buffer.
	VkDeviceSize alignment;
	//! CPU pointer set by comp_buffer_map, NULL when not mapped.
	void *mapped;
};
/*!
* Initialize a buffer.
*/
VkResult
comp_buffer_init(struct vk_bundle *vk,
struct comp_buffer *buffer,
VkBufferUsageFlags usage_flags,
VkMemoryPropertyFlags memory_property_flags,
VkDeviceSize size);
/*!
 * Frees all resources that this buffer has; does not free the struct itself.
*/
void
comp_buffer_close(struct vk_bundle *vk, struct comp_buffer *buffer);
/*!
* Maps the memory, sets comp_buffer::mapped to the memory.
*/
VkResult
comp_buffer_map(struct vk_bundle *vk, struct comp_buffer *buffer);
/*!
* Unmaps the memory.
*/
void
comp_buffer_unmap(struct vk_bundle *vk, struct comp_buffer *buffer);
/*!
* Maps the buffer, and copies the given data to the buffer.
*/
VkResult
comp_buffer_map_and_write(struct vk_bundle *vk,
struct comp_buffer *buffer,
void *data,
VkDeviceSize size);
/*!
* Writes the given data to the buffer, will map it temporarily if not mapped.
*/
VkResult
comp_buffer_write(struct vk_bundle *vk,
struct comp_buffer *buffer,
void *data,
VkDeviceSize size);
/*
*
* Resources
*
*/
/*!
 * Holds all pools and static resources for rendering.
 */
struct comp_resources
{
	/*
	 * Shared pools and caches.
	 */

	//! Shared for all rendering.
	VkPipelineCache pipeline_cache;

	//! Descriptor pool for mesh rendering.
	VkDescriptorPool mesh_descriptor_pool;

	/*
	 * Static
	 */

	struct
	{
		//! The binding index for the source texture.
		uint32_t src_binding;

		//! The binding index for the UBO.
		uint32_t ubo_binding;

		//! Descriptor set layout for mesh distortion.
		VkDescriptorSetLayout descriptor_set_layout;

		//! Pipeline layout used for mesh.
		VkPipelineLayout pipeline_layout;

		//! Vertex buffer for the distortion mesh.
		struct comp_buffer vbo;

		//! Index buffer for the distortion mesh.
		struct comp_buffer ibo;

		//! Number of vertices in the mesh.
		uint32_t num_vertices;

		//! Per-view index counts — presumably one entry per eye view.
		uint32_t num_indices[2];

		//! Byte stride of one vertex, fed to the pipeline vertex input.
		uint32_t stride;

		//! Per-view offsets into the index buffer — TODO confirm units.
		uint32_t offset_indices[2];

		//! Total index count; non-zero selects indexed strip topology.
		uint32_t total_num_indices;
	} mesh;
};
/*!
* Allocate pools and static resources.
*
* @ingroup comp_main
*/
bool
comp_resources_init(struct comp_compositor *c, struct comp_resources *r);
/*!
* Free all pools and static resources, does not free the struct itself.
*/
void
comp_resources_close(struct comp_compositor *c, struct comp_resources *r);
/*
*
* Rendering
*
*/
/*!
 * Each rendering (@ref comp_rendering) render to one or more targets, each
 * target can have one or more views (@ref comp_rendering_view), this struct
 * holds all the data that is specific to the target.
 */
struct comp_target_data
{
	//! The format that should be used to read from the target.
	VkFormat format;
	//! Is this target an external target (e.g. a swapchain image)?
	bool is_external;
	//! Total height and width of the target.
	uint32_t width, height;
};
/*!
 * Each rendering (@ref comp_rendering) render to one or more targets with
 * two views each; this struct holds the per-view resources used by the
 * mesh distortion shader.
 */
struct comp_rendering_view
{
	struct
	{
		//! UBO holding this view's comp_mesh_ubo_data.
		struct comp_buffer ubo;
		//! Descriptor set binding the source texture and the UBO.
		VkDescriptorSet descriptor_set;
	} mesh;
};
/*!
* A rendering is used to create command buffers needed to do one frame of
* compositor rendering, it holds onto resources used by the command buffer.
*/
struct comp_rendering
{
struct comp_compositor *c;
struct comp_resources *r;
//! Command buffer where all commands are recorded.
VkCommandBuffer cmd;
//! Render pass used for rendering.
VkRenderPass render_pass;
struct
{
//! The data for this target.
struct comp_target_data data;
//! Framebuffer for this target.
VkFramebuffer framebuffer;
} targets[2];
//! Number of different targets, number of views are always two.
uint32_t num_targets;
struct
{
//! Pipeline layout used for mesh.
VkPipeline pipeline;
} mesh;
//! Holds per view data.
struct comp_rendering_view views[2];
//! The current view we are rendering to.
uint32_t current_view;
};
/*!
* Init struct and create resources needed for rendering.
*/
bool
comp_rendering_init(struct comp_compositor *c,
struct comp_resources *r,
struct comp_rendering *rr);
/*!
* Frees all resources held by the rendering, does not free the struct itself.
*/
void
comp_rendering_close(struct comp_rendering *rr);
/*
*
* Drawing
*
*/
/*!
* The pure data information about a view that the renderer is rendering to.
*/
struct comp_viewport_data
{
uint32_t x, y;
uint32_t w, h;
};
/*!
* UBO data that is sent to the mesh shaders.
*/
struct comp_mesh_ubo_data
{
struct xrt_matrix_2x2 rot;
int flip_y;
};
/*!
* This function allocates everything to start a single rendering. This is the
* first function you call when you start rendering, you follow up with a call
* to comp_draw_begin_view.
*/
bool
comp_draw_begin_target_single(struct comp_rendering *rr,
VkImageView target,
struct comp_target_data *data);
void
comp_draw_end_target(struct comp_rendering *rr);
void
comp_draw_begin_view(struct comp_rendering *rr,
uint32_t target,
uint32_t view,
struct comp_viewport_data *viewport_data);
void
comp_draw_end_view(struct comp_rendering *rr);
/*!
* Does any needed teardown of state after all of the drawing commands have been
* submitted.
*/
void
comp_draw_end_drawing(struct comp_resources *r);
void
comp_draw_projection_layer(struct comp_rendering *rr,
uint32_t layer,
VkSampler sampler,
VkImageView l_image_view,
VkImageView r_image_view,
struct xrt_layer_data *data);
void
comp_draw_quad_layer(struct comp_rendering *rr,
uint32_t layer,
VkSampler sampler,
VkImageView image_view,
struct xrt_layer_data *data);
void
comp_draw_cylinder_layer(struct comp_rendering *rr,
uint32_t layer,
VkSampler sampler,
VkImageView image_view,
struct xrt_layer_data *data);
void
comp_draw_distortion(struct comp_rendering *rr,
VkSampler sampler,
VkImageView image_view,
struct comp_mesh_ubo_data *data);
/*!
* @}
*/
#ifdef __cplusplus
}
#endif

View file

@ -0,0 +1,847 @@
// Copyright 2019-2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
 * @brief The NEW compositor rendering code.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_main
*/
#include "main/comp_compositor.h"
#include "render/comp_render.h"
#include <stdio.h>
/*
*
* Common helpers
*
*/
/*!
 * Evaluate a VkResult-returning expression and return false from the
 * enclosing bool-returning function on failure.
 */
#define C(c)                                                                   \
	do {                                                                   \
		VkResult ret = c;                                              \
		if (ret != VK_SUCCESS) {                                       \
			return false;                                          \
		}                                                              \
	} while (false)

/*!
 * Destroy a Vulkan handle of the given type (if valid) and reset it.
 * Wrapped in do/while(0) so it expands to a single statement and is safe
 * inside unbraced if/else (the original bare-if form was a dangling-else
 * hazard).
 */
#define D(TYPE, thing)                                                         \
	do {                                                                   \
		if (thing != VK_NULL_HANDLE) {                                 \
			vk->vkDestroy##TYPE(vk->device, thing, NULL);          \
			thing = VK_NULL_HANDLE;                                \
		}                                                              \
	} while (false)

/*!
 * Free a descriptor set back to its pool (if valid) and reset the handle.
 * Wrapped in do/while(0) for the same single-statement guarantee as D().
 */
#define DD(pool, thing)                                                        \
	do {                                                                   \
		if (thing != VK_NULL_HANDLE) {                                 \
			free_descriptor_set(vk, pool, thing);                  \
			thing = VK_NULL_HANDLE;                                \
		}                                                              \
	} while (false)
/*!
 * Create a single-subpass render pass with one color attachment that is
 * cleared on load and finishes in PRESENT_SRC_KHR layout, i.e. meant for
 * rendering directly into an external (presentable) target.
 *
 * Writes @p out_render_pass only on success.
 */
static VkResult
create_external_render_pass(struct vk_bundle *vk,
                            VkFormat format,
                            VkRenderPass *out_render_pass)
{
	VkResult ret;

	VkAttachmentDescription attachments[1] = {
	    {
	        .format = format,
	        .samples = VK_SAMPLE_COUNT_1_BIT,
	        .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
	        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
	        .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
	        .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
	        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
	        .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
	    },
	};

	VkAttachmentReference color_reference = {
	    .attachment = 0,
	    .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
	};

	VkSubpassDescription subpasses[1] = {
	    {
	        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
	        .inputAttachmentCount = 0,
	        .pInputAttachments = NULL,
	        .colorAttachmentCount = 1,
	        .pColorAttachments = &color_reference,
	        .pResolveAttachments = NULL,
	        .pDepthStencilAttachment = NULL,
	        .preserveAttachmentCount = 0,
	        .pPreserveAttachments = NULL,
	    },
	};

	// Order this pass's color writes after any external producer's
	// color-attachment-stage work on the same image.
	VkSubpassDependency dependencies[1] = {
	    {
	        .srcSubpass = VK_SUBPASS_EXTERNAL,
	        .dstSubpass = 0,
	        .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	        .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	        .srcAccessMask = 0,
	        .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
	                         VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
	    },
	};

	VkRenderPassCreateInfo render_pass_info = {
	    .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
	    .attachmentCount = ARRAY_SIZE(attachments),
	    .pAttachments = attachments,
	    .subpassCount = ARRAY_SIZE(subpasses),
	    .pSubpasses = subpasses,
	    .dependencyCount = ARRAY_SIZE(dependencies),
	    .pDependencies = dependencies,
	};

	VkRenderPass render_pass = VK_NULL_HANDLE;
	ret = vk->vkCreateRenderPass(vk->device,        //
	                             &render_pass_info, //
	                             NULL,              //
	                             &render_pass);     //
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkCreateRenderPass failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_render_pass = render_pass;

	return VK_SUCCESS;
}
/*!
 * Allocate exactly one descriptor set with the given layout from the pool.
 */
static VkResult
create_descriptor_set(struct vk_bundle *vk,
                      VkDescriptorPool descriptor_pool,
                      VkDescriptorSetLayout descriptor_layout,
                      VkDescriptorSet *out_descriptor_set)
{
	VkDescriptorSetAllocateInfo alloc_info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
	    .descriptorPool = descriptor_pool,
	    .descriptorSetCount = 1,
	    .pSetLayouts = &descriptor_layout,
	};

	VkDescriptorSet set = VK_NULL_HANDLE;
	VkResult ret =
	    vk->vkAllocateDescriptorSets(vk->device, &alloc_info, &set);
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "vkAllocateDescriptorSets failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_descriptor_set = set;

	return VK_SUCCESS;
}
/*!
 * Return a descriptor set to its pool; failure is only worth a debug note.
 */
static void
free_descriptor_set(struct vk_bundle *vk,
                    VkDescriptorPool descriptor_pool,
                    VkDescriptorSet descriptor_set)
{
	VkResult ret = vk->vkFreeDescriptorSets(vk->device,       //
	                                        descriptor_pool,  // descriptorPool
	                                        1,                // descriptorSetCount
	                                        &descriptor_set); // pDescriptorSets
	if (ret == VK_SUCCESS) {
		return;
	}

	VK_DEBUG(vk, "vkFreeDescriptorSets failed: %s",
	         vk_result_string(ret));
}
/*!
 * Create a single-layer framebuffer with one color attachment for the
 * given render pass, sized to the target.
 */
static VkResult
create_framebuffer(struct vk_bundle *vk,
                   VkImageView image_view,
                   VkRenderPass render_pass,
                   uint32_t width,
                   uint32_t height,
                   VkFramebuffer *out_external_framebuffer)
{
	VkImageView views[1] = {image_view};

	VkFramebufferCreateInfo fb_info = {
	    .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
	    .renderPass = render_pass,
	    .attachmentCount = ARRAY_SIZE(views),
	    .pAttachments = views,
	    .width = width,
	    .height = height,
	    .layers = 1,
	};

	VkFramebuffer fb = VK_NULL_HANDLE;
	VkResult ret = vk->vkCreateFramebuffer(vk->device, &fb_info, NULL, &fb);
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkCreateFramebuffer failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_external_framebuffer = fb;

	return VK_SUCCESS;
}
/*!
 * Allocate one primary command buffer from the bundle's command pool.
 *
 * Bug fix: the error path used `VK_ERROR(r->c, ...)` — `r` is not in scope
 * in this function (it would not compile) — and the message named
 * vkCreateFramebuffer instead of the failing call.
 */
static VkResult
create_command_buffer(struct vk_bundle *vk, VkCommandBuffer *out_cmd)
{
	VkResult ret;

	VkCommandBufferAllocateInfo cmd_buffer_info = {
	    .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
	    .commandPool = vk->cmd_pool,
	    .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
	    .commandBufferCount = 1,
	};

	VkCommandBuffer cmd = VK_NULL_HANDLE;
	ret = vk->vkAllocateCommandBuffers(vk->device,       //
	                                   &cmd_buffer_info, //
	                                   &cmd);            //
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkAllocateCommandBuffers failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_cmd = cmd;

	return VK_SUCCESS;
}
/*!
 * Begin recording into the given command buffer with default begin info.
 *
 * Bug fix: the error path used `COMP_ERROR(r->c, ...)` — `r` is not in
 * scope in this vk-only helper (it would not compile); report through the
 * vk_bundle instead, matching the sibling helpers in this file.
 */
static VkResult
begin_command_buffer(struct vk_bundle *vk, VkCommandBuffer command_buffer)
{
	VkResult ret;

	VkCommandBufferBeginInfo command_buffer_info = {
	    .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
	};

	ret = vk->vkBeginCommandBuffer(command_buffer, &command_buffer_info);
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkBeginCommandBuffer failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	return VK_SUCCESS;
}
/*!
 * Record vkCmdBeginRenderPass into the command buffer, clearing the single
 * color attachment to transparent black and covering the full width/height
 * of the target, with inline subpass contents.
 */
static void
begin_render_pass(struct vk_bundle *vk,
                  VkCommandBuffer command_buffer,
                  VkRenderPass render_pass,
                  VkFramebuffer framebuffer,
                  uint32_t width,
                  uint32_t height)
{
	// Clear to transparent black.
	VkClearValue clear_color[1] = {{
	    .color = {.float32 = {0.0f, 0.0f, 0.0f, 0.0f}},
	}};

	VkRenderPassBeginInfo render_pass_begin_info = {
	    .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
	    .renderPass = render_pass,
	    .framebuffer = framebuffer,
	    .renderArea =
	        {
	            .offset =
	                {
	                    .x = 0,
	                    .y = 0,
	                },
	            .extent =
	                {
	                    .width = width,
	                    .height = height,
	                },
	        },
	    .clearValueCount = ARRAY_SIZE(clear_color),
	    .pClearValues = clear_color,
	};

	vk->vkCmdBeginRenderPass(command_buffer, &render_pass_begin_info,
	                         VK_SUBPASS_CONTENTS_INLINE);
}
/*
*
* Mesh
*
*/
/*!
 * Create the graphics pipeline used to render the distortion mesh.
 *
 * Vertex input is a single binding at @p src_binding with stride
 * @p mesh_stride and two vec4 attributes at offsets 0 and 16; viewport and
 * scissor are dynamic so one pipeline can serve both views.
 *
 * Writes @p out_mesh_pipeline only on success.
 */
static VkResult
create_mesh_pipeline(struct vk_bundle *vk,
                     VkRenderPass render_pass,
                     VkPipelineLayout pipeline_layout,
                     VkPipelineCache pipeline_cache,
                     uint32_t src_binding,
                     uint32_t mesh_total_num_indices,
                     uint32_t mesh_stride,
                     VkShaderModule mesh_vert,
                     VkShaderModule mesh_frag,
                     VkPipeline *out_mesh_pipeline)
{
	VkResult ret;

	// Might be changed to line for debugging.
	VkPolygonMode polygonMode = VK_POLYGON_MODE_FILL;

	// Do we use triangle strips or triangles with indices.
	// NOTE(review): a strip topology is selected whenever indices are
	// present — confirm this matches how the mesh is generated.
	VkPrimitiveTopology topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
	if (mesh_total_num_indices > 0) {
		topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
	}

	VkPipelineInputAssemblyStateCreateInfo input_assembly_state = {
	    .sType =
	        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
	    .topology = topology,
	    .primitiveRestartEnable = VK_FALSE,
	};

	VkPipelineRasterizationStateCreateInfo rasterization_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
	    .depthClampEnable = VK_FALSE,
	    .rasterizerDiscardEnable = VK_FALSE,
	    .polygonMode = polygonMode,
	    .cullMode = VK_CULL_MODE_BACK_BIT,
	    .frontFace = VK_FRONT_FACE_CLOCKWISE,
	    .lineWidth = 1.0f,
	};

	// Blending disabled; the mesh output overwrites the target.
	VkPipelineColorBlendAttachmentState blend_attachment_state = {
	    .blendEnable = VK_FALSE,
	    .colorWriteMask = 0xf,
	};

	VkPipelineColorBlendStateCreateInfo color_blend_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
	    .attachmentCount = 1,
	    .pAttachments = &blend_attachment_state,
	};

	VkPipelineDepthStencilStateCreateInfo depth_stencil_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
	    .depthTestEnable = VK_TRUE,
	    .depthWriteEnable = VK_TRUE,
	    .depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL,
	    .front =
	        {
	            .compareOp = VK_COMPARE_OP_ALWAYS,
	        },
	    .back =
	        {
	            .compareOp = VK_COMPARE_OP_ALWAYS,
	        },
	};

	// Counts only; the actual viewport/scissor are dynamic state below.
	VkPipelineViewportStateCreateInfo viewport_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
	    .viewportCount = 1,
	    .scissorCount = 1,
	};

	VkPipelineMultisampleStateCreateInfo multisample_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
	    .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
	};

	VkDynamicState dynamic_states[] = {
	    VK_DYNAMIC_STATE_VIEWPORT,
	    VK_DYNAMIC_STATE_SCISSOR,
	};

	VkPipelineDynamicStateCreateInfo dynamic_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
	    .dynamicStateCount = ARRAY_SIZE(dynamic_states),
	    .pDynamicStates = dynamic_states,
	};

	// clang-format off
	// Two vec4 attributes per vertex, at byte offsets 0 and 16.
	VkVertexInputAttributeDescription vertex_input_attribute_descriptions[2] = {
	    {
	        .binding = src_binding,
	        .location = 0,
	        .format = VK_FORMAT_R32G32B32A32_SFLOAT,
	        .offset = 0,
	    },
	    {
	        .binding = src_binding,
	        .location = 1,
	        .format = VK_FORMAT_R32G32B32A32_SFLOAT,
	        .offset = 16,
	    },
	};

	VkVertexInputBindingDescription vertex_input_binding_description[1] = {
	    {
	        .binding = src_binding,
	        .inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
	        .stride = mesh_stride,
	    },
	};

	VkPipelineVertexInputStateCreateInfo vertex_input_state = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
	    .vertexAttributeDescriptionCount = ARRAY_SIZE(vertex_input_attribute_descriptions),
	    .pVertexAttributeDescriptions = vertex_input_attribute_descriptions,
	    .vertexBindingDescriptionCount = ARRAY_SIZE(vertex_input_binding_description),
	    .pVertexBindingDescriptions = vertex_input_binding_description,
	};
	// clang-format on

	VkPipelineShaderStageCreateInfo shader_stages[2] = {
	    {
	        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
	        .stage = VK_SHADER_STAGE_VERTEX_BIT,
	        .module = mesh_vert,
	        .pName = "main",
	    },
	    {
	        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
	        .stage = VK_SHADER_STAGE_FRAGMENT_BIT,
	        .module = mesh_frag,
	        .pName = "main",
	    },
	};

	VkGraphicsPipelineCreateInfo pipeline_info = {
	    .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
	    .stageCount = ARRAY_SIZE(shader_stages),
	    .pStages = shader_stages,
	    .pVertexInputState = &vertex_input_state,
	    .pInputAssemblyState = &input_assembly_state,
	    .pViewportState = &viewport_state,
	    .pRasterizationState = &rasterization_state,
	    .pMultisampleState = &multisample_state,
	    .pDepthStencilState = &depth_stencil_state,
	    .pColorBlendState = &color_blend_state,
	    .pDynamicState = &dynamic_state,
	    .layout = pipeline_layout,
	    .renderPass = render_pass,
	    .basePipelineHandle = VK_NULL_HANDLE,
	    .basePipelineIndex = -1,
	};

	VkPipeline pipeline = VK_NULL_HANDLE;
	ret = vk->vkCreateGraphicsPipelines(vk->device,     //
	                                    pipeline_cache, //
	                                    1,              //
	                                    &pipeline_info, //
	                                    NULL,           //
	                                    &pipeline);     //
	if (ret != VK_SUCCESS) {
		VK_DEBUG(vk, "vkCreateGraphicsPipelines failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_mesh_pipeline = pipeline;

	return VK_SUCCESS;
}
static bool
init_mesh_ubo_buffers(struct vk_bundle *vk,
struct comp_buffer *l_ubo,
struct comp_buffer *r_ubo)
{
// Using the same flags for all ubos.
VkBufferUsageFlags ubo_usage_flags = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
VkMemoryPropertyFlags memory_property_flags =
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
// Distortion ubo size.
VkDeviceSize ubo_size = sizeof(struct comp_mesh_ubo_data);
C(comp_buffer_init(vk, //
l_ubo, //
ubo_usage_flags, //
memory_property_flags, //
ubo_size)); // size
C(comp_buffer_map(vk, l_ubo));
C(comp_buffer_init(vk, //
r_ubo, //
ubo_usage_flags, //
memory_property_flags, //
ubo_size)); // size
C(comp_buffer_map(vk, r_ubo));
return true;
}
/*!
 * Write the source texture (combined image sampler at @p src_binding) and
 * the mesh UBO (@p ubo_binding) into the given descriptor set.
 *
 * NOTE(review): the "discriptor" spelling is kept as-is; this static
 * function is referenced by that name elsewhere in this file.
 */
static void
update_mesh_discriptor_set(struct vk_bundle *vk,
                           uint32_t src_binding,
                           VkSampler sampler,
                           VkImageView image_view,
                           uint32_t ubo_binding,
                           VkBuffer buffer,
                           VkDeviceSize size,
                           VkDescriptorSet descriptor_set)
{
	VkDescriptorImageInfo image_info = {
	    .sampler = sampler,
	    .imageView = image_view,
	    .imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
	};

	VkDescriptorBufferInfo buffer_info = {
	    .buffer = buffer,
	    .offset = 0,
	    .range = size,
	};

	VkWriteDescriptorSet write_descriptor_sets[2] = {
	    {
	        .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	        .dstSet = descriptor_set,
	        .dstBinding = src_binding,
	        .descriptorCount = 1,
	        .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	        .pImageInfo = &image_info,
	    },
	    {
	        .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	        .dstSet = descriptor_set,
	        .dstBinding = ubo_binding,
	        .descriptorCount = 1,
	        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	        .pBufferInfo = &buffer_info,
	    },
	};

	vk->vkUpdateDescriptorSets(
	    vk->device,                        //
	    ARRAY_SIZE(write_descriptor_sets), // descriptorWriteCount
	    write_descriptor_sets,             // pDescriptorWrites
	    0,                                 // descriptorCopyCount
	    NULL);                             // pDescriptorCopies
}
/*
*
* 'Exported' rendering functions.
*
*/
/*!
 * Init struct and create the per-rendering resources: one command buffer
 * plus, for each of the two views, a mesh descriptor set and a pair of
 * mapped mesh UBOs.
 *
 * Returns false as soon as any step fails (via the C() macro); no partial
 * cleanup happens here — comp_rendering_close releases whatever was made.
 */
bool
comp_rendering_init(struct comp_compositor *c,
                    struct comp_resources *r,
                    struct comp_rendering *rr)
{
	struct vk_bundle *vk = &c->vk;

	rr->c = c;
	rr->r = r;

	/*
	 * Per rendering.
	 */

	C(create_command_buffer(vk, &rr->cmd));

	/*
	 * Mesh per view
	 */

	C(create_descriptor_set(
	    vk,                                  // vk_bundle
	    r->mesh_descriptor_pool,             // descriptor_pool
	    r->mesh.descriptor_set_layout,       // descriptor_set_layout
	    &rr->views[0].mesh.descriptor_set)); // descriptor_set

	C(create_descriptor_set(
	    vk,                                  // vk_bundle
	    r->mesh_descriptor_pool,             // descriptor_pool
	    r->mesh.descriptor_set_layout,       // descriptor_set_layout
	    &rr->views[1].mesh.descriptor_set)); // descriptor_set

	if (!init_mesh_ubo_buffers(vk,                      //
	                           &rr->views[0].mesh.ubo, //
	                           &rr->views[1].mesh.ubo)) {
		return false;
	}

	return true;
}
/*!
 * Tear down everything @ref comp_rendering_init created (and the per
 * target objects created by the begin-target call), then zero @p rr
 * so a stale handle can never be reused.
 */
void
comp_rendering_close(struct comp_rendering *rr)
{
	struct vk_bundle *vk = &rr->c->vk; // used by the D() destroy macro
	struct comp_resources *r = rr->r;
	// Per target objects (created in comp_draw_begin_target_single).
	D(RenderPass, rr->render_pass);
	D(Pipeline, rr->mesh.pipeline);
	D(Framebuffer, rr->targets[0].framebuffer);
	D(Framebuffer, rr->targets[1].framebuffer);
	// Per view distortion UBOs.
	comp_buffer_close(vk, &rr->views[0].mesh.ubo);
	comp_buffer_close(vk, &rr->views[1].mesh.ubo);
	// Return the descriptor sets to the shared pool.
	DD(r->mesh_descriptor_pool, rr->views[0].mesh.descriptor_set);
	DD(r->mesh_descriptor_pool, rr->views[1].mesh.descriptor_set);
	U_ZERO(rr);
}
/*
*
* 'Exported' draw functions.
*
*/
/*!
 * Begin drawing into a single external target image view.
 *
 * Creates the render pass, mesh distortion pipeline and framebuffer
 * for @p target, then starts command buffer recording and the render
 * pass that both views will draw into.  Pairs with
 * @ref comp_draw_end_target.
 */
bool
comp_draw_begin_target_single(struct comp_rendering *rr,
                              VkImageView target,
                              struct comp_target_data *data)
{
	struct vk_bundle *vk = &rr->c->vk;
	struct comp_resources *r = rr->r;
	rr->targets[0].data = *data;
	rr->num_targets = 1;
	// Only external (e.g. swapchain) targets are supported here.
	assert(data->is_external);
	C(create_external_render_pass( //
	    vk,                        // vk_bundle
	    data->format,              // target_format
	    &rr->render_pass));        // out_render_pass
	// The pipeline depends on the render pass and target format,
	// so it is created per target rather than in the shared resources.
	C(create_mesh_pipeline(
	    vk,                        // vk_bundle
	    rr->render_pass,           // render_pass
	    r->mesh.pipeline_layout,   // pipeline_layout
	    r->pipeline_cache,         // pipeline_cache
	    r->mesh.src_binding,       // src_binding
	    r->mesh.total_num_indices, // mesh_total_num_indices
	    r->mesh.stride,            // mesh_stride
	    rr->c->shaders.mesh_vert,  // mesh_vert
	    rr->c->shaders.mesh_frag,  // mesh_frag
	    &rr->mesh.pipeline));      // out_mesh_pipeline
	C(create_framebuffer(
	    vk,                            // vk_bundle,
	    target,                        // image_view,
	    rr->render_pass,               // render_pass,
	    data->width,                   // width,
	    data->height,                  // height,
	    &rr->targets[0].framebuffer)); // out_external_framebuffer
	C(begin_command_buffer(vk, rr->cmd));
	// This is shared across both views.
	begin_render_pass(vk,                          //
	                  rr->cmd,                     //
	                  rr->render_pass,             //
	                  rr->targets[0].framebuffer,  //
	                  rr->targets[0].data.width,   //
	                  rr->targets[0].data.height); //
	return true;
}
/*!
 * End the shared render pass and finish command buffer recording.
 *
 * After this the command buffer on @p rr is ready for submission.
 */
void
comp_draw_end_target(struct comp_rendering *rr)
{
	struct vk_bundle *vk = &rr->c->vk;

	//! We currently only support single target mode.
	assert(rr->num_targets == 1);

	// Close the render pass that both views drew into.
	vk->vkCmdEndRenderPass(rr->cmd);

	// Finish recording.
	VkResult ret = vk->vkEndCommandBuffer(rr->cmd);
	if (ret == VK_SUCCESS) {
		return;
	}

	VK_ERROR(vk, "vkEndCommandBuffer failed: %s", vk_result_string(ret));
}
/*!
 * Select @p view as the current view and set the dynamic viewport and
 * scissor state to cover that view's region of the target.
 */
void
comp_draw_begin_view(struct comp_rendering *rr,
                     uint32_t target,
                     uint32_t view,
                     struct comp_viewport_data *viewport_data)
{
	struct vk_bundle *vk = &rr->c->vk;

	rr->current_view = view;

	//! We currently only support single target mode.
	assert(rr->num_targets == 1);
	assert(target == 0);
	assert(view == 0 || view == 1);

	// Dynamic viewport for this view's region.
	vk->vkCmdSetViewport(rr->cmd, // commandBuffer
	                     0,       // firstViewport
	                     1,       // viewportCount
	                     &(VkViewport){
	                         .x = viewport_data->x,
	                         .y = viewport_data->y,
	                         .width = viewport_data->w,
	                         .height = viewport_data->h,
	                         .minDepth = 0.0f,
	                         .maxDepth = 1.0f,
	                     }); // pViewports

	// Matching dynamic scissor rectangle.
	vk->vkCmdSetScissor(rr->cmd, // commandBuffer
	                    0,       // firstScissor
	                    1,       // scissorCount
	                    &(VkRect2D){
	                        .offset =
	                            {
	                                .x = viewport_data->x,
	                                .y = viewport_data->y,
	                            },
	                        .extent =
	                            {
	                                .width = viewport_data->w,
	                                .height = viewport_data->h,
	                            },
	                    }); // pScissors
}
/*!
 * End drawing for the current view; counterpart of
 * @ref comp_draw_begin_view.  Currently no Vulkan state needs to be
 * restored, so this only sanity checks the target mode.
 */
void
comp_draw_end_view(struct comp_rendering *rr)
{
	//! We currently only support single target mode.
	assert(rr->num_targets == 1);
}
/*!
 * Record the distortion mesh draw for the current view into the
 * command buffer: writes @p data into the view's UBO, points the
 * view's descriptor set at @p image_view / the UBO, binds pipeline
 * and vertex data, then draws (indexed if the mesh has indices).
 *
 * Must be called between comp_draw_begin_view and comp_draw_end_view.
 */
void
comp_draw_distortion(struct comp_rendering *rr,
                     VkSampler sampler,
                     VkImageView image_view,
                     struct comp_mesh_ubo_data *data)
{
	struct vk_bundle *vk = &rr->c->vk;
	struct comp_resources *r = rr->r;
	uint32_t view = rr->current_view; // set by comp_draw_begin_view
	struct comp_rendering_view *v = &rr->views[view];
	/*
	 * Descriptors and pipeline.
	 */
	// UBO is persistently mapped, so this is a plain memory write.
	comp_buffer_write(vk, &v->mesh.ubo, data,
	                  sizeof(struct comp_mesh_ubo_data));
	update_mesh_discriptor_set( //
	    vk,                     // vk_bundle
	    r->mesh.src_binding,    // src_binding
	    sampler,                // sampler
	    image_view,             // image_view
	    r->mesh.ubo_binding,    // ubo_binding
	    v->mesh.ubo.buffer,     // buffer
	    VK_WHOLE_SIZE,          // size
	    v->mesh.descriptor_set); // descriptor_set
	VkDescriptorSet descriptor_sets[1] = {v->mesh.descriptor_set};
	vk->vkCmdBindDescriptorSets(         //
	    rr->cmd,                         // commandBuffer
	    VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
	    r->mesh.pipeline_layout,         // layout
	    0,                               // firstSet
	    ARRAY_SIZE(descriptor_sets),     // descriptorSetCount
	    descriptor_sets,                 // pDescriptorSets
	    0,                               // dynamicOffsetCount
	    NULL);                           // pDynamicOffsets
	vk->vkCmdBindPipeline(               //
	    rr->cmd,                         // commandBuffer
	    VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
	    rr->mesh.pipeline);              // pipeline
	/*
	 * Vertex buffer.
	 */
	VkBuffer buffers[1] = {r->mesh.vbo.buffer};
	VkDeviceSize offsets[1] = {0};
	assert(ARRAY_SIZE(buffers) == ARRAY_SIZE(offsets));
	vk->vkCmdBindVertexBuffers( //
	    rr->cmd,                // commandBuffer
	    0,                      // firstBinding
	    ARRAY_SIZE(buffers),    // bindingCount
	    buffers,                // pBuffers
	    offsets);               // pOffsets
	/*
	 * Draw with indices or not?
	 */
	if (r->mesh.total_num_indices > 0) {
		// Indexed draw using this view's slice of the shared IBO.
		vk->vkCmdBindIndexBuffer(  //
		    rr->cmd,               // commandBuffer
		    r->mesh.ibo.buffer,    // buffer
		    0,                     // offset
		    VK_INDEX_TYPE_UINT32); // indexType
		vk->vkCmdDrawIndexed(              //
		    rr->cmd,                       // commandBuffer
		    r->mesh.num_indices[view],     // indexCount
		    1,                             // instanceCount
		    r->mesh.offset_indices[view],  // firstIndex
		    0,                             // vertexOffset
		    0);                            // firstInstance
	} else {
		// Non-indexed draw over the whole vertex buffer.
		vk->vkCmdDraw(             //
		    rr->cmd,               // commandBuffer
		    r->mesh.num_vertices,  // vertexCount
		    1,                     // instanceCount
		    0,                     // firstVertex
		    0);                    // firstInstance
	}
}

View file

@ -0,0 +1,333 @@
// Copyright 2019-2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Shared resources for rendering.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Jakob Bornecrantz <jakob@collabora.com>
* @ingroup comp_main
*/
#include "main/comp_compositor.h"
#include "render/comp_render.h"
#include <stdio.h>
/*!
 * Calls @p c and returns false from the enclosing function on any
 * Vulkan failure.
 */
#define C(c)                                                                   \
	do {                                                                   \
		VkResult ret = c;                                              \
		if (ret != VK_SUCCESS) {                                       \
			return false;                                          \
		}                                                              \
	} while (false)

/*!
 * Destroys @p thing with vkDestroy<TYPE> if it is set, then clears it.
 *
 * Wrapped in do/while(false) so the macro always behaves as a single
 * statement; a bare `if` here would mis-bind in constructs like
 * `if (cond) D(Pipeline, p); else ...` (dangling-else hazard).
 */
#define D(TYPE, thing)                                                         \
	do {                                                                   \
		if (thing != VK_NULL_HANDLE) {                                 \
			vk->vkDestroy##TYPE(vk->device, thing, NULL);          \
			thing = VK_NULL_HANDLE;                                \
		}                                                              \
	} while (false)
/*!
 * Create an empty pipeline cache used when building the distortion
 * pipelines.
 */
static VkResult
create_pipeline_cache(struct vk_bundle *vk, VkPipelineCache *out_pipeline_cache)
{
	VkResult ret;

	VkPipelineCacheCreateInfo pipeline_cache_info = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
	};

	VkPipelineCache pipeline_cache;
	ret = vk->vkCreatePipelineCache(vk->device,           //
	                                &pipeline_cache_info, //
	                                NULL,                 //
	                                &pipeline_cache);     //
	if (ret != VK_SUCCESS) {
		// Use VK_ERROR like the rest of this file; COMP_ERROR
		// expects a compositor, but only the vk_bundle is here.
		VK_ERROR(vk, "vkCreatePipelineCache failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_pipeline_cache = pipeline_cache;

	return VK_SUCCESS;
}
/*!
 * Create a pipeline layout with a single descriptor set layout and no
 * push constant ranges.
 */
static VkResult
create_pipeline_layout(struct vk_bundle *vk,
                       VkDescriptorSetLayout descriptor_set_layout,
                       VkPipelineLayout *out_pipeline_layout)
{
	VkPipelineLayoutCreateInfo info = {
	    .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
	    .setLayoutCount = 1,
	    .pSetLayouts = &descriptor_set_layout,
	};

	VkPipelineLayout layout = VK_NULL_HANDLE;
	VkResult ret =
	    vk->vkCreatePipelineLayout(vk->device, &info, NULL, &layout);
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkCreatePipelineLayout failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_pipeline_layout = layout;

	return VK_SUCCESS;
}
/*!
 * Create a descriptor pool sized for @p num_descs sets, each using
 * @p num_uniform_per_desc uniform buffers and @p num_sampler_per_desc
 * combined image samplers.  Sets can be freed individually
 * (VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT).
 */
static VkResult
create_descriptor_pool(struct vk_bundle *vk,
                       uint32_t num_uniform_per_desc,
                       uint32_t num_sampler_per_desc,
                       uint32_t num_descs,
                       VkDescriptorPool *out_descriptor_pool)
{
	VkResult ret;

	VkDescriptorPoolSize pool_sizes[2] = {
	    {
	        .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	        .descriptorCount = num_uniform_per_desc * num_descs,
	    },
	    {
	        .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	        .descriptorCount = num_sampler_per_desc * num_descs,
	    },
	};

	VkDescriptorPoolCreateInfo descriptor_pool_info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
	    .flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
	    .maxSets = num_descs,
	    .poolSizeCount = ARRAY_SIZE(pool_sizes),
	    .pPoolSizes = pool_sizes,
	};

	VkDescriptorPool descriptor_pool = VK_NULL_HANDLE;
	ret = vk->vkCreateDescriptorPool(vk->device,            //
	                                 &descriptor_pool_info, //
	                                 NULL,                  //
	                                 &descriptor_pool);     //
	if (ret != VK_SUCCESS) {
		// Fixed copy-paste: previously logged "vkCreateRenderPass".
		VK_ERROR(vk, "vkCreateDescriptorPool failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_descriptor_pool = descriptor_pool;

	return VK_SUCCESS;
}
/*
*
* Mesh
*
*/
/*!
 * Create the descriptor set layout for the mesh distortion shader:
 * a combined image sampler at @p src_binding (fragment stage) and a
 * uniform buffer at @p ubo_binding (vertex stage).
 */
static VkResult
create_mesh_descriptor_set_layout(
    struct vk_bundle *vk,
    uint32_t src_binding,
    uint32_t ubo_binding,
    VkDescriptorSetLayout *out_descriptor_set_layout)
{
	VkDescriptorSetLayoutBinding bindings[2];
	bindings[0] = (VkDescriptorSetLayoutBinding){
	    .binding = src_binding,
	    .descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
	    .descriptorCount = 1,
	    .stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
	};
	bindings[1] = (VkDescriptorSetLayoutBinding){
	    .binding = ubo_binding,
	    .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	    .descriptorCount = 1,
	    .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
	};

	VkDescriptorSetLayoutCreateInfo info = {
	    .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
	    .bindingCount = ARRAY_SIZE(bindings),
	    .pBindings = bindings,
	};

	VkDescriptorSetLayout layout = VK_NULL_HANDLE;
	VkResult ret =
	    vk->vkCreateDescriptorSetLayout(vk->device, &info, NULL, &layout);
	if (ret != VK_SUCCESS) {
		VK_ERROR(vk, "vkCreateDescriptorSetLayout failed: %s",
		         vk_result_string(ret));
		return ret;
	}

	*out_descriptor_set_layout = layout;

	return VK_SUCCESS;
}
/*!
 * Create and fill the distortion mesh vertex and (optional) index
 * buffers.
 *
 * Skips creation entirely for zero-sized data: no VBO if there are no
 * vertices, no IBO if there are no indices.
 */
static bool
init_mesh_vertex_buffers(struct vk_bundle *vk,
                         struct comp_buffer *vbo,
                         struct comp_buffer *ibo,
                         uint32_t num_vertices,
                         uint32_t stride,
                         void *vertices,
                         uint32_t num_indices,
                         void *indices)
{
	// Using the same flags for all vbos.
	VkBufferUsageFlags vbo_usage_flags = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
	VkBufferUsageFlags ibo_usage_flags = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
	VkMemoryPropertyFlags memory_property_flags =
	    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
	    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

	// Distortion vbo and ibo sizes.
	VkDeviceSize vbo_size = stride * num_vertices;
	// Indices are drawn with VK_INDEX_TYPE_UINT32, so size them as
	// uint32_t rather than the platform-dependent sizeof(int).
	VkDeviceSize ibo_size = sizeof(uint32_t) * num_indices;

	// Don't create vbo if size is zero.
	if (vbo_size == 0) {
		return true;
	}

	C(comp_buffer_init(vk,                    // vk_bundle
	                   vbo,                   // buffer
	                   vbo_usage_flags,       // usage_flags
	                   memory_property_flags, // memory_property_flags
	                   vbo_size));            // size

	C(comp_buffer_write(vk,        // vk_bundle
	                    vbo,       // buffer
	                    vertices,  // data
	                    vbo_size)); // size

	// Don't create index buffer if size is zero.
	if (ibo_size == 0) {
		return true;
	}

	C(comp_buffer_init(vk,                    // vk_bundle
	                   ibo,                   // buffer
	                   ibo_usage_flags,       // usage_flags
	                   memory_property_flags, // memory_property_flags
	                   ibo_size));            // size

	C(comp_buffer_write(vk,        // vk_bundle
	                    ibo,       // buffer
	                    indices,   // data
	                    ibo_size)); // size

	return true;
}
/*
*
* 'Exported' renderer functions.
*
*/
/*!
 * Initialize the shared (per compositor, not per frame) rendering
 * resources: copies the HMD's distortion mesh description, then
 * creates the pipeline cache, mesh descriptor pool, descriptor set
 * layout, pipeline layout, and the mesh vertex/index buffers.
 */
bool
comp_resources_init(struct comp_compositor *c, struct comp_resources *r)
{
	struct vk_bundle *vk = &c->vk;
	struct xrt_device *xdev = c->xdev;
	/*
	 * Constants
	 */
	// Shader binding points, must match the mesh shaders.
	r->mesh.src_binding = 0;
	r->mesh.ubo_binding = 1;
	// Mirror the device's distortion mesh layout.
	struct xrt_hmd_parts *parts = xdev->hmd;
	r->mesh.num_vertices = parts->distortion.mesh.num_vertices;
	r->mesh.stride = parts->distortion.mesh.stride;
	r->mesh.num_indices[0] = parts->distortion.mesh.num_indices[0];
	r->mesh.num_indices[1] = parts->distortion.mesh.num_indices[1];
	r->mesh.total_num_indices = parts->distortion.mesh.total_num_indices;
	r->mesh.offset_indices[0] = parts->distortion.mesh.offset_indices[0];
	r->mesh.offset_indices[1] = parts->distortion.mesh.offset_indices[1];
	/*
	 * Shared
	 */
	C(create_pipeline_cache(vk, &r->pipeline_cache));
	/*
	 * Mesh static.
	 */
	// Pool sized for 16 renderings x 2 views.
	C(create_descriptor_pool(
	    vk,                         // vk_bundle
	    1,                          // num_uniform_per_desc
	    1,                          // num_sampler_per_desc
	    16 * 2,                     // num_descs
	    &r->mesh_descriptor_pool)); // out_descriptor_pool
	C(create_mesh_descriptor_set_layout(
	    vk,                               // vk_bundle
	    r->mesh.src_binding,              // src_binding
	    r->mesh.ubo_binding,              // ubo_binding
	    &r->mesh.descriptor_set_layout)); // out_mesh_descriptor_set_layout
	C(create_pipeline_layout(
	    vk,                            // vk_bundle
	    r->mesh.descriptor_set_layout, // descriptor_set_layout
	    &r->mesh.pipeline_layout));    // out_pipeline_layout
	if (!init_mesh_vertex_buffers(vk,                               //
	                              &r->mesh.vbo,                     //
	                              &r->mesh.ibo,                     //
	                              r->mesh.num_vertices,             //
	                              r->mesh.stride,                   //
	                              parts->distortion.mesh.vertices,  //
	                              r->mesh.total_num_indices,        //
	                              parts->distortion.mesh.indices)) { //
		return false;
	}
	/*
	 * Done
	 */
	U_LOG_I("New renderer initialized!");
	return true;
}
/*!
 * Destroy everything created by @ref comp_resources_init.  Safe to
 * call with partially initialized resources: the D() macro skips
 * VK_NULL_HANDLE and comp_buffer_close handles unset buffers.
 */
void
comp_resources_close(struct comp_compositor *c, struct comp_resources *r)
{
	struct vk_bundle *vk = &c->vk; // used by the D() destroy macro
	D(DescriptorSetLayout, r->mesh.descriptor_set_layout);
	D(PipelineLayout, r->mesh.pipeline_layout);
	D(PipelineCache, r->pipeline_cache);
	// Destroying the pool also frees all sets allocated from it.
	D(DescriptorPool, r->mesh_descriptor_pool);
	comp_buffer_close(vk, &r->mesh.vbo);
	comp_buffer_close(vk, &r->mesh.ibo);
}