c/layers: Implement layer renderer.

Implements a layer renderer capable of handling multiple quad
and projection layers, rendered in its own Vulkan pipeline.
Lubosz Sarnecki 2020-05-08 21:25:25 +02:00 committed by Jakob Bornecrantz
parent fc271ad5da
commit aedd4d9ff8
10 changed files with 1151 additions and 0 deletions
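
For orientation, a minimal sketch of how the pieces added by this commit fit together. This is not code from the commit; vk, extent, fovs, poses, quad_pose_matrix, sampler and image_view stand in for state the surrounding compositor already owns, and the format is an arbitrary choice:

struct comp_layer_renderer *r =
    comp_layer_renderer_create(vk, extent, VK_FORMAT_B8G8R8A8_SRGB);

/* Per-view projection and view matrices. */
for (uint32_t i = 0; i < 2; i++) {
	comp_layer_renderer_set_fov(r, &fovs[i], i);
	comp_layer_renderer_set_pose(r, &poses[i], i);
}

/* One quad layer, textured from a client-supplied image. */
comp_layer_renderer_allocate_layers(r, 1);
comp_layer_set_model_matrix(r->layers[0], &quad_pose_matrix);
comp_layer_update_descriptors(r->layers[0], sampler, image_view);

/* Draws both eyes into the renderer's offscreen framebuffers, which
 * end up in SHADER_READ_ONLY_OPTIMAL, ready for a later pass to
 * sample. */
comp_layer_renderer_draw(r);

comp_layer_renderer_destroy(r);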


@@ -8,6 +8,8 @@ spirv_shaders(SHADER_HEADERS
shaders/none.frag
shaders/panotools.frag
shaders/vive.frag
shaders/quad.frag
shaders/quad.vert
)
set(CLIENT_SOURCE_FILES
@@ -30,6 +32,10 @@ set(MAIN_SOURCE_FILES
main/comp_vk_swapchain.c
main/comp_vk_swapchain.h
main/comp_window.h
main/comp_layer.h
main/comp_layer.c
main/comp_layer_renderer.h
main/comp_layer_renderer.c
)
if (XRT_VULKAN_ENABLE_VALIDATION)


@@ -66,6 +66,12 @@ struct comp_swapchain
struct comp_swapchain_image images[XRT_MAX_SWAPCHAIN_IMAGES];
};
enum comp_layer_type
{
COMP_LAYER_STEREO_PROJECTION,
COMP_LAYER_QUAD,
};
/*!
* A stereo projection layer.
*


@@ -0,0 +1,251 @@
// Copyright 2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Compositor quad rendering.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @ingroup comp_main
*/
#include "comp_layer.h"
#include "util/u_misc.h"
#include "math/m_api.h"
#include <stdio.h>
// clang-format off
// Projection layers span from -1 to 1, the vertex buffer and quad layers
// from -0.5 to 0.5, so this scale matrix needs to be applied for proj layers.
static struct xrt_matrix_4x4 proj_scale = {
.v = {
2, 0, 0, 0,
0, 2, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
}
};
// clang-format on
void
comp_layer_set_flip_y(struct comp_render_layer *self, bool flip_y)
{
for (uint32_t i = 0; i < 2; i++)
self->transformation[i].flip_y = flip_y;
}
void
comp_layer_set_model_matrix(struct comp_render_layer *self,
const struct xrt_matrix_4x4 *m)
{
memcpy(&self->model_matrix, m, sizeof(struct xrt_matrix_4x4));
}
static void
_update_mvp_matrix(struct comp_render_layer *self,
uint32_t eye,
const struct xrt_matrix_4x4 *vp)
{
math_matrix_4x4_multiply(vp, &self->model_matrix,
&self->transformation[eye].mvp);
memcpy(self->transformation_ubos[eye].data, &self->transformation[eye],
sizeof(struct layer_transformation));
}
static bool
_init_ubos(struct comp_render_layer *self)
{
VkBufferUsageFlags usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
VkMemoryPropertyFlags properties =
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
for (uint32_t i = 0; i < 2; i++) {
math_matrix_4x4_identity(&self->transformation[i].mvp);
if (!vk_buffer_init(
self->vk, sizeof(struct layer_transformation), usage,
properties, &self->transformation_ubos[i].handle,
&self->transformation_ubos[i].memory))
return false;
VkResult res = self->vk->vkMapMemory(
self->vk->device, self->transformation_ubos[i].memory, 0,
VK_WHOLE_SIZE, 0, &self->transformation_ubos[i].data);
vk_check_error("vkMapMemory", res, false);
memcpy(self->transformation_ubos[i].data,
&self->transformation[i],
sizeof(struct layer_transformation));
}
return true;
}
static void
_update_descriptor(struct vk_bundle *vk,
VkDescriptorSet set,
VkBuffer transformation_buffer,
VkSampler sampler,
VkImageView image_view)
{
VkWriteDescriptorSet *sets = (VkWriteDescriptorSet[]){
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = set,
.dstBinding = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.pBufferInfo =
&(VkDescriptorBufferInfo){
.buffer = transformation_buffer,
.offset = 0,
.range = VK_WHOLE_SIZE,
},
.pTexelBufferView = NULL,
},
{
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
.dstSet = set,
.dstBinding = 1,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.pImageInfo =
&(VkDescriptorImageInfo){
.sampler = sampler,
.imageView = image_view,
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
},
.pBufferInfo = NULL,
.pTexelBufferView = NULL,
},
};
vk->vkUpdateDescriptorSets(vk->device, 2, sets, 0, NULL);
}
void
comp_layer_update_descriptors(struct comp_render_layer *self,
VkSampler sampler,
VkImageView image_view)
{
for (uint32_t eye = 0; eye < 2; eye++)
_update_descriptor(self->vk, self->descriptor_sets[eye],
self->transformation_ubos[eye].handle,
sampler, image_view);
}
void
comp_layer_update_stereo_descriptors(struct comp_render_layer *self,
VkSampler left_sampler,
VkSampler right_sampler,
VkImageView left_image_view,
VkImageView right_image_view)
{
_update_descriptor(self->vk, self->descriptor_sets[0],
self->transformation_ubos[0].handle, left_sampler,
left_image_view);
_update_descriptor(self->vk, self->descriptor_sets[1],
self->transformation_ubos[1].handle, right_sampler,
right_image_view);
}
static bool
_init(struct comp_render_layer *self,
struct vk_bundle *vk,
enum comp_layer_type type,
VkDescriptorSetLayout *layout)
{
self->vk = vk;
self->type = type;
self->visible = true;
math_matrix_4x4_identity(&self->model_matrix);
if (!_init_ubos(self))
return false;
uint32_t set_count = 2;
VkDescriptorPoolSize pool_sizes[] = {
{
.descriptorCount = set_count,
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
},
{
.descriptorCount = set_count,
.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
},
};
if (!vk_init_descriptor_pool(self->vk, pool_sizes,
ARRAY_SIZE(pool_sizes), set_count,
&self->descriptor_pool))
return false;
for (uint32_t eye = 0; eye < set_count; eye++)
if (!vk_allocate_descriptor_sets(
self->vk, self->descriptor_pool, 1, layout,
&self->descriptor_sets[eye]))
return false;
return true;
}
void
comp_layer_draw(struct comp_render_layer *self,
uint32_t eye,
VkPipeline pipeline,
VkPipelineLayout pipeline_layout,
VkCommandBuffer cmd_buffer,
const struct vk_buffer *vertex_buffer,
const struct xrt_matrix_4x4 *vp)
{
if (!self->visible)
return;
self->vk->vkCmdBindPipeline(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);
switch (self->type) {
case COMP_LAYER_STEREO_PROJECTION:
_update_mvp_matrix(self, eye, &proj_scale);
break;
case COMP_LAYER_QUAD: _update_mvp_matrix(self, eye, vp); break;
}
self->vk->vkCmdBindDescriptorSets(
cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1,
&self->descriptor_sets[eye], 0, NULL);
VkDeviceSize offsets[1] = {0};
self->vk->vkCmdBindVertexBuffers(cmd_buffer, 0, 1,
&vertex_buffer->handle, &offsets[0]);
self->vk->vkCmdDraw(cmd_buffer, vertex_buffer->size, 1, 0, 0);
}
struct comp_render_layer *
comp_layer_create(struct vk_bundle *vk,
enum comp_layer_type type,
VkDescriptorSetLayout *layout)
{
struct comp_render_layer *q = U_TYPED_CALLOC(struct comp_render_layer);
_init(q, vk, type, layout);
return q;
}
void
comp_layer_destroy(struct comp_render_layer *self)
{
for (uint32_t eye = 0; eye < 2; eye++)
vk_buffer_destroy(&self->transformation_ubos[eye], self->vk);
self->vk->vkDestroyDescriptorPool(self->vk->device,
self->descriptor_pool, NULL);
}


@@ -0,0 +1,72 @@
// Copyright 2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Compositor quad rendering.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @ingroup comp_main
*/
#pragma once
#include "vk/vk_helpers.h"
#include "comp_compositor.h"
struct layer_transformation
{
struct xrt_matrix_4x4 mvp;
bool flip_y;
};
struct comp_render_layer
{
struct vk_bundle *vk;
bool visible;
enum comp_layer_type type;
struct layer_transformation transformation[2];
struct vk_buffer transformation_ubos[2];
VkDescriptorPool descriptor_pool;
VkDescriptorSet descriptor_sets[2];
struct xrt_matrix_4x4 model_matrix;
};
struct comp_render_layer *
comp_layer_create(struct vk_bundle *vk,
enum comp_layer_type type,
VkDescriptorSetLayout *layout);
void
comp_layer_draw(struct comp_render_layer *self,
uint32_t eye,
VkPipeline pipeline,
VkPipelineLayout pipeline_layout,
VkCommandBuffer cmd_buffer,
const struct vk_buffer *vertex_buffer,
const struct xrt_matrix_4x4 *vp);
void
comp_layer_set_model_matrix(struct comp_render_layer *self,
const struct xrt_matrix_4x4 *m);
void
comp_layer_destroy(struct comp_render_layer *self);
void
comp_layer_update_descriptors(struct comp_render_layer *self,
VkSampler sampler,
VkImageView image_view);
void
comp_layer_update_stereo_descriptors(struct comp_render_layer *self,
VkSampler left_sampler,
VkSampler right_sampler,
VkImageView left_image_view,
VkImageView right_image_view);
void
comp_layer_set_flip_y(struct comp_render_layer *self, bool flip_y);
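
One subtlety in this header: struct layer_transformation is memcpy'd verbatim into the uniform buffer that quad.vert's Transformation block reads, so its memory layout has to line up with std140, where a bool occupies 4 bytes. C's bool is typically 1 byte; here the trailing struct padding after flip_y makes the sizes agree. A hypothetical, more explicit layout (not what this commit does) would be:

struct layer_transformation
{
	struct xrt_matrix_4x4 mvp; /* 16 floats, 64 bytes: matches the std140 mat4. */
	uint32_t flip_y;           /* Explicit 4 bytes: matches the std140 bool. */
};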


@@ -0,0 +1,681 @@
// Copyright 2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Compositor quad rendering.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @ingroup comp_main
*/
#include "comp_layer_renderer.h"
#include <stdio.h>
#include "util/u_misc.h"
#include "math/m_api.h"
#include "shaders/quad.frag.h"
#include "shaders/quad.vert.h"
struct comp_layer_vertex
{
float position[3];
float uv[2];
};
static const VkClearColorValue background_color = {
.float32 = {0.3f, 0.3f, 0.3f, 1.0f},
};
static bool
_init_render_pass(struct vk_bundle *vk,
VkFormat format,
VkImageLayout final_layout,
VkSampleCountFlagBits sample_count,
VkRenderPass *out_render_pass)
{
VkAttachmentDescription *attachments = (VkAttachmentDescription[]){
{
.format = format,
.samples = sample_count,
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.finalLayout = final_layout,
.flags = 0,
},
};
VkRenderPassCreateInfo renderpass_info = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
.flags = 0,
.attachmentCount = 1,
.pAttachments = attachments,
.subpassCount = 1,
.pSubpasses =
&(VkSubpassDescription){
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
.colorAttachmentCount = 1,
.pColorAttachments =
&(VkAttachmentReference){
.attachment = 0,
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
},
.pDepthStencilAttachment = NULL,
.pResolveAttachments = NULL,
},
.dependencyCount = 0,
.pDependencies = NULL,
};
VkResult res = vk->vkCreateRenderPass(vk->device, &renderpass_info,
NULL, out_render_pass);
vk_check_error("vkCreateRenderPass", res, false);
return true;
}
static bool
_init_descriptor_layout(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
VkDescriptorSetLayoutCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
.bindingCount = 2,
.pBindings =
(VkDescriptorSetLayoutBinding[]){
// transformation buffer
{
.binding = 0,
.descriptorCount = 1,
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
},
// quad texture
{
.binding = 1,
.descriptorCount = 1,
.descriptorType =
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
},
},
};
VkResult res = vk->vkCreateDescriptorSetLayout(
vk->device, &info, NULL, &self->descriptor_set_layout);
vk_check_error("vkCreateDescriptorSetLayout", res, false);
return true;
}
static bool
_init_pipeline_layout(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
VkPipelineLayoutCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
.setLayoutCount = 1,
.pSetLayouts = &self->descriptor_set_layout,
.pushConstantRangeCount = 0,
.pPushConstantRanges = NULL,
};
VkResult res = vk->vkCreatePipelineLayout(vk->device, &info, NULL,
&self->pipeline_layout);
vk_check_error("vkCreatePipelineLayout", res, false);
return true;
}
static bool
_init_pipeline_cache(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
VkPipelineCacheCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
};
VkResult res = vk->vkCreatePipelineCache(vk->device, &info, NULL,
&self->pipeline_cache);
vk_check_error("vkCreatePipelineCache", res, false);
return true;
}
struct __attribute__((__packed__)) comp_pipeline_config
{
VkPrimitiveTopology topology;
uint32_t stride;
const VkVertexInputAttributeDescription *attribs;
uint32_t attrib_count;
const VkPipelineDepthStencilStateCreateInfo *depth_stencil_state;
const VkPipelineColorBlendAttachmentState *blend_attachments;
const VkPipelineRasterizationStateCreateInfo *rasterization_state;
};
static VkPipelineShaderStageCreateInfo
_shader_load(struct vk_bundle *vk,
const uint32_t *code,
size_t size,
VkShaderStageFlagBits flags)
{
VkResult ret;
VkShaderModuleCreateInfo info = {
.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
.codeSize = size,
.pCode = code,
};
VkShaderModule module;
ret = vk->vkCreateShaderModule(vk->device, &info, NULL, &module);
if (ret != VK_SUCCESS) {
VK_DEBUG(vk, "vkCreateShaderModule failed %u", ret);
}
return (VkPipelineShaderStageCreateInfo){
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
.stage = flags,
.module = module,
.pName = "main",
};
}
static bool
_init_graphics_pipeline(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
struct comp_pipeline_config config = {
.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
.stride = sizeof(struct comp_layer_vertex),
.attribs =
(VkVertexInputAttributeDescription[]){
{0, 0, VK_FORMAT_R32G32B32_SFLOAT, 0},
{1, 0, VK_FORMAT_R32G32_SFLOAT,
offsetof(struct comp_layer_vertex, uv)},
},
.attrib_count = 2,
.depth_stencil_state =
&(VkPipelineDepthStencilStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
.depthTestEnable = VK_FALSE,
.depthWriteEnable = VK_FALSE,
.depthCompareOp = VK_COMPARE_OP_NEVER,
},
.blend_attachments =
&(VkPipelineColorBlendAttachmentState){
.blendEnable = VK_FALSE,
.colorWriteMask = 0xf,
},
.rasterization_state =
&(VkPipelineRasterizationStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
.polygonMode = VK_POLYGON_MODE_FILL,
.cullMode = VK_CULL_MODE_BACK_BIT,
.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE,
.lineWidth = 1.0f,
},
};
VkPipelineShaderStageCreateInfo shader_stages[2] = {
_shader_load(vk, shaders_quad_vert, sizeof(shaders_quad_vert),
VK_SHADER_STAGE_VERTEX_BIT),
_shader_load(vk, shaders_quad_frag, sizeof(shaders_quad_frag),
VK_SHADER_STAGE_FRAGMENT_BIT),
};
VkGraphicsPipelineCreateInfo pipeline_info = {
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
.layout = self->pipeline_layout,
.pVertexInputState =
&(VkPipelineVertexInputStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.pVertexAttributeDescriptions = config.attribs,
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions =
&(VkVertexInputBindingDescription){
.binding = 0,
.inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
.stride = config.stride,
},
.vertexAttributeDescriptionCount = config.attrib_count,
},
.pInputAssemblyState =
&(VkPipelineInputAssemblyStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
.topology = config.topology,
.primitiveRestartEnable = VK_FALSE,
},
.pViewportState =
&(VkPipelineViewportStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
.viewportCount = 1,
.scissorCount = 1,
},
.pRasterizationState = config.rasterization_state,
.pMultisampleState =
&(VkPipelineMultisampleStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
.rasterizationSamples = self->sample_count,
.minSampleShading = 0.0f,
.pSampleMask = &(uint32_t){0xFFFFFFFF},
.alphaToCoverageEnable = VK_FALSE,
},
.pDepthStencilState = config.depth_stencil_state,
.pColorBlendState =
&(VkPipelineColorBlendStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
.logicOpEnable = VK_FALSE,
.attachmentCount = 1,
.blendConstants = {0, 0, 0, 0},
.pAttachments = config.blend_attachments,
},
.stageCount = 2,
.pStages = shader_stages,
.renderPass = self->render_pass,
.pDynamicState =
&(VkPipelineDynamicStateCreateInfo){
.sType =
VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
.dynamicStateCount = 2,
.pDynamicStates =
(VkDynamicState[]){
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR,
},
},
.subpass = 0,
};
VkResult res;
res = vk->vkCreateGraphicsPipelines(vk->device, self->pipeline_cache, 1,
&pipeline_info, NULL,
&self->pipeline);
vk_check_error("vkCreateGraphicsPipelines", res, false);
vk->vkDestroyShaderModule(vk->device, shader_stages[0].module, NULL);
vk->vkDestroyShaderModule(vk->device, shader_stages[1].module, NULL);
return true;
}
// clang-format off
float plane_vertices[6 * 5] = {
-0.5, -0.5, 0, 0, 1,
0.5, -0.5, 0, 1, 1,
0.5, 0.5, 0, 1, 0,
0.5, 0.5, 0, 1, 0,
-0.5, 0.5, 0, 0, 0,
-0.5, -0.5, 0, 0, 1,
};
// clang-format on
static bool
_init_vertex_buffer(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
VkMemoryPropertyFlags properties = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
if (!vk_buffer_init(vk, sizeof(float) * ARRAY_SIZE(plane_vertices),
usage, properties, &self->vertex_buffer.handle,
&self->vertex_buffer.memory))
return false;
self->vertex_buffer.size = 6;
void *tmp;
VkResult res = vk->vkMapMemory(vk->device, self->vertex_buffer.memory,
0, VK_WHOLE_SIZE, 0, &tmp);
vk_check_error("vkMapMemory", res, false);
memcpy(tmp, plane_vertices, sizeof(float) * ARRAY_SIZE(plane_vertices));
VkMappedMemoryRange memory_range = {
.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
.memory = self->vertex_buffer.memory,
.size = VK_WHOLE_SIZE,
};
res = vk->vkFlushMappedMemoryRanges(vk->device, 1, &memory_range);
vk_check_error("vkFlushMappedMemoryRanges", res, false);
vk->vkUnmapMemory(vk->device, self->vertex_buffer.memory);
return true;
}
static void
_render_eye(struct comp_layer_renderer *self,
uint32_t eye,
VkCommandBuffer cmd_buffer,
VkPipelineLayout pipeline_layout)
{
struct xrt_matrix_4x4 vp;
math_matrix_4x4_multiply(&self->mat_projection[eye],
&self->mat_view[eye], &vp);
for (uint32_t i = 0; i < self->num_layers; i++)
comp_layer_draw(self->layers[i], eye, self->pipeline,
pipeline_layout, cmd_buffer,
&self->vertex_buffer, &vp);
}
static bool
_init_frame_buffer(struct comp_layer_renderer *self,
VkFormat format,
VkRenderPass rp,
uint32_t eye)
{
struct vk_bundle *vk = self->vk;
VkImageUsageFlags usage =
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
VkResult res = vk_create_image_simple(vk, self->extent, format, usage,
&self->framebuffers[eye].memory,
&self->framebuffers[eye].image);
vk_check_error("vk_create_image_simple", res, false);
vk_create_sampler(vk, &self->framebuffers[eye].sampler);
VkImageSubresourceRange subresource_range = {
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
.baseMipLevel = 0,
.levelCount = 1,
.baseArrayLayer = 0,
.layerCount = 1,
};
res = vk_create_view(vk, self->framebuffers[eye].image, format,
subresource_range, &self->framebuffers[eye].view);
vk_check_error("vk_create_view", res, false);
VkFramebufferCreateInfo framebuffer_info = {
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
.renderPass = rp,
.attachmentCount = 1,
.pAttachments = (VkImageView[]){self->framebuffers[eye].view},
.width = self->extent.width,
.height = self->extent.height,
.layers = 1,
};
res = vk->vkCreateFramebuffer(vk->device, &framebuffer_info, NULL,
&self->framebuffers[eye].handle);
vk_check_error("vkCreateFramebuffer", res, false);
return true;
}
void
comp_layer_renderer_allocate_layers(struct comp_layer_renderer *self,
uint32_t num_layers)
{
struct vk_bundle *vk = self->vk;
self->num_layers = num_layers;
self->layers =
U_TYPED_ARRAY_CALLOC(struct comp_render_layer *, self->num_layers);
for (uint32_t i = 0; i < self->num_layers; i++) {
self->layers[i] = comp_layer_create(
vk, COMP_LAYER_QUAD, &self->descriptor_set_layout);
}
}
void
comp_layer_renderer_destroy_layers(struct comp_layer_renderer *self)
{
for (uint32_t i = 0; i < self->num_layers; i++)
comp_layer_destroy(self->layers[i]);
if (self->layers != NULL)
free(self->layers);
self->layers = NULL;
self->num_layers = 0;
}
static bool
_init(struct comp_layer_renderer *self,
struct vk_bundle *vk,
VkExtent2D extent,
VkFormat format)
{
self->vk = vk;
self->near = 0.001f;
self->far = 100.0f;
self->sample_count = VK_SAMPLE_COUNT_1_BIT;
self->num_layers = 0;
self->extent = extent;
for (uint32_t i = 0; i < 2; i++) {
math_matrix_4x4_identity(&self->mat_projection[i]);
math_matrix_4x4_identity(&self->mat_view[i]);
}
if (!_init_render_pass(vk, format,
VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
self->sample_count, &self->render_pass))
return false;
for (uint32_t i = 0; i < 2; i++)
if (!_init_frame_buffer(self, format, self->render_pass, i))
return false;
if (!_init_descriptor_layout(self))
return false;
if (!_init_pipeline_layout(self))
return false;
if (!_init_pipeline_cache(self))
return false;
if (!_init_graphics_pipeline(self))
return false;
if (!_init_vertex_buffer(self))
return false;
return true;
}
struct comp_layer_renderer *
comp_layer_renderer_create(struct vk_bundle *vk,
VkExtent2D extent,
VkFormat format)
{
struct comp_layer_renderer *r =
U_TYPED_CALLOC(struct comp_layer_renderer);
_init(r, vk, extent, format);
return r;
}
static void
_render_pass_begin(struct vk_bundle *vk,
VkRenderPass render_pass,
VkExtent2D extent,
VkClearColorValue clear_color,
VkFramebuffer frame_buffer,
VkCommandBuffer cmd_buffer)
{
VkRenderPassBeginInfo render_pass_info = {
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
.renderPass = render_pass,
.framebuffer = frame_buffer,
.renderArea =
{
.offset =
{
.x = 0,
.y = 0,
},
.extent = extent,
},
.clearValueCount = 1,
.pClearValues =
(VkClearValue[]){
{
.color = clear_color,
},
{
.depthStencil =
{
.depth = 1.0f,
.stencil = 0,
},
},
},
};
vk->vkCmdBeginRenderPass(cmd_buffer, &render_pass_info,
VK_SUBPASS_CONTENTS_INLINE);
}
static void
_render_stereo(struct comp_layer_renderer *self,
struct vk_bundle *vk,
VkCommandBuffer cmd_buffer)
{
VkViewport viewport = {
0.0f, 0.0f, self->extent.width, self->extent.height, 0.0f, 1.0f,
};
vk->vkCmdSetViewport(cmd_buffer, 0, 1, &viewport);
VkRect2D scissor = {
.offset = {0, 0},
.extent = self->extent,
};
vk->vkCmdSetScissor(cmd_buffer, 0, 1, &scissor);
for (uint32_t eye = 0; eye < 2; eye++) {
_render_pass_begin(vk, self->render_pass, self->extent,
background_color,
self->framebuffers[eye].handle, cmd_buffer);
_render_eye(self, eye, cmd_buffer, self->pipeline_layout);
vk->vkCmdEndRenderPass(cmd_buffer);
}
}
void
comp_layer_renderer_draw(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
VkCommandBuffer cmd_buffer;
if (vk_init_cmd_buffer(vk, &cmd_buffer) != VK_SUCCESS)
return;
_render_stereo(self, vk, cmd_buffer);
VkResult res = vk_submit_cmd_buffer(vk, cmd_buffer);
vk_check_error("vk_submit_cmd_buffer", res, );
}
static void
_destroy_framebuffer(struct comp_layer_renderer *self, uint32_t i)
{
struct vk_bundle *vk = self->vk;
vk->vkDestroyImageView(vk->device, self->framebuffers[i].view, NULL);
vk->vkDestroyImage(vk->device, self->framebuffers[i].image, NULL);
vk->vkFreeMemory(vk->device, self->framebuffers[i].memory, NULL);
vk->vkDestroyFramebuffer(vk->device, self->framebuffers[i].handle,
NULL);
vk->vkDestroySampler(vk->device, self->framebuffers[i].sampler, NULL);
}
void
comp_layer_renderer_destroy(struct comp_layer_renderer *self)
{
struct vk_bundle *vk = self->vk;
if (vk->device == VK_NULL_HANDLE)
return;
vk->vkDeviceWaitIdle(vk->device);
comp_layer_renderer_destroy_layers(self);
for (uint32_t i = 0; i < 2; i++)
_destroy_framebuffer(self, i);
vk->vkDestroyRenderPass(vk->device, self->render_pass, NULL);
vk->vkDestroyPipelineLayout(vk->device, self->pipeline_layout, NULL);
vk->vkDestroyDescriptorSetLayout(vk->device,
self->descriptor_set_layout, NULL);
vk->vkDestroyPipeline(vk->device, self->pipeline, NULL);
for (uint32_t i = 0; i < ARRAY_SIZE(self->shader_modules); i++)
vk->vkDestroyShaderModule(vk->device, self->shader_modules[i],
NULL);
vk_buffer_destroy(&self->vertex_buffer, vk);
vk->vkDestroyPipelineCache(vk->device, self->pipeline_cache, NULL);
}
void
comp_layer_renderer_set_fov(struct comp_layer_renderer *self,
const struct xrt_fov *fov,
uint32_t view_id)
{
const float tan_left = tanf(fov->angle_left);
const float tan_right = tanf(fov->angle_right);
const float tan_down = tanf(fov->angle_down);
const float tan_up = tanf(fov->angle_up);
const float tan_width = tan_right - tan_left;
const float tan_height = tan_up - tan_down;
const float a11 = 2 / tan_width;
const float a22 = 2 / tan_height;
const float a31 = (tan_right + tan_left) / tan_width;
const float a32 = (tan_up + tan_down) / tan_height;
const float a33 = -self->far / (self->far - self->near);
const float a43 = -(self->far * self->near) / (self->far - self->near);
// clang-format off
self->mat_projection[view_id] = (struct xrt_matrix_4x4) {
.v = {
a11, 0, 0, 0,
0, a22, 0, 0,
a31, a32, a33, -1,
0, 0, a43, 0,
}
};
// clang-format on
}
void
comp_layer_renderer_set_pose(struct comp_layer_renderer *self,
const struct xrt_pose *pose,
uint32_t view_id)
{
math_matrix_4x4_view_from_pose(pose, &self->mat_view[view_id]);
}
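
For reference, comp_layer_renderer_set_fov above computes the standard asymmetric-frustum projection for Vulkan's $[0, 1]$ depth range. With $t_l = \tan(\mathrm{angle\_left})$ and $t_r$, $t_d$, $t_u$ likewise, and $n$, $f$ the near and far planes, the stored matrix (column-major, so each row of the .v initializer is one column of $P$) is

$$
P = \begin{pmatrix}
\frac{2}{t_r - t_l} & 0 & \frac{t_r + t_l}{t_r - t_l} & 0 \\
0 & \frac{2}{t_u - t_d} & \frac{t_u + t_d}{t_u - t_d} & 0 \\
0 & 0 & \frac{-f}{f - n} & \frac{-fn}{f - n} \\
0 & 0 & -1 & 0
\end{pmatrix}
$$

It maps a view-space point at $z = -n$ to depth $0$ and one at $z = -f$ to depth $1$.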


@@ -0,0 +1,83 @@
// Copyright 2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Compositor quad rendering.
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @ingroup comp_main
*/
#pragma once
#include "comp_layer.h"
/*!
 * Holds associated vulkan objects and state to render quads.
 *
 * @ingroup comp_main
 */
struct comp_layer_renderer
{
struct vk_bundle *vk;
struct
{
VkImage image;
VkDeviceMemory memory;
VkImageView view;
VkSampler sampler;
VkFramebuffer handle;
} framebuffers[2];
VkRenderPass render_pass;
VkExtent2D extent;
VkSampleCountFlagBits sample_count;
VkShaderModule shader_modules[2];
VkPipeline pipeline;
VkDescriptorSetLayout descriptor_set_layout;
VkPipelineLayout pipeline_layout;
VkPipelineCache pipeline_cache;
struct xrt_matrix_4x4 mat_view[2];
struct xrt_matrix_4x4 mat_projection[2];
struct vk_buffer vertex_buffer;
float near;
float far;
struct comp_render_layer **layers;
uint32_t num_layers;
};
struct comp_layer_renderer *
comp_layer_renderer_create(struct vk_bundle *vk,
VkExtent2D extent,
VkFormat format);
void
comp_layer_renderer_destroy(struct comp_layer_renderer *self);
void
comp_layer_renderer_draw(struct comp_layer_renderer *self);
void
comp_layer_renderer_set_fov(struct comp_layer_renderer *self,
const struct xrt_fov *fov,
uint32_t view_id);
void
comp_layer_renderer_set_pose(struct comp_layer_renderer *self,
const struct xrt_pose *pose,
uint32_t view_id);
void
comp_layer_renderer_allocate_layers(struct comp_layer_renderer *self,
uint32_t num_layers);
void
comp_layer_renderer_destroy_layers(struct comp_layer_renderer *self);


@@ -26,6 +26,8 @@ compositor_srcs = [
'main/comp_vk_swapchain.c',
'main/comp_vk_swapchain.h',
'main/comp_window.h',
'main/comp_layer_renderer.c',
'main/comp_layer.c',
]
compile_args = []


@@ -5,6 +5,8 @@ shader_srcs = [
'none.frag',
'panotools.frag',
'vive.frag',
'quad.vert',
'quad.frag'
]
shader_headers = []


@@ -0,0 +1,18 @@
// Copyright 2020 Collabora Ltd.
// Author: Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
// SPDX-License-Identifier: BSL-1.0
#version 460
layout (location = 0) in vec2 uv;
layout (binding = 1) uniform sampler2D image;
layout (location = 0) out vec4 out_color;
void main ()
{
vec4 texture_color = texture (image, uv);
out_color = texture_color;
}


@@ -0,0 +1,30 @@
// Copyright 2020 Collabora Ltd.
// Author: Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
// SPDX-License-Identifier: BSL-1.0
#version 460
layout (binding = 0) uniform Transformation {
mat4 mvp;
bool flip_y;
} transformation;
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 uv;
layout (location = 0) out vec2 out_uv;
out gl_PerVertex {
vec4 gl_Position;
};
void main() {
gl_Position = transformation.mvp * vec4 (position, 1.0f);
gl_Position.y = -gl_Position.y;
out_uv = uv;
if (transformation.flip_y) {
out_uv.y = 1.0 - out_uv.y;
}
}
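
Two different Y flips are at work in this shader and worth keeping apart: the unconditional gl_Position.y negation converts GL-convention clip space to Vulkan's Y-down clip space, while flip_y only inverts the texture V coordinate, presumably for client images submitted with an inverted V axis. A hypothetical host-side use of the setter added in comp_layer.c:

/* Assumed caller code: request a per-layer UV flip for a client
 * texture whose V axis is inverted (e.g. a GL-sourced image). */
comp_layer_set_flip_y(layer, true);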