// Copyright 2019-2020, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief Swapchain code for the main compositor.
 * @author Jakob Bornecrantz <jakob@collabora.com>
 * @ingroup comp_main
 */

#include "util/u_misc.h"

#include "main/comp_compositor.h"

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
/*
 *
 * Swapchain member functions.
 *
 */
static void
swapchain_destroy(struct xrt_swapchain *xsc)
{
	struct comp_swapchain *sc = comp_swapchain(xsc);

	COMP_SPEW(sc->c, "DESTROY");

	// Deferred destruction: push onto the compositor's destroy stack;
	// comp_swapchain_really_destroy() performs the actual teardown later.
	u_threading_stack_push(&sc->c->threading.destroy_swapchains, sc);
}
static xrt_result_t
swapchain_acquire_image(struct xrt_swapchain *xsc, uint32_t *out_index)
2019-03-18 05:52:32 +00:00
{
struct comp_swapchain *sc = comp_swapchain(xsc);
COMP_SPEW(sc->c, "ACQUIRE_IMAGE");
// Returns negative on empty fifo.
int res = u_index_fifo_pop(&sc->fifo, out_index);
if (res >= 0) {
return XRT_SUCCESS;
} else {
return XRT_ERROR_NO_IMAGE_AVAILABLE;
}
2019-03-18 05:52:32 +00:00
}
static xrt_result_t
2019-03-18 05:52:32 +00:00
swapchain_wait_image(struct xrt_swapchain *xsc,
uint64_t timeout,
uint32_t index)
{
struct comp_swapchain *sc = comp_swapchain(xsc);
COMP_SPEW(sc->c, "WAIT_IMAGE");
return XRT_SUCCESS;
2019-03-18 05:52:32 +00:00
}
static xrt_result_t
2019-03-18 05:52:32 +00:00
swapchain_release_image(struct xrt_swapchain *xsc, uint32_t index)
{
struct comp_swapchain *sc = comp_swapchain(xsc);
COMP_SPEW(sc->c, "RELEASE_IMAGE");
int res = u_index_fifo_push(&sc->fifo, index);
if (res >= 0) {
return XRT_SUCCESS;
} else {
// FIFO full
return XRT_ERROR_NO_IMAGE_AVAILABLE;
}
2019-03-18 05:52:32 +00:00
}
/*
*
* Helper functions.
*
*/
static struct comp_swapchain *
alloc_and_set_funcs(struct comp_compositor *c, uint32_t num_images)
2019-03-18 05:52:32 +00:00
{
2019-03-21 20:19:52 +00:00
struct comp_swapchain *sc = U_TYPED_CALLOC(struct comp_swapchain);
2019-03-18 05:52:32 +00:00
sc->base.base.destroy = swapchain_destroy;
sc->base.base.acquire_image = swapchain_acquire_image;
sc->base.base.wait_image = swapchain_wait_image;
sc->base.base.release_image = swapchain_release_image;
sc->base.base.num_images = num_images;
sc->c = c;
// Make sure the fds are invalid.
for (uint32_t i = 0; i < ARRAY_SIZE(sc->base.images); i++) {
sc->base.images[i].fd = -1;
}
return sc;
}
/*!
 * Post-allocation setup shared by create and import: allocates per-layer
 * image views (one set with alpha, one with alpha forced to one), creates a
 * sampler per image, primes the acquire fifo, and transitions every image to
 * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL.
 */
static void
do_post_create_vulkan_setup(struct comp_compositor *c,
                            struct comp_swapchain *sc)
{
	struct xrt_swapchain_create_info *info = &sc->vkic.info;
	uint32_t num_images = sc->vkic.num_images;
	VkCommandBuffer cmd_buffer;

	// Swizzle used by the "no_alpha" views: alpha reads as one.
	VkComponentMapping components = {
	    .r = VK_COMPONENT_SWIZZLE_R,
	    .g = VK_COMPONENT_SWIZZLE_G,
	    .b = VK_COMPONENT_SWIZZLE_B,
	    .a = VK_COMPONENT_SWIZZLE_ONE,
	};

	for (uint32_t i = 0; i < num_images; i++) {
		// One view per array layer, for each of the two view sets.
		sc->images[i].views.alpha =
		    U_TYPED_ARRAY_CALLOC(VkImageView, info->array_size);
		sc->images[i].views.no_alpha =
		    U_TYPED_ARRAY_CALLOC(VkImageView, info->array_size);
		sc->images[i].array_size = info->array_size;

		vk_create_sampler(&c->vk, &sc->images[i].sampler);

		for (uint32_t layer = 0; layer < info->array_size; ++layer) {
			VkImageSubresourceRange subresource_range = {
			    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
			    .baseMipLevel = 0,
			    .levelCount = 1,
			    .baseArrayLayer = layer,
			    .layerCount = 1,
			};

			vk_create_view(&c->vk, sc->vkic.images[i].handle,
			               (VkFormat)info->format,
			               subresource_range,
			               &sc->images[i].views.alpha[layer]);
			vk_create_view_swizzle(
			    &c->vk, sc->vkic.images[i].handle,
			    (VkFormat)info->format, subresource_range,
			    components,
			    &sc->images[i].views.no_alpha[layer]);
		}
	}

	// Prime the fifo so every image index starts out acquirable.
	for (uint32_t i = 0; i < num_images; i++) {
		u_index_fifo_push(&sc->fifo, i);
	}

	/*
	 *
	 * Transition image.
	 *
	 */

	vk_init_cmd_buffer(&c->vk, &cmd_buffer);

	// Range covering all array layers of mip level 0.
	VkImageSubresourceRange subresource_range = {
	    .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
	    .baseMipLevel = 0,
	    .levelCount = 1,
	    .baseArrayLayer = 0,
	    .layerCount = info->array_size,
	};

	// Move every image from UNDEFINED to SHADER_READ_ONLY_OPTIMAL so it
	// can be sampled immediately.
	for (uint32_t i = 0; i < num_images; i++) {
		vk_set_image_layout(
		    &c->vk, cmd_buffer, sc->vkic.images[i].handle, 0,
		    VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
		    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
		    subresource_range);
	}

	vk_submit_cmd_buffer(&c->vk, cmd_buffer);
}
/*!
 * Destroy every view in the given array and free the array itself.
 *
 * @param vk         Vulkan bundle whose device owns the views.
 * @param array_size Number of entries in the view array.
 * @param views_ptr  Pointer to the view array pointer; set to NULL on
 *                   return. Safe to call when the array is already NULL.
 */
static void
clean_image_views(struct vk_bundle *vk,
                  size_t array_size,
                  VkImageView **views_ptr)
{
	VkImageView *views = *views_ptr;
	if (views == NULL) {
		return;
	}

	// size_t index to match the array_size parameter's type.
	for (size_t i = 0; i < array_size; ++i) {
		if (views[i] == VK_NULL_HANDLE) {
			continue;
		}

		vk->vkDestroyImageView(vk->device, views[i], NULL);
		views[i] = VK_NULL_HANDLE;
	}

	free(views);
	*views_ptr = NULL;
}
/*!
* Free and destroy any initialized fields on the given image, safe to pass in
* images that has one or all fields set to NULL.
*/
static void
image_cleanup(struct vk_bundle *vk, struct comp_swapchain_image *image)
2019-03-18 05:52:32 +00:00
{
/*
* This makes sure that any pending command buffer has completed and all
* resources referred by it can now be manipulated. This make sure that
* validation doesn't complain. This is done during image destruction so
* isn't time critical.
*/
vk->vkDeviceWaitIdle(vk->device);
clean_image_views(vk, image->array_size, &image->views.alpha);
clean_image_views(vk, image->array_size, &image->views.no_alpha);
2019-03-18 05:52:32 +00:00
2019-10-07 22:27:09 +00:00
if (image->sampler != VK_NULL_HANDLE) {
2019-03-18 05:52:32 +00:00
vk->vkDestroySampler(vk->device, image->sampler, NULL);
2019-10-07 22:27:09 +00:00
image->sampler = VK_NULL_HANDLE;
2019-03-18 05:52:32 +00:00
}
}
/*
*
* Exported functions.
*
*/
/*!
 * Create a swapchain by allocating fresh Vulkan images, exporting their fds
 * and performing the shared post-create setup. On any Vulkan failure the
 * partially-built swapchain is torn down and XRT_ERROR_VULKAN is returned.
 */
xrt_result_t
comp_swapchain_create(struct xrt_compositor *xc,
                      const struct xrt_swapchain_create_info *info,
                      struct xrt_swapchain **out_xsc)
{
	struct comp_compositor *c = comp_compositor(xc);
	uint32_t num_images = 3;
	VkResult ret;

	// A static image only ever needs a single buffer.
	if ((info->create & XRT_SWAPCHAIN_CREATE_STATIC_IMAGE) != 0) {
		num_images = 1;
	}

	struct comp_swapchain *sc = alloc_and_set_funcs(c, num_images);

	COMP_DEBUG(c, "CREATE %p %dx%d", (void *)sc, info->width, info->height);

	// Use the image helper to allocate the images.
	ret = vk_ic_allocate(&c->vk, info, num_images, &sc->vkic);
	if (ret != VK_SUCCESS) {
		free(sc);
		return XRT_ERROR_VULKAN;
	}

#ifdef XRT_OS_LINUX
	int fds[ARRAY_SIZE(sc->vkic.images)];

	// Export one fd per image; previously this result was ignored and a
	// failed export would leave the fd array uninitialized.
	ret = vk_ic_get_fds(&c->vk, &sc->vkic, ARRAY_SIZE(fds), fds);
	if (ret != VK_SUCCESS) {
		vk_ic_destroy(&c->vk, &sc->vkic);
		free(sc);
		return XRT_ERROR_VULKAN;
	}

	for (uint32_t i = 0; i < sc->vkic.num_images; i++) {
		sc->base.images[i].fd = fds[i];
		sc->base.images[i].size = sc->vkic.images[i].size;
	}
#else
#error "OS not supported"
#endif

	do_post_create_vulkan_setup(c, sc);

	*out_xsc = &sc->base.base;

	return XRT_SUCCESS;
}
/*!
 * Create a swapchain from externally supplied native images, then perform
 * the shared post-create setup. Returns XRT_ERROR_VULKAN on import failure.
 */
xrt_result_t
comp_swapchain_import(struct xrt_compositor *xc,
                      const struct xrt_swapchain_create_info *info,
                      struct xrt_image_native *native_images,
                      uint32_t num_images,
                      struct xrt_swapchain **out_xsc)
{
	struct comp_compositor *c = comp_compositor(xc);
	VkResult ret;

	struct comp_swapchain *sc = alloc_and_set_funcs(c, num_images);

	COMP_DEBUG(c, "CREATE FROM NATIVE %p %dx%d", (void *)sc, info->width,
	           info->height);

	// Use the image helper to get the images.
	ret = vk_ic_from_natives(&c->vk, info, native_images, num_images,
	                         &sc->vkic);
	if (ret != VK_SUCCESS) {
		// Was leaked before; mirror comp_swapchain_create's error path.
		free(sc);
		return XRT_ERROR_VULKAN;
	}

	do_post_create_vulkan_setup(c, sc);

	*out_xsc = &sc->base.base;

	return XRT_SUCCESS;
}
/*!
 * Actually tear down a swapchain: per-image Vulkan resources, exported fds,
 * the image collection, and finally the struct itself.
 */
void
comp_swapchain_really_destroy(struct comp_swapchain *sc)
{
	struct vk_bundle *vk = &sc->c->vk;

	COMP_SPEW(sc->c, "REALLY DESTROY");

	uint32_t num_images = sc->base.base.num_images;

	// Destroy per-image views and samplers first.
	for (uint32_t i = 0; i < num_images; i++) {
		image_cleanup(vk, &sc->images[i]);
	}

	// Close any exported fds that are still open.
	for (uint32_t i = 0; i < num_images; i++) {
		int fd = sc->base.images[i].fd;
		if (fd < 0) {
			continue;
		}

		close(fd);
		sc->base.images[i].fd = -1;
	}

	vk_ic_destroy(vk, &sc->vkic);

	free(sc);
}