// Copyright 2019, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
* @file
* @brief Main compositor written using Vulkan implementation.
* @author Jakob Bornecrantz <jakob@collabora.com>
* @author Lubosz Sarnecki <lubosz.sarnecki@collabora.com>
* @author Ryan Pavlik <ryan.pavlik@collabora.com>
* @ingroup comp_main
*
*
* begin_frame and end_frame delimit the application's work on graphics for a
* single frame. end_frame updates our estimate of the current estimated app
* graphics duration, as well as the "swap interval" for scheduling the
* application.
*
* We have some known overhead work required to composite a frame: eventually
* this may be measured as well. Overhead plus the estimated app render duration
* is compared to the frame duration: if it's longer, then we go to a "swap
* interval" of 2.
*
* wait_frame must be the one to produce the next predicted display time,
* because we cannot distinguish two sequential wait_frame calls (an app
* skipping a frame) from an OS scheduling blip causing the second wait_frame to
* happen before the first begin_frame actually gets executed. It cannot use the
* last display time in this computation for this reason. (Except perhaps to
* align the period at a sub-frame level? e.g. should be a multiple of the frame
* duration after the last displayed time).
*
* wait_frame should not actually produce the predicted display time until it's
* done waiting: it should wake up once a frame and see what the current swap
* interval suggests: this handles the case where end_frame changes the swap
* interval from 2 to 1 during a wait_frame call. (That is, we should wait until
* whichever is closer of the next vsync or the time we currently predict we
* should release the app.)
*
* Sleeping can be a bit hairy: in general right now we'll use a combination of
* operating system sleeps and busy-waits (for fine-grained waiting). Some
* platforms provide vsync-related sync primitives that may get us closer to our
* desired time. This is also convenient for the "wait until next frame"
* behavior.
*/
#include "xrt/xrt_gfx_fd.h"
#include "os/os_time.h"
#include "util/u_var.h"
#include "util/u_misc.h"
#include "util/u_time.h"
#include "util/u_debug.h"
#include "main/comp_compositor.h"
#include <math.h>
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
/*!
 * Tear down the compositor: renderer, window/swapchain, command pool and
 * Vulkan device/instance, in reverse order of creation, then free @p xc.
 *
 * Called through xrt_compositor::destroy.
 */
static void
compositor_destroy(struct xrt_compositor *xc)
{
	struct comp_compositor *c = comp_compositor(xc);
	struct vk_bundle *vk = &c->vk;

	COMP_DEBUG(c, "DESTROY");

	// Make sure we don't have anything to destroy.
	comp_compositor_garbage_collect(c);

	if (c->r) {
		comp_renderer_destroy(c->r);
		c->r = NULL;
	}

	if (c->window != NULL) {
		vk_swapchain_cleanup(&c->window->swapchain);
		c->window->destroy(c->window);
		c->window = NULL;
	}

	if (vk->cmd_pool != VK_NULL_HANDLE) {
		vk->vkDestroyCommandPool(vk->device, vk->cmd_pool, NULL);
		vk->cmd_pool = VK_NULL_HANDLE;
	}

	if (vk->device != VK_NULL_HANDLE) {
		vk->vkDestroyDevice(vk->device, NULL);
		vk->device = VK_NULL_HANDLE;
	}

	// Must go before the instance it was registered on is destroyed.
	vk_destroy_validation_callback(vk);

	if (vk->instance != VK_NULL_HANDLE) {
		vk->vkDestroyInstance(vk->instance, NULL);
		vk->instance = VK_NULL_HANDLE;
	}

	// free(NULL) is a no-op, so no guard is needed.
	free(c->compositor_frame_times.debug_var);

	u_threading_stack_fini(&c->threading.destroy_swapchains);

	free(c);
}
/*!
 * Note that the app has begun a session; nothing to set up beyond logging.
 */
static void
compositor_begin_session(struct xrt_compositor *xc, enum xrt_view_type type)
{
	COMP_DEBUG(comp_compositor(xc), "BEGIN_SESSION");
}
/*!
 * Note that the app has ended its session; nothing to tear down beyond
 * logging.
 */
static void
compositor_end_session(struct xrt_compositor *xc)
{
	COMP_DEBUG(comp_compositor(xc), "END_SESSION");
}
/*!
 * @brief Utility for waiting (for rendering purposes) until the next vsync or a
 * specified time point, whichever comes first.
 *
 * Only for rendering - this will busy-wait if needed.
 *
 * @return true if we waited until the time indicated
 *
 * @todo In the future, this may differ between platforms since some have ways
 * to directly wait on a vsync.
 */
static bool
compositor_wait_vsync_or_time(struct comp_compositor *c, int64_t wake_up_time)
{
	int64_t now = os_monotonic_get_ns();

	/*!
	 * @todo this is not accurate, but it serves the purpose of not letting
	 * us sleep longer than the next vsync usually
	 */
	int64_t predicted_vsync =
	    now + c->settings.nominal_frame_interval_ns / 2;

	// Will we make it all the way to the requested deadline?
	bool reached_deadline = true;

	// Wake for whichever comes first: the deadline or the vsync estimate.
	if (predicted_vsync < wake_up_time) {
		reached_deadline = false;
		wake_up_time = predicted_vsync;
	}

	int64_t remaining = wake_up_time - now;
	if (remaining <= 0) {
		// Already past the wake-up point: don't wait at all.
		return reached_deadline;
	}

	// Coarse OS sleep for the whole milliseconds of the wait...
	if (remaining > 1000000) {
		os_nanosleep(remaining - (remaining % 1000000));
	}

	// ...then spin for the sub-millisecond remainder.
	while (os_monotonic_get_ns() < wake_up_time) {
	}

	return reached_deadline;
}
static void
compositor_wait_frame(struct xrt_compositor *xc,
uint64_t *predicted_display_time,
uint64_t *predicted_display_period)
2019-03-18 05:52:32 +00:00
{
struct comp_compositor *c = comp_compositor(xc);
COMP_SPEW(c, "WAIT_FRAME");
2019-08-31 12:24:40 +00:00
// A little bit easier to read.
int64_t interval_ns = (int64_t)c->settings.nominal_frame_interval_ns;
int64_t now_ns = os_monotonic_get_ns();
if (c->last_next_display_time == 0) {
// First frame, we'll just assume we will display immediately
*predicted_display_period = interval_ns;
c->last_next_display_time = now_ns + interval_ns;
*predicted_display_time = c->last_next_display_time;
return;
2019-08-31 12:24:40 +00:00
}
// First estimate of next display time.
while (1) {
int64_t render_time_ns =
c->expected_app_duration_ns + c->frame_overhead_ns;
int64_t swap_interval =
ceil((float)render_time_ns / interval_ns);
int64_t render_interval_ns = swap_interval * interval_ns;
int64_t next_display_time =
c->last_next_display_time + render_interval_ns;
/*!
* @todo adjust next_display_time to be a multiple of
* interval_ns from c->last_frame_time_ns
*/
while ((next_display_time - render_time_ns) < now_ns) {
// we can't unblock in the past
next_display_time += render_interval_ns;
}
if (compositor_wait_vsync_or_time(
c, (next_display_time - render_time_ns))) {
// True return val means we actually waited for the
// deadline.
*predicted_display_period =
next_display_time - c->last_next_display_time;
*predicted_display_time = next_display_time;
c->last_next_display_time = next_display_time;
return;
}
2019-08-31 12:24:40 +00:00
}
2019-03-18 05:52:32 +00:00
}
/*!
 * Mark the start of the app's rendering work for this frame.
 *
 * The timestamp taken here is paired with app_profiling.last_end (stamped in
 * compositor_layer_commit) to estimate the app's render duration.
 */
static void
compositor_begin_frame(struct xrt_compositor *xc)
{
	struct comp_compositor *c = comp_compositor(xc);
	COMP_SPEW(c, "BEGIN_FRAME");
	c->app_profiling.last_begin = os_monotonic_get_ns();
}
/*!
 * The app is discarding the frame it began; only logged for now.
 */
static void
compositor_discard_frame(struct xrt_compositor *xc)
{
	COMP_SPEW(comp_compositor(xc), "DISCARD_FRAME");
}
/*!
 * Record "now" into the compositor's frame-time ring buffer and refresh the
 * derived statistics: the per-frame duration in ms for the new slot, and —
 * once per buffer wrap — the average FPS over the whole buffer.
 */
static void
compositor_add_frame_timing(struct comp_compositor *c)
{
	int prev = c->compositor_frame_times.index;
	int cur = (prev + 1) % NUM_FRAME_TIMES;
	c->compositor_frame_times.index = cur;

	// update fps only once every FPS_NUM_TIMINGS
	if (cur == 0) {
		// frame *timings* are durations between *times*
		int num_timings = NUM_FRAME_TIMES - 1;
		float total_s = 0;

		for (int i = 0; i < num_timings; i++) {
			uint64_t frametime_ns =
			    c->compositor_frame_times.times_ns[i + 1] -
			    c->compositor_frame_times.times_ns[i];
			float frametime_s =
			    frametime_ns * 1. / 1000. * 1. / 1000. * 1. / 1000.;
			total_s += frametime_s;
		}

		float avg_frametime_s = total_s / ((float)num_timings);
		c->compositor_frame_times.fps = 1. / avg_frametime_s;
	}

	uint64_t now_ns = os_monotonic_get_ns();
	c->compositor_frame_times.times_ns[cur] = now_ns;

	uint64_t diff = now_ns - c->compositor_frame_times.times_ns[prev];
	c->compositor_frame_times.timings_ms[cur] =
	    (float)diff * 1. / 1000. * 1. / 1000.;
}
static void
compositor_layer_begin(struct xrt_compositor *xc,
enum xrt_blend_mode env_blend_mode)
2019-03-18 05:52:32 +00:00
{
struct comp_compositor *c = comp_compositor(xc);
// Always zero for now.
uint32_t slot_id = 0;
c->slots[slot_id].env_blend_mode = env_blend_mode;
}
/*!
 * Queue a stereo projection layer: record the left/right swapchain images to
 * composite when compositor_layer_commit() runs.
 *
 * Only the swapchain, image index, array index and flip_y are consumed;
 * timestamp, poses and fovs are currently ignored by the renderer path.
 */
static void
compositor_layer_stereo_projection(struct xrt_compositor *xc,
                                   uint64_t timestamp,
                                   struct xrt_device *xdev,
                                   enum xrt_input_name name,
                                   enum xrt_layer_composition_flags layer_flags,
                                   struct xrt_swapchain *l_sc,
                                   uint32_t l_image_index,
                                   struct xrt_rect *l_rect,
                                   uint32_t l_array_index,
                                   struct xrt_fov *l_fov,
                                   struct xrt_pose *l_pose,
                                   struct xrt_swapchain *r_sc,
                                   uint32_t r_image_index,
                                   struct xrt_rect *r_rect,
                                   uint32_t r_array_index,
                                   struct xrt_fov *r_fov,
                                   struct xrt_pose *r_pose,
                                   bool flip_y)
{
	struct comp_compositor *c = comp_compositor(xc);

	// Always zero for now.
	uint32_t slot_id = 0;
	uint32_t layer_id = 0;

	struct comp_layer *layer = &c->slots[slot_id].layers[layer_id];
	layer->stereo.l.sc = comp_swapchain(l_sc);
	layer->stereo.l.image_index = l_image_index;
	layer->stereo.l.array_index = l_array_index;
	layer->stereo.r.sc = comp_swapchain(r_sc);
	layer->stereo.r.image_index = r_image_index;
	layer->stereo.r.array_index = r_array_index;

	// Bug fix: flip_y was never stored, but compositor_layer_commit()
	// reads layer->flip_y when drawing.
	layer->flip_y = flip_y;
}
/*!
 * Queue a quad layer for the frame.
 *
 * Currently unimplemented: all arguments are ignored and only stereo
 * projection layers are composited.
 */
static void
compositor_layer_quad(struct xrt_compositor *xc,
                      uint64_t timestamp,
                      struct xrt_device *xdev,
                      enum xrt_input_name name,
                      enum xrt_layer_composition_flags layer_flags,
                      enum xrt_layer_eye_visibility visibility,
                      struct xrt_swapchain *sc,
                      uint32_t image_index,
                      struct xrt_rect *rect,
                      uint32_t array_index,
                      struct xrt_pose *pose,
                      struct xrt_vec2 *size,
                      bool flip_y)
{
	// Noop!
}
/*!
 * Composite the queued layers and present the frame, then update the
 * frame-timing statistics and the app render-duration estimate used by
 * compositor_wait_frame().
 */
static void
compositor_layer_commit(struct xrt_compositor *xc)
{
	struct comp_compositor *c = comp_compositor(xc);

	COMP_SPEW(c, "LAYER_COMMIT");

	// Always zero for now.
	uint32_t slot_id = 0;
	uint32_t layer_id = 0;

	struct comp_layer *layer = &c->slots[slot_id].layers[layer_id];
	struct comp_layer_stereo *stereo = &layer->stereo;

	struct comp_swapchain_image *left =
	    &stereo->l.sc->images[stereo->l.image_index];
	// Bug fix: the right eye image was fetched from the *left* swapchain
	// (stereo->l.sc) while indexing with the right image index.
	struct comp_swapchain_image *right =
	    &stereo->r.sc->images[stereo->r.image_index];

	comp_renderer_destroy_layers(c->r);
	comp_renderer_allocate_layers(c->r, 1);
	comp_renderer_set_projection_layer(c->r, left, right, layer->flip_y, 0);
	comp_renderer_draw(c->r);

	compositor_add_frame_timing(c);

	// Record the time of this frame.
	c->last_frame_time_ns = os_monotonic_get_ns();
	c->app_profiling.last_end = c->last_frame_time_ns;

	//! @todo do a time-weighted average or something.
	c->expected_app_duration_ns =
	    c->app_profiling.last_end - c->app_profiling.last_begin;

	// Now is a good point to garbage collect.
	comp_compositor_garbage_collect(c);
}
/*
*
* Vulkan functions.
*
*/
// Helpers to fetch device- and instance-level Vulkan entry points.
// (The original file defined GET_DEV_PROC twice; the duplicate is removed.)
#define GET_DEV_PROC(c, name) \
	(PFN_##name) c->vk.vkGetDeviceProcAddr(c->vk.device, #name);
#define GET_INS_PROC(c, name) \
	(PFN_##name) c->vk.vkGetInstanceProcAddr(c->vk.instance, #name);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL
vkGetInstanceProcAddr(VkInstance instance, const char *pName);

/*!
 * Resolve the Vulkan loader entry points into the compositor's vk_bundle.
 */
static VkResult
find_get_instance_proc_addr(struct comp_compositor *c)
{
	//! @todo Do any library loading here.
	return vk_get_loader_functions(&c->vk, vkGetInstanceProcAddr);
}
#ifdef XRT_ENABLE_VK_VALIDATION
// Debug-report extension, only pulled in when validation support is built.
#define COMPOSITOR_DEBUG_VULKAN_EXTENSIONS VK_EXT_DEBUG_REPORT_EXTENSION_NAME,
#else
#define COMPOSITOR_DEBUG_VULKAN_EXTENSIONS
#endif

// Instance extensions needed by every window backend: surface support plus
// the external memory/fence/semaphore capability queries.
#define COMPOSITOR_COMMON_VULKAN_EXTENSIONS \
	COMPOSITOR_DEBUG_VULKAN_EXTENSIONS \
	VK_KHR_SURFACE_EXTENSION_NAME, \
	VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, \
	VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME, \
	VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME, \
	VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME

// Extension list when running without a window system surface.
static const char *instance_extensions_none[] = {
    COMPOSITOR_COMMON_VULKAN_EXTENSIONS};

#ifdef VK_USE_PLATFORM_XCB_KHR
// Extension list for an XCB window surface.
static const char *instance_extensions_xcb[] = {
    COMPOSITOR_COMMON_VULKAN_EXTENSIONS,
    VK_KHR_XCB_SURFACE_EXTENSION_NAME,
};
#endif

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
// Extension list for a Wayland window surface.
static const char *instance_extensions_wayland[] = {
    COMPOSITOR_COMMON_VULKAN_EXTENSIONS,
    VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
};
#endif

#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
// Extension list for the direct-to-display modes (RandR and NVIDIA).
static const char *instance_extensions_direct_mode[] = {
    COMPOSITOR_COMMON_VULKAN_EXTENSIONS,
    VK_KHR_DISPLAY_EXTENSION_NAME,
    VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME,
    VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME,
};
#endif
static VkResult
select_instances_extensions(struct comp_compositor *c,
const char ***out_exts,
uint32_t *out_num)
{
switch (c->settings.window_type) {
case WINDOW_NONE:
*out_exts = instance_extensions_none;
*out_num = ARRAY_SIZE(instance_extensions_none);
break;
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
case WINDOW_WAYLAND:
*out_exts = instance_extensions_wayland;
*out_num = ARRAY_SIZE(instance_extensions_wayland);
break;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
case WINDOW_XCB:
*out_exts = instance_extensions_xcb;
*out_num = ARRAY_SIZE(instance_extensions_xcb);
break;
#endif
2019-03-18 05:52:32 +00:00
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
2019-03-31 21:37:34 +00:00
case WINDOW_DIRECT_RANDR:
case WINDOW_DIRECT_NVIDIA:
2019-03-18 05:52:32 +00:00
*out_exts = instance_extensions_direct_mode;
*out_num = ARRAY_SIZE(instance_extensions_direct_mode);
break;
#endif
default: return VK_ERROR_INITIALIZATION_FAILED;
}
return VK_SUCCESS;
}
static VkResult
create_instance(struct comp_compositor *c)
{
const char **instance_extensions;
uint32_t num_extensions;
VkResult ret;
VkApplicationInfo app_info = {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pApplicationName = "Collabora Compositor",
.pEngineName = "Monado",
.apiVersion = VK_MAKE_VERSION(1, 0, 2),
};
ret = select_instances_extensions(c, &instance_extensions,
&num_extensions);
if (ret != VK_SUCCESS) {
COMP_ERROR(c, "Failed to select instance extensions: %s",
vk_result_string(ret));
return ret;
}
VkInstanceCreateInfo instance_info = {
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
.pApplicationInfo = &app_info,
.enabledExtensionCount = num_extensions,
.ppEnabledExtensionNames = instance_extensions,
};
2019-03-18 05:52:32 +00:00
#ifdef XRT_ENABLE_VK_VALIDATION
const char *instance_layers[] = {
"VK_LAYER_LUNARG_standard_validation",
2019-03-18 05:52:32 +00:00
};
if (c->settings.validate_vulkan) {
instance_info.enabledLayerCount = ARRAY_SIZE(instance_layers);
instance_info.ppEnabledLayerNames = instance_layers;
}
#endif
2019-03-18 05:52:32 +00:00
ret = c->vk.vkCreateInstance(&instance_info, NULL, &c->vk.instance);
if (ret != VK_SUCCESS) {
COMP_ERROR(c, "vkCreateInstance: %s\n", vk_result_string(ret));
COMP_ERROR(c, "Failed to create Vulkan instance");
return ret;
}
ret = vk_get_instance_functions(&c->vk);
if (ret != VK_SUCCESS) {
COMP_ERROR(c, "Failed to get Vulkan instance functions: %s",
vk_result_string(ret));
return ret;
}
#ifdef XRT_ENABLE_VK_VALIDATION
if (c->settings.validate_vulkan)
vk_init_validation_callback(&c->vk);
2019-03-18 05:52:32 +00:00
#endif
return ret;
}
static bool
compositor_init_vulkan(struct comp_compositor *c)
{
2019-03-31 21:37:34 +00:00
2019-03-18 05:52:32 +00:00
VkResult ret;
c->vk.print = c->settings.print_debug;
2019-03-18 05:52:32 +00:00
ret = find_get_instance_proc_addr(c);
if (ret != VK_SUCCESS) {
return false;
}
ret = create_instance(c);
if (ret != VK_SUCCESS) {
return false;
}
ret = vk_create_device(&c->vk, c->settings.gpu_index);
2019-03-18 05:52:32 +00:00
if (ret != VK_SUCCESS) {
return false;
}
ret = vk_init_cmd_pool(&c->vk);
2019-08-16 21:59:06 +00:00
return ret == VK_SUCCESS;
2019-03-18 05:52:32 +00:00
}
/*
*
* Other functions.
*
*/
/*!
 * Print a log line to stderr as "<function> - <formatted message>\n".
 * Backs the COMP_DEBUG/COMP_SPEW/COMP_ERROR macros.
 */
void
comp_compositor_print(struct comp_compositor *c,
                      const char *func,
                      const char *fmt,
                      ...)
{
	va_list args;

	fprintf(stderr, "%s - ", func);

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);

	fprintf(stderr, "\n");
}
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
/*!
 * Does the display's name start with the given whitelist entry?
 */
static bool
_match_wl_entry(const char *wl_entry, VkDisplayPropertiesKHR *disp)
{
	unsigned long entry_len = strlen(wl_entry);

	// A display name shorter than the entry can never match.
	if (strlen(disp->displayName) < entry_len) {
		return false;
	}

	// we have a match with this whitelist entry.
	return strncmp(wl_entry, disp->displayName, entry_len) == 0;
}
/*
 * our physical device is an nvidia card, we can potentially select
 * nvidia-specific direct mode.
 *
 * we need to also check if we are confident that we can create a direct mode
 * display, if not we need to abandon the attempt here, and allow desktop-window
 * fallback to occur.
 */
static bool
_test_for_nvidia(struct comp_compositor *c, struct vk_bundle *vk)
{
	VkPhysicalDeviceProperties physical_device_properties;
	vk->vkGetPhysicalDeviceProperties(vk->physical_device,
	                                  &physical_device_properties);

	// 0x10DE is NVIDIA's PCI vendor id.
	if (physical_device_properties.vendorID != 0x10DE)
		return false;

	// get a list of attached displays
	uint32_t display_count;

	if (vk->vkGetPhysicalDeviceDisplayPropertiesKHR(
	        vk->physical_device, &display_count, NULL) != VK_SUCCESS) {
		COMP_ERROR(c, "Failed to get vulkan display count");
		return false;
	}

	VkDisplayPropertiesKHR *display_props =
	    U_TYPED_ARRAY_CALLOC(VkDisplayPropertiesKHR, display_count);

	// Bug fix: the loop below dereferenced display_props even when the
	// allocation had failed.
	if (display_props == NULL) {
		COMP_ERROR(c, "Failed to get display properties");
		return false;
	}

	if (vk->vkGetPhysicalDeviceDisplayPropertiesKHR(
	        vk->physical_device, &display_count,
	        display_props) != VK_SUCCESS) {
		COMP_ERROR(c, "Failed to get display properties");
		free(display_props);
		return false;
	}

	for (uint32_t i = 0; i < display_count; i++) {
		VkDisplayPropertiesKHR *disp = display_props + i;
		// check this display against our whitelist
		for (uint32_t j = 0; j < ARRAY_SIZE(NV_DIRECT_WHITELIST); j++) {
			if (_match_wl_entry(NV_DIRECT_WHITELIST[j], disp)) {
				free(display_props);
				return true;
			}
		}
	}

	free(display_props);

	return false;
}
#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
static bool
compositor_check_vulkan_caps(struct comp_compositor *c)
{
VkResult ret;
// this is duplicative, but seems to be the easiest way to
// 'pre-check' capabilities when window creation precedes vulkan
// instance creation. we also need to load the VK_KHR_DISPLAY
// extension.
2019-03-31 21:37:34 +00:00
if (c->settings.window_type != WINDOW_AUTO) {
COMP_DEBUG(c, "Skipping NVIDIA detection, window type forced.");
return true;
}
2019-08-16 21:59:06 +00:00
COMP_DEBUG(c, "Checking for NVIDIA vulkan driver.");
2019-03-31 21:37:34 +00:00
struct vk_bundle temp_vk = {0};
2019-03-31 21:37:34 +00:00
ret = vk_get_loader_functions(&temp_vk, vkGetInstanceProcAddr);
if (ret != VK_SUCCESS) {
return false;
}
const char *extension_names[] = {
VK_KHR_SURFACE_EXTENSION_NAME,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME,
VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME,
VK_KHR_DISPLAY_EXTENSION_NAME,
};
VkInstanceCreateInfo instance_create_info = {
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
.enabledExtensionCount = ARRAY_SIZE(extension_names),
.ppEnabledExtensionNames = extension_names,
};
ret = temp_vk.vkCreateInstance(&instance_create_info, NULL,
&(temp_vk.instance));
if (ret != VK_SUCCESS) {
COMP_ERROR(c, "Failed to create VkInstance: %s",
vk_result_string(ret));
return false;
}
ret = vk_get_instance_functions(&temp_vk);
if (ret != VK_SUCCESS) {
COMP_ERROR(c, "Failed to get Vulkan instance functions: %s",
vk_result_string(ret));
return false;
}
// follow same device selection logic as subsequent calls
ret = vk_create_device(&temp_vk, c->settings.gpu_index);
2019-03-31 21:37:34 +00:00
if (ret != VK_SUCCESS) {
COMP_ERROR(c, "Failed to create VkDevice: %s",
vk_result_string(ret));
return false;
}
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
2020-04-16 17:43:24 +00:00
if (_test_for_nvidia(c, &temp_vk)) {
2019-03-31 21:37:34 +00:00
c->settings.window_type = WINDOW_DIRECT_NVIDIA;
COMP_DEBUG(c, "Selecting direct NVIDIA window type!");
}
2020-04-16 17:43:24 +00:00
#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
2019-03-31 21:37:34 +00:00
temp_vk.vkDestroyDevice(temp_vk.device, NULL);
temp_vk.vkDestroyInstance(temp_vk.instance, NULL);
return true;
}
/*!
 * Try to initialize the given window backend; on success it becomes
 * c->window, on failure it is destroyed.
 *
 * @param window May be NULL (creation already failed), which counts as
 *               failure.
 * @return true if the window initialized and was adopted.
 */
static bool
compositor_try_window(struct comp_compositor *c, struct comp_window *window)
{
	if (window == NULL) {
		return false;
	}

	if (!window->init(window)) {
		window->destroy(window);
		return false;
	}

	COMP_DEBUG(c, "Window backend %s initialized!", window->name);

	c->window = window;

	return true;
}
static bool
2019-03-31 21:37:34 +00:00
compositor_init_window_pre_vulkan(struct comp_compositor *c)
2019-03-18 05:52:32 +00:00
{
// Setup the initial width from the settings.
c->current.width = c->settings.width;
c->current.height = c->settings.height;
2019-03-31 21:37:34 +00:00
// Nothing to do for nvidia.
if (c->settings.window_type == WINDOW_DIRECT_NVIDIA) {
return true;
}
2019-03-18 05:52:32 +00:00
switch (c->settings.window_type) {
case WINDOW_AUTO:
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
if (compositor_try_window(c, comp_window_wayland_create(c))) {
c->settings.window_type = WINDOW_WAYLAND;
return true;
}
#endif
2019-03-18 05:52:32 +00:00
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
if (compositor_try_window(c,
comp_window_direct_randr_create(c))) {
2019-03-31 21:37:34 +00:00
c->settings.window_type = WINDOW_DIRECT_RANDR;
2019-03-18 05:52:32 +00:00
return true;
}
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
if (compositor_try_window(c, comp_window_xcb_create(c))) {
c->settings.window_type = WINDOW_XCB;
return true;
}
#endif
COMP_ERROR(c, "Failed to auto detect window support!");
break;
case WINDOW_XCB:
#ifdef VK_USE_PLATFORM_XCB_KHR
compositor_try_window(c, comp_window_xcb_create(c));
#else
COMP_ERROR(c, "XCB support not compiled in!");
#endif
break;
case WINDOW_WAYLAND:
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
compositor_try_window(c, comp_window_wayland_create(c));
#else
COMP_ERROR(c, "Wayland support not compiled in!");
#endif
break;
2019-03-31 21:37:34 +00:00
case WINDOW_DIRECT_RANDR:
2019-03-18 05:52:32 +00:00
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
compositor_try_window(c, comp_window_direct_randr_create(c));
2019-03-18 05:52:32 +00:00
#else
COMP_ERROR(c, "Direct mode support not compiled in!");
#endif
break;
default: COMP_ERROR(c, "Unknown window type!"); break;
}
// Failed to create?
2019-08-16 21:59:06 +00:00
return c->window != NULL;
2019-03-18 05:52:32 +00:00
}
/*!
 * Create the NVIDIA direct-mode window, which needs Vulkan up first; a no-op
 * for every other window type (those are created pre-Vulkan).
 */
static bool
compositor_init_window_post_vulkan(struct comp_compositor *c)
{
	if (c->settings.window_type != WINDOW_DIRECT_NVIDIA) {
		return true;
	}

#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
	return compositor_try_window(c, comp_window_direct_nvidia_create(c));
#else
	// WINDOW_DIRECT_NVIDIA is only ever selected when this platform
	// support is compiled in, so reaching here is a programming error.
	assert(false &&
	       "NVIDIA direct mode depends on the xlib/xrandr direct mode.");
	return false;
#endif
}
/*!
 * Swapchain dimension callback: record the surface's actual size so later
 * (re)creation uses it.
 */
static void
_sc_dimension_cb(uint32_t width, uint32_t height, void *ptr)
{
	struct comp_compositor *c = (struct comp_compositor *)ptr;

	// Bug fix: width/height are uint32_t, so use %u instead of %d.
	COMP_DEBUG(c, "_sc_dimension_cb %ux%u", width, height);

	c->current.width = width;
	c->current.height = height;
}
static bool
compositor_init_swapchain(struct comp_compositor *c)
{
2019-03-31 21:37:34 +00:00
//! @todo Make c->window->init_swachain call vk_swapchain_init
//! and give
//! _sc_dimension_cb to window or just have it call a
//! function?
2019-03-18 05:52:32 +00:00
vk_swapchain_init(&c->window->swapchain, &c->vk, _sc_dimension_cb,
(void *)c);
if (!c->window->init_swapchain(c->window, c->current.width,
c->current.height)) {
COMP_ERROR(c, "Window init_swapchain failed!");
goto err_destroy;
}
return true;
// Error path.
err_destroy:
c->window->destroy(c->window);
c->window = NULL;
return false;
}
static bool
compositor_init_renderer(struct comp_compositor *c)
{
c->r = comp_renderer_create(c);
2019-08-16 21:59:06 +00:00
return c->r != NULL;
2019-03-18 05:52:32 +00:00
}
struct xrt_compositor_fd *
xrt_gfx_provider_create_fd(struct xrt_device *xdev, bool flip_y)
2019-03-18 05:52:32 +00:00
{
2019-03-21 20:19:52 +00:00
struct comp_compositor *c = U_TYPED_CALLOC(struct comp_compositor);
2019-03-18 05:52:32 +00:00
c->base.base.create_swapchain = comp_swapchain_create;
c->base.base.begin_session = compositor_begin_session;
c->base.base.end_session = compositor_end_session;
c->base.base.wait_frame = compositor_wait_frame;
c->base.base.begin_frame = compositor_begin_frame;
c->base.base.discard_frame = compositor_discard_frame;
c->base.base.layer_begin = compositor_layer_begin;
c->base.base.layer_stereo_projection =
compositor_layer_stereo_projection;
c->base.base.layer_quad = compositor_layer_quad;
c->base.base.layer_commit = compositor_layer_commit;
2019-03-18 05:52:32 +00:00
c->base.base.destroy = compositor_destroy;
c->xdev = xdev;
u_threading_stack_init(&c->threading.destroy_swapchains);
2019-03-18 05:52:32 +00:00
COMP_DEBUG(c, "Doing init %p", (void *)c);
// Init the settings to default.
comp_settings_init(&c->settings, xdev);
c->settings.flip_y = flip_y;
c->last_frame_time_ns = os_monotonic_get_ns();
c->frame_overhead_ns = 2000000;
//! @todo set this to an estimate that's better than 6ms
c->expected_app_duration_ns = 6000000;
2019-03-18 05:52:32 +00:00
2019-03-31 21:37:34 +00:00
2019-03-18 05:52:32 +00:00
// Need to select window backend before creating Vulkan, then
2019-03-31 21:37:34 +00:00
// swapchain will initialize the window fully and the swapchain,
2020-04-09 13:49:37 +00:00
// and finally the renderer is created which renders to
2019-03-31 21:37:34 +00:00
// window/swapchain.
2019-03-18 05:52:32 +00:00
// clang-format off
2019-03-31 21:37:34 +00:00
if (!compositor_check_vulkan_caps(c) ||
!compositor_init_window_pre_vulkan(c) ||
2019-03-18 05:52:32 +00:00
!compositor_init_vulkan(c) ||
2019-03-31 21:37:34 +00:00
!compositor_init_window_post_vulkan(c) ||
2019-03-18 05:52:32 +00:00
!compositor_init_swapchain(c) ||
!compositor_init_renderer(c)) {
COMP_DEBUG(c, "Failed to init compositor %p", (void *)c);
c->base.base.destroy(&c->base.base);
return NULL;
}
// clang-format on
COMP_DEBUG(c, "Done %p", (void *)c);
/*!
* @todo Support more like, depth/float formats etc,
* remember to update the GL client as well.
*/
// These are the available formats we will expose to our clients.
c->base.base.formats[0] = VK_FORMAT_B8G8R8A8_SRGB;
c->base.base.formats[1] = VK_FORMAT_R8G8B8A8_SRGB;
c->base.base.formats[2] = VK_FORMAT_B8G8R8A8_UNORM;
c->base.base.formats[3] = VK_FORMAT_R8G8B8A8_UNORM;
c->base.base.num_formats = 4;
u_var_add_root(c, "Compositor", true);
u_var_add_ro_f32(c, &c->compositor_frame_times.fps, "FPS (Compositor)");
struct u_var_timing *ft = U_CALLOC_WITH_CAST(
struct u_var_timing, sizeof(struct u_var_timing));
float target_frame_time_ms =
c->settings.nominal_frame_interval_ns * 1. / 1000. * 1. / 1000.;
uint64_t now = os_monotonic_get_ns();
for (int i = 0; i < NUM_FRAME_TIMES; i++) {
c->compositor_frame_times.times_ns[i] = now + i;
}
ft->values.data = c->compositor_frame_times.timings_ms;
ft->values.length = NUM_FRAME_TIMES;
ft->values.index_ptr = &c->compositor_frame_times.index;
ft->reference_timing = target_frame_time_ms;
ft->range = 10.f;
ft->unit = "ms";
ft->dynamic_rescale = false;
ft->center_reference_timing = true;
u_var_add_f32_timing(c, ft, "Frame Times (Compositor)");
c->compositor_frame_times.debug_var = ft;
2019-03-18 05:52:32 +00:00
return &c->base;
}
void
comp_compositor_garbage_collect(struct comp_compositor *c)
{
struct comp_swapchain *sc;
while ((sc = u_threading_stack_pop(&c->threading.destroy_swapchains))) {
comp_swapchain_really_destroy(sc);
}
}