ipc: Adopt to new multi client interface

Jakob Bornecrantz 2021-03-18 23:17:42 +00:00
parent f14958f2b0
commit bcf9b62fc0
7 changed files with 617 additions and 708 deletions

View file

@@ -61,14 +61,10 @@ struct ipc_client_compositor
struct
{
uint64_t display_time_ns;
//! Id that we are currently using for submitting layers.
uint32_t slot_id;
uint32_t num_layers;
enum xrt_blend_mode env_blend_mode;
} layers;
//! Has the native compositor been created; only supports one for now.
@@ -397,14 +393,12 @@ ipc_compositor_wait_frame(struct xrt_compositor *xc,
struct ipc_client_compositor *icc = ipc_client_compositor(xc);
uint64_t wake_up_time_ns = 0;
uint64_t min_display_period_ns = 0;
IPC_CALL_CHK(ipc_call_compositor_wait_frame(icc->ipc_c, // Connection
out_frame_id, // Frame id
out_predicted_display_time, // Display time
&wake_up_time_ns, // When we should wake up
out_predicted_display_period, // Current period
&min_display_period_ns)); // Minimum display period
IPC_CALL_CHK(ipc_call_compositor_predict_frame(icc->ipc_c, // Connection
out_frame_id, // Frame id
&wake_up_time_ns, // When we should wake up
out_predicted_display_time, // Display time
out_predicted_display_period)); // Current period
uint64_t now_ns = os_monotonic_get_ns();
@@ -465,8 +459,11 @@ ipc_compositor_layer_begin(struct xrt_compositor *xc,
{
struct ipc_client_compositor *icc = ipc_client_compositor(xc);
icc->layers.display_time_ns = display_time_ns;
icc->layers.env_blend_mode = env_blend_mode;
struct ipc_shared_memory *ism = icc->ipc_c->ism;
struct ipc_layer_slot *slot = &ism->slots[icc->layers.slot_id];
slot->display_time_ns = display_time_ns;
slot->env_blend_mode = env_blend_mode;
return XRT_SUCCESS;
}
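With this change the client no longer caches the per-frame metadata in ipc_client_compositor; layer_begin writes it straight into the shared-memory layer slot the client currently owns (ism->slots[icc->layers.slot_id]), and the server later copies that slot in ipc_handle_compositor_layer_sync before submitting. Below is a minimal stand-alone model of that slot handoff; the types and names are simplified stand-ins for illustration, not Monado's real ipc_shared_memory / ipc_layer_slot definitions.

/*
 * Simplified model of the slot-based layer submission; all names here
 * are made up for illustration and far smaller than the real types.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_MAX_SLOTS 3

struct model_layer_slot
{
	uint64_t display_time_ns;
	uint32_t env_blend_mode;
	uint32_t num_layers;
};

struct model_shared_memory
{
	struct model_layer_slot slots[MODEL_MAX_SLOTS];
};

// Client side: fill in the slot it currently owns (cf. layer_begin above).
static void
client_layer_begin(struct model_shared_memory *ism, uint32_t slot_id, uint64_t display_time_ns, uint32_t blend_mode)
{
	struct model_layer_slot *slot = &ism->slots[slot_id];
	slot->display_time_ns = display_time_ns;
	slot->env_blend_mode = blend_mode;
	slot->num_layers = 0;
}

// Server side: copy the slot before submitting, hand back the next free one.
static uint32_t
server_layer_sync(struct model_shared_memory *ism, uint32_t slot_id)
{
	struct model_layer_slot copy = ism->slots[slot_id];
	printf("submitting %u layers at t=%llu\n", copy.num_layers, (unsigned long long)copy.display_time_ns);
	return (slot_id + 1) % MODEL_MAX_SLOTS;
}

int
main(void)
{
	struct model_shared_memory ism = {0};
	uint32_t slot_id = 0;

	client_layer_begin(&ism, slot_id, 1000000, 0);
	slot_id = server_layer_sync(&ism, slot_id);
	printf("next free slot: %u\n", slot_id);

	return 0;
}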
@@ -666,6 +663,34 @@ ipc_compositor_destroy(struct xrt_compositor *xc)
icc->compositor_created = false;
}
static void
ipc_compositor_init(struct ipc_client_compositor *icc, struct xrt_compositor_native **out_xcn)
{
icc->base.base.create_swapchain = ipc_compositor_swapchain_create;
icc->base.base.import_swapchain = ipc_compositor_swapchain_import;
icc->base.base.begin_session = ipc_compositor_begin_session;
icc->base.base.end_session = ipc_compositor_end_session;
icc->base.base.wait_frame = ipc_compositor_wait_frame;
icc->base.base.begin_frame = ipc_compositor_begin_frame;
icc->base.base.discard_frame = ipc_compositor_discard_frame;
icc->base.base.layer_begin = ipc_compositor_layer_begin;
icc->base.base.layer_stereo_projection = ipc_compositor_layer_stereo_projection;
icc->base.base.layer_stereo_projection_depth = ipc_compositor_layer_stereo_projection_depth;
icc->base.base.layer_quad = ipc_compositor_layer_quad;
icc->base.base.layer_cube = ipc_compositor_layer_cube;
icc->base.base.layer_cylinder = ipc_compositor_layer_cylinder;
icc->base.base.layer_equirect1 = ipc_compositor_layer_equirect1;
icc->base.base.layer_equirect2 = ipc_compositor_layer_equirect2;
icc->base.base.layer_commit = ipc_compositor_layer_commit;
icc->base.base.destroy = ipc_compositor_destroy;
icc->base.base.poll_events = ipc_compositor_poll_events;
// Fetch info from the compositor, among it the format list.
get_info(&(icc->base.base), &icc->base.base.info);
*out_xcn = &icc->base;
}
/*
*
@@ -792,11 +817,14 @@ ipc_syscomp_create_native_compositor(struct xrt_system_compositor *xsc,
return XRT_ERROR_MULTI_SESSION_NOT_IMPLEMENTED;
}
icc->compositor_created = true;
*out_xcn = &icc->base;
// Needs to be done before init.
IPC_CALL_CHK(ipc_call_session_create(icc->ipc_c, xsi));
// Needs to be done after session create call.
ipc_compositor_init(icc, out_xcn);
icc->compositor_created = true;
return XRT_SUCCESS;
}
@@ -829,24 +857,6 @@ ipc_client_create_system_compositor(struct ipc_connection *ipc_c,
{
struct ipc_client_compositor *c = U_TYPED_CALLOC(struct ipc_client_compositor);
c->base.base.create_swapchain = ipc_compositor_swapchain_create;
c->base.base.import_swapchain = ipc_compositor_swapchain_import;
c->base.base.begin_session = ipc_compositor_begin_session;
c->base.base.end_session = ipc_compositor_end_session;
c->base.base.wait_frame = ipc_compositor_wait_frame;
c->base.base.begin_frame = ipc_compositor_begin_frame;
c->base.base.discard_frame = ipc_compositor_discard_frame;
c->base.base.layer_begin = ipc_compositor_layer_begin;
c->base.base.layer_stereo_projection = ipc_compositor_layer_stereo_projection;
c->base.base.layer_stereo_projection_depth = ipc_compositor_layer_stereo_projection_depth;
c->base.base.layer_quad = ipc_compositor_layer_quad;
c->base.base.layer_cube = ipc_compositor_layer_cube;
c->base.base.layer_cylinder = ipc_compositor_layer_cylinder;
c->base.base.layer_equirect1 = ipc_compositor_layer_equirect1;
c->base.base.layer_equirect2 = ipc_compositor_layer_equirect2;
c->base.base.layer_commit = ipc_compositor_layer_commit;
c->base.base.destroy = ipc_compositor_destroy;
c->base.base.poll_events = ipc_compositor_poll_events;
c->system.create_native_compositor = ipc_syscomp_create_native_compositor;
c->system.destroy = ipc_syscomp_destroy;
c->ipc_c = ipc_c;
@@ -863,9 +873,6 @@ ipc_client_create_system_compositor(struct ipc_connection *ipc_c,
}
#endif
// Fetch info from the compositor, among it the format list.
get_info(&(c->base.base), &c->base.base.info);
// Fetch info from the system compositor.
get_system_info(c, &c->system.info);

View file

@@ -69,14 +69,6 @@ struct ipc_swapchain_data
bool active;
};
struct ipc_queued_event
{
bool pending;
uint64_t timestamp;
union xrt_compositor_event event;
};
/*!
* Holds the state for a single client.
*
@@ -105,17 +97,7 @@ struct ipc_client_state
//! Socket fd used for client comms
struct ipc_message_channel imc;
//! State for rendering.
struct ipc_layer_slot render_state;
//! Whether we are currently rendering @ref render_state
bool rendering_state;
//! The frame timing state.
struct u_rt_helper urth;
struct ipc_app_state client_state;
struct ipc_queued_event queued_events[IPC_EVENT_QUEUE_SIZE];
int server_thread_index;
};
@@ -294,8 +276,6 @@ struct ipc_server
//! System compositor.
struct xrt_system_compositor *xsysc;
//! Native compositor.
struct xrt_compositor_native *xcn;
struct ipc_device idevs[IPC_SERVER_NUM_XDEVS];
struct xrt_tracking_origin *xtracks[IPC_SERVER_NUM_XDEVS];
@@ -317,9 +297,13 @@ struct ipc_server
volatile uint32_t current_slot_index;
int active_client_index;
int last_active_client_index;
struct os_mutex global_state_lock;
struct
{
int active_client_index;
int last_active_client_index;
struct os_mutex lock;
} global_state;
};
@@ -349,12 +333,36 @@ ipc_server_main_android(struct ipc_server **ps, void (*startup_complete_callback
#endif
/*!
* Called by client threads to manage global state
* Set the new active client.
*
* @ingroup ipc_server
*/
void
update_server_state(struct ipc_server *vs);
ipc_server_set_active_client(struct ipc_server *s, int active_client_index);
/*!
* Called by client threads to activate a session.
*
* @ingroup ipc_server
*/
void
ipc_server_activate_session(volatile struct ipc_client_state *ics);
/*!
* Called by client threads to deactivate a session.
*
* @ingroup ipc_server
*/
void
ipc_server_deactivate_session(volatile struct ipc_client_state *ics);
/*!
* Called by client threads to recalculate the active client.
*
* @ingroup ipc_server
*/
void
ipc_server_update_state(struct ipc_server *s);
/*!
* Thread function for the client side dispatching.

View file

@@ -10,6 +10,7 @@
#include "xrt/xrt_gfx_native.h"
#include "util/u_misc.h"
#include "util/u_handles.h"
#include "util/u_trace_marker.h"
#include "server/ipc_server.h"
@@ -93,33 +94,39 @@ ipc_handle_system_compositor_get_info(volatile struct ipc_client_state *ics,
xrt_result_t
ipc_handle_session_create(volatile struct ipc_client_state *ics, const struct xrt_session_info *xsi)
{
ics->client_state.session_active = false;
ics->client_state.session_overlay = false;
ics->client_state.session_visible = false;
struct xrt_compositor_native *xcn = NULL;
if (xsi->is_overlay) {
ics->client_state.session_overlay = true;
ics->client_state.z_order = xsi->z_order;
xrt_result_t xret = xrt_syscomp_create_native_compositor(ics->server->xsysc, xsi, &xcn);
if (xret != XRT_SUCCESS) {
return xret;
}
update_server_state(ics->server);
ics->client_state.session_overlay = xsi->is_overlay;
ics->client_state.z_order = xsi->z_order;
ics->xc = &xcn->base;
xrt_syscomp_set_state(ics->server->xsysc, ics->xc, ics->client_state.session_visible,
ics->client_state.session_focused);
xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, ics->client_state.z_order);
return XRT_SUCCESS;
}
xrt_result_t
ipc_handle_session_begin(volatile struct ipc_client_state *ics)
{
// ics->client_state.session_active = true;
// update_server_state(ics->server);
return XRT_SUCCESS;
IPC_TRACE_MARKER();
return xrt_comp_begin_session(ics->xc, 0);
}
xrt_result_t
ipc_handle_session_end(volatile struct ipc_client_state *ics)
{
ics->client_state.session_active = false;
update_server_state(ics->server);
return XRT_SUCCESS;
IPC_TRACE_MARKER();
return xrt_comp_end_session(ics->xc);
}
xrt_result_t
@@ -131,62 +138,310 @@ ipc_handle_compositor_get_info(volatile struct ipc_client_state *ics, struct xrt
}
xrt_result_t
ipc_handle_compositor_wait_frame(volatile struct ipc_client_state *ics,
int64_t *out_frame_id,
uint64_t *predicted_display_time,
uint64_t *wake_up_time,
uint64_t *predicted_display_period,
uint64_t *min_display_period)
ipc_handle_compositor_predict_frame(volatile struct ipc_client_state *ics,
int64_t *out_frame_id,
uint64_t *out_wake_up_time_ns,
uint64_t *out_predicted_display_time_ns,
uint64_t *out_predicted_display_period_ns)
{
IPC_TRACE_MARKER();
os_mutex_lock(&ics->server->global_state_lock);
/*
* We use this to signal that the session has started; this is needed
* to make this client/session active/visible/focused.
*/
ipc_server_activate_session(ics);
u_rt_helper_predict((struct u_rt_helper *)&ics->urth, out_frame_id, predicted_display_time, wake_up_time,
predicted_display_period, min_display_period);
os_mutex_unlock(&ics->server->global_state_lock);
ics->client_state.session_active = true;
update_server_state(ics->server);
return XRT_SUCCESS;
uint64_t gpu_time_ns = 0;
return xrt_comp_predict_frame( //
ics->xc, //
out_frame_id, //
out_wake_up_time_ns, //
&gpu_time_ns, //
out_predicted_display_time_ns, //
out_predicted_display_period_ns); //
}
xrt_result_t
ipc_handle_compositor_wait_woke(volatile struct ipc_client_state *ics, int64_t frame_id)
{
os_mutex_lock(&ics->server->global_state_lock);
IPC_TRACE_MARKER();
u_rt_helper_mark_wait_woke((struct u_rt_helper *)&ics->urth, frame_id);
os_mutex_unlock(&ics->server->global_state_lock);
return XRT_SUCCESS;
return xrt_comp_mark_frame(ics->xc, frame_id, XRT_COMPOSITOR_FRAME_POINT_WOKE, os_monotonic_get_ns());
}
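The single wait_frame IPC call is split into predict_frame (which returns a wake-up time) and wait_woke (which marks XRT_COMPOSITOR_FRAME_POINT_WOKE). On the client, ipc_compositor_wait_frame presumably calls ipc_call_compositor_predict_frame, sleeps until the returned wake-up time, and then reports back through the wait_woke call handled above; that part of the client is not visible in this diff. A self-contained sketch of that pattern follows, with the predict/woke round-trips simulated locally and POSIX time calls standing in for Monado's os_* helpers.

/*
 * Self-contained model of a wait_frame built from the new split:
 * predict (get a wake-up time), sleep, then mark the wake point.
 * The predict/woke steps are simulated locally; in the real client
 * they would be IPC round-trips to the handlers shown above.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void
sleep_until_ns(uint64_t when_ns)
{
	uint64_t t = now_ns();
	if (when_ns <= t) {
		return; // Already past the wake-up point.
	}
	uint64_t diff_ns = when_ns - t;
	struct timespec ts = {
	    .tv_sec = (time_t)(diff_ns / 1000000000ull),
	    .tv_nsec = (long)(diff_ns % 1000000000ull),
	};
	nanosleep(&ts, NULL);
}

int
main(void)
{
	// "predict_frame": pretend the server told us to wake up in 10ms
	// and that the frame will be displayed 20ms from now.
	uint64_t wake_up_time_ns = now_ns() + 10u * 1000u * 1000u;
	uint64_t display_time_ns = now_ns() + 20u * 1000u * 1000u;

	sleep_until_ns(wake_up_time_ns);

	// "wait_woke": report how much margin is left before display.
	printf("woke %.2f ms before display\n", (display_time_ns - now_ns()) / 1e6);

	return 0;
}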
xrt_result_t
ipc_handle_compositor_begin_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
{
os_mutex_lock(&ics->server->global_state_lock);
IPC_TRACE_MARKER();
u_rt_helper_mark_begin((struct u_rt_helper *)&ics->urth, frame_id);
os_mutex_unlock(&ics->server->global_state_lock);
return XRT_SUCCESS;
return xrt_comp_begin_frame(ics->xc, frame_id);
}
xrt_result_t
ipc_handle_compositor_discard_frame(volatile struct ipc_client_state *ics, int64_t frame_id)
{
os_mutex_lock(&ics->server->global_state_lock);
IPC_TRACE_MARKER();
u_rt_helper_mark_discarded((struct u_rt_helper *)&ics->urth, frame_id);
return xrt_comp_discard_frame(ics->xc, frame_id);
}
os_mutex_unlock(&ics->server->global_state_lock);
static bool
_update_projection_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
// xdev
uint32_t device_id = layer->xdev_id;
// left
uint32_t lxsci = layer->swapchain_ids[0];
// right
uint32_t rxsci = layer->swapchain_ids[1];
return XRT_SUCCESS;
struct xrt_device *xdev = get_xdev(ics, device_id);
struct xrt_swapchain *lxcs = ics->xscs[lxsci];
struct xrt_swapchain *rxcs = ics->xscs[rxsci];
if (lxcs == NULL || rxcs == NULL) {
U_LOG_E("Invalid swap chain for projection layer!");
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for projection layer!");
return false;
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
xrt_comp_layer_stereo_projection(xc, xdev, lxcs, rxcs, data);
return true;
}
static bool
_update_projection_layer_depth(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
// xdev
uint32_t xdevi = layer->xdev_id;
// left
uint32_t l_xsci = layer->swapchain_ids[0];
// right
uint32_t r_xsci = layer->swapchain_ids[1];
// left
uint32_t l_d_xsci = layer->swapchain_ids[2];
// right
uint32_t r_d_xsci = layer->swapchain_ids[3];
struct xrt_device *xdev = get_xdev(ics, xdevi);
struct xrt_swapchain *l_xcs = ics->xscs[l_xsci];
struct xrt_swapchain *r_xcs = ics->xscs[r_xsci];
struct xrt_swapchain *l_d_xcs = ics->xscs[l_d_xsci];
struct xrt_swapchain *r_d_xcs = ics->xscs[r_d_xsci];
if (l_xcs == NULL || r_xcs == NULL || l_d_xcs == NULL || r_d_xcs == NULL) {
U_LOG_E("Invalid swap chain for projection layer #%u!", i);
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for projection layer #%u!", i);
return false;
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
xrt_comp_layer_stereo_projection_depth(xc, xdev, l_xcs, r_xcs, l_d_xcs, r_d_xcs, data);
return true;
}
static bool
do_single(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i,
const char *name,
struct xrt_device **out_xdev,
struct xrt_swapchain **out_xcs,
struct xrt_layer_data **out_data)
{
uint32_t device_id = layer->xdev_id;
uint32_t sci = layer->swapchain_ids[0];
struct xrt_device *xdev = get_xdev(ics, device_id);
struct xrt_swapchain *xcs = ics->xscs[sci];
if (xcs == NULL) {
U_LOG_E("Invalid swapchain for layer #%u, '%s'!", i, name);
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for layer #%u, '%s'!", i, name);
return false;
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
*out_xdev = xdev;
*out_xcs = xcs;
*out_data = data;
return true;
}
static bool
_update_quad_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "quad", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_quad(xc, xdev, xcs, data);
return true;
}
static bool
_update_cube_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "cube", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_cube(xc, xdev, xcs, data);
return true;
}
static bool
_update_cylinder_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "cylinder", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_cylinder(xc, xdev, xcs, data);
return true;
}
static bool
_update_equirect1_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "equirect1", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_equirect1(xc, xdev, xcs, data);
return true;
}
static bool
_update_equirect2_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "equirect2", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_equirect2(xc, xdev, xcs, data);
return true;
}
static bool
_update_layers(volatile struct ipc_client_state *ics, struct xrt_compositor *xc, struct ipc_layer_slot *slot)
{
IPC_TRACE_MARKER();
for (uint32_t i = 0; i < slot->num_layers; i++) {
volatile struct ipc_layer_entry *layer = &slot->layers[i];
switch (layer->data.type) {
case XRT_LAYER_STEREO_PROJECTION:
if (!_update_projection_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_STEREO_PROJECTION_DEPTH:
if (!_update_projection_layer_depth(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_QUAD:
if (!_update_quad_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_CUBE:
if (!_update_cube_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_CYLINDER:
if (!_update_cylinder_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_EQUIRECT1:
if (!_update_equirect1_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_EQUIRECT2:
if (!_update_equirect2_layer(xc, ics, layer, i)) {
return false;
}
break;
default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
}
}
return true;
}
xrt_result_t
@@ -197,33 +452,49 @@ ipc_handle_compositor_layer_sync(volatile struct ipc_client_state *ics,
const xrt_graphics_sync_handle_t *handles,
const uint32_t num_handles)
{
IPC_TRACE_MARKER();
struct ipc_shared_memory *ism = ics->server->ism;
struct ipc_layer_slot *slot = &ism->slots[slot_id];
xrt_graphics_sync_handle_t sync_handle = XRT_GRAPHICS_SYNC_HANDLE_INVALID;
for (uint32_t i = 0; i < num_handles; i++) {
if (!xrt_graphics_sync_handle_is_valid(handles[i])) {
continue;
}
#ifdef XRT_GRAPHICS_SYNC_HANDLE_IS_FD
close(handles[i]);
#else
#error "Need port to transport these graphics buffers"
#endif
// If we have one or more handles, save the first one.
if (num_handles >= 1) {
sync_handle = handles[0];
}
// Copy current slot data to our state.
ics->render_state = *slot;
ics->rendering_state = true;
// Free all sync handles after the first one.
for (uint32_t i = 1; i < num_handles; i++) {
// Checks for valid handle.
xrt_graphics_sync_handle_t tmp = handles[i];
u_graphics_sync_unref(&tmp);
}
os_mutex_lock(&ics->server->global_state_lock);
// Copy current slot data.
struct ipc_layer_slot copy = *slot;
/*
* Transfer data to underlying compositor.
*/
xrt_comp_layer_begin(ics->xc, frame_id, copy.display_time_ns, copy.env_blend_mode);
_update_layers(ics, ics->xc, &copy);
xrt_comp_layer_commit(ics->xc, frame_id, sync_handle);
/*
* Manage shared state.
*/
os_mutex_lock(&ics->server->global_state.lock);
*out_free_slot_id = (ics->server->current_slot_index + 1) % IPC_MAX_SLOTS;
ics->server->current_slot_index = *out_free_slot_id;
// Also protected by the global lock.
u_rt_helper_mark_delivered((struct u_rt_helper *)&ics->urth, frame_id);
os_mutex_unlock(&ics->server->global_state_lock);
os_mutex_unlock(&ics->server->global_state.lock);
return XRT_SUCCESS;
}
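Note the new ownership policy for the incoming sync handles: the first one is kept and passed to xrt_comp_layer_commit, and every handle after it is released with u_graphics_sync_unref, whereas the old code simply closed each fd. A small stand-alone illustration of that "keep the first, release the rest" policy, using plain pipe fds as stand-ins for xrt_graphics_sync_handle_t.

/*
 * Stand-alone illustration of the "keep the first handle, release
 * the rest" policy used in ipc_handle_compositor_layer_sync above.
 * Plain pipe fds stand in for xrt_graphics_sync_handle_t; the real
 * code uses u_graphics_sync_unref() instead of close().
 */
#include <stdio.h>
#include <unistd.h>

#define NUM_HANDLES 3

int
main(void)
{
	int handles[NUM_HANDLES];

	// Create a few dummy fds to play the role of sync handles.
	for (int i = 0; i < NUM_HANDLES; i++) {
		int p[2];
		if (pipe(p) != 0) {
			return 1;
		}
		handles[i] = p[0];
		close(p[1]); // Only one end is needed for the demo.
	}

	// Adopt the first handle; the commit will consume it.
	int sync_handle = handles[0];

	// Free every handle after the first one.
	for (int i = 1; i < NUM_HANDLES; i++) {
		close(handles[i]);
	}

	printf("committing with sync handle %d\n", sync_handle);
	close(sync_handle); // The layer commit would take ownership here.

	return 0;
}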
@@ -231,25 +502,9 @@ ipc_handle_compositor_layer_sync(volatile struct ipc_client_state *ics,
xrt_result_t
ipc_handle_compositor_poll_events(volatile struct ipc_client_state *ics, union xrt_compositor_event *out_xce)
{
uint64_t l_timestamp = UINT64_MAX;
volatile struct ipc_queued_event *event_to_send = NULL;
for (uint32_t i = 0; i < IPC_EVENT_QUEUE_SIZE; i++) {
volatile struct ipc_queued_event *e = &ics->queued_events[i];
if (e->pending == true && e->timestamp < l_timestamp) {
event_to_send = e;
}
}
IPC_TRACE_MARKER();
// We always return an event in response to this call -
// We signal no events with a special event type.
out_xce->type = XRT_COMPOSITOR_EVENT_NONE;
if (event_to_send) {
*out_xce = event_to_send->event;
event_to_send->pending = false;
}
return XRT_SUCCESS;
return xrt_comp_poll_events(ics->xc, out_xce);
}
xrt_result_t
@@ -271,7 +526,7 @@ ipc_handle_system_get_client_info(volatile struct ipc_client_state *_ics,
//@todo: track this data in the ipc_client_state struct
out_client_desc->primary_application = false;
if (ics->server->active_client_index == (int)id) {
if (ics->server->global_state.active_client_index == (int)id) {
out_client_desc->primary_application = true;
}
@@ -306,10 +561,10 @@ ipc_handle_system_get_clients(volatile struct ipc_client_state *_ics, struct ipc
xrt_result_t
ipc_handle_system_set_primary_client(volatile struct ipc_client_state *ics, uint32_t client_id)
{
ics->server->active_client_index = client_id;
IPC_INFO(ics->server, "System setting active client to %d.", client_id);
update_server_state(ics->server);
ipc_server_set_active_client(ics->server, client_id);
return XRT_SUCCESS;
}
@@ -365,6 +620,8 @@ ipc_handle_swapchain_create(volatile struct ipc_client_state *ics,
xrt_graphics_buffer_handle_t *out_handles,
uint32_t *out_num_handles)
{
IPC_TRACE_MARKER();
xrt_result_t xret = XRT_SUCCESS;
uint32_t index = 0;
@@ -416,6 +673,8 @@ ipc_handle_swapchain_import(volatile struct ipc_client_state *ics,
const xrt_graphics_buffer_handle_t *handles,
uint32_t num_handles)
{
IPC_TRACE_MARKER();
xrt_result_t xret = XRT_SUCCESS;
uint32_t index = 0;
@@ -558,7 +817,6 @@ ipc_handle_device_get_tracked_pose(volatile struct ipc_client_state *ics,
uint64_t at_timestamp,
struct xrt_space_relation *out_relation)
{
// To make the code a bit more readable.
uint32_t device_id = id;
struct ipc_device *isdev = &ics->server->idevs[device_id];

View file

@@ -69,9 +69,6 @@ client_loop(volatile struct ipc_client_state *ics)
{
IPC_INFO(ics->server, "Client connected");
// Make sure it's ready for the client.
u_rt_helper_client_clear((struct u_rt_helper *)&ics->urth);
// Claim the client fd.
int epoll_fd = setup_epoll(ics);
if (epoll_fd < 0) {
@@ -123,43 +120,16 @@ client_loop(volatile struct ipc_client_state *ics)
epoll_fd = -1;
// Multiple threads might be looking at these fields.
os_mutex_lock(&ics->server->global_state_lock);
os_mutex_lock(&ics->server->global_state.lock);
ipc_message_channel_close((struct ipc_message_channel *)&ics->imc);
// Reset the urth for the next client.
u_rt_helper_client_clear((struct u_rt_helper *)&ics->urth);
ics->num_swapchains = 0;
ics->server->threads[ics->server_thread_index].state = IPC_THREAD_STOPPING;
ics->server_thread_index = -1;
memset((void *)&ics->client_state, 0, sizeof(struct ipc_app_state));
// Make sure to reset the renderstate fully.
ics->rendering_state = false;
ics->render_state.num_layers = 0;
for (uint32_t i = 0; i < ARRAY_SIZE(ics->render_state.layers); ++i) {
volatile struct ipc_layer_entry *rl = &ics->render_state.layers[i];
rl->swapchain_ids[0] = 0;
rl->swapchain_ids[1] = 0;
rl->data.flip_y = false;
/*!
* @todo this is redundant, we're setting both elements of a
* union. Why? Can we just zero the whole render_state?
*/
rl->data.stereo.l.sub.image_index = 0;
rl->data.stereo.r.sub.image_index = 0;
rl->data.quad.sub.image_index = 0;
rl->data.cube.sub.image_index = 0;
rl->data.cylinder.sub.image_index = 0;
rl->data.equirect1.sub.image_index = 0;
rl->data.equirect2.sub.image_index = 0;
//! @todo set rects or array index?
}
// Destroy all swapchains now.
for (uint32_t j = 0; j < IPC_MAX_CLIENT_SWAPCHAINS; j++) {
// Drop our reference, does NULL checking. Cast away volatile.
@@ -168,12 +138,17 @@ client_loop(volatile struct ipc_client_state *ics)
IPC_TRACE(ics->server, "Destroyed swapchain %d.", j);
}
os_mutex_unlock(&ics->server->global_state_lock);
os_mutex_unlock(&ics->server->global_state.lock);
// Cast away volatile.
xrt_comp_destroy((struct xrt_compositor **)&ics->xc);
// Should we stop the server when a client disconnects?
if (ics->server->exit_on_disconnect) {
ics->server->running = false;
}
ipc_server_deactivate_session(ics);
}
@@ -190,7 +165,5 @@ ipc_server_client_thread(void *_ics)
client_loop(ics);
update_server_state(ics->server);
return NULL;
}

View file

@@ -50,6 +50,7 @@ extern void
oxr_sdl2_hack_stop(void **hack_ptr);
/* ---- HACK ---- */
/*
*
* Defines and helpers.
@@ -59,11 +60,12 @@ oxr_sdl2_hack_stop(void **hack_ptr);
DEBUG_GET_ONCE_BOOL_OPTION(exit_on_disconnect, "IPC_EXIT_ON_DISCONNECT", false)
DEBUG_GET_ONCE_LOG_OPTION(ipc_log, "IPC_LOG", U_LOGGING_WARN)
struct _z_sort_data
{
int32_t index;
int32_t z_order;
};
/*
*
* Idev functions.
*
*/
static void
init_idev(struct ipc_device *idev, struct xrt_device *xdev)
@@ -89,13 +91,12 @@ teardown_idev(struct ipc_device *idev)
* Static functions.
*
*/
static void
teardown_all(struct ipc_server *s)
{
u_var_remove_root(s);
xrt_comp_native_destroy(&s->xcn);
xrt_syscomp_destroy(&s->xsysc);
for (size_t i = 0; i < IPC_SERVER_NUM_XDEVS; i++) {
@@ -106,7 +107,7 @@ teardown_all(struct ipc_server *s)
ipc_server_mainloop_deinit(&s->ml);
os_mutex_destroy(&s->global_state_lock);
os_mutex_destroy(&s->global_state.lock);
}
static int
@@ -333,7 +334,7 @@ ipc_server_start_client_listener_thread(struct ipc_server *vs, int fd)
volatile struct ipc_client_state *ics = NULL;
int32_t cs_index = -1;
os_mutex_lock(&vs->global_state_lock);
os_mutex_lock(&vs->global_state.lock);
// find the next free thread in our array (server_thread_index is -1)
// and have it handle this connection
@@ -349,7 +350,7 @@ ipc_server_start_client_listener_thread(struct ipc_server *vs, int fd)
close(fd);
// Unlock when we are done.
os_mutex_unlock(&vs->global_state_lock);
os_mutex_unlock(&vs->global_state.lock);
U_LOG_E("Max client count reached!");
return;
@@ -361,7 +362,7 @@ ipc_server_start_client_listener_thread(struct ipc_server *vs, int fd)
close(fd);
// Unlock when we are done.
os_mutex_unlock(&vs->global_state_lock);
os_mutex_unlock(&vs->global_state.lock);
U_LOG_E("Client state management error!");
return;
@@ -381,7 +382,7 @@ ipc_server_start_client_listener_thread(struct ipc_server *vs, int fd)
os_thread_start(&it->thread, ipc_server_client_thread, (void *)ics);
// Unlock when we are done.
os_mutex_unlock(&vs->global_state_lock);
os_mutex_unlock(&vs->global_state.lock);
}
static int
@@ -432,17 +433,6 @@ init_all(struct ipc_server *s)
return ret;
}
struct xrt_session_info xsi = {
.is_overlay = false,
.flags = 0,
.z_order = 0,
};
ret = xrt_syscomp_create_native_compositor(s->xsysc, &xsi, &s->xcn);
if (ret < 0) {
teardown_all(s);
return ret;
}
ret = init_shm(s);
if (ret < 0) {
teardown_all(s);
@@ -455,12 +445,7 @@ init_all(struct ipc_server *s)
return ret;
}
// Init all of the render timing helpers.
for (size_t i = 0; i < ARRAY_SIZE(s->threads); i++) {
u_rt_helper_init((struct u_rt_helper *)&s->threads[i].ics.urth);
}
ret = os_mutex_init(&s->global_state_lock);
ret = os_mutex_init(&s->global_state.lock);
if (ret < 0) {
teardown_all(s);
return ret;
@@ -476,500 +461,31 @@ init_all(struct ipc_server *s)
return 0;
}
static uint32_t
find_event_slot(volatile struct ipc_client_state *ics)
{
uint64_t oldest_event_timestamp = UINT64_MAX;
uint32_t oldest_event_index = 0;
for (uint32_t i = 0; i < IPC_EVENT_QUEUE_SIZE; i++) {
if (ics->queued_events->timestamp < oldest_event_timestamp) {
oldest_event_index = i;
}
if (!ics->queued_events[i].pending) {
return i;
}
}
U_LOG_E("Event queue full - unconsumed event lost!");
return oldest_event_index;
}
static void
transition_overlay_visibility(volatile struct ipc_client_state *ics, bool visible)
{
uint32_t event_slot = find_event_slot(ics);
uint64_t timestamp = os_monotonic_get_ns();
volatile struct ipc_queued_event *qe = &ics->queued_events[event_slot];
qe->timestamp = timestamp;
qe->pending = true;
qe->event.type = XRT_COMPOSITOR_EVENT_OVERLAY_CHANGE;
qe->event.overlay.visible = visible;
}
static void
send_client_state(volatile struct ipc_client_state *ics)
{
uint32_t event_slot = find_event_slot(ics);
uint64_t timestamp = os_monotonic_get_ns();
volatile struct ipc_queued_event *qe = &ics->queued_events[event_slot];
qe->timestamp = timestamp;
qe->pending = true;
qe->event.type = XRT_COMPOSITOR_EVENT_STATE_CHANGE;
qe->event.state.visible = ics->client_state.session_visible;
qe->event.state.focused = ics->client_state.session_focused;
}
static bool
_update_projection_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
// xdev
uint32_t device_id = layer->xdev_id;
// left
uint32_t lxsci = layer->swapchain_ids[0];
// right
uint32_t rxsci = layer->swapchain_ids[1];
struct xrt_device *xdev = get_xdev(ics, device_id);
struct xrt_swapchain *lxcs = ics->xscs[lxsci];
struct xrt_swapchain *rxcs = ics->xscs[rxsci];
if (lxcs == NULL || rxcs == NULL) {
U_LOG_E("Invalid swap chain for projection layer!");
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for projection layer!");
return false;
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
xrt_comp_layer_stereo_projection(xc, xdev, lxcs, rxcs, data);
return true;
}
static bool
_update_projection_layer_depth(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
// xdev
uint32_t xdevi = layer->xdev_id;
// left
uint32_t l_xsci = layer->swapchain_ids[0];
// right
uint32_t r_xsci = layer->swapchain_ids[1];
// left
uint32_t l_d_xsci = layer->swapchain_ids[2];
// right
uint32_t r_d_xsci = layer->swapchain_ids[3];
struct xrt_device *xdev = get_xdev(ics, xdevi);
struct xrt_swapchain *l_xcs = ics->xscs[l_xsci];
struct xrt_swapchain *r_xcs = ics->xscs[r_xsci];
struct xrt_swapchain *l_d_xcs = ics->xscs[l_d_xsci];
struct xrt_swapchain *r_d_xcs = ics->xscs[r_d_xsci];
if (l_xcs == NULL || r_xcs == NULL || l_d_xcs == NULL || r_d_xcs == NULL) {
U_LOG_E("Invalid swap chain for projection layer!");
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for projection layer!");
return false;
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
xrt_comp_layer_stereo_projection_depth(xc, xdev, l_xcs, r_xcs, l_d_xcs, r_d_xcs, data);
return true;
}
static bool
do_single(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i,
const char *name,
struct xrt_device **out_xdev,
struct xrt_swapchain **out_xcs,
struct xrt_layer_data **out_data)
{
uint32_t device_id = layer->xdev_id;
uint32_t sci = layer->swapchain_ids[0];
struct xrt_device *xdev = get_xdev(ics, device_id);
struct xrt_swapchain *xcs = ics->xscs[sci];
if (xcs == NULL) {
U_LOG_E("Invalid swapchain for '%u' layer, '%s'!", i, name);
return false;
}
if (xdev == NULL) {
U_LOG_E("Invalid xdev for '%u' layer, '%s'!", i, name);
return false;
}
// Cast away volatile.
struct xrt_layer_data *data = (struct xrt_layer_data *)&layer->data;
*out_xdev = xdev;
*out_xcs = xcs;
*out_data = data;
return true;
}
static bool
_update_quad_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "quad", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_quad(xc, xdev, xcs, data);
return true;
}
static bool
_update_cube_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "cube", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_cube(xc, xdev, xcs, data);
return true;
}
static bool
_update_cylinder_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "cylinder", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_cylinder(xc, xdev, xcs, data);
return true;
}
static bool
_update_equirect1_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "equirect1", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_equirect1(xc, xdev, xcs, data);
return true;
}
static bool
_update_equirect2_layer(struct xrt_compositor *xc,
volatile struct ipc_client_state *ics,
volatile struct ipc_layer_entry *layer,
uint32_t i)
{
struct xrt_device *xdev;
struct xrt_swapchain *xcs;
struct xrt_layer_data *data;
if (!do_single(xc, ics, layer, i, "equirect2", &xdev, &xcs, &data)) {
return false;
}
xrt_comp_layer_equirect2(xc, xdev, xcs, data);
return true;
}
static int
_overlay_sort_func(const void *a, const void *b)
{
struct _z_sort_data *oa = (struct _z_sort_data *)a;
struct _z_sort_data *ob = (struct _z_sort_data *)b;
if (oa->z_order < ob->z_order) {
return -1;
}
if (oa->z_order > ob->z_order) {
return 1;
}
return 0;
}
static bool
_update_layers(struct ipc_server *s, struct xrt_compositor *xc)
{
IPC_TRACE_MARKER();
struct _z_sort_data z_data[IPC_MAX_CLIENTS];
// initialise, and fill in overlay app data
for (int32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
volatile struct ipc_client_state *ics = &s->threads[i].ics;
z_data[i].index = -1;
z_data[i].z_order = -1;
// we need to create a list of overlay applications, sorted by z
if (ics->client_state.session_overlay) {
if (ics->client_state.session_active) {
z_data[i].index = i;
z_data[i].z_order = ics->client_state.z_order;
}
}
}
// ensure our primary application is enabled,
// and rendered first in the stack
if (s->active_client_index >= 0) {
z_data[s->active_client_index].index = s->active_client_index;
z_data[s->active_client_index].z_order = INT32_MIN;
}
// sort the stack array
qsort(z_data, IPC_MAX_CLIENTS, sizeof(struct _z_sort_data), _overlay_sort_func);
// render the layer stack
for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
struct _z_sort_data *zd = &z_data[i];
if (zd->index < 0) {
continue;
}
volatile struct ipc_client_state *ics = &s->threads[zd->index].ics;
for (uint32_t j = 0; j < ics->render_state.num_layers; j++) {
volatile struct ipc_layer_entry *layer = &ics->render_state.layers[j];
switch (layer->data.type) {
case XRT_LAYER_STEREO_PROJECTION:
if (!_update_projection_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_STEREO_PROJECTION_DEPTH:
if (!_update_projection_layer_depth(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_QUAD:
if (!_update_quad_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_CUBE:
if (!_update_cube_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_CYLINDER:
if (!_update_cylinder_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_EQUIRECT1:
if (!_update_equirect1_layer(xc, ics, layer, i)) {
return false;
}
break;
case XRT_LAYER_EQUIRECT2:
if (!_update_equirect2_layer(xc, ics, layer, i)) {
return false;
}
break;
default: U_LOG_E("Unhandled layer type '%i'!", layer->data.type); break;
}
}
}
return true;
}
static void
broadcast_timings(struct ipc_server *s,
uint64_t predicted_display_time_ns,
uint64_t predicted_display_period_ns,
uint64_t diff_ns)
{
IPC_TRACE_MARKER();
os_mutex_lock(&s->global_state_lock);
// Broadcast the new timing information to the helpers.
for (size_t i = 0; i < ARRAY_SIZE(s->threads); i++) {
struct u_rt_helper *urth = (struct u_rt_helper *)&s->threads[i].ics.urth;
u_rt_helper_new_sample( //
urth, //
predicted_display_time_ns, //
predicted_display_period_ns, //
diff_ns); //
}
os_mutex_unlock(&s->global_state_lock);
}
static int
main_loop(struct ipc_server *s)
{
struct xrt_compositor *xc = &s->xcn->base;
// make sure all our client connections have a handle to the
// compositor and consistent initial state
while (s->running) {
int64_t frame_id;
uint64_t predicted_display_time_ns;
uint64_t predicted_display_period_ns;
os_nanosleep(U_TIME_1S_IN_NS / 20);
xrt_comp_wait_frame(xc, &frame_id, &predicted_display_time_ns, &predicted_display_period_ns);
uint64_t now_ns = os_monotonic_get_ns();
uint64_t diff_ns = predicted_display_time_ns - now_ns;
broadcast_timings(s, predicted_display_time_ns, predicted_display_period_ns, diff_ns);
xrt_comp_begin_frame(xc, frame_id);
xrt_comp_layer_begin(xc, frame_id, 0, 0);
_update_layers(s, xc);
xrt_comp_layer_commit(xc, frame_id, XRT_GRAPHICS_SYNC_HANDLE_INVALID);
// Check polling last, so we know we have valid timing data.
// Check polling.
ipc_server_mainloop_poll(s, &s->ml);
}
return 0;
}
static void
handle_overlay_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
// this is an overlay session.
if (ics->client_state.session_overlay) {
// switch between main applications
if (active_id >= 0 && prev_active_id >= 0) {
transition_overlay_visibility(ics, false);
transition_overlay_visibility(ics, true);
}
// switch from idle to active application
if (active_id >= 0 && prev_active_id < 0) {
transition_overlay_visibility(ics, true);
}
// switch from active application to idle
if (active_id < 0 && prev_active_id >= 0) {
transition_overlay_visibility(ics, false);
}
}
}
static void
handle_focused_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
// if our prev active id is -1 and our cur active id is -1, we
// can bail out early
if (active_id == -1 && prev_active_id == -1) {
return;
}
// set visibility/focus to false on all applications
ics->client_state.session_focused = false;
ics->client_state.session_visible = false;
// do we have a primary application?
if (active_id >= 0) {
// if we are an overlay, we are always visible
// if we have a primary application
if (ics->client_state.session_overlay) {
ics->client_state.session_visible = true;
}
// set visible + focused if we are the primary
// application
if (ics->server_thread_index == active_id) {
ics->client_state.session_visible = true;
ics->client_state.session_focused = true;
}
send_client_state(ics);
return;
}
// no primary application, set all overlays to synchronised
// state
if (ics->client_state.session_overlay) {
ics->client_state.session_focused = false;
ics->client_state.session_visible = false;
send_client_state(ics);
}
}
void
init_server_state(struct ipc_server *s)
{
// set up initial state for global vars, and each client state
s->active_client_index = -1; // we start off with no active client.
s->last_active_client_index = -1;
s->global_state.active_client_index = -1; // we start off with no active client.
s->global_state.last_active_client_index = -1;
s->current_slot_index = 0;
for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
volatile struct ipc_client_state *ics = &s->threads[i].ics;
ics->server = s;
ics->xc = &s->xcn->base;
ics->server_thread_index = -1;
}
}
@@ -977,26 +493,105 @@ init_server_state(struct ipc_server *s)
/*
*
* Exported functions.
* Client management functions.
*
*/
void
update_server_state(struct ipc_server *s)
static void
handle_overlay_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
// multiple threads could call this at the same time.
os_mutex_lock(&s->global_state_lock);
// Is an overlay session?
if (!ics->client_state.session_overlay) {
return;
}
// Does this client have a compositor yet?
if (ics->xc == NULL) {
return;
}
// Switch between main applications
if (active_id >= 0 && prev_active_id >= 0) {
xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, false);
xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, true);
}
// Switch from idle to active application
if (active_id >= 0 && prev_active_id < 0) {
xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, true);
}
// Switch from active application to idle
if (active_id < 0 && prev_active_id >= 0) {
xrt_syscomp_set_main_app_visibility(ics->server->xsysc, ics->xc, false);
}
}
static void
handle_focused_client_events(volatile struct ipc_client_state *ics, int active_id, int prev_active_id)
{
// Set start z_order at the bottom.
int64_t z_order = INT64_MIN;
// Set visibility/focus to false on all applications.
bool focused = false;
bool visible = false;
// Set visible + focused if we are the primary application
if (ics->server_thread_index == active_id) {
visible = true;
focused = true;
z_order = INT64_MIN;
}
// Set all overlays to always active and focused.
if (ics->client_state.session_overlay) {
visible = true;
focused = true;
z_order = ics->client_state.z_order;
}
ics->client_state.session_visible = visible;
ics->client_state.session_focused = focused;
ics->client_state.z_order = z_order;
if (ics->xc != NULL) {
xrt_syscomp_set_state(ics->server->xsysc, ics->xc, visible, focused);
xrt_syscomp_set_z_order(ics->server->xsysc, ics->xc, z_order);
}
}
static void
flush_state_to_all_clients_locked(struct ipc_server *s)
{
for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
volatile struct ipc_client_state *ics = &s->threads[i].ics;
// Not running?
if (ics->server_thread_index < 0) {
continue;
}
handle_focused_client_events(ics, s->global_state.active_client_index,
s->global_state.last_active_client_index);
handle_overlay_client_events(ics, s->global_state.active_client_index,
s->global_state.last_active_client_index);
}
}
static void
update_server_state_locked(struct ipc_server *s)
{
// if our client that is set to active is still active,
// and it is the same as our last active client, we can
// early-out, as no events need to be sent
if (s->active_client_index >= 0) {
if (s->global_state.active_client_index >= 0) {
volatile struct ipc_client_state *ics = &s->threads[s->active_client_index].ics;
volatile struct ipc_client_state *ics = &s->threads[s->global_state.active_client_index].ics;
if (ics->client_state.session_active && s->active_client_index == s->last_active_client_index) {
os_mutex_unlock(&s->global_state_lock);
if (ics->client_state.session_active &&
s->global_state.active_client_index == s->global_state.last_active_client_index) {
return;
}
}
@@ -1026,33 +621,101 @@ update_server_state(struct ipc_server *s)
// if our currently-set active primary application is not
// actually active/displayable, use the fallback application
// instead.
volatile struct ipc_client_state *ics = &s->threads[s->active_client_index].ics;
if (!(ics->client_state.session_overlay == false && s->active_client_index >= 0 &&
volatile struct ipc_client_state *ics = &s->threads[s->global_state.active_client_index].ics;
if (!(ics->client_state.session_overlay == false && s->global_state.active_client_index >= 0 &&
ics->client_state.session_active)) {
s->active_client_index = fallback_active_application;
s->global_state.active_client_index = fallback_active_application;
}
// if we have no applications to fall back to, enable the idle
// wallpaper.
if (set_idle) {
s->active_client_index = -1;
s->global_state.active_client_index = -1;
}
for (uint32_t i = 0; i < IPC_MAX_CLIENTS; i++) {
flush_state_to_all_clients_locked(s);
volatile struct ipc_client_state *ics = &s->threads[i].ics;
if (ics->server_thread_index >= 0) {
s->global_state.last_active_client_index = s->global_state.active_client_index;
}
handle_focused_client_events(ics, s->active_client_index, s->last_active_client_index);
handle_overlay_client_events(ics, s->active_client_index, s->last_active_client_index);
}
/*
*
* Exported functions.
*
*/
void
ipc_server_set_active_client(struct ipc_server *s, int client_id)
{
os_mutex_lock(&s->global_state.lock);
if (client_id == s->global_state.active_client_index) {
os_mutex_unlock(&s->global_state.lock);
return;
}
s->last_active_client_index = s->active_client_index;
os_mutex_unlock(&s->global_state_lock);
os_mutex_unlock(&s->global_state.lock);
}
void
ipc_server_activate_session(volatile struct ipc_client_state *ics)
{
struct ipc_server *s = ics->server;
// Already active, noop.
if (ics->client_state.session_active) {
return;
}
assert(ics->server_thread_index >= 0);
// Multiple threads could call this at the same time.
os_mutex_lock(&s->global_state.lock);
ics->client_state.session_active = true;
if (ics->client_state.session_overlay) {
// For new active overlay sessions only update this session.
handle_focused_client_events(ics, s->global_state.active_client_index,
s->global_state.last_active_client_index);
handle_overlay_client_events(ics, s->global_state.active_client_index,
s->global_state.last_active_client_index);
} else {
// For new active regular sessions update all clients.
update_server_state_locked(s);
}
os_mutex_unlock(&s->global_state.lock);
}
void
ipc_server_deactivate_session(volatile struct ipc_client_state *ics)
{
struct ipc_server *s = ics->server;
// Multiple threads could call this at the same time.
os_mutex_lock(&s->global_state.lock);
ics->client_state.session_active = false;
update_server_state_locked(s);
os_mutex_unlock(&s->global_state.lock);
}
void
ipc_server_update_state(struct ipc_server *s)
{
// Multiple threads could call this at the same time.
os_mutex_lock(&s->global_state.lock);
update_server_state_locked(s);
os_mutex_unlock(&s->global_state.lock);
}
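The new entry points all follow the same convention: the public ipc_server_* functions take global_state.lock and then call helpers such as update_server_state_locked() that assume the lock is already held. A reduced, self-contained model of that convention follows, with pthread_mutex_t standing in for os_mutex and a single index standing in for the full per-client state.

/*
 * Reduced model of the locking convention introduced here: public
 * entry points take global_state.lock, *_locked helpers assume the
 * caller already holds it. All names and types are simplified
 * stand-ins, not the real ipc_server definitions.
 */
#include <pthread.h>
#include <stdio.h>

struct server_model
{
	struct
	{
		int active_client_index;
		pthread_mutex_t lock;
	} global_state;
};

// Assumes global_state.lock is held by the caller.
static void
update_server_state_locked(struct server_model *s)
{
	printf("recomputed state, active client = %d\n", s->global_state.active_client_index);
}

static void
server_set_active_client(struct server_model *s, int client_id)
{
	pthread_mutex_lock(&s->global_state.lock);
	s->global_state.active_client_index = client_id;
	update_server_state_locked(s);
	pthread_mutex_unlock(&s->global_state.lock);
}

int
main(void)
{
	struct server_model s = {
	    .global_state = {.active_client_index = -1},
	};
	pthread_mutex_init(&s.global_state.lock, NULL);

	server_set_active_client(&s, 2);  // e.g. from the set_primary_client handler
	server_set_active_client(&s, -1); // e.g. when the active client goes away

	pthread_mutex_destroy(&s.global_state.lock);

	return 0;
}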
#ifndef XRT_OS_ANDROID

View file

@@ -151,6 +151,7 @@ struct ipc_layer_entry
*/
struct ipc_layer_slot
{
uint64_t display_time_ns;
enum xrt_blend_mode env_blend_mode;
uint32_t num_layers;
struct ipc_layer_entry layers[IPC_MAX_LAYERS];

View file

@@ -72,13 +72,12 @@
]
},
"compositor_wait_frame": {
"compositor_predict_frame": {
"out": [
{"name": "frame_id", "type": "int64_t"},
{"name": "predicted_display_time", "type": "uint64_t"},
{"name": "wake_up_time", "type": "uint64_t"},
{"name": "predicted_display_period", "type": "uint64_t"},
{"name": "min_display_period", "type": "uint64_t"}
{"name": "predicted_display_time", "type": "uint64_t"},
{"name": "predicted_display_period", "type": "uint64_t"}
]
},