c/multi: Do not display frames before they are to be displayed

This follows the OpenXR spec, which says that a frame should not be
displayed before the XrFrameEndInfo::displayTime value.
Jakob Bornecrantz 2021-04-05 23:09:44 +01:00
parent 736b9abdbe
commit aca09bdebf
3 changed files with 89 additions and 13 deletions
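
As a quick orientation before the per-file hunks: a committed frame now parks in a new "scheduled" slot and is only moved to the "delivered" slot (the one the render loop reads) once the compositor's target display time has caught up with the frame's requested display time, give or take half a millisecond. The snippet below is a condensed stand-in for that check, not the real Monado code; the struct and function names are simplified placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define HALF_MS_IN_NS (500 * 1000)

/* Simplified stand-in for struct multi_layer_slot from the diff below. */
struct sketch_slot
{
	bool active;
	uint64_t display_time_ns;
};

/* Rough equivalent of Monado's time_is_greater_then_or_within_half_ms(). */
static bool
at_or_within_half_ms(uint64_t now_ns, uint64_t frame_ns)
{
	return now_ns + HALF_MS_IN_NS >= frame_ns;
}

/* Core idea of multi_compositor_deliver_any_frames(): only hand a scheduled
 * frame to the render loop once its display time has (nearly) arrived. */
static void
deliver_if_due(struct sketch_slot *delivered, struct sketch_slot *scheduled, uint64_t display_time_ns)
{
	if (!scheduled->active) {
		return; /* Nothing is waiting to be shown. */
	}
	if (at_or_within_half_ms(display_time_ns, scheduled->display_time_ns)) {
		*delivered = *scheduled;                  /* Move the frame forward. */
		memset(scheduled, 0, sizeof(*scheduled)); /* Clear the scheduled slot. */
	}
}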


@@ -286,6 +286,7 @@ multi_compositor_layer_begin(struct xrt_compositor *xc,
assert(mc->progress.num_layers == 0);
U_ZERO(&mc->progress);
mc->progress.active = true;
mc->progress.display_time_ns = display_time_ns;
mc->progress.env_blend_mode = env_blend_mode;
@@ -447,12 +448,23 @@ multi_compositor_layer_commit(struct xrt_compositor *xc, int64_t frame_id, xrt_g
xrt_compositor_fence_destroy(&xcf);
}
os_mutex_lock(&mc->slot_lock);
// Block here if the scheduled slot is not clear.
while (mc->scheduled.active) {
os_mutex_unlock(&mc->slot_lock);
os_nanosleep(U_TIME_1MS_IN_NS);
os_mutex_lock(&mc->slot_lock);
}
slot_move_and_clear(&mc->scheduled, &mc->progress);
os_mutex_unlock(&mc->slot_lock);
os_mutex_lock(&mc->msc->list_and_timing_lock);
slot_move_and_clear(&mc->delivered, &mc->progress);
u_rt_mark_delivered(mc->urt, frame_id);
os_mutex_unlock(&mc->msc->list_and_timing_lock);
return XRT_SUCCESS;
@@ -492,6 +504,7 @@ multi_compositor_destroy(struct xrt_compositor *xc)
// We are now off the rendering list, clear slots for any swapchains.
slot_clear(&mc->progress);
slot_clear(&mc->scheduled);
slot_clear(&mc->delivered);
// Does null checking.
@@ -500,6 +513,23 @@ multi_compositor_destroy(struct xrt_compositor *xc)
free(mc);
}
void
multi_compositor_deliver_any_frames(struct multi_compositor *mc, uint64_t display_time_ns)
{
os_mutex_lock(&mc->slot_lock);
if (!mc->scheduled.active) {
os_mutex_unlock(&mc->slot_lock);
return;
}
if (time_is_greater_then_or_within_half_ms(display_time_ns, mc->scheduled.display_time_ns)) {
slot_move_and_clear(&mc->delivered, &mc->scheduled);
}
os_mutex_unlock(&mc->slot_lock);
}
xrt_result_t
multi_compositor_create(struct multi_system_compositor *msc,
const struct xrt_session_info *xsi,
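
To see how a client's frame enters that pipeline: multi_compositor_layer_commit() above no longer publishes straight to the delivered slot; it waits, with a 1 ms poll-and-sleep loop, until the scheduled slot is free and then moves the in-progress frame there. A pthreads-based sketch of that hand-off (the real code uses Monado's os_mutex/os_nanosleep wrappers and slot_move_and_clear(); all names below are illustrative) could look like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

struct sketch_slot
{
	bool active;
	uint64_t display_time_ns;
};

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sketch_slot progress;  /* Only touched by the client thread. */
static struct sketch_slot scheduled; /* Shared with the render thread, guarded by slot_lock. */

/* Shape of the new tail of multi_compositor_layer_commit(): block until the
 * previously committed frame has been picked up, then publish this one. */
static void
publish_committed_frame(void)
{
	struct timespec one_ms = {.tv_sec = 0, .tv_nsec = 1000 * 1000};

	pthread_mutex_lock(&slot_lock);
	while (scheduled.active) {
		/* The scheduled slot still holds the previous frame; let the
		 * render thread drain it before queueing another one. */
		pthread_mutex_unlock(&slot_lock);
		nanosleep(&one_ms, NULL);
		pthread_mutex_lock(&slot_lock);
	}
	scheduled = progress;                   /* Move ... */
	memset(&progress, 0, sizeof(progress)); /* ... and clear, like slot_move_and_clear(). */
	pthread_mutex_unlock(&slot_lock);
}

The effect is back-pressure: a client can have at most one committed frame queued ahead of the one the render loop is currently showing.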


@@ -72,6 +72,7 @@ struct multi_layer_slot
enum xrt_blend_mode env_blend_mode;
uint32_t num_layers;
struct multi_layer_entry layers[MULTI_MAX_LAYERS];
bool active;
};
/*!
@@ -122,10 +123,22 @@ struct multi_compositor
int64_t z_order;
} state;
//! Currently being transferred or waited on.
//! Lock for all of the slots.
struct os_mutex slot_lock;
/*!
* Currently being transferred or waited on.
* Not protected by the slot lock as it is only touched by the client thread.
*/
struct multi_layer_slot progress;
//! Fully ready to be used.
//! Scheduled frames for a future timepoint.
struct multi_layer_slot scheduled;
/*!
* Fully ready to be used.
* Not protected by the slot lock as it is only touched by the main render loop thread.
*/
struct multi_layer_slot delivered;
struct u_render_timing *urt;
@@ -155,6 +168,15 @@ multi_compositor_create(struct multi_system_compositor *msc,
void
multi_compositor_push_event(struct multi_compositor *mc, const union xrt_compositor_event *xce);
/*!
* Deliver any scheduled frames that are to be displayed at or after the given @p display_time_ns. Called by the render
* thread; copies data from multi_compositor::scheduled to multi_compositor::delivered while holding the slot_lock.
*
* @ingroup comp_multi
*/
void
multi_compositor_deliver_any_frames(struct multi_compositor *mc, uint64_t display_time_ns);
/*
*
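
The ownership rules spelled out in the comments above can be summarized in one annotated struct; this is a condensed restatement with placeholder names, not the actual multi_compositor layout:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct sketch_slot
{
	bool active;
	uint64_t display_time_ns;
};

struct sketch_multi_compositor
{
	pthread_mutex_t slot_lock; /* Stand-in for the os_mutex slot_lock. */

	/* Frame being built or waited on; client thread only, no lock needed. */
	struct sketch_slot progress;

	/* Frame committed for a future display time; shared between the client
	 * thread and the render thread, always accessed under slot_lock. */
	struct sketch_slot scheduled;

	/* Frame whose display time has arrived; render-loop thread only. */
	struct sketch_slot delivered;
};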


@@ -211,7 +211,20 @@ overlay_sort_func(const void *a, const void *b)
}
static void
transfer_layers_locked(struct multi_system_compositor *msc)
log_frame_time_diff(uint64_t frame_time_ns, uint64_t display_time_ns)
{
int64_t diff_ns = (int64_t)frame_time_ns - (int64_t)display_time_ns;
bool late = false;
if (diff_ns < 0) {
diff_ns = -diff_ns;
late = true;
}
U_LOG_W("Frame %s by %.2fms!", late ? "late" : "early", time_ns_to_ms_f(diff_ns));
}
static void
transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_time_ns)
{
COMP_TRACE_MARKER();
@@ -226,6 +239,9 @@ transfer_layers_locked(struct multi_system_compositor *msc)
}
array[count++] = msc->clients[k];
// Even if the client isn't shown, make sure that its frames are delivered.
multi_compositor_deliver_any_frames(msc->clients[k], display_time_ns);
}
// Sort the stack array
@@ -238,6 +254,16 @@ transfer_layers_locked(struct multi_system_compositor *msc)
continue;
}
// None of the data in this slot is valid, don't access it.
if (!mc->delivered.active) {
continue;
}
uint64_t frame_time_ns = mc->delivered.display_time_ns;
if (!time_is_within_half_ms(frame_time_ns, display_time_ns)) {
log_frame_time_diff(frame_time_ns, display_time_ns);
}
for (size_t i = 0; i < mc->delivered.num_layers; i++) {
struct multi_layer_entry *layer = &mc->delivered.layers[i];
@@ -354,18 +380,16 @@ multi_main_loop(struct multi_system_compositor *msc)
broadcast_timings(msc, predicted_display_time_ns, predicted_display_period_ns, diff_ns);
// Make sure that the clients don't go away.
os_mutex_lock(&msc->list_and_timing_lock);
xrt_comp_begin_frame(xc, frame_id);
xrt_comp_layer_begin(xc, frame_id, 0, 0);
transfer_layers_locked(msc);
// Make sure that the clients don't go away while we transfer layers.
os_mutex_lock(&msc->list_and_timing_lock);
transfer_layers_locked(msc, predicted_display_time_ns);
os_mutex_unlock(&msc->list_and_timing_lock);
xrt_comp_layer_commit(xc, frame_id, XRT_GRAPHICS_SYNC_HANDLE_INVALID);
os_mutex_unlock(&msc->list_and_timing_lock);
// Re-lock the thread for the check in the while statement.
os_thread_helper_lock(&msc->oth);
}
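
Putting the render-thread side together: each iteration of multi_main_loop() now lets every client, shown or not, deliver any frame whose display time has arrived, copies the delivered layers of the visible clients into the native compositor while holding the list lock, releases the lock, and only then commits. A compressed sketch of that ordering, with the Monado calls replaced by placeholder stubs, is:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_CLIENTS 8

struct sketch_client
{
	bool in_use;
	/* ... delivered slot, layers, z-order, ... */
};

/* Placeholder stubs standing in for the real Monado calls in multi_main_loop(). */
static void deliver_any_frames(struct sketch_client *c, uint64_t when_ns) { (void)c; (void)when_ns; }
static void copy_delivered_layers(struct sketch_client *c) { (void)c; }
static void commit_to_native_compositor(void) {}

/* Per-frame ordering introduced by this commit. The caller is assumed to
 * hold the client-list lock while the two loops run and to pass an array of
 * MAX_CLIENTS entries. */
static void
compose_one_frame(struct sketch_client *clients, uint64_t predicted_display_time_ns)
{
	/* 1. Deliver due frames for every client, visible or not, so their
	 *    scheduled slots never stay blocked forever. */
	for (size_t i = 0; i < MAX_CLIENTS; i++) {
		if (clients[i].in_use) {
			deliver_any_frames(&clients[i], predicted_display_time_ns);
		}
	}

	/* 2. Copy the delivered layers of the visible clients into the native
	 *    compositor (the real code sorts them by z-order first). */
	for (size_t i = 0; i < MAX_CLIENTS; i++) {
		if (clients[i].in_use) {
			copy_delivered_layers(&clients[i]);
		}
	}

	/* 3. Hand the composed frame off, after the list lock is released. */
	commit_to_native_compositor();
}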