u/pacing: Introduce latched and retired calls on app pacer
commit da542e3f5e
parent 7eacf6bd7a
@@ -364,6 +364,30 @@ struct u_pacing_app
     */
    void (*mark_gpu_done)(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns);

+   /*!
+    * Latch a frame for rendering for delivery to the native compositor;
+    * may be called multiple times for the same frame should the app be on
+    * a frame cadence that is lower than the native compositor's.
+    *
+    * @param upa App pacer struct.
+    * @param[in] frame_id The frame ID of the latched frame.
+    * @param[in] when_ns Time when the latching happened.
+    * @param[in] system_frame_id The ID of the system frame that is
+    *                            latching the app's frame.
+    */
+   void (*latched)(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns, int64_t system_frame_id);
+
+   /*!
+    * Mark a frame as completely retired: it will never be latched (used
+    * by the native compositor) again, because a new frame has been
+    * latched or a shutdown condition has been met.
+    *
+    * @param upa App pacer struct.
+    * @param[in] frame_id The frame ID of the retired frame.
+    * @param[in] when_ns Time when the retiring happened.
+    */
+   void (*retired)(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns);
+
    /*!
     * Add a new sample point from the main render loop.
     *
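To make the new contract concrete: a minimal, illustrative sketch of the call order the documentation above implies. The stub type and functions (app_pacer_stub, stub_latched, stub_retired) are hypothetical stand-ins, not Monado code; only the call shapes come from this diff.

/* Illustrative only: a stub pacer showing latched being called twice for
 * the same app frame (app at half the native cadence), then retired once
 * when a newer frame takes over. */
#include <stdint.h>
#include <stdio.h>

struct app_pacer_stub
{
    void (*latched)(struct app_pacer_stub *upa, int64_t frame_id, uint64_t when_ns, int64_t system_frame_id);
    void (*retired)(struct app_pacer_stub *upa, int64_t frame_id, uint64_t when_ns);
};

static void
stub_latched(struct app_pacer_stub *upa, int64_t frame_id, uint64_t when_ns, int64_t system_frame_id)
{
    (void)upa;
    printf("app frame %ld latched by system frame %ld at %lu\n", (long)frame_id, (long)system_frame_id,
           (unsigned long)when_ns);
}

static void
stub_retired(struct app_pacer_stub *upa, int64_t frame_id, uint64_t when_ns)
{
    (void)upa;
    printf("app frame %ld retired at %lu\n", (long)frame_id, (unsigned long)when_ns);
}

int
main(void)
{
    struct app_pacer_stub pacer = {stub_latched, stub_retired};

    // App frame 1 is shown by two consecutive system frames (half cadence),
    // so it is latched twice before a newer frame replaces it.
    pacer.latched(&pacer, 1, 1000, 10);
    pacer.latched(&pacer, 1, 2000, 11);

    // App frame 2 takes over: frame 1 will never be latched again.
    pacer.retired(&pacer, 1, 3000);
    pacer.latched(&pacer, 2, 3000, 12);
    return 0;
}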
@@ -486,6 +510,34 @@ u_pa_info(struct u_pacing_app *upa,
    upa->info(upa, predicted_display_time_ns, predicted_display_period_ns, extra_ns);
 }

+/*!
+ * @copydoc u_pacing_app::latched
+ *
+ * Helper for calling through the function pointer.
+ *
+ * @public @memberof u_pacing_app
+ * @ingroup aux_pacing
+ */
+static inline void
+u_pa_latched(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns, int64_t system_frame_id)
+{
+   upa->latched(upa, frame_id, when_ns, system_frame_id);
+}
+
+/*!
+ * @copydoc u_pacing_app::retired
+ *
+ * Helper for calling through the function pointer.
+ *
+ * @public @memberof u_pacing_app
+ * @ingroup aux_pacing
+ */
+static inline void
+u_pa_retired(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns)
+{
+   upa->retired(upa, frame_id, when_ns);
+}
+
 /*!
  * @copydoc u_pacing_app::destroy
  *
@@ -44,6 +44,7 @@ enum u_pa_state
    U_RT_PREDICTED,
    U_RT_BEGUN,
    U_RT_DELIVERED,
+   U_RT_GPU_DONE,
 };

 struct u_pa_frame
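Read together with the asserts in pa_retired further down, the new U_RT_GPU_DONE value suggests a lifecycle of predicted → begun → delivered → GPU done → retired (slot ready again). The sketch below encodes only that inference; the enum names are shortened stand-ins and the transition set is deduced from this commit, not taken from the source.

/* Sketch of the frame lifecycle implied by this commit; the "can retire"
 * rule mirrors the asserts in pa_retired, nothing more. */
#include <assert.h>
#include <stdbool.h>

enum frame_state
{
    READY,     // slot free (U_PA_READY)
    PREDICTED, // timing predicted (U_RT_PREDICTED)
    BEGUN,     // app began rendering (U_RT_BEGUN)
    DELIVERED, // app delivered the frame (U_RT_DELIVERED)
    GPU_DONE,  // GPU work observed complete (U_RT_GPU_DONE)
};

// Retiring is only valid once the frame has at least been delivered.
static bool
can_retire(enum frame_state s)
{
    return s == GPU_DONE || s == DELIVERED;
}

int
main(void)
{
    enum frame_state s = READY;
    s = PREDICTED;
    s = BEGUN;
    s = DELIVERED;
    s = GPU_DONE;
    assert(can_retire(s));
    s = READY; // retired: slot is reusable
    return 0;
}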
@@ -414,6 +415,7 @@ pa_mark_gpu_done(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns)

    // Update all data.
    f->when.gpu_done_ns = when_ns;
+   f->state = U_RT_GPU_DONE;


    /*
@@ -449,6 +451,25 @@ pa_mark_gpu_done(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns)

    // Write out tracing data.
    do_tracing(pa, f);
+}
+
+static void
+pa_latched(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns, int64_t system_frame_id)
+{
+   struct pacing_app *pa = pacing_app(upa);
+
+   (void)pa;
+}
+
+static void
+pa_retired(struct u_pacing_app *upa, int64_t frame_id, uint64_t when_ns)
+{
+   struct pacing_app *pa = pacing_app(upa);
+
+   size_t index = GET_INDEX_FROM_ID(pa, frame_id);
+   struct u_pa_frame *f = &pa->frames[index];
+   assert(f->frame_id == frame_id);
+   assert(f->state == U_RT_GPU_DONE || f->state == U_RT_DELIVERED);

    // Reset the frame.
    f->state = U_PA_READY;
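pa_retired resolves frame_id to a slot with GET_INDEX_FROM_ID and asserts that the stored ID matches. The macro's definition is not part of this diff; below is a plausible shape, assuming a small ring indexed by the frame ID modulo the ring size. FRAME_COUNT, lookup, and frames are hypothetical names for illustration.

/* Hypothetical sketch: frame_id -> slot lookup for a small frame ring,
 * assuming index = id modulo ring size; GET_INDEX_FROM_ID's real
 * definition is not shown in this diff. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define FRAME_COUNT 8

struct frame
{
    int64_t frame_id;
};

static struct frame frames[FRAME_COUNT];

static struct frame *
lookup(int64_t frame_id)
{
    size_t index = (size_t)((uint64_t)frame_id % FRAME_COUNT);
    struct frame *f = &frames[index];

    // Catches a stale slot: an older frame still occupying this index.
    assert(f->frame_id == frame_id);
    return f;
}

int
main(void)
{
    frames[3].frame_id = 11; // 11 % 8 == 3
    assert(lookup(11) == &frames[3]);
    return 0;
}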
@@ -483,6 +504,8 @@ pa_create(int64_t session_id, struct u_pacing_app **out_upa)
    pa->base.mark_discarded = pa_mark_discarded;
    pa->base.mark_delivered = pa_mark_delivered;
    pa->base.mark_gpu_done = pa_mark_gpu_done;
+   pa->base.latched = pa_latched;
+   pa->base.retired = pa_retired;
    pa->base.info = pa_info;
    pa->base.destroy = pa_destroy;
    pa->session_id = session_id;
@@ -42,9 +42,18 @@
  *
  */

+
+/*!
+ * Clear a slot; the list_and_timing_lock must be held.
+ */
 static void
-slot_clear(struct multi_layer_slot *slot)
+slot_clear_locked(struct multi_compositor *mc, struct multi_layer_slot *slot)
 {
+   if (slot->active) {
+       uint64_t now_ns = os_monotonic_get_ns();
+       u_pa_retired(mc->upa, slot->frame_id, now_ns);
+   }
+
    for (size_t i = 0; i < slot->layer_count; i++) {
        for (size_t k = 0; k < ARRAY_SIZE(slot->layers[i].xscs); k++) {
            xrt_swapchain_reference(&slot->layers[i].xscs[k], NULL);
@@ -54,10 +63,13 @@ slot_clear(struct multi_layer_slot *slot)
    U_ZERO(slot);
 }

+/*!
+ * Move a slot into a destination slot that has already been cleared.
+ */
 static void
-slot_move_and_clear(struct multi_layer_slot *dst, struct multi_layer_slot *src)
+slot_move_into_cleared(struct multi_layer_slot *dst, struct multi_layer_slot *src)
 {
-   slot_clear(dst);
+   assert(!dst->active);

    // All references are kept.
    *dst = *src;
@@ -65,6 +77,16 @@ slot_move_and_clear(struct multi_layer_slot *dst, struct multi_layer_slot *src)
    U_ZERO(src);
 }

+/*!
+ * Clear the destination slot, then move the source slot into it; the list_and_timing_lock must be held.
+ */
+static void
+slot_move_and_clear_locked(struct multi_compositor *mc, struct multi_layer_slot *dst, struct multi_layer_slot *src)
+{
+   slot_clear_locked(mc, dst);
+   slot_move_into_cleared(dst, src);
+}
+

 /*
  *
@@ -224,9 +246,18 @@ wait_for_scheduled_free(struct multi_compositor *mc)
        os_mutex_lock(&mc->slot_lock);
    }

-   slot_move_and_clear(&mc->scheduled, &mc->progress);
-
    os_mutex_unlock(&mc->slot_lock);
+
+   /*
+    * Need to take list_and_timing_lock before slot_lock, because slot_lock
+    * is taken in multi_compositor_deliver_any_frames while list_and_timing_lock
+    * is held to stop clients from going away.
+    */
+   os_mutex_lock(&mc->msc->list_and_timing_lock);
+   os_mutex_lock(&mc->slot_lock);
+   slot_move_and_clear_locked(mc, &mc->scheduled, &mc->progress);
+   os_mutex_unlock(&mc->slot_lock);
+   os_mutex_unlock(&mc->msc->list_and_timing_lock);
 }

 static void *
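The comment in this hunk establishes a lock hierarchy: list_and_timing_lock is the outer lock and slot_lock the inner one, so the function drops slot_lock and re-acquires both in order rather than taking the outer lock while still holding the inner one. A self-contained sketch of that pattern, with pthread mutexes standing in for Monado's os_mutex and a hypothetical function name:

/* Sketch of the lock-ordering pattern used above: never take the outer
 * lock while holding the inner one; drop and re-acquire in order instead. */
#include <pthread.h>

static pthread_mutex_t list_and_timing_lock = PTHREAD_MUTEX_INITIALIZER; // outer
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;            // inner

static void
move_slots_respecting_order(void)
{
    // Wrong (can deadlock against a thread holding outer, wanting inner):
    //   lock(slot_lock); lock(list_and_timing_lock);

    // Right: acquire outer first, then inner.
    pthread_mutex_lock(&list_and_timing_lock);
    pthread_mutex_lock(&slot_lock);

    // ... move and clear slots here ...

    pthread_mutex_unlock(&slot_lock);
    pthread_mutex_unlock(&list_and_timing_lock);
}

int
main(void)
{
    move_slots_respecting_order();
    return 0;
}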
@@ -638,6 +669,7 @@ multi_compositor_layer_begin(struct xrt_compositor *xc,
    U_ZERO(&mc->progress);

    mc->progress.active = true;
+   mc->progress.frame_id = frame_id;
    mc->progress.display_time_ns = display_time_ns;
    mc->progress.env_blend_mode = env_blend_mode;
@@ -866,9 +898,11 @@ multi_compositor_destroy(struct xrt_compositor *xc)
    os_thread_helper_destroy(&mc->wait_thread.oth);

    // We are now off the rendering list, clear slots for any swapchains.
-   slot_clear(&mc->progress);
-   slot_clear(&mc->scheduled);
-   slot_clear(&mc->delivered);
+   os_mutex_lock(&mc->msc->list_and_timing_lock);
+   slot_clear_locked(mc, &mc->progress);
+   slot_clear_locked(mc, &mc->scheduled);
+   slot_clear_locked(mc, &mc->delivered);
+   os_mutex_unlock(&mc->msc->list_and_timing_lock);

    // Does null checking.
    u_pa_destroy(&mc->upa);
@@ -893,12 +927,18 @@ multi_compositor_deliver_any_frames(struct multi_compositor *mc, uint64_t displa
    }

    if (time_is_greater_then_or_within_half_ms(display_time_ns, mc->scheduled.display_time_ns)) {
-       slot_move_and_clear(&mc->delivered, &mc->scheduled);
+       slot_move_and_clear_locked(mc, &mc->delivered, &mc->scheduled);
    }

    os_mutex_unlock(&mc->slot_lock);
 }

+void
+multi_compositor_latch_frame_locked(struct multi_compositor *mc, uint64_t when_ns, int64_t system_frame_id)
+{
+   u_pa_latched(mc->upa, mc->delivered.frame_id, when_ns, system_frame_id);
+}
+
 xrt_result_t
 multi_compositor_create(struct multi_system_compositor *msc,
                         const struct xrt_session_info *xsi,
@@ -68,6 +68,7 @@ struct multi_layer_entry
  */
 struct multi_layer_slot
 {
+   int64_t frame_id;
    uint64_t display_time_ns; //!< When should this be shown, @see XrFrameEndInfo::displayTime.
    enum xrt_blend_mode env_blend_mode;
    uint32_t layer_count;
@@ -217,6 +218,16 @@ multi_compositor_push_event(struct multi_compositor *mc, const union xrt_composi
 void
 multi_compositor_deliver_any_frames(struct multi_compositor *mc, uint64_t display_time_ns);

+/*!
+ * Marks the currently delivered frame as latched; called by the render thread.
+ * The list_and_timing_lock is held when this function is called.
+ *
+ * @ingroup comp_multi
+ * @private @memberof multi_compositor
+ */
+void
+multi_compositor_latch_frame_locked(struct multi_compositor *mc, uint64_t when_ns, int64_t system_frame_id);
+

 /*
  *
@@ -227,7 +227,7 @@ log_frame_time_diff(uint64_t frame_time_ns, uint64_t display_time_ns)
 }

 static void
-transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_time_ns)
+transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_time_ns, int64_t system_frame_id)
 {
    COMP_TRACE_MARKER();

@@ -235,28 +235,21 @@ transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_time_ns)

    struct multi_compositor *array[MULTI_MAX_CLIENTS] = {0};

+   // To mark latching.
+   uint64_t now_ns = os_monotonic_get_ns();
+
    size_t count = 0;
    for (size_t k = 0; k < ARRAY_SIZE(array); k++) {
-       if (msc->clients[k] == NULL) {
-           continue;
-       }
-
-       array[count++] = msc->clients[k];
-
-       // Even if it's not shown, make sure that frames are delivered.
-       multi_compositor_deliver_any_frames(msc->clients[k], display_time_ns);
-   }
-
-   // Sort the stack array
-   qsort(array, count, sizeof(struct multi_compositor *), overlay_sort_func);
-
-   for (size_t k = 0; k < count; k++) {
-       struct multi_compositor *mc = array[k];
+       struct multi_compositor *mc = msc->clients[k];

+       // Array can be empty.
        if (mc == NULL) {
            continue;
        }

+       // Even if it's not shown, make sure that frames are delivered.
+       multi_compositor_deliver_any_frames(mc, display_time_ns);
+
        // None of the data in this slot is valid, don't access it.
        if (!mc->delivered.active) {
            continue;
@@ -273,6 +266,20 @@ transfer_layers_locked(struct multi_system_compositor *msc, uint64_t display_time_ns)
            continue;
        }

+       // The list_and_timing_lock is held when calling this function.
+       multi_compositor_latch_frame_locked(mc, now_ns, system_frame_id);
+
+       array[count++] = msc->clients[k];
+   }
+
+   // Sort the stack array.
+   qsort(array, count, sizeof(struct multi_compositor *), overlay_sort_func);
+
+   // Copy all active layers.
+   for (size_t k = 0; k < count; k++) {
+       struct multi_compositor *mc = array[k];
+       assert(mc != NULL);
+
        uint64_t frame_time_ns = mc->delivered.display_time_ns;
        if (!time_is_within_half_ms(frame_time_ns, display_time_ns)) {
            log_frame_time_diff(frame_time_ns, display_time_ns);
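transfer_layers_locked now does its filtering, delivery, and latching in a single pass, then sorts the surviving clients with qsort before copying layers. The comparator overlay_sort_func is not shown in this diff; note that for an array of pointers, qsort hands the comparator pointers to the elements (double pointers), a classic pitfall. A hypothetical comparator sketch, with z_order as an assumed sort key:

/* Hypothetical sketch of sorting an array of client pointers with qsort;
 * the real ordering key used by overlay_sort_func is not shown here. */
#include <stdio.h>
#include <stdlib.h>

struct client
{
    int z_order;
};

// qsort passes pointers to the array elements, so for an array of
// struct client * each argument is really a struct client *const *.
static int
client_sort_func(const void *a, const void *b)
{
    const struct client *ca = *(struct client *const *)a;
    const struct client *cb = *(struct client *const *)b;
    return (ca->z_order > cb->z_order) - (ca->z_order < cb->z_order);
}

int
main(void)
{
    struct client c1 = {2}, c2 = {0}, c3 = {1};
    struct client *array[] = {&c1, &c2, &c3};

    qsort(array, 3, sizeof(array[0]), client_sort_func);

    for (size_t k = 0; k < 3; k++) {
        printf("%d\n", array[k]->z_order); // prints 0 1 2
    }
    return 0;
}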
@@ -486,7 +493,7 @@ multi_main_loop(struct multi_system_compositor *msc)

    // Make sure that the clients don't go away while we transfer layers.
    os_mutex_lock(&msc->list_and_timing_lock);
-   transfer_layers_locked(msc, predicted_display_time_ns);
+   transfer_layers_locked(msc, predicted_display_time_ns, frame_id);
    os_mutex_unlock(&msc->list_and_timing_lock);

    xrt_comp_layer_commit(xc, frame_id, XRT_GRAPHICS_SYNC_HANDLE_INVALID);