monado/src/xrt/auxiliary/util/u_sink_queue.c


// Copyright 2019-2021, Collabora, Ltd.
// SPDX-License-Identifier: BSL-1.0
/*!
 * @file
 * @brief An @ref xrt_frame_sink queue.
 * @author Jakob Bornecrantz <jakob@collabora.com>
 * @ingroup aux_util
 */

#include "util/u_misc.h"
#include "util/u_sink.h"
#include "util/u_trace_marker.h"

#include <stdio.h>
#include <pthread.h>
#include <assert.h> // Used by the queue-invariant assertions below.

struct u_sink_queue_elem
{
        struct xrt_frame *frame;
        struct u_sink_queue_elem *next;
};

/*!
 * An @ref xrt_frame_sink queue: any frames received are pushed to the
 * downstream consumer on the queue thread. New frames are dropped once
 * max_size frames are already queued (0 means unbounded).
 *
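 * A minimal usage sketch (the @c xfctx, @c consumer and @c frame variables
 * are hypothetical; only the @ref u_sink_queue_create call below is taken
 * from this file):
 * @code
 * struct xrt_frame_sink *queue = NULL;
 * if (u_sink_queue_create(xfctx, 2, consumer, &queue)) {
 *         // Pushes return quickly; the queue thread forwards the frame.
 *         queue->push_frame(queue, frame);
 * }
 * @endcode
 *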
 * @implements xrt_frame_sink
 * @implements xrt_frame_node
 */
struct u_sink_queue
{
        //! Base sink.
        struct xrt_frame_sink base;

        //! For tracking on the frame context.
        struct xrt_frame_node node;

        //! The consumer of the frames that are queued.
        struct xrt_frame_sink *consumer;

        //! Front of the queue (oldest frame, first to be consumed).
        struct u_sink_queue_elem *front;

        //! Back of the queue (newest frame, back->next is always NULL).
        struct u_sink_queue_elem *back;

        //! Number of currently enqueued frames.
        uint64_t size;

        //! Max number of frames before dropping new ones; 0 means unbounded.
        uint64_t max_size;

        pthread_t thread;
        pthread_mutex_t mutex;

        //! So we can wake the mainloop up.
        pthread_cond_t cond;

        //! Should we keep running.
        bool running;
};
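

/*
 *
 * Private functions.
 *
 */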

//! Call with q->mutex locked.
static bool
queue_is_empty(struct u_sink_queue *q)
{
        return q->size == 0;
}

//! Call with q->mutex locked.
static bool
queue_is_full(struct u_sink_queue *q)
{
        bool is_unbounded = q->max_size == 0;
        return q->size >= q->max_size && !is_unbounded;
}

//! Pops the oldest frame, reference counting unchanged.
//! Call with q->mutex locked.
static struct xrt_frame *
queue_pop(struct u_sink_queue *q)
{
        assert(!queue_is_empty(q));

        struct xrt_frame *frame = q->front->frame;
        struct u_sink_queue_elem *old_front = q->front;
        q->front = q->front->next;
        free(old_front);
        q->size--;

        if (q->front == NULL) {
                assert(queue_is_empty(q));
                q->back = NULL;
        }

        return frame;
}

//! Tries to push a frame, taking a new reference on success.
//! Call with q->mutex locked.
static bool
queue_try_refpush(struct u_sink_queue *q, struct xrt_frame *xf)
{
        if (queue_is_full(q)) {
                return false;
        }

        struct u_sink_queue_elem *elem = U_TYPED_CALLOC(struct u_sink_queue_elem);
        if (elem == NULL) {
                return false; // Allocation failed, drop the frame.
        }

        xrt_frame_reference(&elem->frame, xf);
        elem->next = NULL;

        if (q->back == NULL) { // First frame.
                q->front = elem;
        } else { // Next frame.
                q->back->next = elem;
        }
        q->back = elem;
        q->size++;

        return true;
}

//! Clears the queue and unreferences all of its frames.
//! Call with q->mutex locked.
static void
queue_refclear(struct u_sink_queue *q)
{
        while (!queue_is_empty(q)) {
                assert((q->size > 1) ^ (q->front == q->back));
                struct xrt_frame *xf = queue_pop(q);
                xrt_frame_reference(&xf, NULL);
        }
}

static void *
queue_mainloop(void *ptr)
{
        SINK_TRACE_MARKER();

        struct u_sink_queue *q = (struct u_sink_queue *)ptr;
        struct xrt_frame *frame = NULL;

        pthread_mutex_lock(&q->mutex);

        while (q->running) {
                // No new frame, wait.
                if (queue_is_empty(q)) {
                        pthread_cond_wait(&q->cond, &q->mutex);
                }

                // In this case, queue_break_apart woke us up to turn us off.
                if (!q->running) {
                        break;
                }

                // Spurious wakeup or still no frame, wait again.
                if (queue_is_empty(q)) {
                        continue;
                }

                SINK_TRACE_IDENT(queue_frame);

                /*
                 * Dequeue the frame. Reference counting is unchanged by
                 * queue_pop(), so the queue's reference is transferred to
                 * us, keeping the frame alive during the call to the
                 * consumer.
                 */
                frame = queue_pop(q);

                /*
                 * Unlock the mutex while we do the work, so a new frame can
                 * be queued.
                 */
                pthread_mutex_unlock(&q->mutex);

                // Send to the consumer that does the work.
                q->consumer->push_frame(q->consumer, frame);

                /*
                 * Drop our reference; we don't need it anymore, and the
                 * consumer has taken its own if it still needs the frame.
                 */
                xrt_frame_reference(&frame, NULL);

                // Have to lock it again.
                pthread_mutex_lock(&q->mutex);
        }

        pthread_mutex_unlock(&q->mutex);

        return NULL;
}

static void
queue_frame(struct xrt_frame_sink *xfs, struct xrt_frame *xf)
{
        SINK_TRACE_MARKER();

        struct u_sink_queue *q = (struct u_sink_queue *)xfs;

        pthread_mutex_lock(&q->mutex);

        // Only schedule new frames if we are running.
        if (q->running) {
                queue_try_refpush(q, xf);
        }

        // Wake up the thread.
        pthread_cond_signal(&q->cond);

        pthread_mutex_unlock(&q->mutex);
}

static void
queue_break_apart(struct xrt_frame_node *node)
{
        struct u_sink_queue *q = container_of(node, struct u_sink_queue, node);
        void *retval = NULL;

        // The fields are protected.
        pthread_mutex_lock(&q->mutex);

        // Stop the thread and inhibit any new frames from being queued.
        q->running = false;

        // Release any frames waiting for submission.
        queue_refclear(q);

        // Wake up the thread.
        pthread_cond_signal(&q->cond);

        // No longer need to protect fields.
        pthread_mutex_unlock(&q->mutex);

        // Wait for thread to finish.
        pthread_join(q->thread, &retval);
}

static void
queue_destroy(struct xrt_frame_node *node)
{
        struct u_sink_queue *q = container_of(node, struct u_sink_queue, node);

        // Destroy resources.
        pthread_mutex_destroy(&q->mutex);
        pthread_cond_destroy(&q->cond);

        free(q);
}


/*
 *
 * Exported functions.
 *
 */
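
/*!
 * Create a queue sink: frames pushed to the returned sink are forwarded to
 * @p downstream on a dedicated thread; new frames are dropped while
 * @p max_size (0 means unbounded) frames are already queued. Lifetime is
 * managed by @p xfctx through this queue's @ref xrt_frame_node.
 */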
bool
u_sink_queue_create(struct xrt_frame_context *xfctx,
                    uint64_t max_size,
                    struct xrt_frame_sink *downstream,
                    struct xrt_frame_sink **out_xfs)
{
        struct u_sink_queue *q = U_TYPED_CALLOC(struct u_sink_queue);
        int ret = 0;

        if (q == NULL) {
                return false;
        }

        q->base.push_frame = queue_frame;
        q->node.break_apart = queue_break_apart;
        q->node.destroy = queue_destroy;
        q->consumer = downstream;
        q->running = true;

        q->size = 0;
        q->max_size = max_size;

        ret = pthread_mutex_init(&q->mutex, NULL);
        if (ret != 0) {
                free(q);
                return false;
        }

        ret = pthread_cond_init(&q->cond, NULL);
        if (ret != 0) {
                pthread_mutex_destroy(&q->mutex);
                free(q);
                return false;
        }

        ret = pthread_create(&q->thread, NULL, queue_mainloop, q);
        if (ret != 0) {
                pthread_cond_destroy(&q->cond);
                pthread_mutex_destroy(&q->mutex);
                free(q);
                return false;
        }

        xrt_frame_context_add(xfctx, &q->node);

        *out_xfs = &q->base;

        return true;
}