Mirror of https://gitlab.freedesktop.org/monado/monado.git (synced 2025-01-01 12:46:12 +00:00)
ipc: Rename client connected function and document code
commit 2edf07749b
parent 0195e22341
@@ -431,11 +431,16 @@ ipc_server_client_destroy_compositor(volatile struct ipc_client_state *ics);
  * @{
  */
 /*!
- * Start a thread for a client connected at the other end of the ipc handle @p ipc_handle.
+ * Called when a client has connected; takes the client's ipc handle.
+ * Handles all things that need to be done for a connecting client, like
+ * starting its thread.
+ *
+ * @param vs The IPC server.
+ * @param ipc_handle Handle to communicate over.
  * @memberof ipc_server
  */
 void
-ipc_server_start_client_listener_thread(struct ipc_server *vs, xrt_ipc_handle_t ipc_handle);
+ipc_server_handle_client_connected(struct ipc_server *vs, xrt_ipc_handle_t ipc_handle);
 
 /*!
  * Perform whatever needs to be done when the mainloop polling encounters a failure.
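Editor's note: with the rename, this is the one entry point a platform mainloop calls once it holds a connected handle. A minimal caller could look like the sketch below, assuming a Linux build where xrt_ipc_handle_t is a plain socket fd; example_accept_one, listen_fd, and the restated declaration are illustration only, not Monado code.

	#include <stddef.h>
	#include <sys/socket.h>

	struct ipc_server; // Opaque here; defined by the server internals.

	// Declaration from the header above, restated so the sketch stands
	// alone; assumes xrt_ipc_handle_t is int on Linux.
	void
	ipc_server_handle_client_connected(struct ipc_server *vs, int ipc_handle);

	// Hypothetical caller: accept one connection and hand it straight off.
	static void
	example_accept_one(struct ipc_server *vs, int listen_fd)
	{
		int newfd = accept(listen_fd, NULL, NULL);
		if (newfd < 0) {
			return; // A real mainloop would log and keep polling.
		}

		// Everything per-client, including starting its thread, happens here.
		ipc_server_handle_client_connected(vs, newfd);
	}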
@@ -91,9 +91,14 @@ handle_listen(struct ipc_server *vs, struct ipc_server_mainloop *ml)
 	if (read(ml->pipe_read, &newfd, sizeof(newfd)) == sizeof(newfd)) {
 		// client_push_mutex should prevent dropping acknowledgements
 		assert(ml->last_accepted_fd == 0);
+
 		// Release the thread that gave us this fd.
 		ml->last_accepted_fd = newfd;
-		ipc_server_start_client_listener_thread(vs, newfd);
+
+		// Call into the generic client connected handling code.
+		ipc_server_handle_client_connected(vs, newfd);
+
+		// If we are waiting to shut down, wake that thread up.
 		pthread_cond_broadcast(&ml->accept_cond);
 	} else {
 		U_LOG_E("error on pipe read");
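Editor's note: the assert and the broadcast above are the consumer half of a self-pipe handoff: another thread writes a freshly accepted fd into the pipe, the mainloop stores it in last_accepted_fd, runs the connect handling, and broadcasts accept_cond to release the pusher. Below is a hedged sketch of the pushing half; the struct is a stand-in, and every field not named in the hunk above (pipe_write, accept_mutex) is an assumption.

	#include <pthread.h>
	#include <unistd.h>

	struct example_mainloop
	{
		int pipe_read;
		int pipe_write;               // assumed counterpart to pipe_read
		int last_accepted_fd;
		pthread_mutex_t accept_mutex; // assumed mutex paired with accept_cond
		pthread_cond_t accept_cond;
		pthread_mutex_t client_push_mutex;
	};

	// Producer side: deliver one fd to the mainloop thread and block until
	// it has been consumed, so acknowledgements can never be dropped.
	static void
	example_push_fd(struct example_mainloop *ml, int newfd)
	{
		// Serializing pushers is what lets handle_listen assert that
		// last_accepted_fd is still 0 when a new fd arrives.
		pthread_mutex_lock(&ml->client_push_mutex);

		pthread_mutex_lock(&ml->accept_mutex);
		if (write(ml->pipe_write, &newfd, sizeof(newfd)) == sizeof(newfd)) {
			// Wait until the mainloop records the fd and broadcasts.
			while (ml->last_accepted_fd != newfd) {
				pthread_cond_wait(&ml->accept_cond, &ml->accept_mutex);
			}
			ml->last_accepted_fd = 0; // Re-arm for the next push.
		}
		pthread_mutex_unlock(&ml->accept_mutex);

		pthread_mutex_unlock(&ml->client_push_mutex);
	}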
@@ -211,7 +211,9 @@ handle_listen(struct ipc_server *vs, struct ipc_server_mainloop *ml)
 		ipc_server_handle_failure(vs);
 		return;
 	}
-	ipc_server_start_client_listener_thread(vs, ret);
+
+	// Call into the generic client connected handling code.
+	ipc_server_handle_client_connected(vs, ret);
 }
 
 #define NUM_POLL_EVENTS 8
@@ -192,7 +192,10 @@ handle_connected_client(struct ipc_server *vs, struct ipc_server_mainloop *ml)
 
 	bRet = SetNamedPipeHandleState(ml->pipe_handle, &mode, nullptr, nullptr);
 	if (bRet) {
-		ipc_server_start_client_listener_thread(vs, ml->pipe_handle);
+		// Call into the generic client connected handling code.
+		ipc_server_handle_client_connected(vs, ml->pipe_handle);
+
+		// Create another pipe to wait on.
 		create_another_pipe_instance(vs, ml);
 		return;
 	}
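Editor's note: the Windows path performs the same substitution once a named-pipe instance connects. A sketch of that shape follows; the mode value is an assumption (this hunk only shows that some mode is set), and example_on_pipe_connected is invented. The real file is C++ (hence the nullptr above); the sketch uses plain C.

	#include <windows.h>

	struct ipc_server; // Opaque, as in the header.

	// Assumes xrt_ipc_handle_t is a HANDLE on Windows.
	void
	ipc_server_handle_client_connected(struct ipc_server *vs, HANDLE ipc_handle);

	// Hypothetical wrapper: configure the connected pipe instance, then
	// hand it to the same generic entry point the Linux paths use.
	static void
	example_on_pipe_connected(struct ipc_server *vs, HANDLE pipe_handle)
	{
		DWORD mode = PIPE_READMODE_MESSAGE; // assumed mode value

		if (!SetNamedPipeHandleState(pipe_handle, &mode, NULL, NULL)) {
			return; // Real code logs and tears the instance down.
		}

		ipc_server_handle_client_connected(vs, pipe_handle);
	}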
@@ -385,7 +385,7 @@ ipc_server_handle_shutdown_signal(struct ipc_server *vs)
 }
 
 void
-ipc_server_start_client_listener_thread(struct ipc_server *vs, xrt_ipc_handle_t ipc_handle)
+ipc_server_handle_client_connected(struct ipc_server *vs, xrt_ipc_handle_t ipc_handle)
 {
 	volatile struct ipc_client_state *ics = NULL;
 	int32_t cs_index = -1;
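Editor's note: the rest of the definition is cut off in this view. Going by the old name and the two locals shown, the body presumably claims a free ipc_client_state slot and then starts the client's listener thread. The self-contained sketch below illustrates that shape only; every type, field, and constant in it is an invented stand-in, not Monado's implementation.

	#include <pthread.h>
	#include <stddef.h>

	#define EXAMPLE_MAX_CLIENTS 8 // invented bound

	struct example_client
	{
		int in_use;
		int ipc_handle;
	};

	struct example_server
	{
		struct example_client clients[EXAMPLE_MAX_CLIENTS];
	};

	// Per-client thread: services requests until the client disconnects.
	static void *
	example_client_loop(void *arg)
	{
		struct example_client *ec = arg;
		// ... read and answer IPC requests on ec->ipc_handle ...
		ec->in_use = 0; // Slot becomes reusable on disconnect.
		return NULL;
	}

	static void
	example_handle_client_connected(struct example_server *s, int ipc_handle)
	{
		// Claim a free slot, mirroring the cs_index search started above.
		struct example_client *ec = NULL;
		for (int i = 0; i < EXAMPLE_MAX_CLIENTS; i++) {
			if (!s->clients[i].in_use) {
				ec = &s->clients[i];
				break;
			}
		}
		if (ec == NULL) {
			return; // Out of slots; real code would also close the handle.
		}

		ec->in_use = 1;
		ec->ipc_handle = ipc_handle;

		// The step the old function name described: start the client's thread.
		pthread_t thread;
		pthread_create(&thread, NULL, example_client_loop, ec);
		pthread_detach(thread);
	}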