smp: additional locking for libuv
With SMP + an event lib, extra locking is required: for the cross-thread adoption case, and for cross-vhost cases like wsi close, we need to hold the pt or context lock. These lock apis are NOPs when LWS_MAX_SMP == 1, which is the default.
Parent: c7c2db871d
Commit: 4f0545cc54
4 changed files with 52 additions and 9 deletions
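Before the diff, a minimal sketch (not part of the patch) of the lock nesting the commit message describes: take the context lock first, then the per-thread (pt) lock, and release them in reverse order. With LWS_MAX_SMP == 1 these lock/unlock calls compile to NOPs, so single-threaded builds are unaffected. The include and handle_cross_thread_case() are illustrative assumptions, not part of the lws sources.

/* illustrative only -- mirrors the lock nesting used in the diff below */
#include "private-lib-core.h"   /* assumed: lws internal header providing the lock apis */

/* hypothetical work that touches state shared across service threads */
static void
handle_cross_thread_case(struct lws_context_per_thread *pt)
{
        (void)pt;       /* placeholder */
}

static void
locked_cross_thread_op(struct lws_context *context, int tsi)
{
        struct lws_context_per_thread *pt = &context->pt[tsi];

        lws_context_lock(context, __func__);    /* outer: cross-vhost / adoption state */
        lws_pt_lock(pt, __func__);              /* inner: this service thread's state */

        handle_cross_thread_case(pt);           /* work done while both locks are held */

        lws_pt_unlock(pt);                      /* unlock in reverse order */
        lws_context_unlock(context);
}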
@@ -140,16 +140,22 @@ lws_adopt_descriptor_vhost1(struct lws_vhost *vh, lws_adoption_type type,
 	 * we initialize it, it may become "live" concurrently unexpectedly...
 	 */
 
+	lws_context_lock(vh->context, __func__);
+
 	n = -1;
 	if (parent)
 		n = parent->tsi;
 	new_wsi = lws_create_new_server_wsi(vh, n);
-	if (!new_wsi)
+	if (!new_wsi) {
+		lws_context_unlock(vh->context);
 		return NULL;
+	}
 
 	new_wsi->a.opaque_user_data = opaque;
 
 	pt = &context->pt[(int)new_wsi->tsi];
+	lws_pt_lock(pt, __func__);
+
 	lws_stats_bump(pt, LWSSTATS_C_CONNECTIONS, 1);
 
 	if (parent) {
@@ -181,6 +187,8 @@ lws_adopt_descriptor_vhost1(struct lws_vhost *vh, lws_adoption_type type,
 		goto bail;
 	}
 
+	lws_pt_unlock(pt);
+
 	/*
 	 * he's an allocated wsi, but he's not on any fds list or child list,
 	 * join him to the vhost's list of these kinds of incomplete wsi until
@@ -191,6 +199,8 @@ lws_adopt_descriptor_vhost1(struct lws_vhost *vh, lws_adoption_type type,
 			       &new_wsi->a.vhost->vh_awaiting_socket_owner);
 	lws_vhost_unlock(new_wsi->a.vhost);
 
+	lws_context_unlock(vh->context);
+
 	return new_wsi;
 
 bail:
@@ -203,8 +213,12 @@ bail:
 	vh->context->count_wsi_allocated--;
 
 	lws_vhost_unbind_wsi(new_wsi);
 
 	lws_free(new_wsi);
+
+	lws_pt_unlock(pt);
+	lws_context_unlock(vh->context);
+
 	return NULL;
 }
 
@@ -143,15 +143,17 @@ lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
 {
 	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
 
+	lws_context_lock(pt->context, __func__);
 	lws_pt_lock(pt, __func__);
 	lws_dll2_remove(&wsi->sul_timeout.list);
 	lws_pt_unlock(pt);
 
 	if (!secs)
-		return;
+		goto bail;
 
 	if (secs == LWS_TO_KILL_SYNC) {
 		lwsl_debug("synchronously killing %p\n", wsi);
+		lws_context_unlock(pt->context);
 		lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
 				   "to sync kill");
 		return;
@@ -167,6 +169,9 @@ lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
 	lws_pt_lock(pt, __func__);
 	__lws_set_timeout(wsi, reason, secs);
 	lws_pt_unlock(pt);
+
+bail:
+	lws_context_unlock(pt->context);
 }
 
 void
@@ -1508,14 +1508,14 @@ lws_context_destroy2(struct lws_context *context)
 
 	lwsl_debug("%p: post dc2\n", __func__);
 
-	if (!context->pt[0].event_loop_foreign) {
-		int n;
+//	if (!context->pt[0].event_loop_foreign) {
+//		int n;
 		for (n = 0; n < context->count_threads; n++)
 			if (context->pt[n].inside_service) {
 				lwsl_debug("%p: bailing as inside service\n", __func__);
 				return;
 			}
-	}
+//	}
 #endif
 
 	lws_context_destroy3(context);
@@ -40,6 +40,7 @@ lws_uv_sultimer_cb(uv_timer_t *timer
 	struct lws_context_per_thread *pt = ptpr->pt;
 	lws_usec_t us;
 
+	lws_context_lock(pt->context, __func__);
 	lws_pt_lock(pt, __func__);
 	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
 				    lws_now_usecs());
@@ -47,6 +48,7 @@ lws_uv_sultimer_cb(uv_timer_t *timer
 		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
 			       LWS_US_TO_MS(us), 0);
 	lws_pt_unlock(pt);
+	lws_context_unlock(pt->context);
 }
 
 static void
@@ -62,6 +64,9 @@ lws_uv_idle(uv_idle_t *handle
 
 	lws_service_do_ripe_rxflow(pt);
 
+	lws_context_lock(pt->context, __func__);
+	lws_pt_lock(pt, __func__);
+
 	/*
 	 * is there anybody with pending stuff that needs service forcing?
 	 */
@@ -71,16 +76,17 @@ lws_uv_idle(uv_idle_t *handle
 
 	/* account for sultimer */
 
-	lws_pt_lock(pt, __func__);
 	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
 				    lws_now_usecs());
 	if (us)
 		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
 			       LWS_US_TO_MS(us), 0);
-	lws_pt_unlock(pt);
 
 	/* there is nobody who needs service forcing, shut down idle */
 	uv_idle_stop(handle);
 
+	lws_pt_unlock(pt);
+	lws_context_unlock(pt->context);
 }
 
 static void
@@ -91,8 +97,11 @@ lws_io_cb(uv_poll_t *watcher, int status, int revents)
 	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
 	struct lws_pollfd eventfd;
 
+	lws_context_lock(pt->context, __func__);
+	lws_pt_lock(pt, __func__);
+
 	if (pt->is_destroyed)
-		return;
+		goto bail;
 
 #if defined(WIN32) || defined(_WIN32)
 	eventfd.fd = watcher->socket;
@@ -110,7 +119,7 @@ lws_io_cb(uv_poll_t *watcher, int status, int revents)
 		 * You might want to return; instead of servicing the fd in
		 * some cases */
		if (status == UV_EAGAIN)
-			return;
+			goto bail;
 
 		eventfd.events |= LWS_POLLHUP;
 		eventfd.revents |= LWS_POLLHUP;
@@ -124,6 +133,10 @@ lws_io_cb(uv_poll_t *watcher, int status, int revents)
 			eventfd.revents |= LWS_POLLOUT;
 		}
 	}
+
+	lws_pt_unlock(pt);
+	lws_context_unlock(pt->context);
+
 	lws_service_fd_tsi(context, &eventfd, wsi->tsi);
 
 	if (pt->destroy_self) {
@@ -132,6 +145,11 @@ lws_io_cb(uv_poll_t *watcher, int status, int revents)
 	}
 
 	uv_idle_start(&pt_to_priv_uv(pt)->idle, lws_uv_idle);
+	return;
+
+bail:
+	lws_pt_unlock(pt);
+	lws_context_unlock(pt->context);
 }
 
 /*
@@ -714,6 +732,8 @@ lws_libuv_closewsi(uv_handle_t* handle)
 
 	lwsl_info("%s: %p\n", __func__, wsi);
 
+	lws_context_lock(context, __func__);
+
 	/*
 	 * We get called back here for every wsi that closes
 	 */
@@ -774,9 +794,13 @@ lws_libuv_closewsi(uv_handle_t* handle)
 		if (!context->count_event_loop_static_asset_handles &&
 		    context->pt[0].event_loop_foreign) {
 			lwsl_info("%s: call lws_context_destroy2\n", __func__);
+			lws_context_unlock(context);
 			lws_context_destroy2(context);
+			return;
 		}
 	}
+
+	lws_context_unlock(context);
 }
 
 void