
destroy: shuffle for internal

Andy Green 2021-01-19 16:55:03 +00:00
parent 95a545b741
commit f1e5e573e6
5 changed files with 84 additions and 40 deletions


@@ -1123,6 +1123,24 @@ lws_context_user(struct lws_context *context);
LWS_VISIBLE LWS_EXTERN const char *
lws_vh_tag(struct lws_vhost *vh);
/**
* lws_context_is_being_destroyed() - find out if context is being destroyed
*
* \param context: the struct lws_context pointer
*
* Returns nonzero if the context has had lws_context_destroy() called on it...
* when using event library loops the destroy process can be asynchronous. In
* the special case of libuv foreign loops, a failed context creation may
* have to do work on the foreign loop to reverse the partial creation,
* meaning a failed context create cannot unpick what it did and simply
* return NULL.
*
* In that condition, a valid context that has already started the destroy
* process is returned, and this test api will return nonzero as a way to
* find out that the create is in the middle of failing.
*/
LWS_VISIBLE LWS_EXTERN int
lws_context_is_being_destroyed(struct lws_context *context);
/*! \defgroup vhost-mounts Vhost mounts and options
* \ingroup context-and-vhost-creation
*
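
A minimal caller-side sketch of the new api (hypothetical, not part of this commit; the start_lws() wrapper name and its policy are invented for illustration): since a failed create on a libuv foreign loop can hand back a non-NULL context that is already mid-destroy, the caller probes it with lws_context_is_being_destroyed() before treating the context as usable.

#include <libwebsockets.h>

/* hypothetical wrapper, for illustration only */
static struct lws_context *
start_lws(const struct lws_context_creation_info *info)
{
	struct lws_context *cx = lws_create_context(info);

	if (!cx)
		return NULL; /* synchronous failure */

	if (lws_context_is_being_destroyed(cx)) {
		/*
		 * the create is in the middle of failing asynchronously;
		 * the partial creation is being unwound on the foreign
		 * loop, so do not use cx as a live context
		 */
		return NULL;
	}

	return cx; /* usable context */
}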


@@ -1053,7 +1053,6 @@ lws_vhost_destroy1(struct lws_vhost *vh)
break;
}
} lws_end_foreach_ll(v, vhost_next);
#endif
lws_vhost_unlock(vh); /* } vh -------------- */


@@ -1871,10 +1871,6 @@ next:
#if defined(LWS_WITH_NETWORK)
context->evlib_finalize_destroy_after_int_loops_stop = 1;
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
for (n = 0; n < context->count_threads; n++) {
struct lws_context_per_thread *pt = &context->pt[n];
(void)pt;
@@ -1915,11 +1911,20 @@ next:
goto bail;
}
if (!context->pt[0].event_loop_foreign) {
lwsl_notice("%s: waiting for internal loop exit\n", __func__);
goto bail;
}
#endif
case LWSCD_FINALIZATION:
context->evlib_finalize_destroy_after_int_loops_stop = 1;
#if defined(LWS_WITH_NETWORK)
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
/*
* finalize destroy of pt and things hanging off it
@@ -2027,6 +2032,12 @@ bail:
lws_context_unlock(context);
}
int
lws_context_is_being_destroyed(struct lws_context *context)
{
return !!context->being_destroyed;
}
#if defined(LWS_WITH_SYS_STATE)
struct lws_context *
lws_system_context_from_system_mgr(lws_state_manager_t *mgr)


@@ -227,13 +227,21 @@ lws_uv_finalize_pt(struct lws_context_per_thread *pt)
return 1;
}
} else
lwsl_debug("%s: still %d undestroyed\n", __func__, pt->context->undestroyed_threads);
lwsl_debug("%s: still %d undestroyed\n", __func__,
pt->context->undestroyed_threads);
lws_context_unlock(pt->context);
return 0;
}
// static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
// {
// if (!uv_is_closing(handle))
// lwsl_err("%s: handle %p still alive on loop\n", __func__, handle);
// }
static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
/*
@@ -261,6 +269,13 @@ lws_uv_close_cb_sa(uv_handle_t *handle)
ptpriv->extant_handles)
return;
/*
* So we believe nothing of ours left on the loop. Let's sanity
* check it to count what's still on the loop
*/
// uv_walk(pt_to_priv_uv(pt)->io_loop, lws_uv_walk_cb, NULL);
/*
* That's it... all wsi were down, and now every
* static asset lws had a UV handle for is down.
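
Sketch only (the counting helper and its names are hypothetical, not in this commit): enabling that sanity check amounts to walking the loop with uv_walk() and reporting anything not already closing, much like the commented-out lws_uv_walk_cb above, optionally counting the survivors.

/* hypothetical: count and report handles still alive on the loop */
static void
count_live_handles_cb(uv_handle_t *handle, void *arg)
{
	int *live = (int *)arg;

	if (!uv_is_closing(handle)) {
		(*live)++;
		lwsl_err("%s: handle %p still alive on loop\n", __func__,
			 (void *)handle);
	}
}

/* usage sketch: int live = 0; uv_walk(pt_to_priv_uv(pt)->io_loop, count_live_handles_cb, &live); */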
@@ -270,9 +285,6 @@ lws_uv_close_cb_sa(uv_handle_t *handle)
lwsl_info("%s: thr %d: seen final static handle gone\n", __func__, tsi);
if (ptpriv->io_loop && !pt->event_loop_foreign)
uv_stop(pt_to_priv_uv(pt)->io_loop);
if (!pt->event_loop_foreign) {
lwsl_info("%s: calling lws_context_destroy2\n", __func__);
lws_context_destroy(context);
@@ -308,24 +320,6 @@ lws_libuv_static_refcount_del(uv_handle_t *h)
lws_uv_close_cb_sa(h);
}
static void lws_uv_close_cb(uv_handle_t *handle)
{
}
static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
{
if (!uv_is_closing(handle))
uv_close(handle, lws_uv_close_cb);
}
void
lws_close_all_handles_in_loop(uv_loop_t *loop)
{
uv_walk(loop, lws_uv_walk_cb, NULL);
}
void
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
{
@@ -333,8 +327,6 @@ lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
uv_stop(pt_to_priv_uv(&context->pt[tsi])->io_loop);
}
uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi)
{
@@ -516,6 +508,12 @@ elops_accept_uv(struct lws *wsi)
ptpriv->extant_handles++;
lwsl_debug("%s: thr %d: %s sa left %d: dyn left: %d\n", __func__,
(int)(pt - &pt->context->pt[0]),
lws_wsi_tag(wsi),
pt->count_event_loop_static_asset_handles,
ptpriv->extant_handles);
return 0;
}
@@ -575,6 +573,7 @@ static int
elops_init_vhost_listen_wsi_uv(struct lws *wsi)
{
struct lws_context_per_thread *pt;
struct lws_pt_eventlibs_libuv *ptpriv;
struct lws_io_watcher_libuv *w_read;
int n;
@@ -587,7 +586,8 @@ elops_init_vhost_listen_wsi_uv(struct lws *wsi)
return 0;
pt = &wsi->a.context->pt[(int)wsi->tsi];
if (!pt_to_priv_uv(pt)->io_loop)
ptpriv = pt_to_priv_uv(pt);
if (!ptpriv->io_loop)
return 0;
w_read->context = wsi->a.context;
@@ -605,6 +605,14 @@ elops_init_vhost_listen_wsi_uv(struct lws *wsi)
return -1;
}
ptpriv->extant_handles++;
lwsl_debug("%s: thr %d: %s sa left %d: dyn left: %d\n", __func__,
(int)(pt - &pt->context->pt[0]),
lws_wsi_tag(wsi),
pt->count_event_loop_static_asset_handles,
ptpriv->extant_handles);
((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;
elops_io_uv(wsi, LWS_EV_START | LWS_EV_READ);
@@ -623,16 +631,22 @@ static void
elops_destroy_pt_uv(struct lws_context *context, int tsi)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
int m, ns;
if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
return;
if (!pt_to_priv_uv(pt)->io_loop)
if (!ptpriv->io_loop)
return;
if (pt->event_loop_destroy_processing_done)
if (pt->event_loop_destroy_processing_done) {
if (!pt->event_loop_foreign) {
lwsl_warn("%s: stopping event loop\n", __func__);
uv_stop(pt_to_priv_uv(pt)->io_loop);
}
return;
}
pt->event_loop_destroy_processing_done = 1;
lwsl_debug("%s: %d\n", __func__, tsi);
@@ -774,9 +788,17 @@ lws_libuv_closewsi(uv_handle_t* handle)
#endif
lws_pt_lock(pt, __func__);
lwsl_notice("%s: thr %d: %s sa left %d: dyn left: %d (rk %d)\n", __func__,
(int)(pt - &pt->context->pt[0]),
lws_wsi_tag(wsi),
pt->count_event_loop_static_asset_handles,
ptpriv->extant_handles - 1,
context->requested_stop_internal_loops);
__lws_close_free_wsi_final(wsi);
assert(ptpriv->extant_handles);
ptpriv->extant_handles--;
assert(ptpriv >= 0);
lws_pt_unlock(pt);
/* it's our job to close the handle finally */
@@ -789,12 +811,6 @@ lws_libuv_closewsi(uv_handle_t* handle)
}
#endif
lwsl_notice("%s: thr %d: sa left %d: dyn left: %d (rk %d)\n", __func__,
(int)(pt - &pt->context->pt[0]),
pt->count_event_loop_static_asset_handles,
ptpriv->extant_handles,
context->requested_stop_internal_loops);
/*
* eventually, we closed all the wsi...
*/


@@ -90,7 +90,7 @@ sanity_assert_no_sockfd_traces(const struct lws_context *context,
#else
struct lws **p, **done;
if (sfd == LWS_SOCK_INVALID)
if (sfd == LWS_SOCK_INVALID || !context->lws_lookup)
return 0;
if (!context->max_fds_unrelated_to_ulimit &&