mirror of https://github.com/warmcat/libwebsockets.git synced 2025-03-09 00:00:04 +01:00

context: refactor destroy flow

Andy Green 2020-11-16 19:32:58 +00:00
parent 3549a94ce6
commit 2bcae2b3b6
35 changed files with 694 additions and 464 deletions

View file

@ -173,7 +173,7 @@ lws_handle_POLLOUT_event(struct lws *wsi, struct lws_pollfd *pollfd);
* Any direct libuv allocations in lws protocol handlers must participate in the
* lws reference counting scheme. Two apis are provided:
*
* - lws_libuv_static_refcount_add(handle, context) to mark the handle with
* - lws_libuv_static_refcount_add(handle, context, tsi) to mark the handle with
* a pointer to the context and increment the global uv object counter
*
* - lws_libuv_static_refcount_del() which should be used as the close callback
@ -187,7 +187,8 @@ LWS_VISIBLE LWS_EXTERN uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi);
LWS_VISIBLE LWS_EXTERN void
lws_libuv_static_refcount_add(uv_handle_t *, struct lws_context *context);
lws_libuv_static_refcount_add(uv_handle_t *, struct lws_context *context,
int tsi);
LWS_VISIBLE LWS_EXTERN void
lws_libuv_static_refcount_del(uv_handle_t *);
@ -195,7 +196,7 @@ lws_libuv_static_refcount_del(uv_handle_t *);
#endif /* LWS_WITH_LIBUV */
#if defined(LWS_PLAT_FREERTOS)
#define lws_libuv_static_refcount_add(_a, _b)
#define lws_libuv_static_refcount_add(_a, _b, _c)
#define lws_libuv_static_refcount_del NULL
#endif
///@}
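
For reference, a minimal sketch (not part of this commit) of how a protocol handler is expected to use this refcounting pair after the signature change; the timer, callback and function names here are hypothetical:

#include <uv.h>
#include <libwebsockets.h>

static uv_timer_t my_timer;		/* hypothetical static libuv asset */

static void
my_timer_cb(uv_timer_t *t)
{
	/* ... periodic protocol work ... */
}

void
my_proto_attach_timer(struct lws_context *context, int tsi)
{
	uv_timer_init(lws_uv_getloop(context, tsi), &my_timer);

	/* mark the handle so context destroy waits for it, now per-pt via tsi */
	lws_libuv_static_refcount_add((uv_handle_t *)&my_timer, context, tsi);

	uv_timer_start(&my_timer, my_timer_cb, 1000, 1000);
}

void
my_proto_detach_timer(void)
{
	/* lws_libuv_static_refcount_del is used as the close callback */
	uv_close((uv_handle_t *)&my_timer, lws_libuv_static_refcount_del);
}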

View file

@ -47,6 +47,7 @@ lws_get_idlest_tsi(struct lws_context *context)
struct lws *
lws_create_new_server_wsi(struct lws_vhost *vhost, int fixed_tsi)
{
struct lws_context_per_thread *pt;
struct lws *new_wsi;
int n = fixed_tsi;
size_t s = sizeof(struct lws);
@ -75,6 +76,7 @@ lws_create_new_server_wsi(struct lws_vhost *vhost, int fixed_tsi)
new_wsi->wsistate |= LWSIFR_SERVER;
new_wsi->tsi = n;
pt = &vhost->context->pt[n];
lwsl_debug("new wsi %p joining vhost %s, tsi %d\n", new_wsi,
vhost->name, new_wsi->tsi);
@ -109,7 +111,9 @@ lws_create_new_server_wsi(struct lws_vhost *vhost, int fixed_tsi)
new_wsi->desc.sockfd = LWS_SOCK_INVALID;
new_wsi->position_in_fds_table = LWS_NO_FDS_POS;
vhost->context->count_wsi_allocated++;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated++;
lws_pt_unlock(pt);
/*
* outermost create notification for wsi
@ -210,7 +214,9 @@ bail:
if (new_wsi->user_space)
lws_free(new_wsi->user_space);
vh->context->count_wsi_allocated--;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated--;
lws_pt_unlock(pt);
lws_vhost_unbind_wsi(new_wsi);

View file

@ -38,6 +38,7 @@ static const uint8_t hnames[] = {
struct lws *
lws_http_client_connect_via_info2(struct lws *wsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
struct client_info_stash *stash = wsi->stash;
int n;
@ -68,7 +69,9 @@ lws_http_client_connect_via_info2(struct lws *wsi)
#endif
no_ah:
wsi->a.context->count_wsi_allocated++;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated++;
lws_pt_unlock(pt);
return lws_client_connect_2_dnsreq(wsi);
@ -93,7 +96,7 @@ lws_client_connect_via_info(const struct lws_client_connect_info *i)
size_t size;
char *pc;
if (i->context->requested_kill)
if (i->context->requested_stop_internal_loops)
return NULL;
if (!i->context->protocol_init_done)

View file

@ -127,7 +127,7 @@ __lws_reset_wsi(struct lws *wsi)
if (wsi->a.vhost)
lws_dll2_remove(&wsi->dll_cli_active_conns);
#endif
wsi->a.context->count_wsi_allocated--;
wsi->a.context->pt[(int)wsi->tsi].count_wsi_allocated--;
__lws_same_vh_protocol_remove(wsi);
#if defined(LWS_WITH_CLIENT)
@ -171,7 +171,7 @@ __lws_free_wsi(struct lws *wsi)
lws_vhost_unbind_wsi(wsi);
lwsl_debug("%s: %p, remaining wsi %d, tsi fds count %d\n", __func__, wsi,
wsi->a.context->count_wsi_allocated,
wsi->a.context->pt[(int)wsi->tsi].count_wsi_allocated,
wsi->a.context->pt[(int)wsi->tsi].fds_count);
/* confirm no sul left scheduled in wsi itself */

View file

@ -438,6 +438,10 @@ struct lws_context_per_thread {
*/
volatile int service_tid;
int service_tid_detected;
#if !defined(LWS_PLAT_FREERTOS)
int count_event_loop_static_asset_handles;
#endif
int count_wsi_allocated;
volatile unsigned char inside_poll;
volatile unsigned char foreign_spinlock;
@ -452,6 +456,7 @@ struct lws_context_per_thread {
unsigned char inside_lws_service:1;
unsigned char event_loop_foreign:1;
unsigned char event_loop_destroy_processing_done:1;
unsigned char event_loop_pt_unused:1;
unsigned char destroy_self:1;
unsigned char is_destroyed:1;
};
@ -1334,7 +1339,7 @@ lws_adopt_ss_server_accept(struct lws *new_wsi);
int
lws_plat_pipe_create(struct lws *wsi);
int
lws_plat_pipe_signal(struct lws *wsi);
lws_plat_pipe_signal(struct lws_context *ctx, int tsi);
void
lws_plat_pipe_close(struct lws *wsi);

View file

@ -226,15 +226,13 @@ lws_json_dump_context(const struct lws_context *context, char *buf, int len,
"\"cgi_spawned\":\"%d\",\n"
"\"pt_fd_max\":\"%d\",\n"
"\"ah_pool_max\":\"%d\",\n"
"\"deprecated\":\"%d\",\n"
"\"wsi_alive\":\"%d\",\n",
"\"deprecated\":\"%d\",\n",
(unsigned long long)(lws_now_usecs() - context->time_up) /
LWS_US_PER_SEC,
context->count_cgi_spawned,
context->fd_limit_per_thread,
context->max_http_header_pool,
context->deprecated,
context->count_wsi_allocated);
context->deprecated);
buf += lws_snprintf(buf, end - buf, "\"pt\":[\n ");
for (n = 0; n < context->count_threads; n++) {
@ -245,11 +243,13 @@ lws_json_dump_context(const struct lws_context *context, char *buf, int len,
"\n {\n"
" \"fds_count\":\"%d\",\n"
" \"ah_pool_inuse\":\"%d\",\n"
" \"ah_wait_list\":\"%d\"\n"
" \"ah_wait_list\":\"%d\",\n"
" \"wsi_alive\":\"%d\",\n"
" }",
pt->fds_count,
pt->http.ah_count_in_use,
pt->http.ah_wait_list_length);
pt->http.ah_wait_list_length,
pt->count_wsi_allocated);
}
buf += lws_snprintf(buf, end - buf, "]");

View file

@ -641,11 +641,14 @@ lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd,
struct lws_context_per_thread *pt;
struct lws *wsi;
if (!context || context->being_destroyed1)
if (!context || context->service_no_longer_possible)
return -1;
pt = &context->pt[tsi];
if (pt->event_loop_pt_unused)
return -1;
if (!pollfd) {
/*
* calling with NULL pollfd for periodic background processing

View file

@ -156,9 +156,6 @@ lws_stats_log_dump(struct lws_context *context)
context->simultaneous_ssl,
context->simultaneous_ssl_restriction);
lwsl_notice("Live wsi: %8d\n",
context->count_wsi_allocated);
while (v) {
if (v->lserv_wsi &&
v->lserv_wsi->position_in_fds_table != LWS_NO_FDS_POS) {
@ -172,6 +169,8 @@ lws_stats_log_dump(struct lws_context *context)
lwsl_notice(" Listen port %d actual POLLIN: %d\n",
v->listen_port,
(int)pfd->events & LWS_POLLIN);
lwsl_notice(" Live wsi: %8d\n",
pt->count_wsi_allocated);
}
v = v->vhost_next;

View file

@ -898,23 +898,23 @@ lws_init_vhost_client_ssl(const struct lws_context_creation_info *info,
void
lws_cancel_service_pt(struct lws *wsi)
{
lws_plat_pipe_signal(wsi);
lws_plat_pipe_signal(wsi->a.context, wsi->tsi);
}
void
lws_cancel_service(struct lws_context *context)
{
struct lws_context_per_thread *pt = &context->pt[0];
short m = context->count_threads;
short m;
if (context->being_destroyed1)
if (context->service_no_longer_possible)
return;
lwsl_debug("%s\n", __func__);
while (m--) {
for (m = 0; m < context->count_threads; m++) {
if (pt->pipe_wsi)
lws_plat_pipe_signal(pt->pipe_wsi);
lws_plat_pipe_signal(pt->context, m);
pt++;
}
}
@ -1095,6 +1095,7 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
struct lws *w =
lws_container_of(d, struct lws, vh_awaiting_socket);
lwsl_debug("%s: closing aso\n", __func__);
lws_close_free_wsi(w, LWS_CLOSE_STATUS_NOSTATUS,
"awaiting skt");
@ -1122,6 +1123,8 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
while (n < vh->count_protocols) {
wsi.a.protocol = protocol;
lwsl_debug("%s: protocol destroy\n", __func__);
if (protocol->callback)
protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
NULL, NULL, 0);
@ -1148,7 +1151,7 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
vh->context->vhost_pending_destruction_list = vh;
}
lwsl_info("%s: %p\n", __func__, vh);
lwsl_debug("%s: do dfl '%s'\n", __func__, vh->name);
/* if we are still on deferred free list, remove ourselves */
@ -1247,7 +1250,7 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
lws_dll2_foreach_safe(&vh->abstract_instances_owner, NULL, destroy_ais);
#endif
lwsl_info(" %s: Freeing vhost %p\n", __func__, vh);
lwsl_debug(" %s: Freeing vhost %p\n", __func__, vh);
memset(vh, 0, sizeof(*vh));
lws_free(vh);

View file

@ -194,6 +194,7 @@ struct lws *
lws_wsi_create_with_role(struct lws_context *context, int tsi,
const struct lws_role_ops *ops)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
size_t s = sizeof(struct lws);
struct lws *wsi;
@ -218,7 +219,9 @@ lws_wsi_create_with_role(struct lws_context *context, int tsi,
wsi->a.vhost = NULL;
wsi->desc.sockfd = LWS_SOCK_INVALID;
context->count_wsi_allocated++;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated++;
lws_pt_unlock(pt);
return wsi;
}
@ -253,10 +256,14 @@ bail:
int
lws_wsi_extract_from_loop(struct lws *wsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
if (lws_socket_is_valid(wsi->desc.sockfd))
__remove_wsi_socket_from_fds(wsi);
wsi->a.context->count_wsi_allocated--;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated--;
lws_pt_unlock(pt);
if (!wsi->a.context->event_loop_ops->destroy_wsi &&
wsi->a.context->event_loop_ops->wsi_logical_close) {

View file

@ -158,7 +158,7 @@ lws_state_notify_protocol_init(struct lws_state_manager *mgr,
*/
if (target == LWS_SYSTATE_IFACE_COLDPLUG && !context->nl_initial_done) {
lwsl_notice("%s: waiting for netlink coldplug\n", __func__);
lwsl_info("%s: waiting for netlink coldplug\n", __func__);
return 1;
}
@ -597,7 +597,7 @@ lws_create_context(const struct lws_context_creation_info *info)
#endif
#if defined(LWS_WITH_NETWORK)
context->count_threads = count_threads;
context->undestroyed_threads = context->count_threads = count_threads;
#if defined(LWS_WITH_DETAILED_LATENCY)
context->detailed_latency_cb = info->detailed_latency_cb;
context->detailed_latency_filepath = info->detailed_latency_filepath;
@ -1320,131 +1320,327 @@ lws_context_is_deprecated(struct lws_context *cx)
* destroys the context itself, setting what was info.pcontext to NULL.
*/
/*
* destroy the actual context itself
*/
static void
lws_context_destroy3(struct lws_context *context)
{
struct lws_context **pcontext_finalize = context->pcontext_finalize;
int n;
#if defined(LWS_WITH_NETWORK)
context->finalize_destroy_after_internal_loops_stopped = 1;
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
for (n = 0; n < context->count_threads; n++) {
struct lws_context_per_thread *pt = &context->pt[n];
(void)pt;
#if defined(LWS_WITH_SEQUENCER)
lws_seq_destroy_all_on_pt(pt);
#endif
LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
if (lws_rops_fidx(ar, LWS_ROPS_pt_init_destroy))
(lws_rops_func_fidx(ar, LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL, pt, 1);
} LWS_FOR_EVERY_AVAILABLE_ROLE_END;
static void
lws_pt_destroy(struct lws_context_per_thread *pt)
{
volatile struct lws_foreign_thread_pollfd *ftp, *next;
volatile struct lws_context_per_thread *vpt;
#if defined(LWS_WITH_CGI)
lws_ctx_t ctx = pt->context;
if (lws_rops_fidx(&role_ops_cgi, LWS_ROPS_pt_init_destroy))
(lws_rops_func_fidx(&role_ops_cgi, LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL, pt, 1);
pt_init_destroy(ctx, NULL, pt, 1);
#endif
vpt = (volatile struct lws_context_per_thread *)pt;
ftp = vpt->foreign_pfd_list;
while (ftp) {
next = ftp->next;
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
lws_pt_lock(pt, __func__);
if (pt->pipe_wsi) {
lws_destroy_event_pipe(pt->pipe_wsi);
pt->pipe_wsi = NULL;
}
#if defined(LWS_WITH_SECURE_STREAMS)
lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_destroy_dll);
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API) && defined(LWS_WITH_CLIENT)
lws_dll2_foreach_safe(&pt->ss_client_owner, NULL, lws_sspc_destroy_dll);
#endif
#if defined(LWS_WITH_SEQUENCER)
lws_seq_destroy_all_on_pt(pt);
#endif
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
while (pt->http.ah_list)
_lws_destroy_ah(pt, pt->http.ah_list);
#endif
}
#if defined(LWS_WITH_SYS_SMD)
_lws_smd_destroy(context);
#endif
#if defined(LWS_WITH_SYS_ASYNC_DNS)
lws_async_dns_deinit(&context->async_dns);
#endif
#if defined(LWS_WITH_SYS_DHCP_CLIENT)
lws_dhcpc_remove(context, NULL);
#endif
lws_pt_unlock(pt);
pt->pipe_wsi = NULL;
if (context->pt[0].fds)
lws_free_set_NULL(context->pt[0].fds);
#endif
lws_context_deinit_ssl_library(context);
#if defined(LWS_WITH_DETAILED_LATENCIES)
if (context->latencies_fd != -1)
compatible_close(context->latencies_fd);
#endif
for (n = 0; n < LWS_SYSBLOB_TYPE_COUNT; n++)
lws_system_blob_destroy(
lws_system_get_blob(context, n, 0));
#if LWS_MAX_SMP > 1
lws_mutex_refcount_destroy(&context->mr);
#endif
/* drop any lingering deferred vhost frees */
while (context->deferred_free_list) {
struct lws_deferred_free *df = context->deferred_free_list;
context->deferred_free_list = df->next;
lws_free(df);
};
#if defined(LWS_WITH_EVLIB_PLUGINS) && defined(LWS_WITH_EVENT_LIBS)
if (context->evlib_plugin_list)
lws_plugins_destroy(&context->evlib_plugin_list, NULL, NULL);
#endif
lws_free(context);
lwsl_debug("%s: ctx %p freed\n", __func__, context);
if (pcontext_finalize)
*pcontext_finalize = NULL;
}
#endif
/*
* really start destroying things
* Context destruction is now a state machine that's aware of SMP pts and
* various event lib approaches.
*
* lws_context_destroy() expects to be called at the end of the user code's
* usage of it. But it can also be called non-finally, as a way to stop
* service and exit the outer user service loop, and then complete in the
* final call.
*
* For libuv, with async close, it must decide by refcounting the handles on
* the loop whether it has extricated itself from the loop and can be destroyed.
*
* The various entry states for the staged destroy
*
* LWSCD_NO_DESTROY: begin destroy process
* - mark context as starting destroy process
* - start vhost destroy
* - stop any further user protocol service
*
* LWSCD_PT_WAS_DEFERRED: come back here if any pt inside service
* - Check for pts that are inside service loop, mark deferral needed if so
* - If not, close all wsi on the pt loop and start logical pt destroy
* - If any deferred, set state to LWSCD_PT_WAS_DEFERRED and exit
*
* LWSCD_PT_WAIT_ALL_DESTROYED: come back here for async loop / pt closes
* - exit if any pt not marked as unused, or destroyed
* - if all pt down, call into evlib to advance context destroy
* - finalize vhost destruction
* - finalize pt destruction
* - if foreign loops, set state to LWSCD_FINALIZATION and exit
*
* LWSCD_FINALIZATION: come back here at the final lws_context_destroy() call
* - destroy sundries
* - destroy and free the actual context
*/
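
A minimal usage sketch of the staged flow described above, assuming the default poll() event loop (the SIGINT arrangement and names are illustrative, not from this commit): the first, non-final call stops service so the outer loop falls out, and the last call performs LWSCD_FINALIZATION.

#include <libwebsockets.h>
#include <signal.h>
#include <string.h>

static struct lws_context *context;

static void
sigint_handler(int sig)
{
	/* non-final call: begins the staged destroy, service stops */
	lws_context_destroy(context);
}

int
main(void)
{
	struct lws_context_creation_info info;
	int n = 0;

	memset(&info, 0, sizeof(info));
	info.port = CONTEXT_PORT_NO_LISTEN;

	context = lws_create_context(&info);
	if (!context)
		return 1;

	signal(SIGINT, sigint_handler);

	/* lws_service() returns < 0 once service is no longer possible */
	while (n >= 0)
		n = lws_service(context, 0);

	/* final call: completes LWSCD_FINALIZATION and frees the context */
	lws_context_destroy(context);

	return 0;
}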
void
lws_context_destroy2(struct lws_context *context)
lws_context_destroy(struct lws_context *context)
{
struct lws_context **pcontext_finalize;
#if defined(LWS_WITH_NETWORK)
struct lws_context_per_thread *pt;
struct lws_vhost *vh = NULL, *vh1;
int n;
int alive = 0, deferred_pt = 0;
#endif
#if defined(LWS_WITH_PEER_LIMITS)
uint32_t nu;
#endif
int n;
lwsl_info("%s: ctx %p\n", __func__, context);
if (!context || context->inside_context_destroy)
return;
lws_context_lock(context, "context destroy 2"); /* ------ context { */
pcontext_finalize = context->pcontext_finalize;
lws_context_lock(context, __func__);
context->inside_context_destroy = 1;
lwsl_info("%s: destroy_state %d\n", __func__, context->destroy_state);
switch (context->destroy_state) {
case LWSCD_NO_DESTROY:
/*
* We're getting started
*/
lwsl_info("%s: starting context destroy flow\n", __func__);
context->being_destroyed = 1;
#if defined(LWS_WITH_NETWORK)
/*
* Close any vhost listen wsi
*
* inform all the protocols that they are done and will have no
* more callbacks.
*
* We can't free things until after the event loop shuts down.
*/
if (context->protocol_init_done)
vh = context->vhost_list;
while (vh) {
lwsl_info("%s: vh %s start close\n", __func__, vh->name);
vh1 = vh->vhost_next;
lws_vhost_destroy1(vh);
vh = vh1;
}
#endif
lws_plat_context_early_destroy(context);
context->service_no_longer_possible = 1;
context->requested_stop_internal_loops = 1;
/* fallthru */
case LWSCD_PT_WAS_DEFERRED:
context->being_destroyed2 = 1;
#if defined(LWS_WITH_NETWORK)
/*
* We're going to trash things like vhost-protocols
* So we need to finish dealing with wsi close that
* might make callbacks first
*/
for (n = 0; n < context->count_threads; n++) {
struct lws_context_per_thread *pt = &context->pt[n];
/*
* We want to mark the pts as their destruction having been
* initiated, so they will reject any new wsi, and iterate all
* existing pt wsi starting to close them.
*
* If the event loop has async close, we have to return after
* this and try again when all the loops stop after all the
* refcounted wsi are gone.
*/
(void)pt;
pt = context->pt;
for (n = 0; n < context->count_threads; n++) {
lws_pt_lock(pt, __func__);
#if defined(LWS_WITH_SECURE_STREAMS)
lws_dll2_foreach_safe(&pt->ss_owner, NULL, lws_ss_destroy_dll);
#if !defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)
/* evlib will realize it needs to destroy pt */
pt->destroy_self = 1;
if (pt->inside_lws_service) {
pt->event_loop_pt_unused = 1;
deferred_pt = 1;
goto next;
}
/*
* Close every handle in the fds
*/
while (pt->fds_count) {
struct lws *wsi = wsi_from_fd(context,
pt->fds[0].fd);
if (wsi) {
lwsl_debug("%s: pt %d: closing wsi %p\n",
__func__, n, wsi);
lws_close_free_wsi(wsi,
LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
"ctx destroy"
/* no protocol close */);
if (pt->pipe_wsi == wsi)
pt->pipe_wsi = NULL;
}
}
#if defined(LWS_WITH_CGI)
(lws_rops_func_fidx(&role_ops_cgi,
LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL,
pt, 1);
#endif
/*
* This closes handles that belong to the evlib pt
* footprint, eg, timers, idle
*/
if (context->event_loop_ops->destroy_pt) {
lwsl_info("%s: calling evlib destroy_pt %d\n",
__func__, n);
context->event_loop_ops->destroy_pt(context, n);
}
next:
lws_pt_unlock(pt);
pt++;
}
if (deferred_pt) {
context->destroy_state = LWSCD_PT_WAS_DEFERRED;
lwsl_notice("%s: destroy from inside service\n", __func__);
lws_cancel_service(context);
goto bail;
}
#endif
context->destroy_state = LWSCD_PT_WAIT_ALL_DESTROYED;
/*
* We have different needs depending on whether the loop is foreign or not.
*
* 1) If foreign loop, we really want to advance the
* destroy_context() past here, and block only for libuv-
* style async close completion.
*
* 2a) If poll, and we exited by ourselves and are calling a
* final destroy_context() outside of any service already,
* we want to advance all the way in one step.
*
* 2b) If poll, and we are reacting to a SIGINT, service
* thread(s) may be in poll wait or servicing. We can't
* advance the destroy_context() to the point it's freeing
* things; we have to leave that for the final
* destroy_context() after the service thread(s) are
* finished calling for service.
*/
#if defined(LWS_WITH_NETWORK)
if (context->event_loop_ops->destroy_context1) {
lwsl_info("%s: do evlib destroy_context1 and wait\n",
__func__);
context->event_loop_ops->destroy_context1(context);
goto bail;
}
/*
* ...if the more typical sync close, we can clean up the pts
* now ourselves...
*/
lwsl_info("%s: manually destroying pts\n", __func__);
pt = context->pt;
for (n = 0; n < context->count_threads; n++, pt++) {
pt->event_loop_pt_unused = 1;
lws_pt_destroy(pt);
}
#endif
/* fallthru */
case LWSCD_PT_WAIT_ALL_DESTROYED:
#if defined(LWS_WITH_NETWORK)
for (n = 0; n < context->count_threads; n++)
if (!context->pt[n].is_destroyed &&
!context->pt[n].event_loop_pt_unused)
alive++;
lwsl_info("%s: PT_WAIT_ALL_DESTROYED: %d alive\n", __func__,
alive);
if (alive)
break;
/*
* With foreign loops, removing all our fds from the loop
* means there are no more ways for the foreign loop to give
* us any further CPU once we leave here... so we must make
* sure related service threads are exiting so we can pick up
* again at the original app thread and do the context
* destroy completion
*/
/*
* evlib specific loop destroy?
*/
if (context->event_loop_ops->destroy_context2)
/*
* It returns nonzero to indicate the evlib must
* continue around the loop before destroy of it is
* completed so it can be freed
*/
context->event_loop_ops->destroy_context2(context);
context->requested_stop_internal_loops = 1;
#endif
/*
* Every pt and wsi that may depend on the logical vhosts
* is destroyed. We can remove the logical vhosts.
*/
#if defined(LWS_WITH_NETWORK) && defined(LWS_WITH_SECURE_STREAMS) && \
!defined(LWS_WITH_SECURE_STREAMS_STATIC_POLICY_ONLY)
while (context->server_der_list) {
struct lws_ss_x509 *x = context->server_der_list;
@ -1456,288 +1652,217 @@ lws_context_destroy2(struct lws_context *context)
if (context->ac_policy)
lwsac_free(&context->ac_policy);
#endif
#endif
#if defined(LWS_WITH_SECURE_STREAMS_PROXY_API) && defined(LWS_WITH_CLIENT)
lws_dll2_foreach_safe(&pt->ss_client_owner, NULL, lws_sspc_destroy_dll);
#endif
#if defined(LWS_WITH_SEQUENCER)
lws_seq_destroy_all_on_pt(pt);
#endif
LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
if (lws_rops_fidx(ar, LWS_ROPS_pt_init_destroy))
(lws_rops_func_fidx(ar, LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL, pt, 1);
} LWS_FOR_EVERY_AVAILABLE_ROLE_END;
#if defined(LWS_WITH_CGI)
if (lws_rops_fidx(&role_ops_cgi, LWS_ROPS_pt_init_destroy))
(lws_rops_func_fidx(&role_ops_cgi, LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL, pt, 1);
#endif
if (context->event_loop_ops->destroy_pt)
context->event_loop_ops->destroy_pt(context, n);
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
while (pt->http.ah_list)
_lws_destroy_ah(pt, pt->http.ah_list);
#endif
}
/*
* free all the per-vhost allocations
*/
vh = context->vhost_list;
while (vh) {
vh1 = vh->vhost_next;
__lws_vhost_destroy2(vh);
vh = vh1;
}
lwsl_debug("%p: post vh listl\n", __func__);
/* remove ourselves from the pending destruction list */
while (context->vhost_pending_destruction_list)
/* removes itself from list */
__lws_vhost_destroy2(context->vhost_pending_destruction_list);
#endif
lwsl_debug("%p: post pdl\n", __func__);
lws_stats_log_dump(context);
#if defined(LWS_WITH_NETWORK)
lws_ssl_context_destroy(context);
#endif
lws_plat_context_late_destroy(context);
#if defined(LWS_WITH_PEER_LIMITS)
for (nu = 0; nu < context->pl_hash_elements; nu++) {
lws_start_foreach_llp(struct lws_peer **, peer,
context->pl_hash_table[nu]) {
struct lws_peer *df = *peer;
*peer = df->next;
lws_free(df);
continue;
} lws_end_foreach_llp(peer, next);
}
lws_free(context->pl_hash_table);
#endif
lwsl_debug("%p: baggage\n", __func__);
if (context->external_baggage_free_on_destroy)
free(context->external_baggage_free_on_destroy);
#if defined(LWS_WITH_NETWORK)
lws_check_deferred_free(context, 0, 1);
#endif
lws_context_unlock(context); /* } context ------ */
#if defined(LWS_WITH_NETWORK)
if (context->event_loop_ops->destroy_context2)
if (context->event_loop_ops->destroy_context2(context)) {
context->finalize_destroy_after_internal_loops_stopped = 1;
return;
}
lwsl_debug("%p: post dc2\n", __func__);
// if (!context->pt[0].event_loop_foreign) {
// int n;
for (n = 0; n < context->count_threads; n++)
if (context->pt[n].inside_service) {
lwsl_debug("%p: bailing as inside service\n", __func__);
return;
}
// }
#endif
lws_context_destroy3(context);
}
#if defined(LWS_WITH_NETWORK)
static void
lws_pt_destroy(struct lws_context_per_thread *pt)
{
volatile struct lws_foreign_thread_pollfd *ftp, *next;
volatile struct lws_context_per_thread *vpt;
assert(!pt->is_destroyed);
pt->destroy_self = 0;
vpt = (volatile struct lws_context_per_thread *)pt;
ftp = vpt->foreign_pfd_list;
while (ftp) {
next = ftp->next;
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
lws_pt_lock(pt, __func__);
if (pt->pipe_wsi)
lws_destroy_event_pipe(pt->pipe_wsi);
lws_pt_unlock(pt);
pt->pipe_wsi = NULL;
while (pt->fds_count) {
struct lws *wsi = wsi_from_fd(pt->context, pt->fds[0].fd);
if (!wsi)
break;
lws_close_free_wsi(wsi,
LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
"ctx destroy"
/* no protocol close */);
}
lws_pt_mutex_destroy(pt);
pt->is_destroyed = 1;
lwsl_info("%s: pt destroyed\n", __func__);
}
#endif
/*
* Begin the context takedown
*/
void
lws_context_destroy(struct lws_context *context)
{
#if defined(LWS_WITH_NETWORK)
struct lws_vhost *vh = NULL;
int m, deferred_pt = 0;
#endif
if (!context || context->inside_context_destroy)
return;
context->inside_context_destroy = 1;
#if defined(LWS_WITH_NETWORK)
if (context->finalize_destroy_after_internal_loops_stopped) {
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
lws_context_destroy3(context);
/* context is invalid, no need to reset inside flag */
return;
}
#endif
if (context->being_destroyed1) {
if (!context->being_destroyed2) {
lws_context_destroy2(context);
return;
}
lwsl_info("%s: ctx %p: already being destroyed\n",
__func__, context);
lws_context_destroy3(context);
/* context is invalid, no need to reset inside flag */
return;
}
lwsl_info("%s: ctx %p\n", __func__, context);
context->being_destroyed = 1;
#if defined(LWS_WITH_NETWORK)
#if defined(LWS_WITH_SYS_STATE)
#if defined(LWS_WITH_SYS_STATE) && defined(LWS_WITH_NETWORK)
lws_state_transition(&context->mgr_system, LWS_SYSTATE_POLICY_INVALID);
#endif
m = context->count_threads;
while (m--) {
struct lws_context_per_thread *pt = &context->pt[m];
#if defined(LWS_WITH_NETWORK)
/*
* free all the per-vhost allocations
*/
if (pt->is_destroyed)
continue;
if (pt->inside_lws_service) {
pt->destroy_self = 1;
deferred_pt = 1;
continue;
vh = context->vhost_list;
while (vh) {
vh1 = vh->vhost_next;
lwsl_debug("%s: vh %s destroy2\n", __func__, vh->name);
__lws_vhost_destroy2(vh);
vh = vh1;
}
lws_pt_destroy(pt);
}
/* remove ourselves from the pending destruction list */
if (deferred_pt) {
lwsl_info("%s: waiting for deferred pt close\n", __func__);
lws_cancel_service(context);
goto out;
}
while (context->vhost_pending_destruction_list)
/* removes itself from list */
__lws_vhost_destroy2(context->vhost_pending_destruction_list);
context->being_destroyed1 = 1;
context->requested_kill = 1;
/*
* inform all the protocols that they are done and will have no more
* callbacks.
*
* We can't free things until after the event loop shuts down.
*/
if (context->protocol_init_done)
vh = context->vhost_list;
while (vh) {
struct lws_vhost *vhn = vh->vhost_next;
lws_vhost_destroy1(vh);
vh = vhn;
}
lwsl_debug("%p: post pdl\n", __func__);
#endif
lws_plat_context_early_destroy(context);
lws_stats_log_dump(context);
#if defined(LWS_WITH_NETWORK)
lws_ssl_context_destroy(context);
#endif
lws_plat_context_late_destroy(context);
#if defined(LWS_WITH_PEER_LIMITS)
for (nu = 0; nu < context->pl_hash_elements; nu++) {
lws_start_foreach_llp(struct lws_peer **, peer,
context->pl_hash_table[nu]) {
struct lws_peer *df = *peer;
*peer = df->next;
lws_free(df);
continue;
} lws_end_foreach_llp(peer, next);
}
lws_free(context->pl_hash_table);
#endif
/* drop any lingering deferred vhost frees */
while (context->deferred_free_list) {
struct lws_deferred_free *df = context->deferred_free_list;
context->deferred_free_list = df->next;
lws_free(df);
};
#if defined(LWS_WITH_NETWORK)
/*
* We face two different needs depending if foreign loop or not.
*
* 1) If foreign loop, we really want to advance the destroy_context()
* past here, and block only for libuv-style async close completion.
*
* 2a) If poll, and we exited by ourselves and are calling a final
* destroy_context() outside of any service already, we want to
* advance all the way in one step.
*
* 2b) If poll, and we are reacting to a SIGINT, service thread(s) may
* be in poll wait or servicing. We can't advance the
* destroy_context() to the point it's freeing things; we have to
* leave that for the final destroy_context() after the service
* thread(s) are finished calling for service.
*/
context->evlib_finalize_destroy_after_int_loops_stop = 1;
if (context->event_loop_ops->destroy_context2)
context->event_loop_ops->destroy_context2(context);
if (context->event_loop_ops->destroy_context1) {
context->event_loop_ops->destroy_context1(context);
goto out;
}
for (n = 0; n < context->count_threads; n++) {
struct lws_context_per_thread *pt = &context->pt[n];
(void)pt;
#if defined(LWS_WITH_SEQUENCER)
lws_seq_destroy_all_on_pt(pt);
#endif
LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
if (lws_rops_fidx(ar, LWS_ROPS_pt_init_destroy))
(lws_rops_func_fidx(ar, LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL, pt, 1);
} LWS_FOR_EVERY_AVAILABLE_ROLE_END;
#if defined(LWS_WITH_CGI)
lws_rops_func_fidx(&role_ops_cgi,
LWS_ROPS_pt_init_destroy).
pt_init_destroy(context, NULL,
pt, 1);
#endif
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
while (pt->http.ah_list)
_lws_destroy_ah(pt, pt->http.ah_list);
#endif
lwsl_info("%s: pt destroy %d\n", __func__, n);
lws_pt_destroy(pt);
}
#endif /* NETWORK */
context->destroy_state = LWSCD_FINALIZATION;
#if defined(LWS_WITH_NETWORK)
if (context->pt[0].event_loop_foreign &&
context->event_loop_ops->destroy_context1) {
lwsl_info("%s: leaving final context destruction"
" for final call\n", __func__);
goto bail;
}
#endif
case LWSCD_FINALIZATION:
#if defined(LWS_WITH_NETWORK)
/*
* finalize destroy of pt and things hanging off it
*/
for (n = 0; n < context->count_threads; n++) {
struct lws_context_per_thread *pt = &context->pt[n];
/*
* Destroy the pt-roles
*/
LWS_FOR_EVERY_AVAILABLE_ROLE_START(ar) {
if (lws_rops_fidx(ar, LWS_ROPS_pt_init_destroy))
(lws_rops_func_fidx(ar, LWS_ROPS_pt_init_destroy)).
pt_init_destroy(context, NULL, pt, 1);
} LWS_FOR_EVERY_AVAILABLE_ROLE_END;
#if defined(LWS_WITH_CGI)
lws_rops_func_fidx(&role_ops_cgi, LWS_ROPS_pt_init_destroy).
pt_init_destroy(context, NULL, pt, 1);
#endif
lws_pt_mutex_destroy(pt);
assert(!pt->is_destroyed);
pt->destroy_self = 0;
pt->is_destroyed = 1;
lwsl_info("%s: pt %d fully destroyed\n", __func__,
(int)(pt - pt->context->pt));
}
/*
* wsis are gone, pts are gone, vhosts are gone.
*
* clean up the context and things hanging off it
*/
#if defined(LWS_WITH_SYS_SMD)
_lws_smd_destroy(context);
#endif
#if defined(LWS_WITH_SYS_ASYNC_DNS)
lws_async_dns_deinit(&context->async_dns);
#endif
#if defined(LWS_WITH_SYS_DHCP_CLIENT)
lws_dhcpc_remove(context, NULL);
#endif
if (context->pt[0].fds)
lws_free_set_NULL(context->pt[0].fds);
#endif
lws_context_deinit_ssl_library(context);
#if defined(LWS_WITH_DETAILED_LATENCIES)
if (context->latencies_fd != -1)
compatible_close(context->latencies_fd);
#endif
for (n = 0; n < LWS_SYSBLOB_TYPE_COUNT; n++)
lws_system_blob_destroy(
lws_system_get_blob(context, n, 0));
/*
* Context lock is about to go away
*/
lws_context_unlock(context);
#if LWS_MAX_SMP > 1
lws_mutex_refcount_destroy(&context->mr);
#endif
if (context->external_baggage_free_on_destroy)
free(context->external_baggage_free_on_destroy);
#if defined(LWS_PLAT_FREERTOS)
#if defined(LWS_AMAZON_RTOS)
context->last_free_heap = xPortGetFreeHeapSize();
context->last_free_heap = xPortGetFreeHeapSize();
#else
context->last_free_heap = esp_get_free_heap_size();
context->last_free_heap = esp_get_free_heap_size();
#endif
#endif
context->inside_context_destroy = 0;
lws_context_destroy2(context);
#if defined(LWS_WITH_EVLIB_PLUGINS) && defined(LWS_WITH_EVENT_LIBS)
if (context->evlib_plugin_list)
lws_plugins_destroy(&context->evlib_plugin_list,
NULL, NULL);
#endif
return;
lws_free(context);
lwsl_debug("%s: ctx %p freed\n", __func__, context);
if (pcontext_finalize)
*pcontext_finalize = NULL;
return;
}
#if defined(LWS_WITH_NETWORK)
out:
context->inside_context_destroy = 0;
bail:
#endif
lwsl_info("%s: leaving\n", __func__);
context->inside_context_destroy = 0;
lws_context_unlock(context);
}
#if defined(LWS_WITH_SYS_STATE)

View file

@ -182,6 +182,12 @@ enum lws_ssl_capable_status {
LWS_SSL_CAPABLE_MORE_SERVICE = -4, /* general retry */
};
enum lws_context_destroy {
LWSCD_NO_DESTROY, /* running */
LWSCD_PT_WAS_DEFERRED, /* destroy from inside service */
LWSCD_PT_WAIT_ALL_DESTROYED, /* libuv ends up here later */
LWSCD_FINALIZATION /* the final destruction of context */
};
#if defined(LWS_WITH_TLS)
#include "private-lib-tls.h"
@ -496,7 +502,6 @@ struct lws_context {
#if !defined(LWS_PLAT_FREERTOS)
int uid, gid;
int count_event_loop_static_asset_handles;
int fd_random;
int count_cgi_spawned;
#endif
@ -504,7 +509,6 @@ struct lws_context {
#if defined(LWS_WITH_DETAILED_LATENCY)
int latencies_fd;
#endif
int count_wsi_allocated;
unsigned int fd_limit_per_thread;
unsigned int timeout_secs;
unsigned int pt_serv_buf_size;
@ -521,13 +525,13 @@ struct lws_context {
unsigned int deprecated:1;
unsigned int inside_context_destroy:1;
unsigned int being_destroyed:1;
unsigned int being_destroyed1:1;
unsigned int service_no_longer_possible:1;
unsigned int being_destroyed2:1;
unsigned int requested_kill:1;
unsigned int requested_stop_internal_loops:1;
unsigned int protocol_init_done:1;
unsigned int doing_protocol_init:1;
unsigned int done_protocol_destroy_cb:1;
unsigned int finalize_destroy_after_internal_loops_stopped:1;
unsigned int evlib_finalize_destroy_after_int_loops_stop:1;
unsigned int max_fds_unrelated_to_ulimit:1;
unsigned int policy_updated:1;
#if defined(LWS_WITH_NETLINK)
@ -535,6 +539,7 @@ struct lws_context {
#endif
short count_threads;
short undestroyed_threads;
short plugin_protocol_count;
short plugin_extension_count;
short server_string_len;
@ -549,6 +554,8 @@ struct lws_context {
uint8_t captive_portal_detect;
uint8_t captive_portal_detect_type;
uint8_t destroy_state; /* enum lws_context_destroy */
#if defined(LWS_WITH_STATS)
uint8_t updated;
#endif

View file

@ -302,7 +302,7 @@ elops_io_ev(struct lws *wsi, int flags)
struct lws_pt_eventlibs_libev *ptpr = pt_to_priv_ev(pt);
struct lws_wsi_eventlibs_libev *w = wsi_to_priv_ev(wsi);
lwsl_notice("%s: wsi %p %s flags 0x%x %p %d\n", __func__,
lwsl_debug("%s: wsi %p %s flags 0x%x %p %d\n", __func__,
wsi, wsi->role_ops->name, flags,
ptpr->io_loop, pt->is_destroyed);
@ -355,7 +355,7 @@ elops_destroy_context2_ev(struct lws_context *context)
if (pt->event_loop_foreign || !ptpr->io_loop)
continue;
if (!context->finalize_destroy_after_internal_loops_stopped) {
if (!context->evlib_finalize_destroy_after_int_loops_stop) {
ev_break(ptpr->io_loop, EVBREAK_ONE);
continue;
}

View file

@ -455,7 +455,7 @@ elops_destroy_context2_event(struct lws_context *context)
if (pt->event_loop_foreign || !ptpr->io_loop)
continue;
if (!context->finalize_destroy_after_internal_loops_stopped) {
if (!context->evlib_finalize_destroy_after_int_loops_stop) {
event_base_loopexit(ptpr->io_loop, NULL);
continue;
}

View file

@ -166,12 +166,12 @@ lws_libuv_stop(struct lws_context *context)
lwsl_err("%s\n", __func__);
if (context->requested_kill) {
if (context->requested_stop_internal_loops) {
lwsl_err("%s: ignoring\n", __func__);
return;
}
context->requested_kill = 1;
context->requested_stop_internal_loops = 1;
m = context->count_threads;
context->being_destroyed = 1;
@ -209,10 +209,11 @@ lws_libuv_stop(struct lws_context *context)
static void
lws_uv_signal_handler(uv_signal_t *watcher, int signum)
{
struct lws_context *context = watcher->data;
struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
watcher->data;
if (context->eventlib_signal_cb) {
context->eventlib_signal_cb((void *)watcher, signum);
if (pt->context->eventlib_signal_cb) {
pt->context->eventlib_signal_cb((void *)watcher, signum);
return;
}
@ -221,6 +222,52 @@ lws_uv_signal_handler(uv_signal_t *watcher, int signum)
lws_libuv_stop(watcher->data);
}
static int
lws_uv_finalize_pt(struct lws_context_per_thread *pt)
{
pt->event_loop_pt_unused = 1;
lwsl_info("%s: thr %d\n", __func__, (int)(pt - pt->context->pt));
lws_context_lock(pt->context, __func__);
if (!--pt->context->undestroyed_threads) {
struct lws_vhost *vh = pt->context->vhost_list;
/*
* eventually, we emptied all the pts...
*/
lwsl_debug("%s: all pts down now\n", __func__);
/* protocols may have initialized libuv objects */
while (vh) {
lws_vhost_destroy1(vh);
vh = vh->vhost_next;
}
if (!pt->count_event_loop_static_asset_handles &&
pt->event_loop_foreign) {
lwsl_info("%s: resuming context_destroy\n",
__func__);
lws_context_unlock(pt->context);
lws_context_destroy(pt->context);
/*
* For foreign, we're being called from the foreign
* thread context the loop is associated with, so we must
* return to it cleanly even though we are done with it.
*/
return 1;
}
} else
lwsl_debug("%s: still %d undestroyed\n", __func__, pt->context->undestroyed_threads);
lws_context_unlock(pt->context);
return 0;
}
static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
/*
@ -230,18 +277,21 @@ static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
static void
lws_uv_close_cb_sa(uv_handle_t *handle)
{
struct lws_context *context =
LWS_UV_REFCOUNT_STATIC_HANDLE_TO_CONTEXT(handle);
int n;
struct lws_context_per_thread *pt =
LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(handle);
struct lws_context *context = pt->context;
int tsi = (int)(pt - &context->pt[0]);
lwsl_info("%s: sa left %d: dyn left: %d\n", __func__,
context->count_event_loop_static_asset_handles,
context->count_wsi_allocated);
lwsl_info("%s: thr %d: sa left %d: dyn left: %d (rk %d)\n", __func__,
tsi,
pt->count_event_loop_static_asset_handles - 1,
pt->count_wsi_allocated,
context->requested_stop_internal_loops);
/* any static assets left? */
if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
context->count_wsi_allocated)
pt->count_wsi_allocated)
return;
/*
@ -251,18 +301,18 @@ lws_uv_close_cb_sa(uv_handle_t *handle)
* Stop the loop so we can get out of here.
*/
for (n = 0; n < context->count_threads; n++) {
struct lws_context_per_thread *pt = &context->pt[n];
lwsl_info("%s: thr %d: seen final static handle gone\n", __func__, tsi);
if (pt_to_priv_uv(pt)->io_loop && !pt->event_loop_foreign)
uv_stop(pt_to_priv_uv(pt)->io_loop);
}
if (pt_to_priv_uv(pt)->io_loop && !pt->event_loop_foreign)
uv_stop(pt_to_priv_uv(pt)->io_loop);
if (!context->pt[0].event_loop_foreign) {
if (!pt->event_loop_foreign) {
lwsl_info("%s: calling lws_context_destroy2\n", __func__);
lws_context_destroy2(context);
lws_context_destroy(context);
}
lws_uv_finalize_pt(pt);
lwsl_info("%s: all done\n", __func__);
}
@ -273,9 +323,12 @@ lws_uv_close_cb_sa(uv_handle_t *handle)
*/
void
lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context)
lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context,
int tsi)
{
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, context);
struct lws_context_per_thread *pt = &context->pt[tsi];
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, pt);
}
/*
@ -390,7 +443,7 @@ elops_destroy_context2_uv(struct lws_context *context)
if (!pt->event_loop_foreign && pt_to_priv_uv(pt)->io_loop) {
internal = 1;
if (!context->finalize_destroy_after_internal_loops_stopped)
if (!context->evlib_finalize_destroy_after_int_loops_stop)
uv_stop(pt_to_priv_uv(pt)->io_loop);
else {
#if UV_VERSION_MAJOR > 0
@ -601,8 +654,6 @@ elops_destroy_pt_uv(struct lws_context *context, int tsi)
struct lws_context_per_thread *pt = &context->pt[tsi];
int m, ns;
lwsl_info("%s: %d\n", __func__, tsi);
if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
return;
@ -613,8 +664,10 @@ elops_destroy_pt_uv(struct lws_context *context, int tsi)
return;
pt->event_loop_destroy_processing_done = 1;
lwsl_debug("%s: %d\n", __func__, tsi);
if (!pt->event_loop_foreign) {
uv_signal_stop(&pt_to_priv_uv(pt)->w_sigint.watcher);
ns = LWS_ARRAY_SIZE(sigs);
@ -676,7 +729,7 @@ elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
ptpriv->io_loop = loop;
uv_idle_init(loop, &ptpriv->idle);
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, context);
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, pt);
uv_idle_start(&ptpriv->idle, lws_uv_idle);
ns = LWS_ARRAY_SIZE(sigs);
@ -688,9 +741,9 @@ elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
assert(ns <= (int)LWS_ARRAY_SIZE(ptpriv->signals));
for (n = 0; n < ns; n++) {
uv_signal_init(loop, &ptpriv->signals[n]);
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->signals[n],
context);
ptpriv->signals[n].data = pt->context;
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(
&ptpriv->signals[n], pt);
ptpriv->signals[n].data = pt;
uv_signal_start(&ptpriv->signals[n],
lws_uv_signal_handler, sigs[n]);
}
@ -715,7 +768,7 @@ elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
return status;
uv_timer_init(ptpriv->io_loop, &ptpriv->sultimer);
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->sultimer, context);
LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->sultimer, pt);
return status;
}
@ -762,42 +815,28 @@ lws_libuv_closewsi(uv_handle_t* handle)
}
#endif
lwsl_info("%s: sa left %d: dyn left: %d (rk %d)\n", __func__,
context->count_event_loop_static_asset_handles,
context->count_wsi_allocated, context->requested_kill);
lwsl_info("%s: thr %d: sa left %d: dyn left: %d (rk %d)\n", __func__,
(int)(pt - &pt->context->pt[0]),
pt->count_event_loop_static_asset_handles,
pt->count_wsi_allocated,
context->requested_stop_internal_loops);
/*
* eventually, we closed all the wsi...
*/
if (context->requested_kill && !context->count_wsi_allocated) {
struct lws_vhost *vh = context->vhost_list;
int m;
if (context->requested_stop_internal_loops &&
!pt->count_wsi_allocated &&
!pt->count_event_loop_static_asset_handles) {
/*
* Start Closing Phase 2: close of static handles
* we closed everything on this pt
*/
lwsl_info("%s: all lws dynamic handles down, closing static\n",
__func__);
lws_context_unlock(context);
lws_uv_finalize_pt(pt);
for (m = 0; m < context->count_threads; m++)
elops_destroy_pt_uv(context, m);
/* protocols may have initialized libuv objects */
while (vh) {
lws_vhost_destroy1(vh);
vh = vh->vhost_next;
}
if (!context->count_event_loop_static_asset_handles &&
context->pt[0].event_loop_foreign) {
lwsl_info("%s: call lws_context_destroy2\n", __func__);
lws_context_unlock(context);
lws_context_destroy2(context);
return;
}
return;
}
lws_context_unlock(context);

View file

@ -37,13 +37,13 @@
* - contribute to context->uv_count_static_asset_handles
* counting
*/
#define LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(_x, _ctx) \
{ uv_handle_t *_uht = (uv_handle_t *)(_x); _uht->data = _ctx; \
_ctx->count_event_loop_static_asset_handles++; }
#define LWS_UV_REFCOUNT_STATIC_HANDLE_TO_CONTEXT(_x) \
((struct lws_context *)((uv_handle_t *)((_x)->data)))
#define LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(_x, _pt) \
{ uv_handle_t *_uht = (uv_handle_t *)(_x); _uht->data = _pt; \
_pt->count_event_loop_static_asset_handles++; }
#define LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(_x) \
((struct lws_context_per_thread *)((uv_handle_t *)((_x)->data)))
#define LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(_x) \
(--(LWS_UV_REFCOUNT_STATIC_HANDLE_TO_CONTEXT(_x)-> \
(--(LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(_x)-> \
count_event_loop_static_asset_handles))
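
A small hedged sketch of how the per-pt counting is meant to be consumed, assuming it sits inside lws's libuv event-lib glue where struct lws_context_per_thread and these private macros are visible; the callback name is hypothetical:

static void
example_static_handle_close_cb(uv_handle_t *h)
{
	struct lws_context_per_thread *pt =
			LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(h);

	/*
	 * DESTROYED pre-decrements that pt's
	 * count_event_loop_static_asset_handles and yields the new count;
	 * zero means the last static asset on this pt is gone
	 */
	if (!LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(h))
		lwsl_info("%s: last static handle on tsi %d closed\n",
			  __func__, (int)(pt - &pt->context->pt[0]));
}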
struct lws_signal_watcher_libuv {

View file

@ -86,10 +86,10 @@ bail:
}
int
lws_plat_pipe_signal(struct lws *wsi)
lws_plat_pipe_signal(struct lws_context *ctx, int tsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
struct sockaddr_in *si = &wsi->a.context->frt_pipe_si;
struct lws_context_per_thread *pt = &ctx->pt[tsi];
struct sockaddr_in *si = &ctx->frt_pipe_si;
lws_sockfd_type *fd = pt->dummy_pipe_fds;
uint8_t u = 0;
int n;

View file

@ -174,7 +174,8 @@ delete_from_fd(const struct lws_context *context, int fd)
struct lws **p, **done;
if (!context->max_fds_unrelated_to_ulimit) {
context->lws_lookup[fd - lws_plat_socket_offset()] = NULL;
if (context->lws_lookup)
context->lws_lookup[fd - lws_plat_socket_offset()] = NULL;
return;
}

View file

@ -42,6 +42,7 @@ lws_sul_plat_unix(lws_sorted_usec_list_t *sul)
struct lws_context_per_thread *pt =
lws_container_of(sul, struct lws_context_per_thread, sul_plat);
struct lws_context *context = pt->context;
int n = 0, m = 0;
#if !defined(LWS_NO_DAEMONIZE)
/* if our parent went down, don't linger around */
@ -50,7 +51,10 @@ lws_sul_plat_unix(lws_sorted_usec_list_t *sul)
kill(getpid(), SIGTERM);
#endif
if (pt->context->deprecated && !pt->context->count_wsi_allocated) {
for (n = 0; n < context->count_threads; n++)
m |= pt->count_wsi_allocated;
if (context->deprecated && !m) {
lwsl_notice("%s: ending deprecated context\n", __func__);
kill(getpid(), SIGINT);
return;

View file

@ -46,9 +46,9 @@ lws_plat_pipe_create(struct lws *wsi)
}
int
lws_plat_pipe_signal(struct lws *wsi)
lws_plat_pipe_signal(struct lws_context *ctx, int tsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
struct lws_context_per_thread *pt = &ctx->pt[tsi];
#if defined(LWS_HAVE_EVENTFD)
eventfd_t value = 1;

View file

@ -66,6 +66,7 @@ static struct lws *
lws_create_basic_wsi(struct lws_context *context, int tsi,
const struct lws_role_ops *ops)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
size_t s = sizeof(struct lws);
struct lws *new_wsi;
@ -113,7 +114,9 @@ lws_create_basic_wsi(struct lws_context *context, int tsi,
new_wsi->user_space = NULL;
new_wsi->desc.sockfd = LWS_SOCK_INVALID;
context->count_wsi_allocated++;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated++;
lws_pt_unlock(pt);
return new_wsi;
}

View file

@ -34,9 +34,9 @@ lws_plat_pipe_create(struct lws *wsi)
}
int
lws_plat_pipe_signal(struct lws *wsi)
lws_plat_pipe_signal(struct lws_context *ctx, int tsi)
{
struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
struct lws_context_per_thread *pt = &ctx->pt[tsi];
/*
* We need the critical section so that we are either setting it or

View file

@ -65,6 +65,7 @@ static struct lws *
lws_create_basic_wsi(struct lws_context *context, int tsi,
const struct lws_role_ops *ops)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
struct lws *new_wsi;
size_t s = sizeof(*new_wsi);
@ -112,7 +113,9 @@ lws_create_basic_wsi(struct lws_context *context, int tsi,
new_wsi->user_space = NULL;
new_wsi->desc.sockfd = LWS_SOCK_INVALID;
context->count_wsi_allocated++;
lws_pt_lock(pt, __func__);
pt->count_wsi_allocated++;
lws_pt_unlock(pt);
return new_wsi;
}

View file

@ -102,7 +102,7 @@ __lws_shadow_wsi(struct lws_dbus_ctx *ctx, DBusWatch *w, int fd, int create_ok)
return NULL;
}
ctx->vh->context->count_wsi_allocated++;
ctx->vh->context->pt[(int)ctx->tsi].count_wsi_allocated++;
return wsi;
}
@ -123,7 +123,7 @@ __lws_shadow_wsi_destroy(struct lws_dbus_ctx *ctx, struct lws *wsi)
return 1;
}
ctx->vh->context->count_wsi_allocated--;
ctx->vh->context->pt[(int)ctx->tsi].count_wsi_allocated--;
lws_vhost_unbind_wsi(wsi);
lws_free(wsi);

View file

@ -214,6 +214,8 @@ struct lws *
lws_wsi_server_new(struct lws_vhost *vh, struct lws *parent_wsi,
unsigned int sid)
{
struct lws_context_per_thread *pt = &parent_wsi->a.context->pt[
(int)parent_wsi->tsi];
struct lws *wsi;
struct lws *nwsi = lws_get_network_wsi(parent_wsi);
struct lws_h2_netconn *h2n = nwsi->h2.h2n;
@ -290,7 +292,7 @@ bail1:
parent_wsi->mux.child_list = wsi->mux.sibling_list;
parent_wsi->mux.child_count--;
vh->context->count_wsi_allocated--;
pt->count_wsi_allocated--;
if (wsi->user_space)
lws_free_set_NULL(wsi->user_space);

View file

@ -319,7 +319,7 @@ done_list:
goto bail;
}
vhost->context->count_wsi_allocated++;
pt->count_wsi_allocated++;
vhost->lserv_wsi = wsi;
lws_pt_unlock(pt);
@ -327,7 +327,7 @@ done_list:
if (n < 0) {
lwsl_err("listen failed with error %d\n", LWS_ERRNO);
vhost->lserv_wsi = NULL;
vhost->context->count_wsi_allocated--;
pt->count_wsi_allocated--;
__remove_wsi_socket_from_fds(wsi);
goto bail;
}

View file

@ -1065,7 +1065,7 @@ bail1:
wsi->mux.child_list = w->mux.sibling_list;
wsi->mux.child_count--;
w->a.context->count_wsi_allocated--;
w->a.context->pt[(int)w->tsi].count_wsi_allocated--;
if (w->user_space)
lws_free_set_NULL(w->user_space);

View file

@ -330,6 +330,7 @@ rops_pt_init_destroy_netlink(struct lws_context *context,
int n;
if (destroy) {
/*
* pt netlink wsi closed + freed as part of pt's destroy
* wsi mass close, just need to take down the routing table
@ -342,6 +343,10 @@ rops_pt_init_destroy_netlink(struct lws_context *context,
if (pt->netlink)
return 0;
if (pt > &context->pt[0])
/* we can only have one netlink socket */
return 0;
lwsl_info("%s: creating netlink skt\n", __func__);
/*
@ -376,11 +381,11 @@ rops_pt_init_destroy_netlink(struct lws_context *context,
if (lws_wsi_inject_to_loop(pt, wsi))
goto bail2;
if (lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
/* if (lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
lwsl_err("%s: pollfd in fail\n", __func__);
goto bail2;
}
*/
/*
* Since we're starting the PT, ask to be sent all the existing routes.
*

View file

@ -28,7 +28,9 @@ int main(int argc, const char **argv)
lwsl_user("LWS gencrypto apis tests\n");
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
info.options = LWS_SERVER_OPTION_DO_SSL_GLOBAL_INIT;
context = lws_create_context(&info);

View file

@ -30,7 +30,9 @@ int main(int argc, const char **argv)
lwsl_user("LWS JOSE api tests\n");
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
info.options = 0;
context = lws_create_context(&info);

View file

@ -87,7 +87,9 @@ int main(int argc, const char **argv)
lwsl_user("LWS API selftest: lws_struct SQLite\n");
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
context = lws_create_context(&info);
if (!context) {
lwsl_err("lws init failed\n");

View file

@ -97,7 +97,9 @@ int main(int argc, const char **argv)
lwsl_user("LWS JWE example tool\n");
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
info.options = 0;
context = lws_create_context(&info);

View file

@ -104,7 +104,9 @@ int main(int argc, const char **argv)
}
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
info.options = 0;
context = lws_create_context(&info);

View file

@ -35,7 +35,9 @@ int main(int argc, const char **argv)
lwsl_user("LWS JWS example tool\n");
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
info.options = 0;
context = lws_create_context(&info);

View file

@ -72,7 +72,9 @@ int main(int argc, const char **argv)
lwsl_user("LWS X509 api example\n");
memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
#if defined(LWS_WITH_NETWORK)
info.port = CONTEXT_PORT_NO_LISTEN;
#endif
info.options = 0;
context = lws_create_context(&info);