
vhost_destroy: refactor

Andy Green 2021-01-22 09:11:22 +00:00
parent d5618f6059
commit beacabbdb1
8 changed files with 216 additions and 152 deletions
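Reading the diff is easier with the end state in mind: the application still just creates a vhost and later calls lws_vhost_destroy(), but destruction is now fully asynchronous and only finalizes once the last wsi bound to the vhost has gone. A minimal sketch of that application-facing flow, assuming the public lws API (lws_create_vhost(), lws_vhost_destroy(), the info.finalize hook and lws_get_vhost_name()); vh_finalized() and retire_vhost() are invented names for illustration:

#include <string.h>
#include <libwebsockets.h>

/* called by lws only after every wsi bound to the vhost has closed */
static void
vh_finalized(struct lws_vhost *vh, void *arg)
{
	lwsl_user("vhost %s fully destroyed\n", lws_get_vhost_name(vh));
}

static void
retire_vhost(struct lws_context *context)
{
	struct lws_context_creation_info info;
	struct lws_vhost *vh;

	memset(&info, 0, sizeof(info));
	info.port = 7682;
	info.vhost_name = "localhost2";
	info.finalize = vh_finalized;	/* same hook the minimal example below uses */
	info.finalize_arg = NULL;

	vh = lws_create_vhost(context, &info);
	if (!vh)
		return;

	/*
	 * phase 1 happens now: the listen socket is handed to another vhost
	 * on the same port or closed; wsi bound to vh are closed async on
	 * every pt; vh_finalized() fires when count_bound_wsi reaches zero
	 */
	lws_vhost_destroy(vh);
}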

View file

@@ -503,6 +503,7 @@ struct lws_vhost {
#endif
lws_lifecycle_t lc;
lws_dll2_t vh_being_destroyed_list;
#if defined(LWS_WITH_SOCKS5)
char socks_proxy_address[128];
@@ -1550,6 +1551,9 @@ lws_plat_mbedtls_net_recv(void *ctx, unsigned char *buf, size_t len);
lws_usec_t
lws_sul_nonmonotonic_adjust(struct lws_context *ctx, int64_t step_us);
void
__lws_vhost_destroy_pt_wsi_dieback_start(struct lws_vhost *vh);
void
lws_netdev_instance_remove_destroy(struct lws_netdev_instance *ni);

View file

@@ -993,6 +993,90 @@ lws_destroy_event_pipe(struct lws *wsi)
lws_free(wsi);
}
/*
* Start the close process for any wsi bound to this vhost that belong to the
* service thread we are called from. Because of async event lib close,
* protocol staged close on wsi, or latency while other pts join in closing
* their wsi on the vhost, this may take some time.
*
* When the wsi count bound to the vhost (from all pts) drops to zero, the
* vhost destruction will be finalized.
*/
void
__lws_vhost_destroy_pt_wsi_dieback_start(struct lws_vhost *vh)
{
#if LWS_MAX_SMP > 1
/* calling pt thread has done its wsi dieback */
int tsi = lws_pthread_self_to_tsi(vh->context);
#else
int tsi = 0;
#endif
struct lws_context *ctx = vh->context;
struct lws_context_per_thread *pt = &ctx->pt[tsi];
unsigned int n;
#if LWS_MAX_SMP > 1
if (vh->close_flow_vs_tsi[lws_pthread_self_to_tsi(vh->context)])
/* this pt has already done its bit */
return;
#endif
lwsl_info("%s: %s\n", __func__, vh->name);
#if defined(LWS_WITH_CLIENT)
/*
* destroy any wsi that are associated with us but have no socket
* (and will otherwise be missed for destruction)
*/
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
vh->vh_awaiting_socket_owner.head) {
struct lws *w =
lws_container_of(d, struct lws, vh_awaiting_socket);
if (w->tsi == tsi) {
lwsl_debug("%s: closing aso\n", __func__);
lws_close_free_wsi(w, LWS_CLOSE_STATUS_NOSTATUS,
"awaiting skt");
}
} lws_end_foreach_dll_safe(d, d1);
#endif
/*
* Close any wsi on this pt bound to the vhost
*/
n = 0;
while (n < pt->fds_count) {
struct lws *wsi = wsi_from_fd(ctx, pt->fds[n].fd);
if (wsi && wsi->tsi == tsi && wsi->a.vhost == vh) {
lwsl_debug("%s: pt %d: closing wsi %p: role %s\n",
__func__, tsi, wsi, wsi->role_ops->name);
lws_wsi_close(wsi, LWS_TO_KILL_ASYNC);
if (pt->pipe_wsi == wsi)
pt->pipe_wsi = NULL;
}
n++;
}
#if LWS_MAX_SMP > 1
/* calling pt thread has done its wsi dieback */
vh->close_flow_vs_tsi[lws_pthread_self_to_tsi(vh->context)] = 1;
#endif
}
/*
* Mark the vhost as being destroyed, so things trying to use it abort.
*
* Dispose of the listen socket.
*/
void
lws_vhost_destroy1(struct lws_vhost *vh)
@@ -1008,6 +1092,10 @@ lws_vhost_destroy1(struct lws_vhost *vh)
lws_vhost_lock(vh); /* -------------- vh { */
vh->being_destroyed = 1;
lws_dll2_add_tail(&vh->vh_being_destroyed_list,
&context->owner_vh_being_destroyed);
#if defined(LWS_WITH_NETWORK)
/*
* PHASE 1: take down or reassign any listen wsi
@@ -1016,12 +1104,13 @@ lws_vhost_destroy1(struct lws_vhost *vh)
* If so we need to hand the listen socket off to one of the others
* so it will remain open.
*
* If not, leave it attached to the closing vhost, the vh being marked
* being_destroyed will defeat any service and it will get closed in
* later phases.
* If not, close the listen socket now.
*
* Either way, the listen socket's response to the vhost close is
* performed immediately.
*/
if (vh->lserv_wsi)
if (vh->lserv_wsi) {
lws_start_foreach_ll(struct lws_vhost *, v,
context->vhost_list) {
if (v != vh &&
@@ -1038,7 +1127,7 @@ lws_vhost_destroy1(struct lws_vhost *vh)
* iface + port, but is not closing.
*/
lwsl_notice("%s: listen skt from %s to %s\n",
lwsl_notice("%s: listen skt migrate %s -> %s\n",
__func__, lws_vh_tag(vh),
lws_vh_tag(v));
@@ -1048,23 +1137,28 @@ lws_vhost_destroy1(struct lws_vhost *vh)
if (v->lserv_wsi) {
lws_vhost_unbind_wsi(vh->lserv_wsi);
lws_vhost_bind_wsi(v, v->lserv_wsi);
vh->lserv_wsi = NULL;
}
break;
}
} lws_end_foreach_ll(v, vhost_next);
if (vh->lserv_wsi) {
/*
* we didn't pass it off to another vhost on the same
* listen port... let's close it next time around the
* event loop without waiting for the logical destroy
* of the vhost itself
*/
lws_set_timeout(vh->lserv_wsi, 1, LWS_TO_KILL_ASYNC);
vh->lserv_wsi = NULL;
}
}
#endif
lws_vhost_unlock(vh); /* } vh -------------- */
/*
* lws_check_deferred_free() will notice there is a vhost that is
* marked for destruction during the next 1s, for all tsi.
*
* It will start closing all wsi on this vhost. When the last wsi
* is closed, it will trigger lws_vhost_destroy2()
*/
out:
lws_context_unlock(context); /* --------------------------- context { */
}
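The PHASE 1 handoff above is what keeps a shared listen port alive when one of the vhosts on it goes away. A hedged illustration of that behaviour, not taken from the diff; the vhost names and port are invented and error handling is trimmed:

static void
shared_listener_example(struct lws_context *context)
{
	struct lws_context_creation_info info;
	struct lws_vhost *vh_a, *vh_b;

	memset(&info, 0, sizeof(info));
	info.port = 7681;
	info.vhost_name = "a.example.com";
	vh_a = lws_create_vhost(context, &info);

	info.vhost_name = "b.example.com"; /* same iface + port: shares vh_a's listen wsi */
	vh_b = lws_create_vhost(context, &info);

	if (!vh_a || !vh_b)
		return;

	/*
	 * PHASE 1 runs immediately: the listen wsi is rebound to vh_b instead
	 * of staying with the dying vhost, so :7681 never stops accepting
	 */
	lws_vhost_destroy(vh_a);
}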
@@ -1081,33 +1175,22 @@ destroy_ais(struct lws_dll2 *d, void *user)
}
#endif
/*
* Either start close or destroy any wsi on the vhost that belong to this pt,
* if SMP mark the vh that we have done it for
*/
void
__lws_vhost_destroy2(struct lws_vhost *vh)
{
const struct lws_protocols *protocol = NULL;
struct lws_context *context = vh->context;
struct lws_deferred_free *df;
struct lws wsi;
int n;
vh->being_destroyed = 0;
#if defined(LWS_WITH_CLIENT)
/*
* destroy any wsi that are associated with us but have no socket
* (and will otherwise be missed for destruction)
*/
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
vh->vh_awaiting_socket_owner.head) {
struct lws *w =
lws_container_of(d, struct lws, vh_awaiting_socket);
lwsl_debug("%s: closing aso\n", __func__);
lws_close_free_wsi(w, LWS_CLOSE_STATUS_NOSTATUS,
"awaiting skt");
} lws_end_foreach_dll_safe(d, d1);
#endif
lwsl_info("%s: %s\n", __func__, vh->name);
#if defined(LWS_WITH_DEPRECATED_THINGS)
/*
@@ -1160,18 +1243,6 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
//lwsl_debug("%s: do dfl '%s'\n", __func__, vh->name);
/* if we are still on deferred free list, remove ourselves */
lws_start_foreach_llp(struct lws_deferred_free **, pdf,
context->deferred_free_list) {
if ((*pdf)->payload == vh) {
df = *pdf;
*pdf = df->next;
lws_free(df);
break;
}
} lws_end_foreach_llp(pdf, next);
/* remove ourselves from the pending destruction list */
lws_start_foreach_llp(struct lws_vhost **, pv,
@@ -1257,110 +1328,73 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
lws_dll2_foreach_safe(&vh->abstract_instances_owner, NULL, destroy_ais);
#endif
lws_dll2_remove(&vh->vh_being_destroyed_list);
__lws_lc_untag(&vh->lc);
memset(vh, 0, sizeof(*vh));
lws_free(vh);
}
/*
* each service thread calls this once a second or so
* Starts the vhost destroy process
*
* Vhosts are not simple to deal with because they are an abstraction that
* crosses SMP thread boundaries: a wsi on any pt can bind to any vhost. If we
* want another pt to do something to its wsi safely, we have to ask it to do
* so asynchronously.
*
* In addition, with event libs, closing any handles (which are bound to vhosts
* via their wsi) can happen asynchronously, so we can't just do the cleanup
* flow linearly and free it in one step.
*
* The vhost destroy is cut into two pieces:
*
* 1) dispose of the listen socket, either by passing it on to another vhost
* that was already sharing it, or just closing it.
*
* If any wsi are still bound to the vhost, mark the vhost as being in the
* process of destruction, triggering each pt to close all wsi bound to the
* vhost next time around its event loop. Call lws_cancel_service() so all the
* pts wake to deal with this without long poll waits adding delay.
*
* 2) When the number of wsi bound to the vhost reaches zero, do the final
* vhost destroy flow; this can be triggered from any pt.
*/
int
lws_check_deferred_free(struct lws_context *context, int tsi, int force)
{
struct lws_context_per_thread *pt;
int n;
/*
* If we see a vhost is being destroyed, forcibly close every wsi on
* this tsi associated with this vhost. That will include the listen
* socket if it is still associated with the closing vhost.
*
* For SMP, we do this once per tsi per destroyed vhost. As the bound wsi
* close, the reference counting on the vhost will notice that there are no
* bound wsi left, so that vhost destruction can complete,
* and will perform it. It doesn't matter which service thread does that
* because there is nothing left using the vhost to conflict.
*/
lws_context_lock(context, "check deferred free"); /* ------ context { */
lws_start_foreach_ll_safe(struct lws_vhost *, v, context->vhost_list, vhost_next) {
if (v->being_destroyed
#if LWS_MAX_SMP > 1
&& !v->close_flow_vs_tsi[tsi]
#endif
) {
pt = &context->pt[tsi];
lws_pt_lock(pt, "vhost removal"); /* -------------- pt { */
#if LWS_MAX_SMP > 1
v->close_flow_vs_tsi[tsi] = 1;
#endif
for (n = 0; (unsigned int)n < pt->fds_count; n++) {
struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
if (!wsi)
continue;
if (wsi->a.vhost != v)
continue;
__lws_close_free_wsi(wsi,
LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY,
"vh destroy"
/* no protocol close */);
n--;
}
lws_pt_unlock(pt); /* } pt -------------- */
}
} lws_end_foreach_ll_safe(v);
lws_context_unlock(context); /* } context ------------------- */
return 0;
}
void
lws_vhost_destroy(struct lws_vhost *vh)
{
struct lws_deferred_free *df = lws_malloc(sizeof(*df), "deferred free");
struct lws_context *context = vh->context;
if (!df)
return;
lws_context_lock(context, __func__); /* ------ context { */
/* dispose of the listen socket one way or another */
lws_vhost_destroy1(vh);
lwsl_debug("%s: count_bound_wsi %d\n", __func__, vh->count_bound_wsi);
/* start async closure of all wsi on this pt thread attached to vh */
__lws_vhost_destroy_pt_wsi_dieback_start(vh);
lwsl_notice("%s: count_bound_wsi %d\n", __func__, vh->count_bound_wsi);
/* if there are none, finalize now since no further chance */
if (!vh->count_bound_wsi) {
/*
* After listen handoff, there are already no wsi bound to this
* vhost by any pt: nothing can be servicing any wsi belonging
* to it any more.
*
* Finalize the vh destruction immediately
*/
__lws_vhost_destroy2(vh);
lws_free(df);
goto out;
}
/* part 2 is deferred to allow all the handle closes to complete */
/*
* We have some wsi bound to this vhost; we have to wait for these to
* complete their close and unbind before progressing the vhost removal.
*
* When the last bound wsi on this vh is destroyed we will auto-call
* __lws_vhost_destroy2() to finalize vh destruction (sketched after this hunk)
*/
df->next = vh->context->deferred_free_list;
df->deadline = (long)lws_now_secs();
df->payload = vh;
vh->context->deferred_free_list = df;
#if LWS_MAX_SMP > 1
/* alert other pts they also need to do dieback flow for their wsi */
lws_cancel_service(context);
#endif
out:
lws_context_unlock(context); /* } context ------------------- */
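The comments in lws_vhost_destroy() above rely on the wsi unbind path to finalize things, but that path is outside this diff. A rough sketch of what the unbind side has to do under the new scheme, assuming the private core headers; example_unbind_wsi() is a hypothetical stand-in for the real lws_vhost_unbind_wsi(), which differs in detail (locking order, stats, and so on):

static void
example_unbind_wsi(struct lws *wsi)
{
	struct lws_vhost *vh = wsi->a.vhost;
	char last;

	if (!vh)
		return;

	lws_vhost_lock(vh); /* -------------- vh { */
	assert(vh->count_bound_wsi);
	vh->count_bound_wsi--;
	wsi->a.vhost = NULL;
	/* the last wsi leaving a dying vhost is what finalizes it */
	last = !vh->count_bound_wsi && vh->being_destroyed;
	lws_vhost_unlock(vh); /* } vh -------------- */

	if (last)
		/* frees vh: nothing may touch it after this returns */
		__lws_vhost_destroy2(vh);
}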
@@ -1429,7 +1463,7 @@ lws_get_vhost_by_name(struct lws_context *context, const char *name)
{
lws_start_foreach_ll(struct lws_vhost *, v,
context->vhost_list) {
if (!strcmp(v->name, name))
if (!v->being_destroyed && !strcmp(v->name, name))
return v;
} lws_end_foreach_ll(v, vhost_next);

View file

@@ -1861,15 +1861,6 @@ next:
lws_free(context->pl_hash_table);
#endif
/* drop any lingering deferred vhost frees */
while (context->deferred_free_list) {
struct lws_deferred_free *df = context->deferred_free_list;
context->deferred_free_list = df->next;
lws_free(df);
};
#if defined(LWS_WITH_NETWORK)
for (n = 0; n < context->count_threads; n++) {
@@ -1912,7 +1903,8 @@ next:
goto bail;
}
if (!context->pt[0].event_loop_foreign) {
if (context->event_loop_ops->destroy_context1 &&
!context->pt[0].event_loop_foreign) {
lwsl_notice("%s: waiting for internal loop exit\n", __func__);
goto bail;

View file

@@ -293,13 +293,6 @@ struct lws_foreign_thread_pollfd {
#include "private-lib-core-net.h"
#endif
struct lws_deferred_free
{
struct lws_deferred_free *next;
time_t deadline;
void *payload;
};
struct lws_system_blob {
union {
struct lws_buflist *bl;
@@ -431,6 +424,8 @@ struct lws_context {
* LWS_WITH_NETWORK =====>
*/
lws_dll2_owner_t owner_vh_being_destroyed;
#if defined(LWS_WITH_EVENT_LIBS)
struct lws_plugin *evlib_plugin_list;
void *evlib_ctx; /* overallocated */
@@ -532,8 +527,6 @@ struct lws_context {
mbedtls_ctr_drbg_context mcdc;
#endif
struct lws_deferred_free *deferred_free_list;
#if defined(LWS_WITH_THREADPOOL)
struct lws_threadpool *tp_list_head;
#endif
@@ -656,9 +649,6 @@ struct lws_context {
#endif
};
int
lws_check_deferred_free(struct lws_context *context, int tsi, int force);
#define lws_get_context_protocol(ctx, x) ctx->vhost_list->protocols[x]
#define lws_get_vh_protocol(vh, x) vh->protocols[x]

View file

@@ -60,8 +60,6 @@ lws_sul_plat_unix(lws_sorted_usec_list_t *sul)
return;
}
lws_check_deferred_free(context, 0, 0);
#if defined(LWS_WITH_SERVER)
lws_context_lock(context, "periodic checks");
lws_start_foreach_llp(struct lws_vhost **, pv,

View file

@@ -64,6 +64,30 @@ rops_handle_POLLIN_pipe(struct lws_context_per_thread *pt, struct lws *wsi,
lws_threadpool_tsi_context(pt->context, pt->tid);
#endif
#if LWS_MAX_SMP > 1
/*
* Other pts need to take care of their own wsi bound to a vhost that
* is going down
*/
if (pt->context->owner_vh_being_destroyed.head) {
lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
pt->context->owner_vh_being_destroyed.head) {
struct lws_vhost *v =
lws_container_of(d, struct lws_vhost,
vh_being_destroyed_list);
lws_vhost_lock(v); /* -------------- vh { */
__lws_vhost_destroy_pt_wsi_dieback_start(v);
lws_vhost_unlock(v); /* } vh -------------- */
} lws_end_foreach_dll_safe(d, d1);
}
#endif
/*
* the poll() wait, or the event loop for libuv etc is a
* process-wide resource that we interrupted. So let every

View file

@@ -323,6 +323,22 @@ lws_ss_policy_set(struct lws_context *context, const char *name)
* ...but when we did the trust stores, we created vhosts for
* each. We need to destroy those now too, and recreate new
* ones from the new policy, perhaps with different X.509s.
*
* Vhost destruction is inherently async: a vhost can't be destroyed
* until all of the wsi bound to it have closed, and, eg, with libuv
* their closure is deferred until a later trip around the event
* loop. SMP means we also have to wait for all the pts to close
* their wsi that are bound to the vhost.
*
* This marks the vhost as being destroyed so new things won't
* use it, starts the close of all wsi on this pt that are
* bound to the vhost, and deals with the listen socket if any.
* "being-destroyed" vhosts can't be found using get_vhost_by_
* name(), so if a new vhost of the same name exists that isn't
* being destroyed that will be the one found.
*
* When the number of wsi bound to the vhost gets to zero a
* short time later, the vhost is actually destroyed.
*/
v = context->vhost_list;
@@ -336,8 +352,6 @@ lws_ss_policy_set(struct lws_context *context, const char *name)
}
v = v->vhost_next;
}
lws_check_deferred_free(context, 0, 1);
}
context->pss_policies = args->heads[LTY_POLICY].p;
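Together with the lws_get_vhost_by_name() change earlier in this commit, which skips vhosts marked being_destroyed, the policy swap can start tearing down a trust-store vhost and immediately create its replacement under the same name. A hedged sketch of that pattern using the public API; the vhost name is invented:

static int
replace_trust_store_vhost(struct lws_context *context)
{
	struct lws_context_creation_info info;
	struct lws_vhost *old = lws_get_vhost_by_name(context, "api.example.com");

	if (old)
		/* marked being_destroyed now, finalized later when its wsi are gone */
		lws_vhost_destroy(old);

	memset(&info, 0, sizeof(info));
	info.port = CONTEXT_PORT_NO_LISTEN;	/* client-only vhost, no listen socket */
	info.vhost_name = "api.example.com";

	/* the dying vhost is invisible to name lookups, so no name clash */
	return !lws_create_vhost(context, &info);
}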

View file

@@ -89,6 +89,7 @@ int main(int argc, const char **argv)
{
struct lws_context_creation_info info;
struct lws_context *context;
struct lws_vhost *new_vhost;
const char *p;
int n = 0, logs = LLL_USER | LLL_ERR | LLL_WARN | LLL_NOTICE
/* for LLL_ verbosity above NOTICE to be built into lws,
@@ -147,9 +148,12 @@
info.error_document_404 = "/404.html";
info.vhost_name = "localhost2";
if (!lws_create_vhost(context, &info)) {
lwsl_err("Failed to create second vhost\n");
goto bail;
if (!lws_cmdline_option(argc, argv, "--kill-7682")) {
if (!lws_create_vhost(context, &info)) {
lwsl_err("Failed to create second vhost\n");
goto bail;
}
}
/* a second vhost listens on port 7682 */
@@ -159,11 +163,15 @@
info.finalize = vh_destruction_notification;
info.finalize_arg = NULL;
if (!lws_create_vhost(context, &info)) {
new_vhost = lws_create_vhost(context, &info);
if (!new_vhost) {
lwsl_err("Failed to create third vhost\n");
goto bail;
}
if (lws_cmdline_option(argc, argv, "--kill-7682"))
lws_vhost_destroy(new_vhost);
if (lws_cmdline_option(argc, argv, "--die-after-vhost")) {
lwsl_warn("bailing after creating vhosts\n");
goto bail;