lws-vhost-destroy
Adds a new API, lws_vhost_destroy(struct lws_vhost *), which allows dynamic removal of vhosts.

The external API calls two internal helpers that are also reused for context destroy. The second part is deferred by 5s... this is to ensure that event library objects composed into structs owned by the vhost all have a chance to complete their close asynchronously. That should happen immediately, but it requires us to return to the event loop first.

The vhost being removed is unlinked from the context vhost list by the first part, so it does not block further removals or creations during the delay before the deferred freeing of the vhost memory.

Part 1:

 - if the vhost owned a listen socket needed by other vhosts listening on the same iface + port, the listen socket is first handed off to another vhost so it stays alive
 - all wsi still open on the vhost are forcibly closed (including any listen socket still attached)
 - all active protocols on the vhost are informed they should destroy themselves
 - the vhost is removed from the context vhost list (it can no longer be found by incoming connections)
 - the vhost is added to a "being destroyed" context list and the second part is scheduled to run in 5s

Part 2:

 - remove ourselves from the being-destroyed list
 - free all allocations owned by the vhost
 - zero down the vhost and free the vhost itself

In libwebsockets-test-server, you can send it a SIGUSR1 to have it toggle the creation and destruction of a second vhost on port + 1.
parent 632a0acc99
commit faa1526b39
13 changed files with 420 additions and 69 deletions
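For orientation before the diff, a minimal usage sketch of the new API (illustrative only; it assumes a context and vhost already created with lws_create_context() / lws_create_vhost(), and a service loop that keeps running so the deferred second phase can fire):

#include <libwebsockets.h>

/*
 * Illustrative sketch, not part of this commit: "vh" is assumed to be a
 * vhost previously returned by lws_create_vhost().
 */
static void
drop_vhost(struct lws_vhost *vh)
{
    /*
     * part 1 runs immediately: open wsi on the vhost are closed, the
     * protocols get LWS_CALLBACK_PROTOCOL_DESTROY, and the vhost is
     * unlinked from the context vhost list
     */
    lws_vhost_destroy(vh);

    /*
     * part 2 (freeing the vhost memory) happens about 5s later from the
     * normal service loop, so just keep calling lws_service() or running
     * the libuv loop as usual
     */
}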
@@ -103,6 +103,13 @@ same.
[test-server.c](test-server/test-server.c) is all that is needed to use libwebsockets for
serving both the script html over http and websockets.

@section lwstsdynvhost Dynamic Vhosts

You can send libwebsockets-test-server or libwebsockets-test-server-v2.0 a SIGUSR1
to toggle the creation and destruction of an identical second vhost on port + 1.

This is intended as a test and demonstration for how to bring up and remove
vhosts dynamically.

@section wscl Testing websocket client support
lib/context.c (297)
@@ -1078,10 +1078,242 @@ lws_context_is_deprecated(struct lws_context *context)
LWS_VISIBLE void
lws_context_destroy2(struct lws_context *context);

static void
lws_vhost_destroy1(struct lws_vhost *vh)
{
    const struct lws_protocols *protocol = NULL;
    struct lws_context_per_thread *pt;
    int n, m = vh->context->count_threads;
    struct lws_context *context = vh->context;
    struct lws wsi;

    lwsl_notice("%s\n", __func__);

    if (vh->being_destroyed)
        return;

    vh->being_destroyed = 1;

    /*
     * Are there other vhosts that are piggybacking on our listen socket?
     * If so we need to hand the listen socket off to one of the others
     * so it will remain open. If not, leave it attached to the closing
     * vhost and it will get closed.
     */

    if (vh->lserv_wsi)
        lws_start_foreach_ll(struct lws_vhost *, v, context->vhost_list) {
            if (v != vh &&
                !v->being_destroyed &&
                v->listen_port == vh->listen_port &&
                ((!v->iface && !vh->iface) ||
                (v->iface && vh->iface &&
                !strcmp(v->iface, vh->iface)))) {
                /*
                 * this can only be a listen wsi, which is
                 * restricted... it has no protocol or other
                 * bindings or states. So we can simply
                 * swap it to a vhost that has the same
                 * iface + port, but is not closing.
                 */
                assert(v->lserv_wsi == NULL);
                v->lserv_wsi = vh->lserv_wsi;
                vh->lserv_wsi = NULL;
                v->lserv_wsi->vhost = v;

                lwsl_notice("%s: listen skt from %s to %s\n",
                            __func__, vh->name, v->name);
                break;
            }
        } lws_end_foreach_ll(v, vhost_next);

    /*
     * Forcibly close every wsi assoicated with this vhost. That will
     * include the listen socket if it is still associated with the closing
     * vhost.
     */

    while (m--) {
        pt = &context->pt[m];

        for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
            struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);
            if (!wsi)
                continue;
            if (wsi->vhost != vh)
                continue;

            lws_close_free_wsi(wsi,
                LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
                /* no protocol close */);
            n--;
        }
    }

    /*
     * let the protocols destroy the per-vhost protocol objects
     */

    memset(&wsi, 0, sizeof(wsi));
    wsi.context = vh->context;
    wsi.vhost = vh;
    protocol = vh->protocols;
    if (protocol) {
        n = 0;
        while (n < vh->count_protocols) {
            wsi.protocol = protocol;
            protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
                               NULL, NULL, 0);
            protocol++;
            n++;
        }
    }

    /*
     * remove vhost from context list of vhosts
     */

    lws_start_foreach_llp(struct lws_vhost **, pv, context->vhost_list) {
        if (*pv == vh) {
            *pv = vh->vhost_next;
            break;
        }
    } lws_end_foreach_llp(pv, vhost_next);

    /* add ourselves to the pending destruction list */

    vh->vhost_next = vh->context->vhost_pending_destruction_list;
    vh->context->vhost_pending_destruction_list = vh;
}

static void
lws_vhost_destroy2(struct lws_vhost *vh)
{
    const struct lws_protocols *protocol = NULL;
    struct lws_context *context = vh->context;
    struct lws_deferred_free *df;
    int n;

    lwsl_notice("%s: %p\n", __func__, vh);

    /* if we are still on deferred free list, remove ourselves */

    lws_start_foreach_llp(struct lws_deferred_free **, pdf, context->deferred_free_list) {
        if ((*pdf)->payload == vh) {
            df = *pdf;
            *pdf = df->next;
            lws_free(df);
            break;
        }
    } lws_end_foreach_llp(pdf, next);

    /* remove ourselves from the pending destruction list */

    lws_start_foreach_llp(struct lws_vhost **, pv, context->vhost_pending_destruction_list) {
        if ((*pv) == vh) {
            *pv = (*pv)->vhost_next;
            break;
        }
    } lws_end_foreach_llp(pv, vhost_next);

    /*
     * Free all the allocations associated with the vhost
     */

    protocol = vh->protocols;
    if (protocol) {
        n = 0;
        while (n < vh->count_protocols) {
            if (vh->protocol_vh_privs &&
                vh->protocol_vh_privs[n]) {
                lws_free(vh->protocol_vh_privs[n]);
                vh->protocol_vh_privs[n] = NULL;
            }
            protocol++;
            n++;
        }
    }
    if (vh->protocol_vh_privs)
        lws_free(vh->protocol_vh_privs);
    lws_ssl_SSL_CTX_destroy(vh);
    lws_free(vh->same_vh_protocol_list);
#ifdef LWS_WITH_PLUGINS
    if (LWS_LIBUV_ENABLED(context)) {
        if (context->plugin_list)
            lws_free((void *)vh->protocols);
    } else
#endif
    {
        if (context->options & LWS_SERVER_OPTION_EXPLICIT_VHOSTS)
            lws_free((void *)vh->protocols);
    }

#ifdef LWS_WITH_PLUGINS
#ifndef LWS_NO_EXTENSIONS
    if (context->plugin_extension_count)
        lws_free((void *)vh->extensions);
#endif
#endif
#ifdef LWS_WITH_ACCESS_LOG
    if (vh->log_fd != (int)LWS_INVALID_FILE)
        close(vh->log_fd);
#endif

    /*
     * although async event callbacks may still come for wsi handles with
     * pending close in the case of asycn event library like libuv,
     * they do not refer to the vhost. So it's safe to free.
     */

    lwsl_notice(" %s: Freeing vhost %p\n", __func__, vh);

    memset(vh, 0, sizeof(*vh));
    free(vh);
}

int
lws_check_deferred_free(struct lws_context *context, int force)
{
    struct lws_deferred_free *df;
    time_t now = lws_now_secs();

    lws_start_foreach_llp(struct lws_deferred_free **, pdf, context->deferred_free_list) {
        if (now > (*pdf)->deadline || force) {
            df = *pdf;
            *pdf = df->next;
            /* finalize vh destruction */
            lwsl_notice("doing deferred vh %p destroy\n", df->payload);
            lws_vhost_destroy2(df->payload);
            lws_free(df);
            continue; /* after deletion we already point to next */
        }
    } lws_end_foreach_llp(pdf, next);

    return 0;
}

LWS_VISIBLE void
lws_vhost_destroy(struct lws_vhost *vh)
{
    struct lws_deferred_free *df = malloc(sizeof(*df));

    if (!df)
        return;

    lws_vhost_destroy1(vh);

    /* part 2 is deferred to allow all the handle closes to complete */

    df->next = vh->context->deferred_free_list;
    df->deadline = lws_now_secs() + 5;
    df->payload = vh;
    vh->context->deferred_free_list = df;
}

LWS_VISIBLE void
lws_context_destroy(struct lws_context *context)
{
    const struct lws_protocols *protocol = NULL;
    struct lws_context_per_thread *pt;
    struct lws_vhost *vh = NULL;
    struct lws wsi;
@@ -1146,19 +1378,7 @@ lws_context_destroy(struct lws_context *context)
    if (context->protocol_init_done)
        vh = context->vhost_list;
    while (vh) {
        wsi.vhost = vh;
        protocol = vh->protocols;
        if (protocol) {
            n = 0;
            while (n < vh->count_protocols) {
                wsi.protocol = protocol;
                protocol->callback(&wsi, LWS_CALLBACK_PROTOCOL_DESTROY,
                                   NULL, NULL, 0);
                protocol++;
                n++;
            }
        }

        lws_vhost_destroy1(vh);
        vh = vh->vhost_next;
    }
@@ -1191,9 +1411,7 @@ lws_context_destroy(struct lws_context *context)
LWS_VISIBLE void
lws_context_destroy2(struct lws_context *context)
{
    const struct lws_protocols *protocol = NULL;
    struct lws_vhost *vh = NULL, *vh1;
    int n;

    lwsl_notice("%s: ctx %p\n", __func__, context);
@@ -1203,47 +1421,18 @@ lws_context_destroy2(struct lws_context *context)
    vh = context->vhost_list;
    while (vh) {
        protocol = vh->protocols;
        if (protocol) {
            n = 0;
            while (n < vh->count_protocols) {
                if (vh->protocol_vh_privs &&
                    vh->protocol_vh_privs[n]) {
                    // lwsl_notice(" %s: freeing per-vhost protocol data %p\n", __func__, vh->protocol_vh_privs[n]);
                    lws_free(vh->protocol_vh_privs[n]);
                    vh->protocol_vh_privs[n] = NULL;
                }
                protocol++;
                n++;
            }
        }
        if (vh->protocol_vh_privs)
            lws_free(vh->protocol_vh_privs);
        lws_ssl_SSL_CTX_destroy(vh);
        lws_free(vh->same_vh_protocol_list);
#ifdef LWS_WITH_PLUGINS
        if (context->plugin_list)
            lws_free((void *)vh->protocols);
#else
        if (vh->options & LWS_SERVER_OPTION_EXPLICIT_VHOSTS)
            lws_free((void *)vh->protocols);
#endif
#ifdef LWS_WITH_PLUGINS
#ifndef LWS_NO_EXTENSIONS
        if (context->plugin_extension_count)
            lws_free((void *)vh->extensions);
#endif
#endif
#ifdef LWS_WITH_ACCESS_LOG
        if (vh->log_fd != (int)LWS_INVALID_FILE)
            close(vh->log_fd);
#endif

        vh1 = vh->vhost_next;
        lws_free(vh);
        lws_vhost_destroy2(vh);
        vh = vh1;
    }

    /* remove ourselves from the pending destruction list */

    while (context->vhost_pending_destruction_list)
        /* removes itself from list */
        lws_vhost_destroy2(context->vhost_pending_destruction_list);

    lws_stats_log_dump(context);

    lws_ssl_context_destroy(context);
@@ -1252,5 +1441,7 @@ lws_context_destroy2(struct lws_context *context)
    if (context->external_baggage_free_on_destroy)
        free(context->external_baggage_free_on_destroy);

    lws_check_deferred_free(context, 1);

    lws_free(context);
}
@@ -605,6 +605,7 @@ lws_plat_plugins_init(struct lws_context *context, const char * const *d)
        if (uv_dlopen(path, &lib)) {
            uv_dlerror(&lib);
            lwsl_err("Error loading DSO: %s\n", lib.errmsg);
            uv_dlclose(&lib);
            goto bail;
        }
@@ -684,6 +684,10 @@ async_close:
#ifdef LWS_USE_LIBUV
    if (LWS_LIBUV_ENABLED(context)) {
        if (wsi->listener) {
            lwsl_debug("%s: stopping listner libuv poll\n", __func__);
            uv_poll_stop(&wsi->w_read.uv_watcher);
        }
        lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);
        /* libuv has to do his own close handle processing asynchronously */
        lws_libuv_closehandle(wsi);
@@ -2207,10 +2207,22 @@ struct lws_vhost;
 * members of the info struct. You can create many vhosts inside one context
 * if you created the context with the option LWS_SERVER_OPTION_EXPLICIT_VHOSTS
 */
LWS_EXTERN LWS_VISIBLE struct lws_vhost *
LWS_VISIBLE LWS_EXTERN struct lws_vhost *
lws_create_vhost(struct lws_context *context,
                 struct lws_context_creation_info *info);

/**
 * lws_destroy_vhost() - Destroy a vhost (virtual server context)
 * \param vhost: pointer to result of lws_create_vhost()
 *
 * This function destroys a vhost. Normally, if you just want to exit,
 * then lws_destroy_context() will take care of everything. If you want
 * to destroy an individual vhost and all connections and allocations, you
 * can do it with this.
 */
LWS_VISIBLE LWS_EXTERN void
lws_vhost_destroy(struct lws_vhost *vh);

/**
 * lwsws_get_config_globals() - Parse a JSON server config file
 * \param info: pointer to struct with parameters
@@ -902,11 +902,19 @@ struct lws_vhost {
#endif

    unsigned int created_vhost_protocols:1;
    unsigned int being_destroyed:1;

    unsigned char default_protocol_index;
    unsigned char raw_protocol_index;
};

struct lws_deferred_free
{
    struct lws_deferred_free *next;
    time_t deadline;
    void *payload;
};

/*
 * the rest is managed per-context, that includes
 *

@@ -939,7 +947,9 @@ struct lws_context {
#endif
#endif
    struct lws_vhost *vhost_list;
    struct lws_vhost *vhost_pending_destruction_list;
    struct lws_plugin *plugin_list;
    struct lws_deferred_free *deferred_free_list;

    void *external_baggage_free_on_destroy;
    const struct lws_token_limits *token_limits;

@@ -1029,6 +1039,9 @@ struct lws_context {
    uint8_t max_fi;
};

int
lws_check_deferred_free(struct lws_context *context, int force);

#define lws_get_context_protocol(ctx, x) ctx->vhost_list->protocols[x]
#define lws_get_vh_protocol(vh, x) vh->protocols[x]
@@ -1806,7 +1806,7 @@ lws_http_transaction_completed(struct lws *wsi)
        return 0;
    }

    lwsl_notice("%s: wsi %p\n", __func__, wsi);
    lwsl_debug("%s: wsi %p\n", __func__, wsi);
    /* if we can't go back to accept new headers, drop the connection */
    if (wsi->u.http.connection_type != HTTP_CONNECTION_KEEP_ALIVE) {
        lwsl_info("%s: %p: close connection\n", __func__, wsi);
@@ -809,6 +809,8 @@ lws_service_fd_tsi(struct lws_context *context, struct lws_pollfd *pollfd, int t
    lws_plat_service_periodic(context);

    lws_check_deferred_free(context, 0);

    /* retire unused deprecated context */
#if !defined(LWS_PLAT_OPTEE) && !defined(LWS_WITH_ESP32)
#if LWS_POSIX && !defined(_WIN32)
@@ -160,7 +160,7 @@ lws_ssl_server_name_cb(SSL *ssl, int *ad, void *arg)
     */
    vh = context->vhost_list;
    while (vh) {
        if (vh->ssl_ctx == SSL_get_SSL_CTX(ssl))
        if (!vh->being_destroyed && vh->ssl_ctx == SSL_get_SSL_CTX(ssl))
            break;
        vh = vh->vhost_next;
    }
@@ -317,5 +317,7 @@ int main(int argc, char **argv)
    closelog();
#endif

    context = NULL;

    return 0;
}
@@ -88,7 +88,7 @@ callback_dumb_increment(struct lws *wsi, enum lws_callback_reasons reason,
    case LWS_CALLBACK_PROTOCOL_DESTROY:
        if (!vhd)
            break;
        // lwsl_notice("di: LWS_CALLBACK_PROTOCOL_DESTROY: v=%p, ctx=%p\n", vhd, vhd->context);
        lwsl_notice("di: LWS_CALLBACK_PROTOCOL_DESTROY: v=%p, ctx=%p\n", vhd, vhd->context);
        uv_timer_stop(&vhd->timeout_watcher);
        uv_close((uv_handle_t *)&vhd->timeout_watcher, NULL);
        break;
@@ -25,9 +25,21 @@
#include <syslog.h>
#endif

/* windows has no SIGUSR1 */
#if !defined(WIN32) && !defined(_WIN32)
#define TEST_DYNAMIC_VHOST
#endif

struct lws_context_creation_info info;
int debug_level = 7;
struct lws_context *context;

#if defined(TEST_DYNAMIC_VHOST)
volatile int dynamic_vhost_enable = 0;
struct lws_vhost *dynamic_vhost;
uv_timer_t timeout_watcher;
#endif

/* http server gets files from this path */
#define LOCAL_RESOURCE_PATH INSTALL_DATADIR"/libwebsockets-test-server"
char *resource_path = LOCAL_RESOURCE_PATH;

@@ -48,6 +60,44 @@ char crl_path[1024] = "";
 * You can find the individual protocol plugin sources in ../plugins
 */

#if defined(TEST_DYNAMIC_VHOST)

/*
 * to test dynamic vhost creation, fire a SIGUSR1 at the test server.
 * It will toggle the existence of a second identical vhost at port + 1
 *
 * To synchronize with the event loop, it uses a libuv timer with 0 delay
 * to get the business end called as the next event.
 */

static void
uv_timeout_dynamic_vhost_toggle(uv_timer_t *w
#if UV_VERSION_MAJOR == 0
        , int status
#endif
)
{
    if (dynamic_vhost_enable && !dynamic_vhost) {
        lwsl_notice("creating dynamic vhost...\n");
        dynamic_vhost = lws_create_vhost(context, &info);
    } else
        if (!dynamic_vhost_enable && dynamic_vhost) {
            lwsl_notice("destroying dynamic vhost...\n");
            lws_vhost_destroy(dynamic_vhost);
            dynamic_vhost = NULL;
        }
}

void sighandler_USR1(int sig)
{
    dynamic_vhost_enable ^= 1;
    lwsl_notice("SIGUSR1: dynamic_vhost_enable: %d\n",
                dynamic_vhost_enable);
    uv_timer_start(&timeout_watcher,
                   uv_timeout_dynamic_vhost_toggle, 0, 0);
}
#endif

void sighandler(int sig)
{
    lws_cancel_service(context);
@@ -257,7 +307,7 @@ static const char * const plugin_dirs[] = {

int main(int argc, char **argv)
{
    struct lws_context_creation_info info;
    struct lws_vhost *vhost;
    char interface_name[128] = "";
    const char *iface = NULL;
    char cert_path[1024] = "";

@@ -374,6 +424,9 @@ int main(int argc, char **argv)
#endif

    signal(SIGINT, sighandler);
#if defined(TEST_DYNAMIC_VHOST)
    signal(SIGUSR1, sighandler_USR1);
#endif

#ifndef _WIN32
    /* we will only try to log things according to our debug_level */

@@ -385,7 +438,7 @@ int main(int argc, char **argv)
    lws_set_log_level(debug_level, lwsl_emit_syslog);

    lwsl_notice("libwebsockets test server - license LGPL2.1+SLE\n");
    lwsl_notice("(C) Copyright 2010-2016 Andy Green <andy@warmcat.com>\n");
    lwsl_notice("(C) Copyright 2010-2017 Andy Green <andy@warmcat.com>\n");

    lwsl_notice(" Using resource path \"%s\"\n", resource_path);

@@ -396,7 +449,8 @@ int main(int argc, char **argv)
    info.gid = gid;
    info.uid = uid;
    info.max_http_header_pool = 16;
    info.options = opts | LWS_SERVER_OPTION_FALLBACK_TO_RAW |
    info.options = opts | LWS_SERVER_OPTION_EXPLICIT_VHOSTS |
                   LWS_SERVER_OPTION_FALLBACK_TO_RAW |
                   LWS_SERVER_OPTION_VALIDATE_UTF8 |
                   LWS_SERVER_OPTION_LIBUV; /* plugins require this */

@@ -447,17 +501,16 @@ int main(int argc, char **argv)
    /* tell lws about our mount we want */
    info.mounts = &mount;
    /*
     * give it our linked-list of Per-Vhost Options, these control
     * give it our linked-list of Per-Vhost Options, these control
     * which protocols (from plugins) are allowed to be enabled on
     * our vhost
     */
    info.pvo = &pvo;

    /*
     * As it is, this creates the context and a single Vhost at the same
     * time. You can use LWS_SERVER_OPTION_EXPLICIT_VHOSTS option above
     * to just create the context, and call lws_create_vhost() afterwards
     * multiple times with different info to get multiple listening vhosts.
     * Since we used LWS_SERVER_OPTION_EXPLICIT_VHOSTS, this only creates
     * the context. We can modify info and create as many vhosts as we
     * like subsequently.
     */
    context = lws_create_context(&info);
    if (context == NULL) {
@@ -465,15 +518,42 @@ int main(int argc, char **argv)
        return -1;
    }

    /*
     * normally we would adapt at least info.name to reflect the
     * external hostname for this server.
     */
    vhost = lws_create_vhost(context, &info);
    if (!vhost) {
        lwsl_err("vhost creation failed\n");
        return -1;
    }

#if defined(TEST_DYNAMIC_VHOST)
    /* our dynamic vhost is on port + 1 */
    info.port++;
#endif

    /* libuv event loop */
    lws_uv_sigint_cfg(context, 1, signal_cb);
    if (lws_uv_initloop(context, NULL, 0))
    if (lws_uv_initloop(context, NULL, 0)) {
        lwsl_err("lws_uv_initloop failed\n");
    else
        lws_libuv_run(context, 0);
        goto bail;
    }

#if defined(TEST_DYNAMIC_VHOST)
    uv_timer_init(lws_uv_getloop(context, 0), &timeout_watcher);
#endif
    lws_libuv_run(context, 0);

#if defined(TEST_DYNAMIC_VHOST)
    uv_timer_stop(&timeout_watcher);
    uv_close((uv_handle_t *)&timeout_watcher, NULL);
#endif

bail:
    /* when we decided to exit the event loop */
    lws_context_destroy(context);
    lws_context_destroy2(context);
    lwsl_notice("libwebsockets-test-server exited cleanly\n");

#ifndef _WIN32
@@ -29,7 +29,8 @@ struct lws_pollfd *pollfds;
int *fd_lookup;
int count_pollfds;
#endif
volatile int force_exit = 0;
volatile int force_exit = 0, dynamic_vhost_enable = 0;
struct lws_vhost *dynamic_vhost;
struct lws_context *context;
struct lws_plat_file_ops fops_plat;

@@ -159,6 +160,20 @@ test_server_fops_open(const struct lws_plat_file_ops *fops,

void sighandler(int sig)
{
#if !defined(WIN32) && !defined(_WIN32)
    /* because windows is too dumb to have SIGUSR1... */
    if (sig == SIGUSR1) {
        /*
         * For testing, you can fire a SIGUSR1 at the test server
         * to toggle the existence of an identical server on
         * port + 1
         */
        dynamic_vhost_enable ^= 1;
        lwsl_notice("SIGUSR1: dynamic_vhost_enable: %d\n",
                    dynamic_vhost_enable);
        return;
    }
#endif
    force_exit = 1;
    lws_cancel_service(context);
}

@@ -347,6 +362,11 @@ int main(int argc, char **argv)
#endif

    signal(SIGINT, sighandler);
#if !defined(WIN32) && !defined(_WIN32)
    /* because windows is too dumb to have SIGUSR1... */
    /* dynamic vhost create / destroy toggle (on port + 1) */
    signal(SIGUSR1, sighandler);
#endif

#ifndef _WIN32
    /* we will only try to log things according to our debug_level */
@@ -434,6 +454,14 @@ int main(int argc, char **argv)
        return -1;
    }

    /*
     * For testing dynamic vhost create / destroy later, we use port + 1
     * Normally if you were creating more vhosts, you would set info.name
     * for each to be the hostname external clients use to reach it
     */

    info.port++;

#if !defined(LWS_NO_CLIENT) && defined(LWS_OPENSSL_SUPPORT)
    lws_init_vhost_client_ssl(&info, vhost);
#endif

@@ -516,6 +544,17 @@ int main(int argc, char **argv)

        n = lws_service(context, 50);
#endif

        if (dynamic_vhost_enable && !dynamic_vhost) {
            lwsl_notice("creating dynamic vhost...\n");
            dynamic_vhost = lws_create_vhost(context, &info);
        } else
            if (!dynamic_vhost_enable && dynamic_vhost) {
                lwsl_notice("destroying dynamic vhost...\n");
                lws_vhost_destroy(dynamic_vhost);
                dynamic_vhost = NULL;
            }

    }

#ifdef EXTERNAL_POLL