2016-02-14 09:27:41 +08:00
|
|
|
/*
|
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
|
|
|
*
|
|
|
|
* Copyright (C) 2010-2016 Andy Green <andy@warmcat.com>
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation:
|
|
|
|
* version 2.1 of the License.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
|
|
|
* MA 02110-1301 USA
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "private-libwebsockets.h"
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_feature_status_libuv(struct lws_context_creation_info *info)
|
|
|
|
{
|
2016-03-23 09:22:11 +08:00
|
|
|
if (lws_check_opt(info->options, LWS_SERVER_OPTION_LIBUV))
|
2016-02-14 09:27:41 +08:00
|
|
|
lwsl_notice("libuv support compiled in and enabled\n");
|
|
|
|
else
|
|
|
|
lwsl_notice("libuv support compiled in but disabled\n");
|
|
|
|
}
|
|
|
|
|
2016-04-06 09:23:16 +08:00
|
|
|
/*
 * libuv idle callback: while any connection still has pending work that
 * needs forced service, keep servicing; once nobody needs forcing, stop
 * the idle handle so the loop can sleep again.
 */
static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
	    , int status
#endif
)
{
	struct lws_context_per_thread *pt = lws_container_of(handle,
					struct lws_context_per_thread, uv_idle);

//	lwsl_debug("%s\n", __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		_lws_plat_service_tsi(pt->context, -1, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
			/* yes... come back again later */
//			lwsl_debug("%s: done again\n", __func__);
			return;
	}

	/* there is nobody who needs service forcing, shut down idle */
	uv_idle_stop(handle);

	//lwsl_debug("%s: done stop\n", __func__);
}
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
/*
 * libuv poll callback: translate the libuv status / revents into an
 * lws_pollfd and hand it to lws_service_fd(), then kick the idle handle
 * so any forced-service backlog is drained.
 */
static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	/* recover the wsi from the embedded uv_poll_t (two container_of hops) */
	struct lws_io_watcher *lws_io = lws_container_of(watcher,
					struct lws_io_watcher, uv_watcher);
	struct lws *wsi = lws_container_of(lws_io, struct lws, w_read);
	struct lws_context *context = wsi->context;
	struct lws_pollfd eventfd;

#if defined(WIN32) || defined(_WIN32)
	/* on windows libuv exposes the SOCKET directly */
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/* at this point status will be an UV error, like UV_EBADF,
		   we treat all errors as LWS_POLLHUP */

		/* you might want to return; instead of servicing the fd in some cases */
		if (status == UV_EAGAIN)
			return;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}
	lws_service_fd(context, &eventfd);

	/* arrange for lws_uv_idle() to run and drain forced service */
	uv_idle_start(&context->pt[(int)wsi->tsi].uv_idle, lws_uv_idle);
}
|
|
|
|
|
|
|
|
LWS_VISIBLE void
|
2016-03-22 14:04:15 +01:00
|
|
|
lws_uv_sigint_cb(uv_signal_t *watcher, int signum)
|
2016-02-14 09:27:41 +08:00
|
|
|
{
|
2016-05-04 08:27:56 +08:00
|
|
|
lwsl_err("internal signal handler caught signal %d\n", signum);
|
2016-03-22 15:19:10 +01:00
|
|
|
lws_libuv_stop(watcher->data);
|
2016-02-14 09:27:41 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
LWS_VISIBLE int
|
|
|
|
lws_uv_sigint_cfg(struct lws_context *context, int use_uv_sigint,
|
2016-03-22 14:04:15 +01:00
|
|
|
uv_signal_cb cb)
|
2016-02-14 09:27:41 +08:00
|
|
|
{
|
|
|
|
context->use_ev_sigint = use_uv_sigint;
|
|
|
|
if (cb)
|
|
|
|
context->lws_uv_sigint_cb = cb;
|
|
|
|
else
|
|
|
|
context->lws_uv_sigint_cb = &lws_uv_sigint_cb;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-02-27 11:03:27 +08:00
|
|
|
/*
 * Periodic libuv timer callback: drives lws housekeeping (a NULL-pollfd
 * service pass) for this service thread, unless the context is already
 * being killed.
 */
static void
lws_uv_timeout_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		  , int status
#endif
)
{
	struct lws_context_per_thread *pt = lws_container_of(timer,
			struct lws_context_per_thread, uv_timeout_watcher);

	/* shutdown already requested: don't start new service work */
	if (pt->context->requested_kill)
		return;

	lwsl_debug("%s\n", __func__);

	/* NULL pollfd means "just do background / timeout processing" */
	lws_service_fd_tsi(pt->context, NULL, pt->tid);
}
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
/* signals lws can trap on the uv loop; with
 * LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN only the first two are used */
static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
|
2016-02-14 09:27:41 +08:00
|
|
|
|
2016-11-30 07:05:13 +08:00
|
|
|
/*
 * Attach the vhost's listen wsi (or the given wsi) to the uv loop by
 * initializing its uv_poll watcher and starting read events.
 *
 * Returns 0 on success or if there is nothing to do (libuv disabled, no
 * listen wsi, already attached, or loop not created yet); -1 on
 * uv_poll_init_socket() failure.
 */
int
lws_uv_initvhost(struct lws_vhost* vh, struct lws* wsi)
{
	struct lws_context_per_thread *pt;
	int n;

	if (!LWS_LIBUV_ENABLED(vh->context))
		return 0;
	if (!wsi)
		wsi = vh->lserv_wsi;
	if (!wsi)
		return 0;
	/* already attached to a loop: nothing to do (idempotent) */
	if (wsi->w_read.context)
		return 0;

	pt = &vh->context->pt[(int)wsi->tsi];
	if (!pt->io_loop_uv)
		return 0;

	/* mark as attached before the uv call; lws_libuv_io checks this */
	wsi->w_read.context = vh->context;
	n = uv_poll_init_socket(pt->io_loop_uv,
				&wsi->w_read.uv_watcher, wsi->desc.sockfd);
	if (n) {
		lwsl_err("uv_poll_init failed %d, sockfd=%p\n",
			 n, (void *)(lws_intptr_t)wsi->desc.sockfd);

		return -1;
	}
	lws_libuv_io(wsi, LWS_EV_START | LWS_EV_READ);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This needs to be called after vhosts have been defined.
|
|
|
|
*
|
|
|
|
* If later, after server start, another vhost is added, this must be
|
|
|
|
* called again to bind the vhost
|
|
|
|
*/
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
LWS_VISIBLE int
|
2016-03-22 14:04:15 +01:00
|
|
|
lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
|
2016-02-14 09:27:41 +08:00
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt = &context->pt[tsi];
|
2016-03-28 10:10:43 +08:00
|
|
|
struct lws_vhost *vh = context->vhost_list;
|
2016-11-30 07:05:13 +08:00
|
|
|
int status = 0, n, ns, first = 1;
|
2016-02-14 09:27:41 +08:00
|
|
|
|
2016-11-30 07:05:13 +08:00
|
|
|
if (!pt->io_loop_uv) {
|
2016-05-12 21:04:33 +08:00
|
|
|
if (!loop) {
|
2016-11-30 07:05:13 +08:00
|
|
|
loop = lws_malloc(sizeof(*loop));
|
|
|
|
if (!loop) {
|
|
|
|
lwsl_err("OOM\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
#if UV_VERSION_MAJOR > 0
|
|
|
|
uv_loop_init(loop);
|
|
|
|
#else
|
|
|
|
lwsl_err("This libuv is too old to work...\n");
|
|
|
|
return 1;
|
|
|
|
#endif
|
|
|
|
pt->ev_loop_foreign = 0;
|
|
|
|
} else {
|
|
|
|
lwsl_notice(" Using foreign event loop...\n");
|
|
|
|
pt->ev_loop_foreign = 1;
|
2016-05-12 21:04:33 +08:00
|
|
|
}
|
2016-02-14 09:27:41 +08:00
|
|
|
|
2016-11-30 07:05:13 +08:00
|
|
|
pt->io_loop_uv = loop;
|
|
|
|
uv_idle_init(loop, &pt->uv_idle);
|
2016-02-14 09:27:41 +08:00
|
|
|
|
2016-11-30 07:05:13 +08:00
|
|
|
ns = ARRAY_SIZE(sigs);
|
|
|
|
if (lws_check_opt(context->options,
|
|
|
|
LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
|
|
|
|
ns = 2;
|
2016-08-20 05:47:29 +08:00
|
|
|
|
2016-11-30 07:05:13 +08:00
|
|
|
if (pt->context->use_ev_sigint) {
|
|
|
|
assert(ns <= ARRAY_SIZE(pt->signals));
|
|
|
|
for (n = 0; n < ns; n++) {
|
|
|
|
uv_signal_init(loop, &pt->signals[n]);
|
|
|
|
pt->signals[n].data = pt->context;
|
|
|
|
uv_signal_start(&pt->signals[n],
|
|
|
|
context->lws_uv_sigint_cb, sigs[n]);
|
|
|
|
}
|
2016-03-22 15:19:10 +01:00
|
|
|
}
|
2016-11-30 07:05:13 +08:00
|
|
|
} else
|
|
|
|
first = 0;
|
2016-02-14 09:27:41 +08:00
|
|
|
|
|
|
|
/*
|
2016-03-28 10:10:43 +08:00
|
|
|
* Initialize the accept wsi read watcher with all the listening sockets
|
2016-02-14 09:27:41 +08:00
|
|
|
* and register a callback for read operations
|
|
|
|
*
|
|
|
|
* We have to do it here because the uv loop(s) are not
|
|
|
|
* initialized until after context creation.
|
|
|
|
*/
|
2016-03-28 10:10:43 +08:00
|
|
|
while (vh) {
|
2016-11-30 07:05:13 +08:00
|
|
|
if (lws_uv_initvhost(vh, vh->lserv_wsi) == -1)
|
|
|
|
return -1;
|
2016-03-28 10:10:43 +08:00
|
|
|
vh = vh->vhost_next;
|
2016-02-14 09:27:41 +08:00
|
|
|
}
|
|
|
|
|
2016-11-30 07:05:13 +08:00
|
|
|
if (first) {
|
|
|
|
uv_timer_init(pt->io_loop_uv, &pt->uv_timeout_watcher);
|
|
|
|
uv_timer_start(&pt->uv_timeout_watcher, lws_uv_timeout_cb,
|
|
|
|
10, 1000);
|
|
|
|
}
|
2016-02-27 11:03:27 +08:00
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2016-05-04 08:27:56 +08:00
|
|
|
/*
 * No-op uv_close completion callback; handle storage is owned by the
 * containing lws structures, so nothing to free here.
 */
static void lws_uv_close_cb(uv_handle_t *handle)
{
	(void)handle;
}
|
|
|
|
|
2016-05-04 08:27:56 +08:00
|
|
|
/*
 * uv_walk callback: request close on every handle that is not already
 * closing.
 */
static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
{
	(void)arg;

	if (uv_is_closing(handle))
		return;

	uv_close(handle, lws_uv_close_cb);
}
|
|
|
|
|
2017-07-15 17:48:37 +08:00
|
|
|
/*
 * Request close on every handle currently registered on the loop.
 * Close completions are only delivered once the loop runs again.
 */
LWS_VISIBLE void
lws_close_all_handles_in_loop(uv_loop_t *loop)
{
	uv_walk(loop, lws_uv_walk_cb, NULL);
}
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
/*
 * Tear down this service thread's uv loop state: stop + close signals,
 * the housekeeping timer and the idle handle, then (only for loops lws
 * itself allocated) drain the loop, close remaining handles, close the
 * loop and free it.  Foreign loops are left for their owner.
 */
void
lws_libuv_destroyloop(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
//	struct lws_context *ctx;
	int m, budget = 100, ns;

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	if (!pt->io_loop_uv)
		return;

	lwsl_notice("%s: closing signals + timers context %p\n", __func__, context);

	if (context->use_ev_sigint) {
		uv_signal_stop(&pt->w_sigint.uv_watcher);

		/* same signal-count logic as lws_uv_initloop() */
		ns = ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options, LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt->signals[m]);
			uv_close((uv_handle_t *)&pt->signals[m], lws_uv_close_cb);
		}
	}

	uv_timer_stop(&pt->uv_timeout_watcher);
	uv_close((uv_handle_t *)&pt->uv_timeout_watcher, lws_uv_close_cb);

	uv_idle_stop(&pt->uv_idle);
	uv_close((uv_handle_t *)&pt->uv_idle, lws_uv_close_cb);

	/* foreign loop: owner is responsible for draining / closing it */
	if (pt->ev_loop_foreign)
		return;

	/* let pending close callbacks complete, bounded by budget */
	while (budget-- && uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
		;

	lwsl_notice("%s: closing all loop handles context %p\n", __func__, context);

	uv_stop(pt->io_loop_uv);

	uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);

	/* run until all the uv_close() completions have been delivered */
	while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
		;
#if UV_VERSION_MAJOR > 0
	m = uv_loop_close(pt->io_loop_uv);
	if (m == UV_EBUSY)
		lwsl_err("%s: uv_loop_close: UV_EBUSY\n", __func__);
#endif
	lws_free(pt->io_loop_uv);
}
|
|
|
|
|
|
|
|
/*
 * Register a freshly-accepted (or adopted) wsi with the uv loop for its
 * service thread.  Raw file descriptors use uv_poll_init(); sockets use
 * uv_poll_init_socket().
 */
void
lws_libuv_accept(struct lws *wsi, lws_sock_file_fd_type desc)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];

	if (!LWS_LIBUV_ENABLED(context))
		return;

	lwsl_debug("%s: new wsi %p\n", __func__, wsi);

	/* mark the watcher as bound; lws_libuv_io() requires this */
	wsi->w_read.context = context;
	if (wsi->mode == LWSCM_RAW_FILEDESC)
		uv_poll_init(pt->io_loop_uv, &wsi->w_read.uv_watcher,
			     (int)desc.filefd);
	else
		uv_poll_init_socket(pt->io_loop_uv, &wsi->w_read.uv_watcher,
				    desc.sockfd);
}
|
|
|
|
|
|
|
|
/*
 * Apply LWS_EV_* flag changes (START/STOP of READ/WRITE interest) to the
 * wsi's uv_poll watcher, merging with whatever events are already armed.
 * Stops the watcher entirely when no events remain.
 */
void
lws_libuv_io(struct lws *wsi, int flags)
{
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
#if defined(WIN32) || defined(_WIN32)
	/* windows libuv keeps the armed events on the uv_poll_t itself */
	int current_events = wsi->w_read.uv_watcher.events &
			     (UV_READABLE | UV_WRITABLE);
#else
	int current_events = wsi->w_read.uv_watcher.io_watcher.pevents &
			     (UV_READABLE | UV_WRITABLE);
#endif
	struct lws_io_watcher *w = &wsi->w_read;

	if (!LWS_LIBUV_ENABLED(context))
		return;

//	lwsl_notice("%s: wsi: %p, flags:0x%x\n", __func__, wsi, flags);

	// w->context is set after the loop is initialized

	if (!pt->io_loop_uv || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);
		return;
	}

	/* flags must contain one of START/STOP and one of READ/WRITE */
	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d", __func__, flags);
		assert(0);
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(&w->uv_watcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(&w->uv_watcher);
		else
			uv_poll_start(&w->uv_watcher, current_events,
				      lws_io_cb);
	}
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lws_libuv_init_fd_table(struct lws_context *context)
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
|
|
|
if (!LWS_LIBUV_ENABLED(context))
|
|
|
|
return 0;
|
|
|
|
|
2016-04-06 16:15:40 +08:00
|
|
|
for (n = 0; n < context->count_threads; n++)
|
2016-02-14 09:27:41 +08:00
|
|
|
context->pt[n].w_sigint.context = context;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
LWS_VISIBLE void
|
|
|
|
lws_libuv_run(const struct lws_context *context, int tsi)
|
|
|
|
{
|
|
|
|
if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
|
|
|
|
uv_run(context->pt[tsi].io_loop_uv, 0);
|
|
|
|
}
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
LWS_VISIBLE void
|
|
|
|
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
|
|
|
|
{
|
|
|
|
if (context->pt[tsi].io_loop_uv && LWS_LIBUV_ENABLED(context))
|
|
|
|
uv_stop(context->pt[tsi].io_loop_uv);
|
|
|
|
}
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
/*
 * Final stage of shutdown: stop the uv loop of every service thread.
 * Reached once all wsi closures have been finalized (see lws_libuv_stop).
 */
static void
lws_libuv_kill(const struct lws_context *context)
{
	int n;

	lwsl_notice("%s\n", __func__);

	for (n = 0; n < context->count_threads; n++)
		/* NOTE(review): the foreign-loop exclusion below is
		 * deliberately commented out — foreign loops are also
		 * stopped here */
		if (context->pt[n].io_loop_uv &&
		    LWS_LIBUV_ENABLED(context) )//&&
//		    !context->pt[n].ev_loop_foreign)
			uv_stop(context->pt[n].io_loop_uv);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This does not actually stop the event loop. The reason is we have to pass
|
|
|
|
* libuv handle closures through its event loop. So this tries to close all
|
|
|
|
* wsi, and set a flag; when all the wsi closures are finalized then we
|
|
|
|
* actually stop the libuv event loops.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * This does not actually stop the event loop.  The reason is we have to
 * pass libuv handle closures through its event loop.  So this tries to
 * close all wsi, and sets requested_kill; when all the wsi closures are
 * finalized (lws_libuv_closewsi) we actually stop the libuv event loops
 * via lws_libuv_kill().
 */
LWS_VISIBLE void
lws_libuv_stop(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m;

	/* idempotent: only act on the first request */
	if (context->requested_kill)
		return;

	context->requested_kill = 1;

	m = context->count_threads;
	context->being_destroyed = 1;

	while (m--) {
		pt = &context->pt[m];

		for (n = 0; (unsigned int)n < context->pt[m].fds_count; n++) {
			struct lws *wsi = wsi_from_fd(context, pt->fds[n].fd);

			if (!wsi)
				continue;
			lws_close_free_wsi(wsi,
				LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY
				/* no protocol close */);
			/* close compacts the fds array over this slot;
			 * step back so we don't skip the moved entry */
			n--;
		}
	}

	lwsl_info("%s: feels everything closed\n", __func__);
	/* if nothing was open in the first place, finish immediately */
	if (context->count_wsi_allocated == 0)
		lws_libuv_kill(context);
}
|
|
|
|
|
|
|
|
/*
 * Return the uv loop for service thread tsi, or NULL when libuv is
 * disabled or the loop has not been created.
 */
LWS_VISIBLE uv_loop_t *
lws_uv_getloop(struct lws_context *context, int tsi)
{
	if (!LWS_LIBUV_ENABLED(context))
		return NULL;

	return context->pt[tsi].io_loop_uv;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lws_libuv_closewsi(uv_handle_t* handle)
|
|
|
|
{
|
2016-03-31 20:11:53 +08:00
|
|
|
struct lws *n = NULL, *wsi = (struct lws *)(((char *)handle) -
|
|
|
|
(char *)(&n->w_read.uv_watcher));
|
2016-02-14 09:27:41 +08:00
|
|
|
struct lws_context *context = lws_get_context(wsi);
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
int lspd = 0;
|
|
|
|
|
|
|
|
if (wsi->mode == LWSCM_SERVER_LISTENER &&
|
|
|
|
wsi->context->deprecated) {
|
|
|
|
lspd = 1;
|
|
|
|
context->deprecation_pending_listen_close_count--;
|
|
|
|
if (!context->deprecation_pending_listen_close_count)
|
|
|
|
lspd = 2;
|
|
|
|
}
|
2016-02-14 09:27:41 +08:00
|
|
|
|
|
|
|
lws_close_free_wsi_final(wsi);
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
if (lspd == 2 && context->deprecation_cb) {
|
|
|
|
lwsl_notice("calling deprecation callback\n");
|
|
|
|
context->deprecation_cb();
|
|
|
|
}
|
|
|
|
|
|
|
|
//lwsl_notice("%s: ctx %p: wsi left %d\n", __func__, context, context->count_wsi_allocated);
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
if (context->requested_kill && context->count_wsi_allocated == 0)
|
|
|
|
lws_libuv_kill(context);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_libuv_closehandle(struct lws *wsi)
|
|
|
|
{
|
|
|
|
struct lws_context *context = lws_get_context(wsi);
|
|
|
|
|
|
|
|
/* required to defer actual deletion until libuv has processed it */
|
|
|
|
uv_close((uv_handle_t*)&wsi->w_read.uv_watcher, lws_libuv_closewsi);
|
|
|
|
|
|
|
|
if (context->requested_kill && context->count_wsi_allocated == 0)
|
|
|
|
lws_libuv_kill(context);
|
|
|
|
}
|
2016-04-09 07:22:40 +08:00
|
|
|
|
2017-06-23 10:27:52 +08:00
|
|
|
static void
|
|
|
|
lws_libuv_closewsi_m(uv_handle_t* handle)
|
|
|
|
{
|
2017-07-07 08:32:04 +08:00
|
|
|
lws_sockfd_type sockfd = (lws_sockfd_type)(lws_intptr_t)handle->data;
|
2017-06-23 10:27:52 +08:00
|
|
|
|
|
|
|
compatible_close(sockfd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_libuv_closehandle_manually(struct lws *wsi)
|
|
|
|
{
|
|
|
|
uv_handle_t *h = (void *)&wsi->w_read.uv_watcher;
|
|
|
|
|
2017-07-07 08:32:04 +08:00
|
|
|
h->data = (void *)(lws_intptr_t)wsi->desc.sockfd;
|
2017-06-23 10:27:52 +08:00
|
|
|
/* required to defer actual deletion until libuv has processed it */
|
|
|
|
uv_close((uv_handle_t*)&wsi->w_read.uv_watcher, lws_libuv_closewsi_m);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lws_libuv_check_watcher_active(struct lws *wsi)
|
|
|
|
{
|
|
|
|
uv_handle_t *h = (void *)&wsi->w_read.uv_watcher;
|
|
|
|
|
|
|
|
return uv_is_active(h);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-04-09 07:22:40 +08:00
|
|
|
#if defined(LWS_WITH_PLUGINS) && (UV_VERSION_MAJOR > 0)
|
|
|
|
|
|
|
|
LWS_VISIBLE int
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
lws_plat_plugins_init(struct lws_context *context, const char * const *d)
|
2016-04-09 07:22:40 +08:00
|
|
|
{
|
|
|
|
struct lws_plugin_capability lcaps;
|
|
|
|
struct lws_plugin *plugin;
|
|
|
|
lws_plugin_init_func initfunc;
|
|
|
|
int m, ret = 0;
|
|
|
|
void *v;
|
|
|
|
uv_dirent_t dent;
|
|
|
|
uv_fs_t req;
|
|
|
|
char path[256];
|
|
|
|
uv_lib_t lib;
|
2017-04-28 11:54:27 +08:00
|
|
|
int pofs = 0;
|
|
|
|
|
|
|
|
#if defined(__MINGW32__) || !defined(WIN32)
|
|
|
|
pofs = 3;
|
|
|
|
#endif
|
2016-04-09 07:22:40 +08:00
|
|
|
|
|
|
|
lib.errmsg = NULL;
|
|
|
|
lib.handle = NULL;
|
|
|
|
|
2017-07-15 17:57:14 +08:00
|
|
|
uv_loop_init(&context->pu_loop);
|
2016-04-09 07:22:40 +08:00
|
|
|
|
|
|
|
lwsl_notice(" Plugins:\n");
|
|
|
|
|
2016-05-02 10:03:25 +08:00
|
|
|
while (d && *d) {
|
2016-04-09 07:22:40 +08:00
|
|
|
|
2016-05-02 10:03:25 +08:00
|
|
|
lwsl_notice(" Scanning %s\n", *d);
|
2017-07-15 17:57:14 +08:00
|
|
|
m =uv_fs_scandir(&context->pu_loop, &req, *d, 0, NULL);
|
2016-05-02 10:03:25 +08:00
|
|
|
if (m < 1) {
|
|
|
|
lwsl_err("Scandir on %s failed\n", *d);
|
|
|
|
return 1;
|
2016-04-09 07:22:40 +08:00
|
|
|
}
|
|
|
|
|
2016-05-02 10:03:25 +08:00
|
|
|
while (uv_fs_scandir_next(&req, &dent) != UV_EOF) {
|
|
|
|
if (strlen(dent.name) < 7)
|
|
|
|
continue;
|
2016-04-09 07:22:40 +08:00
|
|
|
|
2016-05-02 10:03:25 +08:00
|
|
|
lwsl_notice(" %s\n", dent.name);
|
|
|
|
|
2016-09-15 02:22:57 +08:00
|
|
|
lws_snprintf(path, sizeof(path) - 1, "%s/%s", *d, dent.name);
|
2016-05-02 10:03:25 +08:00
|
|
|
if (uv_dlopen(path, &lib)) {
|
|
|
|
uv_dlerror(&lib);
|
|
|
|
lwsl_err("Error loading DSO: %s\n", lib.errmsg);
|
2017-07-15 14:37:04 +08:00
|
|
|
uv_dlclose(&lib);
|
2016-05-02 10:03:25 +08:00
|
|
|
goto bail;
|
|
|
|
}
|
2017-04-28 11:54:27 +08:00
|
|
|
|
2016-05-02 10:03:25 +08:00
|
|
|
/* we could open it, can we get his init function? */
|
2017-04-28 11:54:27 +08:00
|
|
|
|
|
|
|
#if !defined(WIN32) && !defined(__MINGW32__)
|
2016-09-15 02:22:57 +08:00
|
|
|
m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
|
2017-04-28 11:54:27 +08:00
|
|
|
dent.name + pofs /* snip lib... */);
|
2016-05-02 10:03:25 +08:00
|
|
|
path[m - 3] = '\0'; /* snip the .so */
|
2016-05-25 21:43:58 +08:00
|
|
|
#else
|
2016-09-15 02:22:57 +08:00
|
|
|
m = lws_snprintf(path, sizeof(path) - 1, "init_%s",
|
2017-04-28 11:54:27 +08:00
|
|
|
dent.name + pofs);
|
2016-05-25 21:43:58 +08:00
|
|
|
path[m - 4] = '\0'; /* snip the .dll */
|
|
|
|
#endif
|
2016-05-02 10:03:25 +08:00
|
|
|
if (uv_dlsym(&lib, path, &v)) {
|
|
|
|
uv_dlerror(&lib);
|
2017-04-28 11:54:27 +08:00
|
|
|
lwsl_err("Failed to get %s on %s: %s", path,
|
2016-05-02 10:03:25 +08:00
|
|
|
dent.name, lib.errmsg);
|
2017-07-15 17:57:14 +08:00
|
|
|
uv_dlclose(&lib);
|
2016-05-02 10:03:25 +08:00
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
initfunc = (lws_plugin_init_func)v;
|
|
|
|
lcaps.api_magic = LWS_PLUGIN_API_MAGIC;
|
|
|
|
m = initfunc(context, &lcaps);
|
|
|
|
if (m) {
|
|
|
|
lwsl_err("Initializing %s failed %d\n", dent.name, m);
|
|
|
|
goto skip;
|
|
|
|
}
|
|
|
|
|
|
|
|
plugin = lws_malloc(sizeof(*plugin));
|
|
|
|
if (!plugin) {
|
2017-07-15 17:57:14 +08:00
|
|
|
uv_dlclose(&lib);
|
2016-05-02 10:03:25 +08:00
|
|
|
lwsl_err("OOM\n");
|
|
|
|
goto bail;
|
|
|
|
}
|
|
|
|
plugin->list = context->plugin_list;
|
|
|
|
context->plugin_list = plugin;
|
|
|
|
strncpy(plugin->name, dent.name, sizeof(plugin->name) - 1);
|
|
|
|
plugin->name[sizeof(plugin->name) - 1] = '\0';
|
|
|
|
plugin->lib = lib;
|
|
|
|
plugin->caps = lcaps;
|
|
|
|
context->plugin_protocol_count += lcaps.count_protocols;
|
|
|
|
context->plugin_extension_count += lcaps.count_extensions;
|
|
|
|
|
|
|
|
continue;
|
2016-04-09 07:22:40 +08:00
|
|
|
|
|
|
|
skip:
|
2016-05-02 10:03:25 +08:00
|
|
|
uv_dlclose(&lib);
|
|
|
|
}
|
|
|
|
bail:
|
|
|
|
uv_fs_req_cleanup(&req);
|
|
|
|
d++;
|
2016-04-09 07:22:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
LWS_VISIBLE int
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
lws_plat_plugins_destroy(struct lws_context *context)
|
2016-04-09 07:22:40 +08:00
|
|
|
{
|
|
|
|
struct lws_plugin *plugin = context->plugin_list, *p;
|
|
|
|
lws_plugin_destroy_func func;
|
|
|
|
char path[256];
|
|
|
|
void *v;
|
|
|
|
int m;
|
2017-04-28 11:54:27 +08:00
|
|
|
int pofs = 0;
|
|
|
|
|
|
|
|
#if defined(__MINGW32__) || !defined(WIN32)
|
|
|
|
pofs = 3;
|
|
|
|
#endif
|
2016-04-09 07:22:40 +08:00
|
|
|
|
|
|
|
if (!plugin)
|
|
|
|
return 0;
|
|
|
|
|
2016-05-08 17:07:46 +08:00
|
|
|
// lwsl_notice("%s\n", __func__);
|
2016-04-09 07:22:40 +08:00
|
|
|
|
|
|
|
while (plugin) {
|
|
|
|
p = plugin;
|
2017-04-28 11:54:27 +08:00
|
|
|
|
|
|
|
#if !defined(WIN32) && !defined(__MINGW32__)
|
|
|
|
m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s", plugin->name + pofs);
|
2016-04-09 07:22:40 +08:00
|
|
|
path[m - 3] = '\0';
|
2016-05-25 21:43:58 +08:00
|
|
|
#else
|
2017-04-28 11:54:27 +08:00
|
|
|
m = lws_snprintf(path, sizeof(path) - 1, "destroy_%s", plugin->name + pofs);
|
2016-05-25 21:43:58 +08:00
|
|
|
path[m - 4] = '\0';
|
|
|
|
#endif
|
2016-04-09 07:22:40 +08:00
|
|
|
|
|
|
|
if (uv_dlsym(&plugin->lib, path, &v)) {
|
|
|
|
uv_dlerror(&plugin->lib);
|
2017-04-28 11:54:27 +08:00
|
|
|
lwsl_err("Failed to get %s on %s: %s", path,
|
2016-04-09 07:22:40 +08:00
|
|
|
plugin->name, plugin->lib.errmsg);
|
|
|
|
} else {
|
|
|
|
func = (lws_plugin_destroy_func)v;
|
|
|
|
m = func(context);
|
|
|
|
if (m)
|
|
|
|
lwsl_err("Destroying %s failed %d\n",
|
|
|
|
plugin->name, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
uv_dlclose(&p->lib);
|
|
|
|
plugin = p->list;
|
|
|
|
p->list = NULL;
|
|
|
|
free(p);
|
|
|
|
}
|
|
|
|
|
|
|
|
context->plugin_list = NULL;
|
|
|
|
|
2017-07-15 17:57:14 +08:00
|
|
|
while (uv_loop_close(&context->pu_loop))
|
|
|
|
;
|
|
|
|
|
2016-04-09 07:22:40 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|