2016-02-14 09:27:41 +08:00
|
|
|
/*
|
|
|
|
* libwebsockets - small server side websockets and web server implementation
|
|
|
|
*
|
2021-08-10 06:35:59 +01:00
|
|
|
* Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com>
|
2016-02-14 09:27:41 +08:00
|
|
|
*
|
2019-08-14 10:44:14 +01:00
|
|
|
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
|
|
* of this software and associated documentation files (the "Software"), to
|
|
|
|
* deal in the Software without restriction, including without limitation the
|
|
|
|
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
|
|
|
|
* sell copies of the Software, and to permit persons to whom the Software is
|
|
|
|
* furnished to do so, subject to the following conditions:
|
2016-02-14 09:27:41 +08:00
|
|
|
*
|
2019-08-14 10:44:14 +01:00
|
|
|
* The above copyright notice and this permission notice shall be included in
|
|
|
|
* all copies or substantial portions of the Software.
|
2016-02-14 09:27:41 +08:00
|
|
|
*
|
2019-08-14 10:44:14 +01:00
|
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
|
|
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
|
|
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
|
|
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
|
|
|
* IN THE SOFTWARE.
|
2016-02-14 09:27:41 +08:00
|
|
|
*/
|
|
|
|
|
2019-08-15 10:49:52 +01:00
|
|
|
#include "private-lib-core.h"
|
2020-08-27 15:37:14 +01:00
|
|
|
#include "private-lib-event-libs-libuv.h"
|
|
|
|
|
|
|
|
/*
 * Accessors for the libuv-specific private data hung off the generic
 * per-thread (pt) and per-wsi structures via their evlib_* pointers.
 */
#define pt_to_priv_uv(_pt) ((struct lws_pt_eventlibs_libuv *)(_pt)->evlib_pt)
#define wsi_to_priv_uv(_w) ((struct lws_wsi_eventlibs_libuv *)(_w)->evlib_wsi)
|
2016-02-14 09:27:41 +08:00
|
|
|
|
2018-03-19 16:37:37 +08:00
|
|
|
/*
 * uv_timer callback for the pt's "sul" (scheduled event) timer.
 *
 * Services any sul entries that have become ripe, and if more remain
 * pending, re-arms the single-shot timer for the next-due interval.
 * Takes the context lock then the pt lock, in that order.
 */
static void
lws_uv_sultimer_cb(uv_timer_t *timer
#if UV_VERSION_MAJOR == 0
		, int status
#endif
)
{
	/* recover the owning pt private struct from the embedded timer */
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(timer,
				struct lws_pt_eventlibs_libuv, sultimer);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);
	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	/* nonzero us means another sul is pending in us microseconds */
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS((uint64_t)us), 0);
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
|
|
|
|
|
2016-04-06 09:23:16 +08:00
|
|
|
/*
 * uv_idle callback: runs when the loop is otherwise idle.
 *
 * Flushes ripe rx-flow-controlled data, performs forced service for any
 * connection with pending buffered work, re-arms the sul timer, and then
 * stops the idle handle again once nobody needs forcing (so we don't
 * busy-spin the loop).
 */
static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
	    , int status
#endif
)
{
	/* recover the owning pt private struct from the embedded idle handle */
	struct lws_pt_eventlibs_libuv *ptpr = lws_container_of(handle,
					struct lws_pt_eventlibs_libuv, idle);
	struct lws_context_per_thread *pt = ptpr->pt;
	lws_usec_t us;

	lws_service_do_ripe_rxflow(pt);

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		/* -1 timeout means just do forced service */
		_lws_plat_service_forced_tsi(pt->context, pt->tid);

	/* account for sultimer */

	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
				    lws_now_usecs());
	if (us)
		uv_timer_start(&pt_to_priv_uv(pt)->sultimer, lws_uv_sultimer_cb,
			       LWS_US_TO_MS((uint64_t)us), 0);

	/* if there is nobody who needs service forcing, shut down idle */
	if (lws_service_adjust_timeout(pt->context, 1, pt->tid))
		uv_idle_stop(handle);

	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
|
|
|
|
|
2016-02-14 09:27:41 +08:00
|
|
|
/*
 * uv_poll callback for per-wsi fd events.
 *
 * Translates libuv poll results (status / revents) into an lws_pollfd and
 * hands it to lws_service_fd_tsi().  Any libuv error status (other than
 * UV_EAGAIN, which is ignored) is mapped to LWS_POLLHUP.  After servicing,
 * the idle handle is started so deferred work can run when the loop quiesces.
 */
static void
lws_io_cb(uv_poll_t *watcher, int status, int revents)
{
	struct lws *wsi = (struct lws *)((uv_handle_t *)watcher)->data;
	struct lws_context *context = wsi->a.context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_pollfd eventfd;

	lws_context_lock(pt->context, __func__);
	lws_pt_lock(pt, __func__);

	if (pt->is_destroyed)
		goto bail;

	if (!ptpriv->thread_valid) {
		/* record the thread id that gave us our first event */
		ptpriv->uv_thread = uv_thread_self();
		ptpriv->thread_valid = 1;
	}

#if defined(WIN32) || defined(_WIN32)
	eventfd.fd = watcher->socket;
#else
	eventfd.fd = watcher->io_watcher.fd;
#endif
	eventfd.events = 0;
	eventfd.revents = 0;

	if (status < 0) {
		/*
		 * At this point status will be an UV error, like UV_EBADF,
		 * we treat all errors as LWS_POLLHUP
		 *
		 * You might want to return; instead of servicing the fd in
		 * some cases */
		if (status == UV_EAGAIN)
			goto bail;

		eventfd.events |= LWS_POLLHUP;
		eventfd.revents |= LWS_POLLHUP;
	} else {
		if (revents & UV_READABLE) {
			eventfd.events |= LWS_POLLIN;
			eventfd.revents |= LWS_POLLIN;
		}
		if (revents & UV_WRITABLE) {
			eventfd.events |= LWS_POLLOUT;
			eventfd.revents |= LWS_POLLOUT;
		}
	}

	/* drop the locks before servicing: the service path takes them */
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);

	lws_service_fd_tsi(context, &eventfd, wsi->tsi);

	if (pt->destroy_self) {
		lws_context_destroy(pt->context);
		return;
	}

	uv_idle_start(&ptpriv->idle, lws_uv_idle);
	return;

bail:
	lws_pt_unlock(pt);
	lws_context_unlock(pt->context);
}
|
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
/*
 * This does not actually stop the event loop.  The reason is we have to pass
 * libuv handle closures through its event loop.  So this tries to close all
 * wsi, and set a flag; when all the wsi closures are finalized then we
 * actually stop the libuv event loops.
 */
static void
lws_libuv_stop(struct lws_context *context)
{
	/* idempotent: only the first request is acted on */
	if (context->requested_stop_internal_loops) {
		lwsl_cx_err(context, "ignoring");
		return;
	}

	context->requested_stop_internal_loops = 1;
	lws_context_destroy(context);
}
|
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
/*
 * uv_signal callback: dispatches to a user-provided signal callback if one
 * was set at context creation, otherwise logs and begins the internal-loop
 * shutdown sequence via lws_libuv_stop().
 */
static void
lws_uv_signal_handler(uv_signal_t *watcher, int signum)
{
	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
							watcher->data;

	if (pt->context->eventlib_signal_cb) {
		pt->context->eventlib_signal_cb((void *)watcher, signum);

		return;
	}

	lwsl_cx_err(pt->context, "internal signal handler caught signal %d",
		    signum);
	lws_libuv_stop(pt->context);
}
|
|
|
|
|
2020-11-16 19:32:58 +00:00
|
|
|
/*
 * Mark this pt's event loop as no longer used, and account for it in the
 * context-wide count of undestroyed service threads.  When the last pt is
 * finalized, begin vhost destruction; for a foreign loop with no remaining
 * static asset handles, also resume context destruction.
 *
 * Returns 1 if context destruction was resumed and the caller must return
 * cleanly to the foreign loop, else 0.
 */
static int
lws_uv_finalize_pt(struct lws_context_per_thread *pt)
{
	pt->event_loop_pt_unused = 1;

	lwsl_cx_info(pt->context, "thr %d", (int)(pt - pt->context->pt));

	lws_context_lock(pt->context, __func__);

	if (!--pt->context->undestroyed_threads) {
		struct lws_vhost *vh = pt->context->vhost_list;

		/*
		 * eventually, we emptied all the pts...
		 */

		lwsl_cx_debug(pt->context, "all pts down now");

		/* protocols may have initialized libuv objects */

		while (vh) {
			lws_vhost_destroy1(vh);
			vh = vh->vhost_next;
		}

		if (!pt->count_event_loop_static_asset_handles &&
		    pt->event_loop_foreign) {
			lwsl_cx_info(pt->context, "resuming context_destroy");
			lws_context_unlock(pt->context);
			lws_context_destroy(pt->context);
			/*
			 * For foreign, we're being called from the foreign
			 * thread context the loop is associated with, we must
			 * return to it cleanly even though we are done with it.
			 */
			return 1;
		}
	} else
		lwsl_cx_debug(pt->context, "still %d undestroyed",
			      pt->context->undestroyed_threads);

	lws_context_unlock(pt->context);

	return 0;
}
|
|
|
|
|
2021-01-19 16:55:03 +00:00
|
|
|
// static void lws_uv_walk_cb(uv_handle_t *handle, void *arg)
|
|
|
|
// {
|
|
|
|
// if (!uv_is_closing(handle))
|
|
|
|
// lwsl_err("%s: handle %p still alive on loop\n", __func__, handle);
|
|
|
|
// }
|
|
|
|
|
|
|
|
|
context deprecation
1) This makes lwsws run a parent process with the original permissions.
But this process is only able to respond to SIGHUP, it doesn't do anything
else.
2) You can send this parent process a SIGHUP now to cause it to
- close listening sockets in existing lwsws processes
- mark those processes as to exit when the number of active connections
on the falls to zero
- spawn a fresh child process from scratch, using latest configuration
file content, latest plugins, etc. It can now reopen listening sockets
if it chooses to, or open different listen ports or whatever.
Notes:
1) lws_context_destroy() has been split into two pieces... the reason for
the split is the first part closes the per-vhost protocols, but since
they may have created libuv objects in the per-vhost protocol storage,
these cannot be freed until after the loop has been run.
That's the purpose of the second part of the context destruction,
lws_context_destroy2().
For compatibility, if you are not using libuv, the first part calls the
second part. However if you are using libuv, you must now call the
second part from your own main.c after the first part.
2016-12-16 07:37:43 +08:00
|
|
|
/* signals lws installs internal uv_signal handlers for (see signal setup) */
static const int sigs[] = { SIGINT, SIGTERM, SIGSEGV, SIGFPE, SIGHUP };
|
2016-02-14 09:27:41 +08:00
|
|
|
|
2018-03-30 20:22:46 +08:00
|
|
|
/*
 * Closing Phase 2: Close callback for a static UV asset
 *
 * Decrements the static-asset handle refcount; when both the static assets
 * and the dynamic (per-wsi) handles have all gone, finalizes the pt and,
 * for internal loops, destroys the context.
 */
static void
lws_uv_close_cb_sa(uv_handle_t *handle)
{
	struct lws_context_per_thread *pt =
			LWS_UV_REFCOUNT_STATIC_HANDLE_TO_PT(handle);
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_context *context = pt->context;
#if !defined(LWS_WITH_NO_LOGS) && defined(_DEBUG)
	/* tsi only consumed by logging below */
	int tsi = (int)(pt - &context->pt[0]);
#endif

	lwsl_cx_info(context, "thr %d: sa left %d: dyn left: %d (rk %d)",
		     tsi,
		     pt->count_event_loop_static_asset_handles - 1,
		     ptpriv->extant_handles,
		     context->requested_stop_internal_loops);

	/* any static assets left? */

	if (LWS_UV_REFCOUNT_STATIC_HANDLE_DESTROYED(handle) ||
	    ptpriv->extant_handles)
		return;

	/*
	 * So we believe nothing of ours left on the loop.  Let's sanity
	 * check it to count what's still on the loop
	 */

	// uv_walk(pt_to_priv_uv(pt)->io_loop, lws_uv_walk_cb, NULL);

	/*
	 * That's it... all wsi were down, and now every
	 * static asset lws had a UV handle for is down.
	 *
	 * Stop the loop so we can get out of here.
	 */

	lwsl_cx_info(context, "thr %d: seen final static handle gone", tsi);

	if (!pt->event_loop_foreign)
		lws_context_destroy(context);

	lws_uv_finalize_pt(pt);

	lwsl_cx_info(context, "all done");
}
|
|
|
|
|
|
|
|
/*
 * These must be called by protocols that want to use libuv objects directly...
 *
 * .... when the libuv object is created...
 *
 * Registers the handle against the given pt so lws can account for it
 * during event-loop teardown.
 */
void
lws_libuv_static_refcount_add(uv_handle_t *h, struct lws_context *context,
			      int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];

	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(h, pt);
}
|
|
|
|
|
|
|
|
/*
 * ... and in the close callback when the object is closed.
 *
 * Thin wrapper: runs the static-asset close accounting for the handle.
 */
void
lws_libuv_static_refcount_del(uv_handle_t *h)
{
	lws_uv_close_cb_sa(h);
}
|
|
|
|
|
2020-01-02 08:32:23 +00:00
|
|
|
void
|
2018-04-29 10:44:36 +08:00
|
|
|
lws_libuv_stop_without_kill(const struct lws_context *context, int tsi)
|
|
|
|
{
|
2020-08-27 15:37:14 +01:00
|
|
|
if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
|
|
|
|
uv_stop(pt_to_priv_uv(&context->pt[tsi])->io_loop);
|
2018-04-29 10:44:36 +08:00
|
|
|
}
|
2016-05-04 08:27:56 +08:00
|
|
|
|
2020-01-02 08:32:23 +00:00
|
|
|
uv_loop_t *
|
2018-04-29 10:44:36 +08:00
|
|
|
lws_uv_getloop(struct lws_context *context, int tsi)
|
|
|
|
{
|
2020-08-27 15:37:14 +01:00
|
|
|
if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
|
|
|
|
return pt_to_priv_uv(&context->pt[tsi])->io_loop;
|
2016-05-04 08:27:56 +08:00
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
return NULL;
|
2016-02-14 09:27:41 +08:00
|
|
|
}
|
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
/*
 * Return nonzero if the wsi's read poll watcher exists and libuv reports
 * it as active, else 0.
 */
int
lws_libuv_check_watcher_active(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	if (!h)
		return 0;

	return uv_is_active(h);
}
|
2016-12-16 08:41:16 +08:00
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
/*
 * Event-lib op: context init.  Records the user's signal callback and
 * points each pt's sigint watcher back at the context.  Always returns 0.
 */
static int
elops_init_context_uv(struct lws_context *context,
		      const struct lws_context_creation_info *info)
{
	int n;

	context->eventlib_signal_cb = info->signal_cb;

	for (n = 0; n < context->count_threads; n++)
		pt_to_priv_uv(&context->pt[n])->w_sigint.context = context;

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Event-lib op: first phase of context destruction.
 *
 * For each internally-owned loop, spins uv_run(UV_RUN_NOWAIT) up to a
 * bounded number of iterations to let pending handle closures drain.
 * Returns nonzero (meaning "caller should run destroy phase 2") when the
 * loops are internal rather than foreign.
 */
static int
elops_destroy_context1_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, m = 0;

	for (n = 0; n < context->count_threads; n++) {
		/* cap the drain iterations so we cannot spin forever */
		int budget = 10000;
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign) {

			while (budget-- &&
			       (m = uv_run(pt_to_priv_uv(pt)->io_loop,
					   UV_RUN_NOWAIT)))
				;
			if (m)
				lwsl_cx_info(context, "tsi %d: unclosed", n);

		}
	}

	/* call destroy2 if internal loop */
	return !context->pt[0].event_loop_foreign;
}
|
|
|
|
|
|
|
|
/*
 * Event-lib op: second phase of context destruction.
 *
 * For each internally-owned loop: either request it to stop (normal path)
 * or, if finalize-after-stop was already flagged, close and free the loop
 * itself.  Returns nonzero if any internal loop was found.
 */
static int
elops_destroy_context2_uv(struct lws_context *context)
{
	struct lws_context_per_thread *pt;
	int n, internal = 0;

	for (n = 0; n < context->count_threads; n++) {
		pt = &context->pt[n];

		/* only for internal loops... */

		if (!pt->event_loop_foreign && pt_to_priv_uv(pt)->io_loop) {
			internal = 1;
			if (!context->evlib_finalize_destroy_after_int_loops_stop)
				uv_stop(pt_to_priv_uv(pt)->io_loop);
			else {
#if UV_VERSION_MAJOR > 0
				uv_loop_close(pt_to_priv_uv(pt)->io_loop);
#endif
				lws_free_set_NULL(pt_to_priv_uv(pt)->io_loop);
			}
		}
	}

	return internal;
}
|
|
|
|
|
|
|
|
/*
 * Event-lib op: logical close of a wsi.
 *
 * Returns 0 when there is nothing for libuv to do (invalid socket, not a
 * raw-file role, and no watcher allocated), in which case the generic close
 * path completes synchronously.  Otherwise stops listener/pipe polling,
 * hands the handle to libuv for asynchronous closure, and returns 1 so the
 * generic close is deferred until the uv close callback runs.
 */
static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	if (!lws_socket_is_valid(wsi->desc.sockfd) &&
	    wsi->role_ops && strcmp(wsi->role_ops->name, "raw-file") &&
	    !wsi_to_priv_uv(wsi)->w_read.pwatcher)
		return 0;

	if (wsi->listener || wsi->event_pipe) {
		lwsl_wsi_debug(wsi, "%d %d stop listener / pipe poll",
			       wsi->listener,
			       wsi->event_pipe);
		if (wsi_to_priv_uv(wsi)->w_read.pwatcher)
			uv_poll_stop(wsi_to_priv_uv(wsi)->w_read.pwatcher);
	}
	lwsl_wsi_debug(wsi, "lws_libuv_closehandle");
	/*
	 * libuv has to do his own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}
|
|
|
|
|
|
|
|
/*
 * Event-lib op: gate a new client connect attempt.
 *
 * Returns 1 (not ok yet) while a previous libuv watcher for this wsi is
 * still active and waiting to close, else 0 (ok to proceed).
 */
static int
elops_check_client_connect_ok_uv(struct lws *wsi)
{
	if (!lws_libuv_check_watcher_active(wsi))
		return 0;

	lwsl_wsi_warn(wsi, "Waiting for libuv watcher to close");

	return 1;
}
|
|
|
|
|
|
|
|
/*
 * uv close callback for the "manual" close path: the fd to close was
 * stashed in handle->data by elops_close_handle_manually_uv().  Closes
 * the fd and frees the heap-allocated poll handle.
 */
static void
lws_libuv_closewsi_m(uv_handle_t* handle)
{
	lws_sockfd_type sockfd = (lws_sockfd_type)(lws_intptr_t)handle->data;

	lwsl_debug("%s: sockfd %d\n", __func__, sockfd);
	compatible_close(sockfd);
	lws_free(handle);
}
|
2018-09-02 19:23:40 +08:00
|
|
|
|
2018-10-13 11:59:04 +08:00
|
|
|
/*
 * Event-lib op: manually close a wsi's uv handle and its fd, detaching
 * both from the wsi first so the normal close path won't touch them again.
 */
static void
elops_close_handle_manually_uv(struct lws *wsi)
{
	uv_handle_t *h = (uv_handle_t *)wsi_to_priv_uv(wsi)->w_read.pwatcher;

	lwsl_wsi_debug(wsi, "lws_libuv_closehandle");

	/*
	 * the "manual" variant only closes the handle itself and the
	 * related fd.  handle->data is the fd.
	 */
	h->data = (void *)(lws_intptr_t)wsi->desc.sockfd;

	/*
	 * We take responsibility to close / destroy these now.
	 * Remove any trace from the wsi.
	 */

	wsi->desc.sockfd = LWS_SOCK_INVALID;
	wsi_to_priv_uv(wsi)->w_read.pwatcher = NULL;
	wsi->told_event_loop_closed = 1;

	uv_close(h, lws_libuv_closewsi_m);
}
|
|
|
|
|
2018-10-13 11:59:04 +08:00
|
|
|
/*
 * Event-lib op: attach a newly-accepted (or adopted) wsi to the loop.
 *
 * Allocates the per-wsi uv_poll handle and initializes it against either
 * the wsi's file fd or socket fd depending on the role.  On failure the
 * handle is freed and cleared and -1 is returned; on success the dynamic
 * handle count is bumped and 0 is returned.
 */
static int
elops_accept_uv(struct lws *wsi)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;
	int n;

	if (!ptpriv->thread_valid) {
		/* record the thread id that gave us our first event */
		ptpriv->uv_thread = uv_thread_self();
		ptpriv->thread_valid = 1;
	}

	w_read->context = wsi->a.context;

	w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
	if (!w_read->pwatcher)
		return -1;

	if (wsi->role_ops->file_handle)
		n = uv_poll_init(pt_to_priv_uv(pt)->io_loop, w_read->pwatcher,
				 (int)(lws_intptr_t)wsi->desc.filefd);
	else
		n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
					w_read->pwatcher, wsi->desc.sockfd);

	if (n) {
		/* init failed: unwind the allocation so close paths skip it */
		lwsl_wsi_err(wsi, "uv_poll_init failed %d, sockfd=%p", n,
			     (void *)(lws_intptr_t)wsi->desc.sockfd);
		lws_free(w_read->pwatcher);
		w_read->pwatcher = NULL;
		return -1;
	}

	/* let lws_io_cb recover the wsi from the handle */
	((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;

	ptpriv->extant_handles++;

	lwsl_wsi_debug(wsi, "thr %d: sa left %d: dyn left: %d",
		       (int)(pt - &pt->context->pt[0]),
		       pt->count_event_loop_static_asset_handles,
		       ptpriv->extant_handles);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Event-lib op: change the wsi's read/write event interest.
 *
 * flags must combine exactly one of LWS_EV_START / LWS_EV_STOP with at
 * least one of LWS_EV_READ / LWS_EV_WRITE (asserted below).  Translates
 * to uv_poll_start / uv_poll_stop and caches the resulting event mask in
 * w->actual_events.  No-op if the loop or watcher isn't set up yet, or the
 * event loop was already told this wsi closed.
 */
static void
elops_io_uv(struct lws *wsi, unsigned int flags)
{
	struct lws_context_per_thread *pt = &wsi->a.context->pt[(int)wsi->tsi];
	struct lws_io_watcher_libuv *w = &(wsi_to_priv_uv(wsi)->w_read);
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_wsi_debug(wsi, "%d", flags);

	/* w->context is set after the loop is initialized */

	if (!pt_to_priv_uv(pt)->io_loop || !w->context) {
		lwsl_wsi_info(wsi, "no io loop yet");
		return;
	}

	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_wsi_err(wsi, "assert: flags %d", flags);
		assert(0);
	}

	if (!w->pwatcher || wsi->told_event_loop_closed) {
		lwsl_wsi_info(wsi, "no watcher");

		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		/* stopping the last interest stops polling entirely */
		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->pwatcher);
		else
			uv_poll_start(w->pwatcher, current_events, lws_io_cb);
	}

	w->actual_events = (uint8_t)current_events;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
elops_init_vhost_listen_wsi_uv(struct lws *wsi)
|
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt;
|
2021-01-19 16:55:03 +00:00
|
|
|
struct lws_pt_eventlibs_libuv *ptpriv;
|
2020-08-30 09:30:36 +01:00
|
|
|
struct lws_io_watcher_libuv *w_read;
|
2018-04-29 10:44:36 +08:00
|
|
|
int n;
|
|
|
|
|
|
|
|
if (!wsi)
|
|
|
|
return 0;
|
2020-08-30 09:30:36 +01:00
|
|
|
|
|
|
|
w_read = &wsi_to_priv_uv(wsi)->w_read;
|
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
if (w_read->context)
|
2018-04-29 10:44:36 +08:00
|
|
|
return 0;
|
|
|
|
|
fakewsi: replace with smaller substructure
Currently we always reserve a fakewsi per pt so events that don't have a related actual
wsi, like vhost-protocol-init or vhost cert init via protocol callback can make callbacks
that look reasonable to user protocol handler code expecting a valid wsi every time.
This patch splits out stuff that user callbacks often unconditionally expect to be in
a wsi, like context pointer, vhost pointer etc into a substructure, which is composed
into struct lws at the top of it. Internal references (struct lws is opaque, so there
are only internal references) are all updated to go via the substructre, the compiler
should make that a NOP.
Helpers are added when fakewsi is used and referenced.
If not PLAT_FREERTOS, we continue to provide a full fakewsi in the pt as before,
although the helpers improve consistency by zeroing down the substructure. There is
a huge amount of user code out there over the last 10 years that did not always have
the minimal examples to follow, some of it does some unexpected things.
If it is PLAT_FREERTOS, that is a newer thing in lws and users have the benefit of
being able to follow the minimal examples' approach. For PLAT_FREERTOS we don't
reserve the fakewsi in the pt any more, saving around 800 bytes. The helpers then
create a struct lws_a (the substructure) on the stack, zero it down (but it is only
like 4 pointers) and prepare it with whatever we know like the context.
Then we cast it to a struct lws * and use it in the user protocol handler call.
In this case, the remainder of the struct lws is undefined. However the amount of
old protocol handlers that might touch things outside of the substructure in
PLAT_FREERTOS is very limited compared to legacy lws user code and the saving is
significant on constrained devices.
User handlers should not be touching everything in a wsi every time anyway, there
are several cases where there is no valid wsi to do the call with. Dereference of
things outside the substructure should only happen when the callback reason shows
there is a valid wsi bound to the activity (as in all the minimal examples).
2020-07-19 08:33:46 +01:00
|
|
|
pt = &wsi->a.context->pt[(int)wsi->tsi];
|
2021-01-19 16:55:03 +00:00
|
|
|
ptpriv = pt_to_priv_uv(pt);
|
|
|
|
if (!ptpriv->io_loop)
|
2018-04-29 10:44:36 +08:00
|
|
|
return 0;
|
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
w_read->context = wsi->a.context;
|
2018-10-13 11:59:04 +08:00
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
w_read->pwatcher = lws_malloc(sizeof(*w_read->pwatcher), "uvh");
|
|
|
|
if (!w_read->pwatcher)
|
2018-10-13 11:59:04 +08:00
|
|
|
return -1;
|
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
n = uv_poll_init_socket(pt_to_priv_uv(pt)->io_loop,
|
|
|
|
w_read->pwatcher, wsi->desc.sockfd);
|
2018-04-29 10:44:36 +08:00
|
|
|
if (n) {
|
2021-06-28 05:05:57 +01:00
|
|
|
lwsl_wsi_err(wsi, "uv_poll_init failed %d, sockfd=%p", n,
|
|
|
|
(void *)(lws_intptr_t)wsi->desc.sockfd);
|
2018-04-29 10:44:36 +08:00
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
2018-10-13 11:59:04 +08:00
|
|
|
|
2021-01-19 16:55:03 +00:00
|
|
|
ptpriv->extant_handles++;
|
|
|
|
|
2021-06-28 05:05:57 +01:00
|
|
|
lwsl_wsi_debug(wsi, "thr %d: sa left %d: dyn left: %d",
|
|
|
|
(int)(pt - &pt->context->pt[0]),
|
|
|
|
pt->count_event_loop_static_asset_handles,
|
|
|
|
ptpriv->extant_handles);
|
2021-01-19 16:55:03 +00:00
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
((uv_handle_t *)w_read->pwatcher)->data = (void *)wsi;
|
2018-10-13 11:59:04 +08:00
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
elops_io_uv(wsi, LWS_EV_START | LWS_EV_READ);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
elops_run_pt_uv(struct lws_context *context, int tsi)
|
|
|
|
{
|
2020-08-27 15:37:14 +01:00
|
|
|
if (pt_to_priv_uv(&context->pt[tsi])->io_loop)
|
|
|
|
uv_run(pt_to_priv_uv(&context->pt[tsi])->io_loop, 0);
|
2018-04-29 10:44:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Tear down the libuv resources belonging to one service thread (pt).
 *
 * May be called more than once during context destruction: the first call
 * marks the pt as processed and starts async close of the handles lws
 * itself created (signal watchers, sultimer, idle); subsequent calls only
 * stop the loop, and only when it is a loop lws owns rather than a
 * foreign one supplied by the user.
 */
static void
elops_destroy_pt_uv(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	int m, ns;

	if (!lws_check_opt(context->options, LWS_SERVER_OPTION_LIBUV))
		return;

	/* nothing was ever initialized for this pt */
	if (!ptpriv->io_loop)
		return;

	if (pt->event_loop_destroy_processing_done) {
		/* second pass: just stop a loop we own */
		if (!pt->event_loop_foreign) {
			lwsl_warn("%s: stopping event loop\n", __func__);
			uv_stop(pt_to_priv_uv(pt)->io_loop);
		}
		return;
	}

	pt->event_loop_destroy_processing_done = 1;
	// lwsl_cx_debug(context, "%d", tsi);

	if (!pt->event_loop_foreign) {

		uv_signal_stop(&pt_to_priv_uv(pt)->w_sigint.watcher);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			/*
			 * NOTE(review): presumably only the first two
			 * entries of sigs[] apply then -- confirm against
			 * the sigs[] definition
			 */
			ns = 2;

		/* async-close each signal watcher we started */
		for (m = 0; m < ns; m++) {
			uv_signal_stop(&pt_to_priv_uv(pt)->signals[m]);
			uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->signals[m],
				 lws_uv_close_cb_sa);
		}
	} else
		/* foreign loop: its owner is responsible for its handles */
		lwsl_cx_debug(context, "not closing pt signals");

	/* static-asset handles lws created on this pt */
	uv_timer_stop(&pt_to_priv_uv(pt)->sultimer);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->sultimer, lws_uv_close_cb_sa);

	uv_idle_stop(&pt_to_priv_uv(pt)->idle);
	uv_close((uv_handle_t *)&pt_to_priv_uv(pt)->idle, lws_uv_close_cb_sa);
}
|
|
|
|
|
2021-06-21 08:50:13 +01:00
|
|
|
/*
 * lws_dll2 iterator callback: attach one vhost listen wsi to the uv loop.
 * Returning -1 stops the iteration on failure.
 */
static int
elops_listen_init_uv(struct lws_dll2 *d, void *user)
{
	int ret = 0;
#if defined(LWS_WITH_SERVER)
	struct lws *lwsi = lws_container_of(d, struct lws, listen_list);

	if (elops_init_vhost_listen_wsi_uv(lwsi) == -1)
		ret = -1;
#endif

	return ret;
}
|
|
|
|
|
2018-04-29 10:44:36 +08:00
|
|
|
/*
|
|
|
|
* This needs to be called after vhosts have been defined.
|
|
|
|
*
|
|
|
|
* If later, after server start, another vhost is added, this must be
|
|
|
|
* called again to bind the vhost
|
|
|
|
*/
|
|
|
|
|
2020-01-02 08:32:23 +00:00
|
|
|
/*
 * Bind pt tsi to a libuv loop: adopt the foreign loop passed in _loop if
 * any, else allocate and init one of our own, then hook up the idle
 * handle, signal watchers (own loop only) and vhost listen wsis.
 *
 * Safe to call again after more vhosts are added; the already-initialized
 * case only re-runs the listen-wsi attach.
 *
 * Returns 0 on success, -1 on OOM, 1 if libuv is too old.
 */
int
elops_init_pt_uv(struct lws_context *context, void *_loop, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
	int status = 0, n, ns, first = 1;
	uv_loop_t *loop = (uv_loop_t *)_loop;

	ptpriv->pt = pt;

	if (!ptpriv->io_loop) {
		/* first time through for this pt: adopt or create a loop */
		if (!loop) {
			loop = lws_malloc(sizeof(*loop), "libuv loop");
			if (!loop) {
				lwsl_cx_err(context, "OOM");

				return -1;
			}
#if UV_VERSION_MAJOR > 0
			uv_loop_init(loop);
#else
			lwsl_cx_err(context, "This libuv is too old to work...");

			return 1;
#endif
			pt->event_loop_foreign = 0;
		} else {
			lwsl_cx_notice(context, " Using foreign event loop...");
			pt->event_loop_foreign = 1;
		}

		ptpriv->io_loop = loop;
		uv_idle_init(loop, &ptpriv->idle);
		LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->idle, pt);
		uv_idle_start(&ptpriv->idle, lws_uv_idle);

		ns = LWS_ARRAY_SIZE(sigs);
		if (lws_check_opt(context->options,
				  LWS_SERVER_OPTION_UV_NO_SIGSEGV_SIGFPE_SPIN))
			ns = 2;

		if (!pt->event_loop_foreign) {
			/* we own the loop, so we service the signals too */
			assert(ns <= (int)LWS_ARRAY_SIZE(ptpriv->signals));
			for (n = 0; n < ns; n++) {
				uv_signal_init(loop, &ptpriv->signals[n]);
				LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(
						&ptpriv->signals[n], pt);
				ptpriv->signals[n].data = pt;
				uv_signal_start(&ptpriv->signals[n],
						lws_uv_signal_handler, sigs[n]);
			}
		}
	} else
		first = 0;

	/*
	 * Initialize the accept wsi read watcher with all the listening
	 * sockets and register a callback for read operations
	 *
	 * We have to do it here because the uv loop(s) are not
	 * initialized until after context creation.
	 */
	lws_vhost_foreach_listen_wsi(context, context, elops_listen_init_uv);

	if (!first)
		return status;

	/* once per pt: the timer that drives scheduled (sul) events */
	uv_timer_init(ptpriv->io_loop, &ptpriv->sultimer);
	LWS_UV_REFCOUNT_STATIC_HANDLE_NEW(&ptpriv->sultimer, pt);

	return status;
}
|
|
|
|
|
|
|
|
/*
 * uv_close() completion callback for a wsi's poll handle: finalize the
 * wsi close under the pt lock, free the handle memory we allocated, and
 * if this was the last handle on a pt whose internal loops were asked to
 * stop, finalize the pt.
 */
static void
lws_libuv_closewsi(uv_handle_t* handle)
{
	struct lws *wsi = (struct lws *)handle->data;
	struct lws_context *context = lws_get_context(wsi);
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
#if defined(LWS_WITH_SERVER)
	/* 1 = closing a deprecated-context listen socket, 2 = it was the last */
	int lspd = 0;
#endif

	// lwsl_wsi_notice(wsi, "in");

	lws_context_lock(context, __func__);

	/*
	 * We get called back here for every wsi that closes
	 */

#if defined(LWS_WITH_SERVER)
	if (wsi->role_ops && !strcmp(wsi->role_ops->name, "listen") &&
	    wsi->a.context->deprecated) {
		lspd = 1;
		context->deprecation_pending_listen_close_count--;
		if (!context->deprecation_pending_listen_close_count)
			lspd = 2;
	}
#endif

	lws_pt_lock(pt, __func__);

	lwsl_wsi_info(wsi, "thr %d: sa left %d: dyn left: %d (rk %d)",
		      (int)(pt - &pt->context->pt[0]),
		      pt->count_event_loop_static_asset_handles,
		      ptpriv->extant_handles - 1,
		      context->requested_stop_internal_loops);

	__lws_close_free_wsi_final(wsi);
	/* must not underflow: every decrement pairs an earlier increment */
	assert(ptpriv->extant_handles);
	ptpriv->extant_handles--;
	lws_pt_unlock(pt);

	/* it's our job to close the handle finally */
	lws_free(handle);

#if defined(LWS_WITH_SERVER)
	if (lspd == 2 && context->deprecation_cb) {
		lwsl_cx_notice(context, "calling deprecation callback");
		context->deprecation_cb();
	}
#endif

	/*
	 * eventually, we closed all the wsi...
	 */

	if (context->requested_stop_internal_loops &&
	    !ptpriv->extant_handles &&
	    !pt->count_event_loop_static_asset_handles) {

		/*
		 * we closed everything on this pt
		 */

		/* unlock before finalize to avoid holding the context lock */
		lws_context_unlock(context);
		lws_uv_finalize_pt(pt);

		return;
	}

	lws_context_unlock(context);
}
|
|
|
|
|
|
|
|
void
|
|
|
|
lws_libuv_closehandle(struct lws *wsi)
|
|
|
|
{
|
2018-10-13 11:59:04 +08:00
|
|
|
uv_handle_t* handle;
|
2020-08-27 15:37:14 +01:00
|
|
|
struct lws_io_watcher_libuv *w_read = &wsi_to_priv_uv(wsi)->w_read;
|
2018-10-13 11:59:04 +08:00
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
if (!w_read->pwatcher)
|
2018-10-13 11:59:04 +08:00
|
|
|
return;
|
|
|
|
|
2021-06-28 05:05:57 +01:00
|
|
|
if (wsi->told_event_loop_closed)
|
2018-04-29 10:44:36 +08:00
|
|
|
return;
|
|
|
|
|
2021-06-28 05:05:57 +01:00
|
|
|
// lwsl_wsi_debug(wsi, "in");
|
2018-04-29 10:44:36 +08:00
|
|
|
|
|
|
|
wsi->told_event_loop_closed = 1;
|
|
|
|
|
2018-10-13 11:59:04 +08:00
|
|
|
/*
|
|
|
|
* The normal close path attaches the related wsi as the
|
|
|
|
* handle->data.
|
|
|
|
*/
|
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
handle = (uv_handle_t *)w_read->pwatcher;
|
2018-10-13 11:59:04 +08:00
|
|
|
|
|
|
|
/* ensure we can only do this once */
|
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
w_read->pwatcher = NULL;
|
2018-10-13 11:59:04 +08:00
|
|
|
|
|
|
|
uv_close(handle, lws_libuv_closewsi);
|
2018-04-29 10:44:36 +08:00
|
|
|
}
|
|
|
|
|
2021-08-10 06:35:59 +01:00
|
|
|
static int
|
|
|
|
elops_foreign_thread_uv(struct lws_context *cx, int tsi)
|
|
|
|
{
|
|
|
|
struct lws_context_per_thread *pt = &cx->pt[tsi];
|
|
|
|
struct lws_pt_eventlibs_libuv *ptpriv = pt_to_priv_uv(pt);
|
|
|
|
uv_thread_t th = uv_thread_self();
|
|
|
|
|
|
|
|
if (!ptpriv->thread_valid)
|
|
|
|
/*
|
|
|
|
* We can't judge it until we get the first event from the loop
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is the same thread that gave us the first event on this loop?
|
|
|
|
* Return 0 if so.
|
|
|
|
*/
|
|
|
|
|
|
|
|
return !uv_thread_equal(&th, &ptpriv->uv_thread);
|
|
|
|
}
|
|
|
|
|
2020-08-27 15:37:14 +01:00
|
|
|
/*
 * Event-loop operations vtable for the libuv backend; NULL entries are
 * ops this backend does not implement.  The evlib_size_* members tell
 * lws how much private per-object storage to allocate for us.
 */
static const struct lws_event_loop_ops event_loop_ops_uv = {
	/* name */			"libuv",
	/* init_context */		elops_init_context_uv,
	/* destroy_context1 */		elops_destroy_context1_uv,
	/* destroy_context2 */		elops_destroy_context2_uv,
	/* init_vhost_listen_wsi */	elops_init_vhost_listen_wsi_uv,
	/* init_pt */			elops_init_pt_uv,
	/* wsi_logical_close */		elops_wsi_logical_close_uv,
	/* check_client_connect_ok */	elops_check_client_connect_ok_uv,
	/* close_handle_manually */	elops_close_handle_manually_uv,
	/* accept */			elops_accept_uv,
	/* io */			elops_io_uv,
	/* run_pt */			elops_run_pt_uv,
	/* destroy_pt */		elops_destroy_pt_uv,
	/* destroy wsi */		NULL,
	/* foreign_thread */		elops_foreign_thread_uv,
	/* fake_POLLIN */		NULL,

	/* flags */			0,

	/* evlib_size_ctx */	sizeof(struct lws_context_eventlibs_libuv),
	/* evlib_size_pt */	sizeof(struct lws_pt_eventlibs_libuv),
	/* evlib_size_vh */	0,
	/* evlib_size_wsi */	sizeof(struct lws_io_watcher_libuv),
};
|
2020-08-27 15:37:14 +01:00
|
|
|
|
|
|
|
#if defined(LWS_WITH_EVLIB_PLUGINS)
LWS_VISIBLE
#endif
/*
 * Exported plugin descriptor: lets lws (statically, or via the evlib
 * plugin loader) discover this event backend and its ops table.
 */
const lws_plugin_evlib_t evlib_uv = {
	.hdr = {
		"libuv event loop",
		"lws_evlib_plugin",
		LWS_BUILD_HASH,
		LWS_PLUGIN_API_MAGIC
	},

	.ops = &event_loop_ops_uv
};
|
|
|
|
|