libuv: add idle processing to force service where needed
https://github.com/warmcat/libwebsockets/issues/485

Signed-off-by: Andy Green <andy@warmcat.com>
This commit is contained in:
parent
e8009155ba
commit
09998e3ad8
4 changed files with 45 additions and 3 deletions
30
lib/libuv.c
30
lib/libuv.c
|
@ -30,11 +30,36 @@ lws_feature_status_libuv(struct lws_context_creation_info *info)
|
|||
lwsl_notice("libuv support compiled in but disabled\n");
|
||||
}
|
||||
|
||||
static void
lws_uv_idle(uv_idle_t *handle)
{
	/*
	 * libuv idle-phase callback: runs once per loop iteration while the
	 * idle watcher is started (lws_io_cb starts it after servicing an fd).
	 * Its job is to perform "forced service" for any connections that
	 * still have pending work (e.g. buffered rx) that would not wake the
	 * poll loop by itself, and to stop itself once nothing is pending so
	 * the loop can block again.
	 */
	struct lws_context_per_thread *pt = container_of(handle,
					struct lws_context_per_thread, uv_idle);

	lwsl_debug("%s\n", __func__);

	/*
	 * is there anybody with pending stuff that needs service forcing?
	 */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		/* -1 timeout means just do forced service */
		lws_plat_service_tsi(pt->context, -1, pt->tid);
		/* still somebody left who wants forced service? */
		if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
			/* yes... come back again later */
			return;
	}

	/* there is nobody who needs service forcing, shut down idle */
	uv_idle_stop(handle);
}
|
||||
|
||||
static void
|
||||
lws_io_cb(uv_poll_t *watcher, int status, int revents)
|
||||
{
|
||||
struct lws_io_watcher *lws_io = container_of(watcher,
|
||||
struct lws_io_watcher, uv_watcher);
|
||||
struct lws *wsi = container_of(lws_io, struct lws, w_read);
|
||||
struct lws_context *context = lws_io->context;
|
||||
struct lws_pollfd eventfd;
|
||||
|
||||
|
@ -67,6 +92,8 @@ lws_io_cb(uv_poll_t *watcher, int status, int revents)
|
|||
}
|
||||
}
|
||||
lws_service_fd(context, &eventfd);
|
||||
|
||||
uv_idle_start(&context->pt[(int)wsi->tsi].uv_idle, lws_uv_idle);
|
||||
}
|
||||
|
||||
LWS_VISIBLE void
|
||||
|
@ -96,7 +123,7 @@ lws_uv_timeout_cb(uv_timer_t *timer)
|
|||
struct lws_context_per_thread, uv_timeout_watcher);
|
||||
|
||||
lwsl_debug("%s\n", __func__);
|
||||
/* do timeout check only */
|
||||
|
||||
lws_service_fd_tsi(pt->context, NULL, pt->tid);
|
||||
}
|
||||
|
||||
|
@ -117,6 +144,7 @@ lws_uv_initloop(struct lws_context *context, uv_loop_t *loop, int tsi)
|
|||
pt->ev_loop_foreign = 1;
|
||||
|
||||
pt->io_loop_uv = loop;
|
||||
uv_idle_init(loop, &pt->uv_idle);
|
||||
|
||||
if (pt->context->use_ev_sigint) {
|
||||
assert(ARRAY_SIZE(sigs) <= ARRAY_SIZE(pt->signals));
|
||||
|
|
|
@ -122,7 +122,7 @@ LWS_VISIBLE int
|
|||
lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
||||
{
|
||||
struct lws_context_per_thread *pt = &context->pt[tsi];
|
||||
int n, m, c;
|
||||
int n = -1, m, c;
|
||||
char buf;
|
||||
|
||||
/* stay dead once we are dead */
|
||||
|
@ -130,6 +130,9 @@ lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|||
if (!context || !context->vhost_list)
|
||||
return 1;
|
||||
|
||||
if (timeout_ms < 0)
|
||||
goto faked_service;
|
||||
|
||||
lws_libev_run(context, tsi);
|
||||
lws_libuv_run(context, tsi);
|
||||
|
||||
|
@ -139,7 +142,8 @@ lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|||
memset(&_lws, 0, sizeof(_lws));
|
||||
_lws.context = context;
|
||||
|
||||
context->service_tid_detected = context->vhost_list->protocols[0].callback(
|
||||
context->service_tid_detected =
|
||||
context->vhost_list->protocols[0].callback(
|
||||
&_lws, LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
|
||||
}
|
||||
context->service_tid = context->service_tid_detected;
|
||||
|
@ -158,6 +162,7 @@ lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|||
return 0;
|
||||
}
|
||||
|
||||
faked_service:
|
||||
m = lws_service_flag_pending(context, tsi);
|
||||
if (m)
|
||||
c = -1; /* unknown limit */
|
||||
|
|
|
@ -177,6 +177,9 @@ lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|||
}
|
||||
context->service_tid = context->service_tid_detected;
|
||||
|
||||
if (timeout_ms < 0)
|
||||
goto faked_service;
|
||||
|
||||
for (i = 0; i < pt->fds_count; ++i) {
|
||||
pfd = &pt->fds[i];
|
||||
|
||||
|
@ -236,6 +239,8 @@ lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|||
wsi->sock_send_blocking = 0;
|
||||
}
|
||||
|
||||
faked_service:
|
||||
|
||||
/* if someone faked their LWS_POLLIN, then go through all active fds */
|
||||
|
||||
if (lws_service_flag_pending(context, tsi)) {
|
||||
|
@ -254,6 +259,9 @@ lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (timeout_ms < 0)
|
||||
return 0;
|
||||
|
||||
/* otherwise just do the one... must be a way to improve that... */
|
||||
|
||||
return lws_service_fd_tsi(context, pfd, tsi);
|
||||
|
|
|
@ -598,6 +598,7 @@ struct lws_context_per_thread {
|
|||
uv_loop_t *io_loop_uv;
|
||||
uv_signal_t signals[8];
|
||||
uv_timer_t uv_timeout_watcher;
|
||||
uv_idle_t uv_idle;
|
||||
#endif
|
||||
#if defined(LWS_USE_LIBEV)
|
||||
struct lws_io_watcher w_accept;
|
||||
|
|
Loading…
Add table
Reference in a new issue