1
0
Fork 0
mirror of https://github.com/warmcat/libwebsockets.git synced 2025-03-09 00:00:04 +01:00

service: resurrect timeout_ms being -1 as return immediately

There's no longer any reason to come out of sleep for periodic service
which has been eliminated by lws_sul.

With event libs, there is no opportunity to do it anyway since their
event loop is atomic and makes callbacks and sleeps until it is stopped.

But some users are relying on the old poll() service loop as
glue that's difficult to replace.  So for now help that happen by
accepting a timeout_ms of -1 as meaning: sample poll() once and service
whatever is already pending, without any wait.
This commit is contained in:
Andy Green 2019-09-16 15:36:12 +01:00
parent d42f434a77
commit a97347a18e
8 changed files with 268 additions and 263 deletions

View file

@ -921,6 +921,10 @@ enum {
LWSRXFC_TRIMMED = 2,
};
int
_lws_plat_service_forced_tsi(struct lws_context *context, int tsi);
LWS_EXTERN int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len);

View file

@ -52,14 +52,9 @@ lws_ev_idle_cb(struct ev_loop *loop, struct ev_idle *handle, int revents)
/*
* is there anybody with pending stuff that needs service forcing?
*/
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(pt->context, -1, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
/* yes... come back again later */
return;
}
_lws_plat_service_forced_tsi(pt->context, pt->tid);
/* account for hrtimer */

View file

@ -55,7 +55,7 @@ lws_event_idle_timer_cb(int fd, short event, void *p)
*/
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(pt->context, -1, pt->tid);
_lws_plat_service_forced_tsi(pt->context, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
/* yes... come back again later */

View file

@ -59,14 +59,9 @@ lws_uv_idle(uv_idle_t *handle
/*
* is there anybody with pending stuff that needs service forcing?
*/
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(pt->context, -1, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
/* yes... come back again later */
return;
}
_lws_plat_service_forced_tsi(pt->context, pt->tid);
/* account for sultimer */

View file

@ -42,7 +42,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
struct lws_context_per_thread *pt;
lws_usec_t timeout_us;
int n = -1, m, c;
int n = -1, m, c, a = 0;
/* stay dead once we are dead */
@ -80,10 +80,10 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
}
if (timeout_ms < 0)
goto faked_service;
/* force a default timeout of 23 days */
timeout_ms = 2000000000;
timeout_ms = 0;
else
/* force a default timeout of 23 days */
timeout_ms = 2000000000;
timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
if (!pt->service_tid_detected) {
@ -101,95 +101,91 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
/*
* is there anybody with pending stuff that needs service forcing?
*/
if (!lws_service_adjust_timeout(context, 1, tsi)) {
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(context, -1, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(context, 1, pt->tid))
/* yes... come back again quickly */
timeout_us = 0;
}
if (lws_service_adjust_timeout(context, 1, tsi)) {
if (timeout_us) {
lws_usec_t us;
again:
a = 0;
if (timeout_us) {
lws_usec_t us;
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;
lws_pt_unlock(pt);
}
// n = poll(pt->fds, pt->fds_count, timeout_ms);
{
fd_set readfds, writefds, errfds;
struct timeval tv = { timeout_us / LWS_US_PER_SEC,
timeout_us % LWS_US_PER_SEC }, *ptv = &tv;
int max_fd = 0;
FD_ZERO(&readfds);
FD_ZERO(&writefds);
FD_ZERO(&errfds);
for (n = 0; n < (int)pt->fds_count; n++) {
pt->fds[n].revents = 0;
if (pt->fds[n].fd >= max_fd)
max_fd = pt->fds[n].fd;
if (pt->fds[n].events & LWS_POLLIN)
FD_SET(pt->fds[n].fd, &readfds);
if (pt->fds[n].events & LWS_POLLOUT)
FD_SET(pt->fds[n].fd, &writefds);
FD_SET(pt->fds[n].fd, &errfds);
lws_pt_unlock(pt);
}
n = select(max_fd + 1, &readfds, &writefds, &errfds, ptv);
n = 0;
// n = poll(pt->fds, pt->fds_count, timeout_ms);
{
fd_set readfds, writefds, errfds;
struct timeval tv = { timeout_us / LWS_US_PER_SEC,
timeout_us % LWS_US_PER_SEC }, *ptv = &tv;
int max_fd = 0;
FD_ZERO(&readfds);
FD_ZERO(&writefds);
FD_ZERO(&errfds);
#if defined(LWS_WITH_DETAILED_LATENCY)
/*
* so we can track how long it took before we actually read a POLLIN
* that was signalled when we last exited poll()
*/
if (context->detailed_latency_cb)
pt->ust_left_poll = lws_now_usecs();
#endif
for (m = 0; m < (int)pt->fds_count; m++) {
c = 0;
if (FD_ISSET(pt->fds[m].fd, &readfds)) {
pt->fds[m].revents |= LWS_POLLIN;
c = 1;
}
if (FD_ISSET(pt->fds[m].fd, &writefds)) {
pt->fds[m].revents |= LWS_POLLOUT;
c = 1;
}
if (FD_ISSET(pt->fds[m].fd, &errfds)) {
// lwsl_notice("errfds %d\n", pt->fds[m].fd);
pt->fds[m].revents |= LWS_POLLHUP;
c = 1;
for (n = 0; n < (int)pt->fds_count; n++) {
pt->fds[n].revents = 0;
if (pt->fds[n].fd >= max_fd)
max_fd = pt->fds[n].fd;
if (pt->fds[n].events & LWS_POLLIN)
FD_SET(pt->fds[n].fd, &readfds);
if (pt->fds[n].events & LWS_POLLOUT)
FD_SET(pt->fds[n].fd, &writefds);
FD_SET(pt->fds[n].fd, &errfds);
}
if (c)
n++;
n = select(max_fd + 1, &readfds, &writefds, &errfds, ptv);
n = 0;
#if defined(LWS_WITH_DETAILED_LATENCY)
/*
* so we can track how long it took before we actually read a POLLIN
* that was signalled when we last exited poll()
*/
if (context->detailed_latency_cb)
pt->ust_left_poll = lws_now_usecs();
#endif
for (m = 0; m < (int)pt->fds_count; m++) {
c = 0;
if (FD_ISSET(pt->fds[m].fd, &readfds)) {
pt->fds[m].revents |= LWS_POLLIN;
c = 1;
}
if (FD_ISSET(pt->fds[m].fd, &writefds)) {
pt->fds[m].revents |= LWS_POLLOUT;
c = 1;
}
if (FD_ISSET(pt->fds[m].fd, &errfds)) {
// lwsl_notice("errfds %d\n", pt->fds[m].fd);
pt->fds[m].revents |= LWS_POLLHUP;
c = 1;
}
if (c)
n++;
}
}
}
m = 0;
m = 0;
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
m |= !!pt->ws.rx_draining_ext_list;
#endif
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
m |= !!pt->ws.rx_draining_ext_list;
#endif
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
if (!m && !n)
return 0;
if (!m && !n)
return 0;
} else
a = 1;
faked_service:
m = lws_service_flag_pending(context, tsi);
if (m)
c = -1; /* unknown limit */
@ -216,5 +212,8 @@ faked_service:
n--;
}
if (a)
goto again;
return 0;
}

View file

@ -84,7 +84,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
lws_usec_t timeout_us = timeout_ms * LWS_US_PER_MS;
struct lws_context_per_thread *pt;
int n = -1, m, c;
int n = -1, m, c, a = 0;
//char buf;
/* stay dead once we are dead */
@ -95,7 +95,9 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
pt = &context->pt[tsi];
if (timeout_ms < 0)
goto faked_service;
timeout_ms = 0;
else
timeout_ms = 2000000000;
if (!pt->service_tid_detected) {
struct lws _lws;
@ -111,40 +113,34 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
/*
* is there anybody with pending stuff that needs service forcing?
*/
if (!lws_service_adjust_timeout(context, 1, tsi)) {
lwsl_notice("%s: doing forced service\n", __func__);
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(context, -1, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(context, 1, pt->tid))
/* yes... come back again quickly */
timeout_us = 0;
}
if (lws_service_adjust_timeout(context, 1, tsi)) {
again:
a = 0;
if (timeout_us) {
lws_usec_t us;
if (timeout_us) {
lws_usec_t us;
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;
lws_pt_unlock(pt);
}
lws_pt_unlock(pt);
}
n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
m = 0;
m = 0;
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m = pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m = pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
if (/*!pt->ws.rx_draining_ext_list && */!m && !n) /* nothing to do */
return 0;
} else
a = 1;
if (/*!pt->ws.rx_draining_ext_list && */!m && !n) /* nothing to do */
return 0;
faked_service:
m = lws_service_flag_pending(context, tsi);
if (m)
c = -1; /* unknown limit */
@ -177,6 +173,9 @@ faked_service:
n--;
}
if (a)
goto again;
return 0;
}

View file

@ -31,14 +31,46 @@ lws_poll_listen_fd(struct lws_pollfd *fd)
return poll(fd, 1, 0);
}
LWS_EXTERN int
/*
 * _lws_plat_service_forced_tsi() - service already-pending fd events, no wait
 *
 * \param context: the lws context
 * \param tsi: service thread index selecting the per-thread struct
 *
 * Returns 0 on success, or -1 if servicing any fd failed fatally.
 *
 * Walks the pt's pollfd array and services every fd that already has
 * revents set, without entering poll()/select() at all.  Replaces the
 * old recursive "-1 timeout" call into _lws_plat_service_tsi().
 */
int
_lws_plat_service_forced_tsi(struct lws_context *context, int tsi)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
int m, n;
/* mark fds with buffered pending input as needing service
 * (NOTE(review): exact semantics of this helper not visible here —
 * presumably it ORs synthetic revents for buffered rx; confirm) */
lws_service_flag_pending(context, tsi);
/* any socket with events to service? */
for (n = 0; n < (int)pt->fds_count; n++) {
if (!pt->fds[n].revents)
continue;
m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
if (m < 0) {
lwsl_err("%s: lws_service_fd_tsi returned %d\n",
__func__, m);
return -1;
}
/* if something closed, retry this slot */
if (m)
n--;
}
/* drain any rx-flow-controlled input that became ripe */
lws_service_do_ripe_rxflow(pt);
return 0;
}
int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
volatile struct lws_foreign_thread_pollfd *ftp, *next;
volatile struct lws_context_per_thread *vpt;
struct lws_context_per_thread *pt;
lws_usec_t timeout_us;
int n = -1, m, c;
int n = -1, a = 0;
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
int m;
#endif
/* stay dead once we are dead */
@ -51,10 +83,10 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
lws_stats_bump(pt, LWSSTATS_C_SERVICE_ENTRY, 1);
if (timeout_ms < 0)
goto faked_service;
/* force a default timeout of 23 days */
timeout_ms = 2000000000;
timeout_ms = 0;
else
/* force a default timeout of 23 days */
timeout_ms = 2000000000;
timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
if (context->event_loop_ops->run_pt)
@ -75,129 +107,105 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
/*
* is there anybody with pending stuff that needs service forcing?
*/
if (!lws_service_adjust_timeout(context, 1, tsi)) {
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(context, -1, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(context, 1, pt->tid))
/* yes... come back again quickly */
timeout_us = 0;
}
if (lws_service_adjust_timeout(context, 1, tsi)) {
if (timeout_us) {
lws_usec_t us;
again:
a = 0;
if (timeout_us) {
lws_usec_t us;
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;
lws_pt_unlock(pt);
}
vpt->inside_poll = 1;
lws_memory_barrier();
n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
vpt->inside_poll = 0;
lws_memory_barrier();
#if defined(LWS_WITH_DETAILED_LATENCY)
/*
* so we can track how long it took before we actually read a
* POLLIN that was signalled when we last exited poll()
*/
if (context->detailed_latency_cb)
pt->ust_left_poll = lws_now_usecs();
#endif
/* Collision will be rare and brief. Spin until it completes */
while (vpt->foreign_spinlock)
;
/*
* At this point we are not inside a foreign thread pollfd
* change, and we have marked ourselves as outside the poll()
* wait. So we are the only guys that can modify the
* lws_foreign_thread_pollfd list on the pt. Drain the list
* and apply the changes to the affected pollfds in the correct
* order.
*/
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;
ftp = vpt->foreign_pfd_list;
//lwsl_notice("cleared list %p\n", ftp);
while (ftp) {
struct lws *wsi;
struct lws_pollfd *pfd;
next = ftp->next;
pfd = &vpt->fds[ftp->fd_index];
if (lws_socket_is_valid(pfd->fd)) {
wsi = wsi_from_fd(context, pfd->fd);
if (wsi)
__lws_change_pollfd(wsi, ftp->_and,
ftp->_or);
}
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
lws_memory_barrier();
lws_pt_unlock(pt);
}
vpt->inside_poll = 1;
lws_memory_barrier();
n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
vpt->inside_poll = 0;
lws_memory_barrier();
#if defined(LWS_WITH_DETAILED_LATENCY)
/*
* so we can track how long it took before we actually read a POLLIN
* that was signalled when we last exited poll()
*/
if (context->detailed_latency_cb)
pt->ust_left_poll = lws_now_usecs();
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
m = 0;
#endif
/* Collision will be rare and brief. Just spin until it completes */
while (vpt->foreign_spinlock)
;
/*
* At this point we are not inside a foreign thread pollfd change,
* and we have marked ourselves as outside the poll() wait. So we
* are the only guys that can modify the lws_foreign_thread_pollfd
* list on the pt. Drain the list and apply the changes to the
* affected pollfds in the correct order.
*/
lws_pt_lock(pt, __func__);
ftp = vpt->foreign_pfd_list;
//lwsl_notice("cleared list %p\n", ftp);
while (ftp) {
struct lws *wsi;
struct lws_pollfd *pfd;
next = ftp->next;
pfd = &vpt->fds[ftp->fd_index];
if (lws_socket_is_valid(pfd->fd)) {
wsi = wsi_from_fd(context, pfd->fd);
if (wsi)
__lws_change_pollfd(wsi, ftp->_and, ftp->_or);
}
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
lws_memory_barrier();
lws_pt_unlock(pt);
m = 0;
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
m |= !!pt->ws.rx_draining_ext_list;
m |= !!pt->ws.rx_draining_ext_list;
#endif
#if defined(LWS_WITH_TLS)
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
#endif
if (
if (
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
!m &&
!m &&
#endif
!n) { /* nothing to do */
lws_service_do_ripe_rxflow(pt);
!n) { /* nothing to do */
lws_service_do_ripe_rxflow(pt);
return 0;
}
faked_service:
m = lws_service_flag_pending(context, tsi);
if (m)
c = -1; /* unknown limit */
else
if (n < 0) {
if (LWS_ERRNO != LWS_EINTR)
return -1;
return 0;
} else
c = n;
/* any socket with events to service? */
for (n = 0; n < (int)pt->fds_count && c; n++) {
if (!pt->fds[n].revents)
continue;
c--;
m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
if (m < 0) {
lwsl_err("%s: lws_service_fd_tsi returned %d\n",
__func__, m);
return -1;
}
/* if something closed, retry this slot */
if (m)
n--;
}
} else
a = 1;
lws_service_do_ripe_rxflow(pt);
if (_lws_plat_service_forced_tsi(context, tsi))
return -1;
if (a)
goto again;
return 0;
}

View file

@ -28,6 +28,33 @@
#include "private-lib-core.h"
/*
 * _lws_plat_service_forced_tsi() - service already-pending fd events, no wait
 *
 * \param context: the lws context
 * \param tsi: service thread index selecting the per-thread struct
 *
 * Returns 0 on success, or -1 if servicing any fd failed fatally.
 *
 * Platform variant: identical loop to the unix version but without the
 * error log on a failed lws_service_fd_tsi().  Services every fd whose
 * revents are already set, without waiting in poll().
 */
int
_lws_plat_service_forced_tsi(struct lws_context *context, int tsi)
{
struct lws_context_per_thread *pt = &context->pt[tsi];
int m, n;
/* mark fds with buffered pending input as needing service
 * (NOTE(review): helper body not visible here — presumably it ORs
 * synthetic revents for buffered rx; confirm against caller) */
lws_service_flag_pending(context, tsi);
/* any socket with events to service? */
for (n = 0; n < (int)pt->fds_count; n++) {
if (!pt->fds[n].revents)
continue;
m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
if (m < 0)
return -1;
/* if something closed, retry this slot */
if (m)
n--;
}
/* drain any rx-flow-controlled input that became ripe */
lws_service_do_ripe_rxflow(pt);
return 0;
}
LWS_EXTERN int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
@ -60,27 +87,11 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
pt->service_tid_detected = 1;
}
if (timeout_ms < 0) {
if (lws_service_flag_pending(context, tsi)) {
/* any socket with events to service? */
for (n = 0; n < (int)pt->fds_count; n++) {
int m;
if (!pt->fds[n].revents)
continue;
m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
if (m < 0)
return -1;
/* if something closed, retry this slot */
if (m)
n--;
}
}
return 0;
}
/* force a default timeout of 23 days */
timeout_ms = 2000000000;
if (timeout_ms < 0)
timeout_ms = 0;
else
/* force a default timeout of 23 days */
timeout_ms = 2000000000;
timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
if (context->event_loop_ops->run_pt)
@ -116,14 +127,8 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
/*
* is there anybody with pending stuff that needs service forcing?
*/
if (!lws_service_adjust_timeout(context, 1, tsi)) {
/* -1 timeout means just do forced service */
_lws_plat_service_tsi(context, -1, pt->tid);
/* still somebody left who wants forced service? */
if (!lws_service_adjust_timeout(context, 1, pt->tid))
/* yes... come back again quickly */
timeout_us = 0;
}
if (!lws_service_adjust_timeout(context, 1, tsi))
_lws_plat_service_forced_tsi(context, tsi);
if (timeout_us) {
lws_usec_t us;