
unix: sul schedule rename internal api and call regardless of existing timeout

Pre-sul, checking the interval to the next pending scheduled event was expensive
and iterative, so the service avoided doing it if the wait was already 0.

With sul though, the internal "check" function also services ripe events and
removes them, and finding the interval to the next one is really cheap.

Rename the "check" function to __lws_sul_service_ripe() to make it clear it's
not just about returning the interval to the next pending one.  And call it
regardless of whether we already decided we are not going to wait in the poll.

After https://github.com/warmcat/libwebsockets/pull/1745
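
A minimal sketch of the servicing behaviour described above, paraphrased rather
than taken verbatim from the lws sources (the lws_dll2 helpers and the
lws_sorted_usec_list_t members .list, .us and .cb are assumed): ripe entries at
the head of the sorted list are unlinked and their callbacks fired, and the
return value is the interval to the next pending entry, or 0 if none remain.

/* sketch only, not the verbatim implementation */
lws_usec_t
__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow)
{
	while (lws_dll2_get_head(own)) {
		/* .list is always first member in lws_sorted_usec_list_t */
		lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
						lws_dll2_get_head(own);

		if (sul->us > usnow)
			/* head is not ripe: the list is sorted, so this is
			 * the interval to the next pending event */
			return sul->us - usnow;

		/* ripe: unlink it first, then fire its callback (which may
		 * reschedule it) */
		lws_dll2_remove(&sul->list);
		sul->cb(sul);
	}

	return 0; /* nothing pending */
}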
Andy Green 2019-11-01 12:35:31 +00:00
parent e949b20c00
commit 7fad06e9e0
9 changed files with 83 additions and 91 deletions

@@ -252,7 +252,7 @@ __lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
lws_usec_t us);
lws_usec_t
-__lws_sul_check(lws_dll2_owner_t *own, lws_usec_t usnow);
+__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow);
struct lws_timed_vh_protocol {
struct lws_timed_vh_protocol *next;

@@ -102,7 +102,7 @@ lws_sul_schedule(struct lws_context *context, int tsi,
}
lws_usec_t
-__lws_sul_check(lws_dll2_owner_t *own, lws_usec_t usnow)
+__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow)
{
while (lws_dll2_get_head(own)) {
/* .list is always first member in lws_sorted_usec_list_t */

@@ -29,7 +29,7 @@ lws_ev_hrtimer_cb(struct ev_loop *loop, struct ev_timer *watcher, int revents)
lws_usec_t us;
lws_pt_lock(pt, __func__);
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us) {
ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);
@@ -56,7 +56,7 @@ lws_ev_idle_cb(struct ev_loop *loop, struct ev_idle *handle, int revents)
/* account for hrtimer */
lws_pt_lock(pt, __func__);
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us) {
ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);

@@ -29,7 +29,7 @@ lws_event_hrtimer_cb(int fd, short event, void *p)
lws_usec_t us;
lws_pt_lock(pt, __func__);
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us) {
tv.tv_sec = us / LWS_US_PER_SEC;
tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);
@@ -70,7 +70,7 @@ lws_event_idle_timer_cb(int fd, short event, void *p)
/* account for hrtimer */
lws_pt_lock(pt, __func__);
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us) {
tv.tv_sec = us / LWS_US_PER_SEC;
tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);

@@ -33,7 +33,7 @@ lws_uv_sultimer_cb(uv_timer_t *timer
lws_usec_t us;
lws_pt_lock(pt, __func__);
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us)
uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
LWS_US_TO_MS(us), 0);
@@ -63,7 +63,7 @@ lws_uv_idle(uv_idle_t *handle
/* account for sultimer */
lws_pt_lock(pt, __func__);
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us)
uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
LWS_US_TO_MS(us), 0);

@@ -108,7 +108,7 @@ again:
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;

@@ -97,7 +97,7 @@ again:
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;

@@ -65,8 +65,8 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
volatile struct lws_foreign_thread_pollfd *ftp, *next;
volatile struct lws_context_per_thread *vpt;
struct lws_context_per_thread *pt;
-lws_usec_t timeout_us;
-int n = -1, a = 0;
+lws_usec_t timeout_us, us;
+int n = -1;
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
int m;
#endif
@@ -103,115 +103,107 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
pt->service_tid_detected = 1;
}
+us = lws_now_usecs();
+lws_pt_lock(pt, __func__);
+/*
+ * service ripe scheduled events, and limit wait to next expected one
+ */
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, us);
+if (us && us < timeout_us)
+timeout_us = us;
+lws_pt_unlock(pt);
/*
 * is there anybody with pending stuff that needs service forcing?
 */
-if (lws_service_adjust_timeout(context, 1, tsi)) {
+if (!lws_service_adjust_timeout(context, 1, tsi))
+timeout_us = 0;
-again:
-a = 0;
-if (timeout_us) {
-lws_usec_t us;
-/* ensure we don't wrap at 2^31 with poll()'s signed int ms */
-lws_pt_lock(pt, __func__);
-/* don't stay in poll wait longer than next hr timeout */
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
-if (us && us < timeout_us)
-timeout_us = us;
-timeout_us /= LWS_US_PER_MS; /* ms now */
-if (timeout_us > LWS_POLL_WAIT_LIMIT)
-timeout_us = LWS_POLL_WAIT_LIMIT;
-lws_pt_unlock(pt);
-}
+/* ensure we don't wrap at 2^31 with poll()'s signed int ms */
+timeout_us /= LWS_US_PER_MS; /* ms now */
+if (timeout_us > LWS_POLL_WAIT_LIMIT)
+timeout_us = LWS_POLL_WAIT_LIMIT;
vpt->inside_poll = 1;
lws_memory_barrier();
n = poll(pt->fds, pt->fds_count, timeout_us /* ms now */ );
vpt->inside_poll = 0;
lws_memory_barrier();
#if defined(LWS_WITH_DETAILED_LATENCY)
/*
 * so we can track how long it took before we actually read a
 * POLLIN that was signalled when we last exited poll()
 */
if (context->detailed_latency_cb)
pt->ust_left_poll = lws_now_usecs();
#endif
/* Collision will be rare and brief. Spin until it completes */
while (vpt->foreign_spinlock)
;
/*
 * At this point we are not inside a foreign thread pollfd
 * change, and we have marked ourselves as outside the poll()
 * wait. So we are the only guys that can modify the
 * lws_foreign_thread_pollfd list on the pt. Drain the list
 * and apply the changes to the affected pollfds in the correct
 * order.
 */
lws_pt_lock(pt, __func__);
ftp = vpt->foreign_pfd_list;
//lwsl_notice("cleared list %p\n", ftp);
while (ftp) {
struct lws *wsi;
struct lws_pollfd *pfd;
next = ftp->next;
pfd = &vpt->fds[ftp->fd_index];
if (lws_socket_is_valid(pfd->fd)) {
wsi = wsi_from_fd(context, pfd->fd);
if (wsi)
__lws_change_pollfd(wsi, ftp->_and,
ftp->_or);
}
lws_free((void *)ftp);
ftp = next;
}
vpt->foreign_pfd_list = NULL;
lws_memory_barrier();
lws_pt_unlock(pt);
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
m = 0;
#endif
#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
m |= !!pt->ws.rx_draining_ext_list;
#endif
#if defined(LWS_WITH_TLS)
if (pt->context->tls_ops &&
pt->context->tls_ops->fake_POLLIN_for_buffered)
m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
#endif
if (
#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
!m &&
#endif
!n) { /* nothing to do */
lws_service_do_ripe_rxflow(pt);
return 0;
}
-} else
-a = 1;
if (_lws_plat_service_forced_tsi(context, tsi))
return -1;
-if (a)
-goto again;
return 0;
}
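
Net effect of the hunk above, paraphrased rather than quoted verbatim from the
file: the again: retry loop and the a flag are gone, the ripe scheduled events
are serviced unconditionally before poll(), and the returned interval can only
shorten the wait.

	us = lws_now_usecs();

	lws_pt_lock(pt, __func__);
	/* service ripe scheduled events, and limit wait to next expected one */
	us = __lws_sul_service_ripe(&pt->pt_sul_owner, us);
	if (us && us < timeout_us)
		timeout_us = us;
	lws_pt_unlock(pt);

	/* if anything needs forced service, don't block in poll() at all */
	if (!lws_service_adjust_timeout(context, 1, tsi))
		timeout_us = 0;

	/* convert to ms and clamp so we don't wrap poll()'s signed int ms */
	timeout_us /= LWS_US_PER_MS;
	if (timeout_us > LWS_POLL_WAIT_LIMIT)
		timeout_us = LWS_POLL_WAIT_LIMIT;

	n = poll(pt->fds, pt->fds_count, timeout_us);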

@@ -135,7 +135,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
lws_pt_lock(pt, __func__);
/* don't stay in poll wait longer than next hr timeout */
-us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
if (us && us < timeout_us)
timeout_us = us;