From 7fad06e9e09e0a5f6f679a3017eade0a2532ab45 Mon Sep 17 00:00:00 2001
From: Andy Green <andy@warmcat.com>
Date: Fri, 1 Nov 2019 12:35:31 +0000
Subject: [PATCH] unix: sul schedule rename internal api and call regardless
 of existing timeout

Pre-sul, checking for the interval to the next pending scheduled event
was expensive and iterative, so the service code avoided doing it if
the poll wait was already 0.

With sul though, the internal "check" function also services ripe
events and removes them, and finding the interval to the next pending
one is really cheap.

Rename the "check" function to __lws_sul_service_ripe() to make it
clear it's not just about returning the interval to the next pending
event.  And call it regardless of whether we already decided we are
not going to wait in the poll, so ripe events are always serviced.

After https://github.com/warmcat/libwebsockets/pull/1745
---
 lib/core-net/private.h             |   2 +-
 lib/core-net/sorted-usec-list.c    |   2 +-
 lib/event-libs/libev/libev.c       |   4 +-
 lib/event-libs/libevent/libevent.c |   4 +-
 lib/event-libs/libuv/libuv.c       |   4 +-
 lib/plat/esp32/esp32-service.c     |   2 +-
 lib/plat/optee/network.c           |   2 +-
 lib/plat/unix/unix-service.c       | 152 ++++++++++++++---------
 lib/plat/windows/windows-service.c |   2 +-
 9 files changed, 83 insertions(+), 91 deletions(-)
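Reviewer note: a minimal sketch of the scheme described above, using a
plain sorted singly-linked list instead of lws_dll2, int64_t instead of
lws_usec_t, and illustrative names (sul_t, sul_service_ripe) that are
not the lws API.  It only shows why servicing ripe events and finding
the next interval is cheap.

/*
 * Sketch only: simplified stand-in for __lws_sul_service_ripe().
 * Fire every entry whose deadline has passed, unlink it, and return
 * the interval in us to the earliest remaining entry (0 = none left).
 */
#include <stdint.h>

typedef struct sul {
	struct sul	*next;			/* kept sorted by .us ascending */
	int64_t		us;			/* absolute deadline, in us */
	void		(*cb)(struct sul *sul);	/* fired when ripe */
} sul_t;

static int64_t
sul_service_ripe(sul_t **head, int64_t usnow)
{
	while (*head) {
		sul_t *sul = *head;

		if (sul->us > usnow)
			/* head is the soonest: this is the max poll wait */
			return sul->us - usnow;

		*head = sul->next;	/* unlink before the callback, so
					 * the cb may reschedule the sul */
		sul->cb(sul);
	}

	return 0;	/* nothing pending: caller may wait indefinitely */
}

Because ripe entries are always at the head of the sorted list, each
pass costs only the entries actually fired plus one comparison, which
is why it is affordable on every service loop iteration even when the
poll wait is already 0.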
diff --git a/lib/core-net/private.h b/lib/core-net/private.h
index 89454e204..015fbae01 100644
--- a/lib/core-net/private.h
+++ b/lib/core-net/private.h
@@ -252,7 +252,7 @@ __lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
 		 lws_usec_t us);
 
 lws_usec_t
-__lws_sul_check(lws_dll2_owner_t *own, lws_usec_t usnow);
+__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow);
 
 struct lws_timed_vh_protocol {
 	struct lws_timed_vh_protocol	*next;
diff --git a/lib/core-net/sorted-usec-list.c b/lib/core-net/sorted-usec-list.c
index 50fccbb66..fcf381c99 100644
--- a/lib/core-net/sorted-usec-list.c
+++ b/lib/core-net/sorted-usec-list.c
@@ -102,7 +102,7 @@ lws_sul_schedule(struct lws_context *context, int tsi,
 }
 
 lws_usec_t
-__lws_sul_check(lws_dll2_owner_t *own, lws_usec_t usnow)
+__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow)
 {
 	while (lws_dll2_get_head(own)) {
 		/* .list is always first member in lws_sorted_usec_list_t */
diff --git a/lib/event-libs/libev/libev.c b/lib/event-libs/libev/libev.c
index 676def7d3..c2449550f 100644
--- a/lib/event-libs/libev/libev.c
+++ b/lib/event-libs/libev/libev.c
@@ -29,7 +29,7 @@ lws_ev_hrtimer_cb(struct ev_loop *loop, struct ev_timer *watcher, int revents)
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 	if (us) {
 		ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
 		ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);
@@ -56,7 +56,7 @@ lws_ev_idle_cb(struct ev_loop *loop, struct ev_idle *handle, int revents)
 
 	/* account for hrtimer */
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 	if (us) {
 		ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
 		ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);
diff --git a/lib/event-libs/libevent/libevent.c b/lib/event-libs/libevent/libevent.c
index fd611821d..2c27911ab 100644
--- a/lib/event-libs/libevent/libevent.c
+++ b/lib/event-libs/libevent/libevent.c
@@ -29,7 +29,7 @@ lws_event_hrtimer_cb(int fd, short event, void *p)
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 	if (us) {
 		tv.tv_sec = us / LWS_US_PER_SEC;
 		tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);
@@ -70,7 +70,7 @@ lws_event_idle_timer_cb(int fd, short event, void *p)
 
 	/* account for hrtimer */
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 	if (us) {
 		tv.tv_sec = us / LWS_US_PER_SEC;
 		tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);
diff --git a/lib/event-libs/libuv/libuv.c b/lib/event-libs/libuv/libuv.c
index 8228ed642..183d146e4 100644
--- a/lib/event-libs/libuv/libuv.c
+++ b/lib/event-libs/libuv/libuv.c
@@ -33,7 +33,7 @@ lws_uv_sultimer_cb(uv_timer_t *timer
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 	if (us)
 		uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
 			       LWS_US_TO_MS(us), 0);
@@ -63,7 +63,7 @@ lws_uv_idle(uv_idle_t *handle
 
 	/* account for sultimer */
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 	if (us)
 		uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
 			       LWS_US_TO_MS(us), 0);
diff --git a/lib/plat/esp32/esp32-service.c b/lib/plat/esp32/esp32-service.c
index 662b9b0fb..bb89696c3 100644
--- a/lib/plat/esp32/esp32-service.c
+++ b/lib/plat/esp32/esp32-service.c
@@ -108,7 +108,7 @@ again:
 
 		lws_pt_lock(pt, __func__);
 		/* don't stay in poll wait longer than next hr timeout */
-		us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+		us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 		if (us && us < timeout_us)
 			timeout_us = us;
 
diff --git a/lib/plat/optee/network.c b/lib/plat/optee/network.c
index 316674185..202d524be 100644
--- a/lib/plat/optee/network.c
+++ b/lib/plat/optee/network.c
@@ -97,7 +97,7 @@ again:
 
 		lws_pt_lock(pt, __func__);
 		/* don't stay in poll wait longer than next hr timeout */
-		us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+		us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 		if (us && us < timeout_us)
 			timeout_us = us;
 
diff --git a/lib/plat/unix/unix-service.c b/lib/plat/unix/unix-service.c
index ee69bac9f..861ed1410 100644
--- a/lib/plat/unix/unix-service.c
+++ b/lib/plat/unix/unix-service.c
@@ -65,8 +65,8 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	volatile struct lws_foreign_thread_pollfd *ftp, *next;
 	volatile struct lws_context_per_thread *vpt;
 	struct lws_context_per_thread *pt;
-	lws_usec_t timeout_us;
-	int n = -1, a = 0;
+	lws_usec_t timeout_us, us;
+	int n = -1;
 #if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
 	int m;
 #endif
@@ -103,115 +103,107 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 		pt->service_tid_detected = 1;
 	}
 
+	us = lws_now_usecs();
+	lws_pt_lock(pt, __func__);
+
+	/*
+	 * service ripe scheduled events, and limit wait to next expected one
+	 */
+	us = __lws_sul_service_ripe(&pt->pt_sul_owner, us);
+	if (us && us < timeout_us)
+		timeout_us = us;
+
+	lws_pt_unlock(pt);
+
 	/*
 	 * is there anybody with pending stuff that needs service forcing?
 	 */
-	if (lws_service_adjust_timeout(context, 1, tsi)) {
+	if (!lws_service_adjust_timeout(context, 1, tsi))
+		timeout_us = 0;
 
-again:
-		a = 0;
-		if (timeout_us) {
-			lws_usec_t us;
+	/* ensure we don't wrap at 2^31 with poll()'s signed int ms */
 
-			lws_pt_lock(pt, __func__);
-			/* don't stay in poll wait longer than next hr timeout */
-			us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
-			if (us && us < timeout_us)
-				timeout_us = us;
+	timeout_us /= LWS_US_PER_MS; /* ms now */
+	if (timeout_us > LWS_POLL_WAIT_LIMIT)
+		timeout_us = LWS_POLL_WAIT_LIMIT;
 
-			lws_pt_unlock(pt);
-		}
-
-		/* ensure we don't wrap at 2^31 with poll()'s signed int ms */
-
-		timeout_us /= LWS_US_PER_MS; /* ms now */
-		if (timeout_us > LWS_POLL_WAIT_LIMIT)
-			timeout_us = LWS_POLL_WAIT_LIMIT;
-
-		vpt->inside_poll = 1;
-		lws_memory_barrier();
-		n = poll(pt->fds, pt->fds_count, timeout_us /* ms now */ );
-		vpt->inside_poll = 0;
-		lws_memory_barrier();
+	vpt->inside_poll = 1;
+	lws_memory_barrier();
+	n = poll(pt->fds, pt->fds_count, timeout_us /* ms now */ );
+	vpt->inside_poll = 0;
+	lws_memory_barrier();
 
 #if defined(LWS_WITH_DETAILED_LATENCY)
-		/*
-		 * so we can track how long it took before we actually read a
-		 * POLLIN that was signalled when we last exited poll()
-		 */
-		if (context->detailed_latency_cb)
-			pt->ust_left_poll = lws_now_usecs();
-#endif
+	/*
+	 * so we can track how long it took before we actually read a
+	 * POLLIN that was signalled when we last exited poll()
+	 */
+	if (context->detailed_latency_cb)
+		pt->ust_left_poll = lws_now_usecs();
+#endif
 
-		/* Collision will be rare and brief. Spin until it completes */
-		while (vpt->foreign_spinlock)
-			;
+	/* Collision will be rare and brief. Spin until it completes */
+	while (vpt->foreign_spinlock)
+		;
 
-		/*
-		 * At this point we are not inside a foreign thread pollfd
-		 * change, and we have marked ourselves as outside the poll()
-		 * wait. So we are the only guys that can modify the
-		 * lws_foreign_thread_pollfd list on the pt. Drain the list
-		 * and apply the changes to the affected pollfds in the correct
-		 * order.
-		 */
+	/*
+	 * At this point we are not inside a foreign thread pollfd
+	 * change, and we have marked ourselves as outside the poll()
+	 * wait. So we are the only guys that can modify the
+	 * lws_foreign_thread_pollfd list on the pt. Drain the list
+	 * and apply the changes to the affected pollfds in the correct
+	 * order.
+	 */
 
-		lws_pt_lock(pt, __func__);
+	lws_pt_lock(pt, __func__);
 
-		ftp = vpt->foreign_pfd_list;
-		//lwsl_notice("cleared list %p\n", ftp);
-		while (ftp) {
-			struct lws *wsi;
-			struct lws_pollfd *pfd;
+	ftp = vpt->foreign_pfd_list;
+	//lwsl_notice("cleared list %p\n", ftp);
+	while (ftp) {
+		struct lws *wsi;
+		struct lws_pollfd *pfd;
 
-			next = ftp->next;
-			pfd = &vpt->fds[ftp->fd_index];
-			if (lws_socket_is_valid(pfd->fd)) {
-				wsi = wsi_from_fd(context, pfd->fd);
-				if (wsi)
-					__lws_change_pollfd(wsi, ftp->_and,
-							    ftp->_or);
-			}
-			lws_free((void *)ftp);
-			ftp = next;
-		}
-		vpt->foreign_pfd_list = NULL;
-		lws_memory_barrier();
+		next = ftp->next;
+		pfd = &vpt->fds[ftp->fd_index];
+		if (lws_socket_is_valid(pfd->fd)) {
+			wsi = wsi_from_fd(context, pfd->fd);
+			if (wsi)
+				__lws_change_pollfd(wsi, ftp->_and,
+						    ftp->_or);
+		}
+		lws_free((void *)ftp);
+		ftp = next;
+	}
+	vpt->foreign_pfd_list = NULL;
+	lws_memory_barrier();
 
-		lws_pt_unlock(pt);
+	lws_pt_unlock(pt);
 
 #if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
-		m = 0;
+	m = 0;
 #endif
 #if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
-		m |= !!pt->ws.rx_draining_ext_list;
+	m |= !!pt->ws.rx_draining_ext_list;
 #endif
 
 #if defined(LWS_WITH_TLS)
-		if (pt->context->tls_ops &&
-		    pt->context->tls_ops->fake_POLLIN_for_buffered)
-			m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
+	if (pt->context->tls_ops &&
+	    pt->context->tls_ops->fake_POLLIN_for_buffered)
+		m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
 #endif
 
-		if (
+	if (
 #if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
-		    !m &&
+	    !m &&
 #endif
-		    !n) { /* nothing to do */
-			lws_service_do_ripe_rxflow(pt);
+	    !n) { /* nothing to do */
+		lws_service_do_ripe_rxflow(pt);
 
-			return 0;
-		}
-	} else
-		a = 1;
+		return 0;
+	}
 
 	if (_lws_plat_service_forced_tsi(context, tsi))
 		return -1;
-
-	if (a)
-		goto again;
 
 	return 0;
 }
diff --git a/lib/plat/windows/windows-service.c b/lib/plat/windows/windows-service.c
index aa79ee819..4036528fd 100644
--- a/lib/plat/windows/windows-service.c
+++ b/lib/plat/windows/windows-service.c
@@ -135,7 +135,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 
 		lws_pt_lock(pt, __func__);
 		/* don't stay in poll wait longer than next hr timeout */
-		us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+		us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
 		if (us && us < timeout_us)
 			timeout_us = us;
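
Reviewer note: the new _lws_plat_service_tsi() flow above reduces to
roughly the sketch below.  Locking, foreign-pollfd draining and the
ws/tls forced-service checks are omitted; poll_wait(), US_PER_MS and
POLL_WAIT_LIMIT are hypothetical stand-ins, not the lws equivalents.

/* Sketch only: the post-patch poll-wait shape, under the above assumptions. */
#include <poll.h>
#include <stdint.h>

#define US_PER_MS	1000LL
#define POLL_WAIT_LIMIT	2000000000LL	/* ms; stay under 2^31 */

static int
poll_wait(struct pollfd *fds, nfds_t nfds, int64_t timeout_us,
	  int64_t (*service_ripe)(int64_t usnow), int64_t usnow,
	  int must_not_wait)
{
	/* always service ripe events first; cap the wait at the next one */
	int64_t us = service_ripe(usnow);

	if (us && us < timeout_us)
		timeout_us = us;

	/* somebody needs service forcing: poll without blocking */
	if (must_not_wait)
		timeout_us = 0;

	/* ensure we don't wrap poll()'s signed int ms argument */
	timeout_us /= US_PER_MS;		/* ms now */
	if (timeout_us > POLL_WAIT_LIMIT)
		timeout_us = POLL_WAIT_LIMIT;

	return poll(fds, nfds, (int)timeout_us);
}

The point of the patch is visible here: service_ripe() runs
unconditionally, before the decision not to block, so ripe scheduled
events can no longer be starved when forced service zeroes the wait.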