diff --git a/lib/plat/esp32/esp32-service.c b/lib/plat/esp32/esp32-service.c
index e13df3e1a..662b9b0fb 100644
--- a/lib/plat/esp32/esp32-service.c
+++ b/lib/plat/esp32/esp32-service.c
@@ -39,7 +39,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 {
 	struct lws_context_per_thread *pt;
 	lws_usec_t timeout_us;
-	int n = -1, m, c;
+	int n = -1, m, c, a = 0;
 
 	/* stay dead once we are dead */
 
@@ -77,10 +77,10 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	}
 
 	if (timeout_ms < 0)
-		goto faked_service;
-
-	/* force a default timeout of 23 days */
-	timeout_ms = 2000000000;
+		timeout_ms = 0;
+	else
+		/* force a default timeout of 23 days */
+		timeout_ms = 2000000000;
 	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
 
 	if (!pt->service_tid_detected) {
@@ -99,85 +99,91 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	/*
 	 * is there anybody with pending stuff that needs service forcing?
	 */
-	if (!lws_service_adjust_timeout(context, 1, tsi)) {
-		/* -1 timeout means just do forced service */
-		_lws_plat_service_tsi(context, -1, pt->tid);
-		/* still somebody left who wants forced service? */
-		if (!lws_service_adjust_timeout(context, 1, pt->tid))
-			/* yes... come back again quickly */
-			timeout_us = 0;
-	}
+	if (lws_service_adjust_timeout(context, 1, tsi)) {
 
-	if (timeout_us) {
-		lws_usec_t us;
+again:
+		a = 0;
+		if (timeout_us) {
+			lws_usec_t us;
 
-		lws_pt_lock(pt, __func__);
-		/* don't stay in poll wait longer than next hr timeout */
-		us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
-		if (us && us < timeout_us)
-			timeout_us = us;
+			lws_pt_lock(pt, __func__);
+			/* don't stay in poll wait longer than next hr timeout */
+			us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+			if (us && us < timeout_us)
+				timeout_us = us;
 
-		lws_pt_unlock(pt);
-	}
-
-//	n = poll(pt->fds, pt->fds_count, timeout_ms);
-	{
-		fd_set readfds, writefds, errfds;
-		struct timeval tv = { timeout_us / LWS_US_PER_SEC,
-				      timeout_us % LWS_US_PER_SEC }, *ptv = &tv;
-		int max_fd = 0;
-		FD_ZERO(&readfds);
-		FD_ZERO(&writefds);
-		FD_ZERO(&errfds);
-
-		for (n = 0; n < (int)pt->fds_count; n++) {
-			pt->fds[n].revents = 0;
-			if (pt->fds[n].fd >= max_fd)
-				max_fd = pt->fds[n].fd;
-			if (pt->fds[n].events & LWS_POLLIN)
-				FD_SET(pt->fds[n].fd, &readfds);
-			if (pt->fds[n].events & LWS_POLLOUT)
-				FD_SET(pt->fds[n].fd, &writefds);
-			FD_SET(pt->fds[n].fd, &errfds);
+			lws_pt_unlock(pt);
 		}
 
-		n = select(max_fd + 1, &readfds, &writefds, &errfds, ptv);
-		n = 0;
-		for (m = 0; m < (int)pt->fds_count; m++) {
-			c = 0;
-			if (FD_ISSET(pt->fds[m].fd, &readfds)) {
-				pt->fds[m].revents |= LWS_POLLIN;
-				c = 1;
-			}
-			if (FD_ISSET(pt->fds[m].fd, &writefds)) {
-				pt->fds[m].revents |= LWS_POLLOUT;
-				c = 1;
-			}
-			if (FD_ISSET(pt->fds[m].fd, &errfds)) {
-				// lwsl_notice("errfds %d\n", pt->fds[m].fd);
-				pt->fds[m].revents |= LWS_POLLHUP;
-				c = 1;
+		// n = poll(pt->fds, pt->fds_count, timeout_ms);
+		{
+			fd_set readfds, writefds, errfds;
+			struct timeval tv = { timeout_us / LWS_US_PER_SEC,
+					timeout_us % LWS_US_PER_SEC }, *ptv = &tv;
+			int max_fd = 0;
+			FD_ZERO(&readfds);
+			FD_ZERO(&writefds);
+			FD_ZERO(&errfds);
+
+			for (n = 0; n < (int)pt->fds_count; n++) {
+				pt->fds[n].revents = 0;
+				if (pt->fds[n].fd >= max_fd)
+					max_fd = pt->fds[n].fd;
+				if (pt->fds[n].events & LWS_POLLIN)
+					FD_SET(pt->fds[n].fd, &readfds);
+				if (pt->fds[n].events & LWS_POLLOUT)
+					FD_SET(pt->fds[n].fd, &writefds);
+				FD_SET(pt->fds[n].fd, &errfds);
 			}
-
-			if (c)
-				n++;
+			n = select(max_fd + 1, &readfds,
+				   &writefds, &errfds, ptv);
+			n = 0;
+
+	#if defined(LWS_WITH_DETAILED_LATENCY)
+			/*
+			 * so we can track how long it took before we actually read a POLLIN
+			 * that was signalled when we last exited poll()
+			 */
+			if (context->detailed_latency_cb)
+				pt->ust_left_poll = lws_now_usecs();
+	#endif
+
+			for (m = 0; m < (int)pt->fds_count; m++) {
+				c = 0;
+				if (FD_ISSET(pt->fds[m].fd, &readfds)) {
+					pt->fds[m].revents |= LWS_POLLIN;
+					c = 1;
+				}
+				if (FD_ISSET(pt->fds[m].fd, &writefds)) {
+					pt->fds[m].revents |= LWS_POLLOUT;
+					c = 1;
+				}
+				if (FD_ISSET(pt->fds[m].fd, &errfds)) {
+					// lwsl_notice("errfds %d\n", pt->fds[m].fd);
+					pt->fds[m].revents |= LWS_POLLHUP;
+					c = 1;
+				}
+
+				if (c)
+					n++;
+			}
 		}
-	}
 
-	m = 0;
+		m = 0;
 
-#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
-	m |= !!pt->ws.rx_draining_ext_list;
-#endif
+	#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
+		m |= !!pt->ws.rx_draining_ext_list;
+	#endif
 
-	if (pt->context->tls_ops &&
-	    pt->context->tls_ops->fake_POLLIN_for_buffered)
-		m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
+		if (pt->context->tls_ops &&
+		    pt->context->tls_ops->fake_POLLIN_for_buffered)
+			m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
 
-	if (!m && !n)
-		return 0;
+		if (!m && !n)
+			return 0;
+	} else
+		a = 1;
 
-faked_service:
 	m = lws_service_flag_pending(context, tsi);
 	if (m)
 		c = -1; /* unknown limit */
@@ -204,5 +210,8 @@ faked_service:
 		n--;
 	}
 
+	if (a)
+		goto again;
+
 	return 0;
 }
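The esp32 service loop above keeps emulating poll() with select() (the poll() call stays commented out): it builds fd_sets from the pollfd array, converts the microsecond timeout into a struct timeval, and translates the select() results back into revents. Below is a minimal standalone sketch of that emulation using only POSIX calls; the helper name emulate_poll() is hypothetical and not part of lws.

#include <sys/select.h>
#include <poll.h>

/* poll()-style wait built on select(); returns the number of pollfds that
 * ended up with any event set, 0 on timeout, -1 on error */
static int
emulate_poll(struct pollfd *fds, int count, long long timeout_us)
{
	struct timeval tv = { timeout_us / 1000000, timeout_us % 1000000 };
	fd_set readfds, writefds, errfds;
	int n, max_fd = 0, hits = 0;

	FD_ZERO(&readfds);
	FD_ZERO(&writefds);
	FD_ZERO(&errfds);

	for (n = 0; n < count; n++) {
		fds[n].revents = 0;
		if (fds[n].fd < 0)	/* negative fds are ignored, as in poll() */
			continue;
		if (fds[n].fd > max_fd)
			max_fd = fds[n].fd;
		if (fds[n].events & POLLIN)
			FD_SET(fds[n].fd, &readfds);
		if (fds[n].events & POLLOUT)
			FD_SET(fds[n].fd, &writefds);
		FD_SET(fds[n].fd, &errfds);	/* always watch for errors */
	}

	if (select(max_fd + 1, &readfds, &writefds, &errfds, &tv) < 0)
		return -1;

	/* translate the fd_set results back into poll()-style revents */
	for (n = 0; n < count; n++) {
		if (fds[n].fd < 0)
			continue;
		if (FD_ISSET(fds[n].fd, &readfds))
			fds[n].revents |= POLLIN;
		if (FD_ISSET(fds[n].fd, &writefds))
			fds[n].revents |= POLLOUT;
		if (FD_ISSET(fds[n].fd, &errfds))
			fds[n].revents |= POLLHUP;
		if (fds[n].revents)
			hits++;
	}

	return hits;
}

As in the hunk, membership in the error set is reported as POLLHUP and the return value counts descriptors with any event, which is what the caller expects from poll().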
diff --git a/lib/plat/optee/network.c b/lib/plat/optee/network.c
index bf7818880..316674185 100644
--- a/lib/plat/optee/network.c
+++ b/lib/plat/optee/network.c
@@ -60,7 +60,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 {
 	lws_usec_t timeout_us = timeout_ms * LWS_US_PER_MS;
 	struct lws_context_per_thread *pt;
-	int n = -1, m, c;
+	int n = -1, m, c, a = 0;
 	//char buf;
 
 	/* stay dead once we are dead */
@@ -71,7 +71,9 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	pt = &context->pt[tsi];
 
 	if (timeout_ms < 0)
-		goto faked_service;
+		timeout_ms = 0;
+	else
+		timeout_ms = 2000000000;
 
 	if (!pt->service_tid_detected) {
 		struct lws _lws;
@@ -87,40 +89,34 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	/*
 	 * is there anybody with pending stuff that needs service forcing?
	 */
-	if (!lws_service_adjust_timeout(context, 1, tsi)) {
-		lwsl_notice("%s: doing forced service\n", __func__);
-		/* -1 timeout means just do forced service */
-		_lws_plat_service_tsi(context, -1, pt->tid);
-		/* still somebody left who wants forced service? */
-		if (!lws_service_adjust_timeout(context, 1, pt->tid))
-			/* yes... come back again quickly */
-			timeout_us = 0;
-	}
+	if (lws_service_adjust_timeout(context, 1, tsi)) {
+again:
+		a = 0;
+		if (timeout_us) {
+			lws_usec_t us;
 
-	if (timeout_us) {
-		lws_usec_t us;
+			lws_pt_lock(pt, __func__);
+			/* don't stay in poll wait longer than next hr timeout */
+			us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+			if (us && us < timeout_us)
+				timeout_us = us;
 
-		lws_pt_lock(pt, __func__);
-		/* don't stay in poll wait longer than next hr timeout */
-		us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
-		if (us && us < timeout_us)
-			timeout_us = us;
+			lws_pt_unlock(pt);
+		}
 
-		lws_pt_unlock(pt);
-	}
+		n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
 
-	n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
+		m = 0;
 
-	m = 0;
+		if (pt->context->tls_ops &&
+		    pt->context->tls_ops->fake_POLLIN_for_buffered)
+			m = pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
 
-	if (pt->context->tls_ops &&
-	    pt->context->tls_ops->fake_POLLIN_for_buffered)
-		m = pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
+		if (/*!pt->ws.rx_draining_ext_list && */!m && !n) /* nothing to do */
+			return 0;
+	} else
+		a = 1;
 
-	if (/*!pt->ws.rx_draining_ext_list && */!m && !n) /* nothing to do */
-		return 0;
-
-faked_service:
 	m = lws_service_flag_pending(context, tsi);
 	if (m)
 		c = -1; /* unknown limit */
@@ -153,6 +149,9 @@ faked_service:
 		n--;
 	}
 
+	if (a)
+		goto again;
+
 	return 0;
 }
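In the esp32 and optee hunks (and the unix one below), the old recursive call with a -1 timeout is replaced by a flag: when lws_service_adjust_timeout() reports that a connection needs forced service, the wait is skipped once (a = 1), the pending work is serviced, and the code jumps back into the normal wait path through the again: label. Below is a compact control-flow sketch of that pattern under those assumptions, with hypothetical stubs standing in for the real platform calls.

#include <stdbool.h>

/* hypothetical stand-ins for the platform service primitives */
static int forced_pending = 1;	/* pretend one connection needs forcing */

static bool nothing_needs_forcing(void)	/* cf. lws_service_adjust_timeout() */
{
	return !forced_pending;
}

static void wait_for_events(int timeout_ms)	/* cf. poll() / select() */
{
	(void)timeout_ms;
}

static void service_ready_events(void)
{
	forced_pending = 0;
}

int
service_once(int timeout_ms)
{
	int a = 0;

	if (nothing_needs_forcing()) {
again:
		a = 0;
		wait_for_events(timeout_ms);	/* the normal blocking wait */
	} else
		a = 1;	/* somebody needs service right now: skip the wait */

	service_ready_events();

	if (a)		/* the wait was skipped: go around once more */
		goto again;

	return 0;
}

Jumping back through the label lets the immediate, non-blocking pass and the normal blocking pass share the same service code at the bottom of the function, which is what removes the need for the recursive call.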
diff --git a/lib/plat/unix/unix-service.c b/lib/plat/unix/unix-service.c
index 198da6d5c..19cbb4ecb 100644
--- a/lib/plat/unix/unix-service.c
+++ b/lib/plat/unix/unix-service.c
@@ -35,7 +35,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	volatile struct lws_context_per_thread *vpt;
 	struct lws_context_per_thread *pt;
 	lws_usec_t timeout_us;
-	int n = -1, m, c;
+	int n = -1, m, c, a = 0;
 
 	/* stay dead once we are dead */
 
@@ -48,10 +48,10 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	lws_stats_bump(pt, LWSSTATS_C_SERVICE_ENTRY, 1);
 
 	if (timeout_ms < 0)
-		goto faked_service;
-
-	/* force a default timeout of 23 days */
-	timeout_ms = 2000000000;
+		timeout_ms = 0;
+	else
+		/* force a default timeout of 23 days */
+		timeout_ms = 2000000000;
 	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
 
 	if (context->event_loop_ops->run_pt)
@@ -72,90 +72,97 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	/*
 	 * is there anybody with pending stuff that needs service forcing?
	 */
-	if (!lws_service_adjust_timeout(context, 1, tsi)) {
-		/* -1 timeout means just do forced service */
-		_lws_plat_service_tsi(context, -1, pt->tid);
-		/* still somebody left who wants forced service? */
-		if (!lws_service_adjust_timeout(context, 1, pt->tid))
-			/* yes... come back again quickly */
-			timeout_us = 0;
-	}
+	if (lws_service_adjust_timeout(context, 1, tsi)) {
 
-	if (timeout_us) {
-		lws_usec_t us;
+again:
+		a = 0;
+		if (timeout_us) {
+			lws_usec_t us;
+
+			lws_pt_lock(pt, __func__);
+			/* don't stay in poll wait longer than next hr timeout */
+			us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
+			if (us && us < timeout_us)
+				timeout_us = us;
+
+			lws_pt_unlock(pt);
+		}
+
+		vpt->inside_poll = 1;
+		lws_memory_barrier();
+		n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
+		vpt->inside_poll = 0;
+		lws_memory_barrier();
+
+	#if defined(LWS_WITH_DETAILED_LATENCY)
+		/*
+		 * so we can track how long it took before we actually read a
+		 * POLLIN that was signalled when we last exited poll()
+		 */
+		if (context->detailed_latency_cb)
+			pt->ust_left_poll = lws_now_usecs();
+	#endif
+
+		/* Collision will be rare and brief. Spin until it completes */
+		while (vpt->foreign_spinlock)
+			;
+
+		/*
+		 * At this point we are not inside a foreign thread pollfd
+		 * change, and we have marked ourselves as outside the poll()
+		 * wait. So we are the only guys that can modify the
+		 * lws_foreign_thread_pollfd list on the pt. Drain the list
+		 * and apply the changes to the affected pollfds in the correct
+		 * order.
+		 */
 
 		lws_pt_lock(pt, __func__);
-		/* don't stay in poll wait longer than next hr timeout */
-		us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
-		if (us && us < timeout_us)
-			timeout_us = us;
+
+		ftp = vpt->foreign_pfd_list;
+		//lwsl_notice("cleared list %p\n", ftp);
+		while (ftp) {
+			struct lws *wsi;
+			struct lws_pollfd *pfd;
+
+			next = ftp->next;
+			pfd = &vpt->fds[ftp->fd_index];
+			if (lws_socket_is_valid(pfd->fd)) {
+				wsi = wsi_from_fd(context, pfd->fd);
+				if (wsi)
+					__lws_change_pollfd(wsi, ftp->_and,
+							    ftp->_or);
+			}
+			lws_free((void *)ftp);
+			ftp = next;
+		}
+		vpt->foreign_pfd_list = NULL;
+		lws_memory_barrier();
 
 		lws_pt_unlock(pt);
-	}
 
-	vpt->inside_poll = 1;
-	lws_memory_barrier();
-	n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
-	vpt->inside_poll = 0;
-	lws_memory_barrier();
+		m = 0;
+	#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
+		m |= !!pt->ws.rx_draining_ext_list;
+	#endif
 
-	/* Collision will be rare and brief. Just spin until it completes */
-	while (vpt->foreign_spinlock)
-		;
+	#if defined(LWS_WITH_TLS)
+		if (pt->context->tls_ops &&
+		    pt->context->tls_ops->fake_POLLIN_for_buffered)
+			m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
+	#endif
 
-	/*
-	 * At this point we are not inside a foreign thread pollfd change,
-	 * and we have marked ourselves as outside the poll() wait. So we
-	 * are the only guys that can modify the lws_foreign_thread_pollfd
-	 * list on the pt. Drain the list and apply the changes to the
-	 * affected pollfds in the correct order.
-	 */
+		if (
+	#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
+		    !m &&
+	#endif
+		    !n) { /* nothing to do */
+			lws_service_do_ripe_rxflow(pt);
 
-	lws_pt_lock(pt, __func__);
-
-	ftp = vpt->foreign_pfd_list;
-	//lwsl_notice("cleared list %p\n", ftp);
-	while (ftp) {
-		struct lws *wsi;
-		struct lws_pollfd *pfd;
-
-		next = ftp->next;
-		pfd = &vpt->fds[ftp->fd_index];
-		if (lws_socket_is_valid(pfd->fd)) {
-			wsi = wsi_from_fd(context, pfd->fd);
-			if (wsi)
-				__lws_change_pollfd(wsi, ftp->_and, ftp->_or);
+			return 0;
 		}
-		lws_free((void *)ftp);
-		ftp = next;
-	}
-	vpt->foreign_pfd_list = NULL;
-	lws_memory_barrier();
+	} else
+		a = 1;
 
-	lws_pt_unlock(pt);
-
-	m = 0;
-#if defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)
-	m |= !!pt->ws.rx_draining_ext_list;
-#endif
-
-#if defined(LWS_WITH_TLS)
-	if (pt->context->tls_ops &&
-	    pt->context->tls_ops->fake_POLLIN_for_buffered)
-		m |= pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
-#endif
-
-	if (
-#if (defined(LWS_ROLE_WS) && !defined(LWS_WITHOUT_EXTENSIONS)) || defined(LWS_WITH_TLS)
-	    !m &&
-#endif
-	    !n) { /* nothing to do */
-		lws_service_do_ripe_rxflow(pt);
-
-		return 0;
-	}
-
-faked_service:
 	m = lws_service_flag_pending(context, tsi);
 	if (m)
 		c = -1; /* unknown limit */
@@ -187,6 +194,9 @@ faked_service:
 
 	lws_service_do_ripe_rxflow(pt);
 
+	if (a)
+		goto again;
+
 	return 0;
 }
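The unix version also drains, once it is back out of poll(), the list of pollfd changes queued by foreign threads, applying each queued modification to the affected entry in order. Below is a simplified single-threaded sketch of just that drain step; the type and field names are hypothetical, not the lws structures, and the real code additionally relies on the spinlock flag, the pt lock and the memory barriers shown above.

#include <poll.h>
#include <stdlib.h>

/* hypothetical queue node for a change requested by another thread */
struct queued_change {
	struct queued_change	*next;
	int			fd_index;	/* index into the pollfd array */
	short			clear;		/* event bits to clear */
	short			set;		/* event bits to set */
};

static void
drain_changes(struct queued_change **head, struct pollfd *fds)
{
	struct queued_change *c = *head, *next;

	while (c) {
		next = c->next;

		/* apply the queued modification to the affected entry */
		fds[c->fd_index].events &= (short)~c->clear;
		fds[c->fd_index].events |= c->set;

		free(c);
		c = next;
	}

	*head = NULL;
}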
diff --git a/lib/plat/windows/windows-service.c b/lib/plat/windows/windows-service.c
index ab9de9f7b..4397f07b0 100644
--- a/lib/plat/windows/windows-service.c
+++ b/lib/plat/windows/windows-service.c
@@ -57,27 +57,11 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 		pt->service_tid_detected = 1;
 	}
 
-	if (timeout_ms < 0) {
-		if (lws_service_flag_pending(context, tsi)) {
-			/* any socket with events to service? */
-			for (n = 0; n < (int)pt->fds_count; n++) {
-				int m;
-				if (!pt->fds[n].revents)
-					continue;
-
-				m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
-				if (m < 0)
-					return -1;
-				/* if something closed, retry this slot */
-				if (m)
-					n--;
-			}
-		}
-		return 0;
-	}
-
-	/* force a default timeout of 23 days */
-	timeout_ms = 2000000000;
+	if (timeout_ms < 0)
+		timeout_ms = 0;
+	else
+		/* force a default timeout of 23 days */
+		timeout_ms = 2000000000;
 	timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
 
 	if (context->event_loop_ops->run_pt)
@@ -113,13 +97,21 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	/*
 	 * is there anybody with pending stuff that needs service forcing?
	 */
-	if (!lws_service_adjust_timeout(context, 1, tsi)) {
-		/* -1 timeout means just do forced service */
-		_lws_plat_service_tsi(context, -1, pt->tid);
-		/* still somebody left who wants forced service? */
-		if (!lws_service_adjust_timeout(context, 1, pt->tid))
-			/* yes... come back again quickly */
-			timeout_us = 0;
+	if (!lws_service_adjust_timeout(context, 1, tsi) &&
+	    lws_service_flag_pending(context, tsi)) {
+		/* any socket with events to service? */
+		for (n = 0; n < (int)pt->fds_count; n++) {
+			int m;
+			if (!pt->fds[n].revents)
+				continue;
+
+			m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
+			if (m < 0)
+				return -1;
+			/* if something closed, retry this slot */
+			if (m)
+				n--;
+		}
 	}
 
 	if (timeout_us) {
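On Windows the forced-service case is now handled inline rather than by recursing: when forcing is needed and flagged events are pending, every pollfd with revents set is serviced immediately, and a slot is retried (n--) when servicing it closed something, since that slot may then hold a different descriptor. Below is a small standalone sketch of that retry idiom; service_slot() is a hypothetical stand-in for lws_service_fd_tsi().

#include <poll.h>

/* hypothetical: returns 1 if it removed entry n (the last entry is moved
 * down into its place), 0 if it serviced the entry in place, -1 on a fatal
 * error */
static int
service_slot(struct pollfd *fds, int *count, int n)
{
	if (fds[n].revents & POLLHUP) {
		fds[n] = fds[--(*count)];	/* compact the array */

		return 1;
	}

	fds[n].revents = 0;			/* serviced in place */

	return 0;
}

static int
service_pending(struct pollfd *fds, int *count)
{
	int n, m;

	for (n = 0; n < *count; n++) {
		if (!fds[n].revents)
			continue;

		m = service_slot(fds, count, n);
		if (m < 0)
			return -1;
		if (m)	/* something closed: slot n holds a new entry now */
			n--;
	}

	return 0;
}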
diff --git a/minimal-examples/api-tests/api-test-lws_struct-json/CMakeLists.txt b/minimal-examples/api-tests/api-test-lws_struct-json/CMakeLists.txt
index a0900f866..07ab387db 100644
--- a/minimal-examples/api-tests/api-test-lws_struct-json/CMakeLists.txt
+++ b/minimal-examples/api-tests/api-test-lws_struct-json/CMakeLists.txt
@@ -61,8 +61,10 @@ MACRO(require_lws_config reqconfig _val result)
 	endif()
 ENDMACRO()
 
+set(requirements 1)
+require_lws_config(LWS_WITH_STRUCT_JSON 1 requirements)
 
-
+if (requirements)
 add_executable(${SAMP} ${SRCS})
 
 if (websockets_shared)
@@ -71,3 +73,5 @@ ENDMACRO()
 else()
 	target_link_libraries(${SAMP} websockets)
 endif()
+endif()
+