mirror of
https://github.com/warmcat/libwebsockets.git
synced 2025-03-16 00:00:07 +01:00

There's no longer any reason to come out of sleep for periodic service, which has been eliminated by lws_sul. With event libs there is no opportunity to do it anyway, since their event loop is atomic and makes callbacks and sleeps until it is stopped. But some users rely on the old poll() service loop as glue that is difficult to replace. So for now, help that work by accepting a timeout_ms of -1 to mean: sample poll() once and service whatever is pending, without any wait.
232 lines
4.2 KiB
C
232 lines
4.2 KiB
C
#include "core/private.h"
|
|
|
|
|
|
int
lws_plat_pipe_create(struct lws *wsi)
{
	/*
	 * This platform has no self-pipe / cancel-service mechanism;
	 * nonzero tells the caller the pipe is unavailable.
	 */
	return 1;
}
|
|
|
|
int
lws_plat_pipe_signal(struct lws *wsi)
{
	/* no pipe exists on this platform, so signalling it must fail */
	return 1;
}
|
|
|
|
void
lws_plat_pipe_close(struct lws *wsi)
{
	/* nothing was created by lws_plat_pipe_create(), nothing to close */
}
|
|
|
|
LWS_VISIBLE int
|
|
lws_send_pipe_choked(struct lws *wsi)
|
|
{
|
|
struct lws *wsi_eff;
|
|
|
|
#if defined(LWS_WITH_HTTP2)
|
|
wsi_eff = lws_get_network_wsi(wsi);
|
|
#else
|
|
wsi_eff = wsi;
|
|
#endif
|
|
|
|
/* the fact we checked implies we avoided back-to-back writes */
|
|
wsi_eff->could_have_pending = 0;
|
|
|
|
/* treat the fact we got a truncated send pending as if we're choked */
|
|
if (lws_has_buffered_out(wsi_eff)
|
|
#if defined(LWS_WITH_HTTP_STREAM_COMPRESSION)
|
|
|| wsi->http.comp_ctx.buflist_comp ||
|
|
wsi->http.comp_ctx.may_have_more
|
|
#endif
|
|
)
|
|
return 1;
|
|
|
|
/* okay to send another packet without blocking */
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
lws_poll_listen_fd(struct lws_pollfd *fd)
{
	/* listen fds are not individually polled on this platform */
	return 0;
}
|
|
|
|
|
|
LWS_EXTERN int
|
|
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
|
|
{
|
|
lws_usec_t timeout_us = timeout_ms * LWS_US_PER_MS;
|
|
struct lws_context_per_thread *pt;
|
|
int n = -1, m, c, a = 0;
|
|
//char buf;
|
|
|
|
/* stay dead once we are dead */
|
|
|
|
if (!context || !context->vhost_list)
|
|
return 1;
|
|
|
|
pt = &context->pt[tsi];
|
|
|
|
if (timeout_ms < 0)
|
|
timeout_ms = 0;
|
|
else
|
|
timeout_ms = 2000000000;
|
|
|
|
if (!pt->service_tid_detected) {
|
|
struct lws _lws;
|
|
|
|
memset(&_lws, 0, sizeof(_lws));
|
|
_lws.context = context;
|
|
|
|
pt->service_tid = context->vhost_list->protocols[0].callback(
|
|
&_lws, LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
|
|
pt->service_tid_detected = 1;
|
|
}
|
|
|
|
/*
|
|
* is there anybody with pending stuff that needs service forcing?
|
|
*/
|
|
if (lws_service_adjust_timeout(context, 1, tsi)) {
|
|
again:
|
|
a = 0;
|
|
if (timeout_us) {
|
|
lws_usec_t us;
|
|
|
|
lws_pt_lock(pt, __func__);
|
|
/* don't stay in poll wait longer than next hr timeout */
|
|
us = __lws_sul_check(&pt->pt_sul_owner, lws_now_usecs());
|
|
if (us && us < timeout_us)
|
|
timeout_us = us;
|
|
|
|
lws_pt_unlock(pt);
|
|
}
|
|
|
|
n = poll(pt->fds, pt->fds_count, timeout_us / LWS_US_PER_MS);
|
|
|
|
m = 0;
|
|
|
|
if (pt->context->tls_ops &&
|
|
pt->context->tls_ops->fake_POLLIN_for_buffered)
|
|
m = pt->context->tls_ops->fake_POLLIN_for_buffered(pt);
|
|
|
|
if (/*!pt->ws.rx_draining_ext_list && */!m && !n) /* nothing to do */
|
|
return 0;
|
|
} else
|
|
a = 1;
|
|
|
|
m = lws_service_flag_pending(context, tsi);
|
|
if (m)
|
|
c = -1; /* unknown limit */
|
|
else
|
|
if (n < 0) {
|
|
if (LWS_ERRNO != LWS_EINTR)
|
|
return -1;
|
|
return 0;
|
|
} else
|
|
c = n;
|
|
|
|
/* any socket with events to service? */
|
|
for (n = 0; n < (int)pt->fds_count && c; n++) {
|
|
if (!pt->fds[n].revents)
|
|
continue;
|
|
|
|
c--;
|
|
#if 0
|
|
if (pt->fds[n].fd == pt->dummy_pipe_fds[0]) {
|
|
if (read(pt->fds[n].fd, &buf, 1) != 1)
|
|
lwsl_err("Cannot read from dummy pipe.");
|
|
continue;
|
|
}
|
|
#endif
|
|
m = lws_service_fd_tsi(context, &pt->fds[n], tsi);
|
|
if (m < 0)
|
|
return -1;
|
|
/* if something closed, retry this slot */
|
|
if (m)
|
|
n--;
|
|
}
|
|
|
|
if (a)
|
|
goto again;
|
|
|
|
return 0;
|
|
}
|
|
|
|
int
lws_plat_check_connection_error(struct lws *wsi)
{
	/* no platform-specific connection error source here: report none */
	return 0;
}
|
|
|
|
int
lws_plat_service(struct lws_context *context, int timeout_ms)
{
	/* single service thread on this platform: always tsi 0 */
	return _lws_plat_service_tsi(context, timeout_ms, 0);
}
|
|
|
|
int
lws_plat_set_socket_options(struct lws_vhost *vhost, int fd, int unix_skt)
{
	/* no socket options to apply on this platform; success */
	return 0;
}
|
|
|
|
|
|
int
lws_plat_write_cert(struct lws_vhost *vhost, int is_key, int fd, void *buf,
		    int len)
{
	/* persisting certs is not supported on this platform; fail */
	return 1;
}
|
|
|
|
|
|
/* cast a struct sockaddr_in6 * into addr for ipv6 */

int
lws_interface_to_sa(int ipv6, const char *ifname, struct sockaddr_in *addr,
		    size_t addrlen)
{
	/* interface name -> address lookup is unavailable here; fail */
	return -1;
}
|
|
|
|
void
|
|
lws_plat_insert_socket_into_fds(struct lws_context *context, struct lws *wsi)
|
|
{
|
|
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
|
|
|
|
pt->fds[pt->fds_count++].revents = 0;
|
|
}
|
|
|
|
void
|
|
lws_plat_delete_socket_from_fds(struct lws_context *context,
|
|
struct lws *wsi, int m)
|
|
{
|
|
struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
|
|
|
|
pt->fds_count--;
|
|
}
|
|
|
|
int
lws_plat_change_pollfd(struct lws_context *context,
		       struct lws *wsi, struct lws_pollfd *pfd)
{
	/* pollfd events need no platform-side mirroring; success */
	return 0;
}
|
|
|
|
const char *
lws_plat_inet_ntop(int af, const void *src, char *dst, int cnt)
{
	/*
	 * inet_ntop() is unavailable on this platform; return a fixed
	 * placeholder string instead of a rendered address.
	 */
	return "lws_plat_inet_ntop";
}
|
|
|
|
int
lws_plat_inet_pton(int af, const char *src, void *dst)
{
	/*
	 * inet_pton() is unavailable on this platform; claim success
	 * without writing anything to dst.
	 */
	return 1;
}
|
|
|
|
|