Mirror of https://github.com/warmcat/libwebsockets.git
unify us sorted waits
There are quite a few linked lists of things that want events after some period. This introduces a type binding an lws_dll2 for the list and an lws_usec_t for when it should fire. The wsi timeouts, the hrtimer and the sequencer timeouts are converted to use it, including in the common event-wait calculation.
commit 3c12fd72e8 (parent 6de416d811)
22 changed files with 322 additions and 310 deletions
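As a minimal standalone sketch of the idea (not the lws internals themselves; the type, owner and helper names below are simplified stand-ins for the lws_sorted_usec_list_t, __lws_sul_insert() and __lws_sul_check() introduced in the diff), each waiter binds a list node to an absolute microsecond deadline, the insert keeps the owner's list sorted ascending, and the check fires expired entries and reports how long until the next one:

	/* standalone sketch: sorted microsecond waits, simplified model only */
	#include <stdio.h>
	#include <stdint.h>

	typedef int64_t usec_t;

	typedef struct sul {
		struct sul *next;		/* stands in for the lws_dll2 member */
		usec_t us;			/* absolute deadline in us, 0 = unscheduled */
		void (*cb)(struct sul *s);	/* fired when the deadline passes */
	} sul_t;

	/* insert so the owner's list stays sorted by ascending deadline */
	static void
	sul_insert(sul_t **own, sul_t *s, usec_t now, usec_t wait_us)
	{
		sul_t **pp = own;

		s->us = now + wait_us;
		while (*pp && (*pp)->us < s->us)
			pp = &(*pp)->next;
		s->next = *pp;
		*pp = s;
	}

	/* fire everything that has expired; return us until the next pending
	 * entry, or 0 if nothing is left (roughly the __lws_sul_check contract) */
	static usec_t
	sul_check(sul_t **own, usec_t now)
	{
		while (*own) {
			sul_t *s = *own;

			if (s->us > now)
				return s->us - now;	/* next event is this far away */

			*own = s->next;			/* remove the expired entry... */
			s->us = 0;
			s->cb(s);			/* ...and tell its owner */
		}

		return 0;
	}

	static void hit(sul_t *s) { printf("timer fired\n"); }

	int main(void)
	{
		sul_t *owner = NULL, a = { .cb = hit }, b = { .cb = hit };

		sul_insert(&owner, &a, 0, 3000000);	/* 3s from "now" = 0 */
		sul_insert(&owner, &b, 0, 1000000);	/* 1s, sorts ahead of a */

		printf("next wake in %lld us\n", (long long)sul_check(&owner, 0));
		sul_check(&owner, 5000000);		/* both fire by t = 5s */

		return 0;
	}

The per-thread wait lists in the commit follow this shape: insertion is O(n) but checking is cheap because the head is always the soonest deadline.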
@@ -781,6 +781,7 @@ CHECK_FUNCTION_EXISTS(getloadavg LWS_HAVE_GETLOADAVG)
CHECK_FUNCTION_EXISTS(atoll LWS_HAVE_ATOLL)
CHECK_FUNCTION_EXISTS(_atoi64 LWS_HAVE__ATOI64)
CHECK_FUNCTION_EXISTS(_stat32i64 LWS_HAVE__STAT32I64)
CHECK_FUNCTION_EXISTS(clock_gettime LWS_HAVE_CLOCK_GETTIME)

if (NOT LWS_HAVE_GETIFADDRS)
if (LWS_WITHOUT_BUILTIN_GETIFADDRS)

@@ -958,6 +959,7 @@ if (LWS_WITH_NETWORK)
lib/core-net/pollfd.c
lib/core-net/sequencer.c
lib/core-net/service.c
lib/core-net/sorted-usec-list.c
lib/core-net/stats.c
lib/core-net/wsi.c
lib/core-net/wsi-timeout.c
@@ -25,6 +25,7 @@
#cmakedefine LWS_HAVE__ATOI64
#cmakedefine LWS_HAVE_ATOLL
#cmakedefine LWS_HAVE_BN_bn2binpad
#cmakedefine LWS_HAVE_CLOCK_GETTIME
#cmakedefine LWS_HAVE_EC_POINT_get_affine_coordinates
#cmakedefine LWS_HAVE_ECDSA_SIG_set0
#cmakedefine LWS_HAVE_EVP_MD_CTX_free
@@ -45,6 +45,7 @@ extern "C" {
#define LWS_US_PER_SEC 1000000
#define LWS_MS_PER_SEC 1000
#define LWS_US_PER_MS 1000
#define LWS_NS_PER_US 1000

#define LWS_US_TO_MS(x) ((x + (LWS_US_PER_MS / 2)) / LWS_US_PER_MS)
@@ -297,6 +297,13 @@ lws_dll2_owner_clear(struct lws_dll2_owner *d);
void
lws_dll2_add_before(struct lws_dll2 *d, struct lws_dll2 *after);

#if defined(_DEBUG)
void
lws_dll2_describe(struct lws_dll2_owner *owner, const char *desc);
#else
#define lws_dll2_describe(x, y)
#endif

/*
 * these are safe against the current container object getting deleted,
 * since the hold his next in a temp and go to that next. ___tmp is

@@ -566,26 +573,6 @@ lws_now_secs(void);
LWS_VISIBLE LWS_EXTERN lws_usec_t
lws_now_usecs(void);

/**
 * lws_compare_time_t(): return relationship between two time_t
 *
 * \param context: struct lws_context
 * \param t1: time_t 1
 * \param t2: time_t 2
 *
 * returns <0 if t2 > t1; >0 if t1 > t2; or == 0 if t1 == t2.
 *
 * This is aware of clock discontiguities that may have affected either t1 or
 * t2 and adapts the comparison for them.
 *
 * For the discontiguity detection to work, you must avoid any arithmetic on
 * the times being compared. For example to have a timeout that triggers
 * 15s from when it was set, store the time it was set and compare like
 * `if (lws_compare_time_t(context, now, set_time) > 15)`
 */
LWS_VISIBLE LWS_EXTERN int
lws_compare_time_t(struct lws_context *context, time_t t1, time_t t2);

/**
 * lws_get_context - Allow getting lws_context from a Websocket connection
 * instance
@@ -90,9 +90,7 @@ lws_time_in_microseconds(void);
 * wsi currently being serviced.
 */
/**
 * lws_set_timeout() - marks the wsi as subject to a timeout
 *
 * You will not need this unless you are doing something special
 * lws_set_timeout() - marks the wsi as subject to a timeout some seconds hence
 *
 * \param wsi: Websocket connection instance
 * \param reason: timeout reason

@@ -104,6 +102,19 @@ lws_time_in_microseconds(void);
LWS_VISIBLE LWS_EXTERN void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs);

/**
 * lws_set_timeout_us() - marks the wsi as subject to a timeout some us hence
 *
 * \param wsi: Websocket connection instance
 * \param reason: timeout reason
 * \param us: 0 removes the timeout, otherwise number of us to wait
 *
 * Higher-resolution version of lws_set_timeout(). Actual resolution depends
 * on platform and load, usually ms.
 */
void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us);

#define LWS_SET_TIMER_USEC_CANCEL ((lws_usec_t)-1ll)
#define LWS_USEC_PER_SEC (1000000ll)
@@ -436,7 +436,7 @@ just_kill_connection:
 */
__lws_ssl_remove_wsi_from_buffered_list(wsi);
__lws_remove_from_timeout_list(wsi);
lws_dll2_remove(&wsi->dll_hrtimer);
lws_dll2_remove(&wsi->sul_hrtimer.list);

//if (wsi->told_event_loop_closed) // cgi std close case (dummy-callback)
// return;
@@ -256,6 +256,20 @@ struct lws_timed_vh_protocol {
int tsi_req;
};

typedef struct lws_sorted_usec_list {
struct lws_dll2 list; /* simplify the code by keeping this at start */
lws_usec_t us;
} lws_sorted_usec_list_t;

typedef void (*sul_cb_t)(lws_sorted_usec_list_t *sul);

int
__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
lws_usec_t us);

lws_usec_t
__lws_sul_check(lws_dll2_owner_t *own, sul_cb_t cb, lws_usec_t usnow);

/*
 * so we can have n connections being serviced simultaneously,
 * these things need to be isolated per-thread.

@@ -278,7 +292,7 @@ struct lws_context_per_thread {
unsigned char *serv_buf;

struct lws_dll2_owner dll_timeout_owner;
struct lws_dll2_owner dll_hrtimer_head;
struct lws_dll2_owner dll_hrtimer_owner;
struct lws_dll2_owner dll_buflist_owner; /* guys with pending rxflow */
struct lws_dll2_owner seq_owner; /* list of lws_sequencer-s */
struct lws_dll2_owner seq_pend_owner; /* lws_seq-s with pending evts */

@@ -503,6 +517,9 @@ struct lws {
struct lws_io_watcher w_write;
#endif

lws_sorted_usec_list_t sul_timeout;
lws_sorted_usec_list_t sul_hrtimer;

/* pointers */

struct lws_context *context;

@@ -516,8 +533,6 @@ struct lws {

lws_seq_t *seq; /* associated sequencer if any */

struct lws_dll2 dll_timeout;
struct lws_dll2 dll_hrtimer;
struct lws_dll2 dll_buflist; /* guys with pending rxflow */

#if defined(LWS_WITH_THREADPOOL)

@@ -555,9 +570,6 @@ struct lws {
#endif
#endif

lws_usec_t pending_timer; /* hrtimer fires */
time_t pending_timeout_set; /* second-resolution timeout start */

#ifdef LWS_LATENCY
unsigned long action_start;
unsigned long latency_start;

@@ -634,7 +646,6 @@ struct lws {
#ifndef LWS_NO_CLIENT
unsigned short c_port;
#endif
unsigned short pending_timeout_limit;

/* chars */

@@ -1098,6 +1109,8 @@ __lws_remove_from_timeout_list(struct lws *wsi);
lws_usec_t
__lws_hrtimer_service(struct lws_context_per_thread *pt, lws_usec_t t);

lws_usec_t
__lws_wsitimeout_service(struct lws_context_per_thread *pt, lws_usec_t t);

int
lws_buflist_aware_read(struct lws_context_per_thread *pt, struct lws *wsi,
@@ -38,7 +38,8 @@ typedef struct lws_seq_event {
typedef struct lws_sequencer {
struct lws_dll2 seq_list;
struct lws_dll2 seq_pend_list;
struct lws_dll2 seq_to_list;

lws_sorted_usec_list_t sul;

struct lws_dll2_owner seq_event_owner;
struct lws_context_per_thread *pt;

@@ -117,7 +118,7 @@ lws_seq_destroy(lws_seq_t **pseq)
lws_pt_lock(seq->pt, __func__); /* -------------------------- pt { */

lws_dll2_remove(&seq->seq_list);
lws_dll2_remove(&seq->seq_to_list);
lws_dll2_remove(&seq->sul.list);
lws_dll2_remove(&seq->seq_pend_list);
/* remove and destroy any pending events */
lws_dll2_foreach_safe(&seq->seq_event_owner, NULL, seq_ev_destroy);

@@ -286,43 +287,9 @@ lws_pt_do_pending_sequencer_events(struct lws_context_per_thread *pt)
int
lws_seq_timeout_us(lws_seq_t *seq, lws_usec_t us)
{
lws_dll2_remove(&seq->seq_to_list);

if (!us) {
/* we are clearing the timeout */
seq->timeout = 0;

return 0;
}

seq->timeout = lws_now_usecs() + us;

/*
 * we sort the pt's list of sequencers with pending timeouts, so it's
 * cheap to check it every second
 */

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
seq->pt->seq_to_owner.head) {
lws_seq_t *s = lws_container_of(p, lws_seq_t, seq_to_list);

assert(s->timeout); /* shouldn't be on the list otherwise */
if (s->timeout >= seq->timeout) {
/* drop us in before this guy */
lws_dll2_add_before(&seq->seq_to_list, &s->seq_to_list);

return 0;
}
} lws_end_foreach_dll_safe(p, tp);

/*
 * Either nobody on the list yet to compare him to, or he's the
 * longest timeout... stick him at the tail end
 */

lws_dll2_add_tail(&seq->seq_to_list, &seq->pt->seq_to_owner);

return 0;
/* list is always at the very top of the sul */
return __lws_sul_insert(&seq->pt->seq_to_owner,
(lws_sorted_usec_list_t *)&seq->sul.list, us);
}

/*

@@ -331,34 +298,21 @@ lws_seq_timeout_us(lws_seq_t *seq, lws_usec_t us)
 * would have serviced it)
 */

static void
lws_seq_sul_check_cb(lws_sorted_usec_list_t *sul)
{
lws_seq_t *s = lws_container_of(sul, lws_seq_t, sul);

lws_seq_queue_event(s, LWSSEQ_TIMED_OUT, NULL, NULL);
}

lws_usec_t
__lws_seq_timeout_check(struct lws_context_per_thread *pt, lws_usec_t usnow)
{
lws_usec_t future_us = 0;
lws_usec_t future_us = __lws_sul_check(&pt->seq_to_owner,
lws_seq_sul_check_cb, usnow);

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
pt->seq_to_owner.head) {
lws_seq_t *s = lws_container_of(p, lws_seq_t, seq_to_list);

assert(s->timeout); /* shouldn't be on the list otherwise */
if (s->timeout <= usnow) {
/* seq has timed out... remove him from timeout list */
lws_seq_timeout_us(s, LWSSEQTO_NONE);
/* queue the message to inform the sequencer */
lws_seq_queue_event(s, LWSSEQ_TIMED_OUT, NULL, NULL);
} else {
/*
 * No need to look further if we met one later than now:
 * the list is sorted in ascending time order
 */
future_us = usnow - s->timeout;

break;
}

} lws_end_foreach_dll_safe(p, tp);

if (usnow - pt->last_heartbeat< LWS_US_PER_SEC)
if (usnow - pt->last_heartbeat < LWS_US_PER_SEC)
return future_us;

pt->last_heartbeat = usnow;

@@ -366,7 +320,7 @@ __lws_seq_timeout_check(struct lws_context_per_thread *pt, lws_usec_t usnow)
/* send every sequencer a heartbeat message... it can ignore it */

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
pt->seq_owner.head) {
lws_dll2_get_head(&pt->seq_owner)) {
lws_seq_t *s = lws_container_of(p, lws_seq_t, seq_list);

/* queue the message to inform the sequencer */
@@ -227,75 +227,6 @@ bail_die:
return -1;
}

static int
__lws_service_timeout_check(struct lws *wsi, time_t sec)
{
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
int n = 0;
#endif

(void)n;

/*
 * if we went beyond the allowed time, kill the
 * connection
 */
if (lws_compare_time_t(wsi->context, sec, wsi->pending_timeout_set) >
wsi->pending_timeout_limit) {

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
if (wsi->desc.sockfd != LWS_SOCK_INVALID &&
wsi->position_in_fds_table >= 0)
n = pt->fds[wsi->position_in_fds_table].events;
#endif

lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_TIMEOUTS, 1);

/* no need to log normal idle keepalive timeout */
// if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
lwsl_info("wsi %p: TIMEDOUT WAITING on %d "
"(did hdr %d, ah %p, wl %d, pfd "
"events %d) %llu vs %llu\n",
(void *)wsi, wsi->pending_timeout,
wsi->hdr_parsing_completed, wsi->http.ah,
pt->http.ah_wait_list_length, n,
(unsigned long long)sec,
(unsigned long long)wsi->pending_timeout_limit);
#if defined(LWS_WITH_CGI)
if (wsi->http.cgi)
lwsl_notice("CGI timeout: %s\n", wsi->http.cgi->summary);
#endif
#else
lwsl_info("wsi %p: TIMEDOUT WAITING on %d ", (void *)wsi,
wsi->pending_timeout);
#endif

/* cgi timeout */
if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
/*
 * Since he failed a timeout, he already had a chance to
 * do something and was unable to... that includes
 * situations like half closed connections. So process
 * this "failed timeout" close as a violent death and
 * don't try to do protocol cleanup like flush partials.
 */
wsi->socket_is_permanently_unusable = 1;
if (lwsi_state(wsi) == LRS_WAITING_SSL && wsi->protocol)
wsi->protocol->callback(wsi,
LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
wsi->user_space,
(void *)"Timed out waiting SSL", 21);

__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");

return 1;
}

return 0;
}

int
lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len)
{

@@ -600,7 +531,7 @@ lws_service_periodic_checks(struct lws_context *context,
{
struct lws_context_per_thread *pt = &context->pt[tsi];
struct lws_timed_vh_protocol *tmr;
lws_sockfd_type our_fd = 0, tmp_fd;
lws_sockfd_type our_fd = 0;
struct lws *wsi;
int timed_out = 0;
lws_usec_t usnow;

@@ -624,36 +555,13 @@ lws_service_periodic_checks(struct lws_context *context,
 * at boot, and got initialized a little later
 */
if (context->time_up < 1464083026 && now > 1464083026)
context->time_up = now / LWS_US_PER_SEC;
context->time_up = now;

if (context->last_timeout_check_s &&
now - context->last_timeout_check_s > 100) {
/*
 * There has been a discontiguity. Any stored time that is
 * less than context->time_discontiguity should have context->
 * time_fixup added to it.
 *
 * Some platforms with no RTC will experience this as a normal
 * event when ntp sets their clock, but we can have started
 * long before that with a 0-based unix time.
 */

context->time_discontiguity = now;
context->time_fixup = now - context->last_timeout_check_s;

lwsl_notice("time discontiguity: at old time %llus, "
"new time %llus: +%llus\n",
(unsigned long long)context->last_timeout_check_s,
(unsigned long long)context->time_discontiguity,
(unsigned long long)context->time_fixup);

context->last_timeout_check_s = now - 1;
}

__lws_seq_timeout_check(pt, usnow);
lws_pt_do_pending_sequencer_events(pt);

if (!lws_compare_time_t(context, context->last_timeout_check_s, now))
if (context->last_timeout_check_s == now)
return 0;

context->last_timeout_check_s = now;

@@ -687,25 +595,8 @@ lws_service_periodic_checks(struct lws_context *context,
if (pollfd)
our_fd = pollfd->fd;

/*
 * Phase 1: check every wsi on our pt's timeout check list
 */

lws_pt_lock(pt, __func__);

lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
lws_dll2_get_head(&context->pt[tsi].dll_timeout_owner)) {
wsi = lws_container_of(d, struct lws, dll_timeout);

tmp_fd = wsi->desc.sockfd;
if (__lws_service_timeout_check(wsi, now)) {
/* he did time out... */
if (tmp_fd == our_fd)
/* it was the guy we came to service! */
timed_out = 1;
/* he's gone, no need to mark as handled */
}
} lws_end_foreach_dll_safe(d, d1);

#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
/*

@@ -721,7 +612,7 @@ lws_service_periodic_checks(struct lws_context *context,

if (!ah->in_use || !ah->wsi || !ah->assigned ||
(ah->wsi->vhost &&
lws_compare_time_t(context, now, ah->assigned) <
(now - ah->assigned) <
ah->wsi->vhost->timeout_secs_ah_idle + 360)) {
ah = ah->next;
continue;

@@ -949,6 +840,9 @@ vh_timers_done:
#if defined(LWS_ROLE_CGI)
role_ops_cgi.periodic_checks(context, tsi, now);
#endif
#if defined(LWS_ROLE_DBUS)
role_ops_dbus.periodic_checks(context, tsi, now);
#endif

#if defined(LWS_WITH_TLS)
/*
lib/core-net/sorted-usec-list.c (new file, 99 lines)
@@ -0,0 +1,99 @@
/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010-2019 Andy Green <andy@warmcat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation:
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include "core/private.h"

int
__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
lws_usec_t us)
{
lws_dll2_remove(&sul->list);

if (us == LWS_SET_TIMER_USEC_CANCEL) {
/* we are clearing the timeout */
sul->us = 0;

return 0;
}

sul->us = lws_now_usecs() + us;

/*
 * we sort the pt's list of sequencers with pending timeouts, so it's
 * cheap to check it every second
 */

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
lws_dll2_get_head(own)) {
/* .list is always first member in lws_sorted_usec_list_t */
lws_sorted_usec_list_t *sul1 = (lws_sorted_usec_list_t *)p;

assert(sul1->us); /* shouldn't be on the list otherwise */
assert(sul != sul1);
if (sul1->us >= sul->us) {
/* drop us in before this guy */
lws_dll2_add_before(&sul->list, &sul1->list);

return 0;
}
} lws_end_foreach_dll_safe(p, tp);

/*
 * Either nobody on the list yet to compare him to, or he's the
 * furthest away timeout... stick him at the tail end
 */

lws_dll2_add_tail(&sul->list, own);

return 0;
}

lws_usec_t
__lws_sul_check(lws_dll2_owner_t *own, sul_cb_t cb, lws_usec_t usnow)
{
lws_usec_t future_us = 0;

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
lws_dll2_get_head(own)) {
/* .list is always first member in lws_sorted_usec_list_t */
lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)p;

assert(sul->us); /* shouldn't be on the list otherwise */
if (sul->us <= usnow) {
/* seq has timed out... remove him from timeout list */
lws_dll2_remove(&sul->list);
sul->us = 0;
if (cb)
cb(sul);
} else {
/*
 * No need to look further if we met one later than now:
 * the list is sorted in ascending time order
 */
future_us = sul->us - usnow;

break;
}

} lws_end_foreach_dll_safe(p, tp);

return future_us;
}
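The remaining hunks convert each per-pt wait list to this pair of helpers. As a rough usage sketch drawn from the hunks below (names as they appear in the diff; locking and error handling omitted), arming and servicing a wait reduces to:

	/* arm: e.g. the wsi second-resolution timeouts in __lws_set_timeout() */
	__lws_sul_insert(&pt->dll_timeout_owner, &wsi->sul_timeout,
			 ((lws_usec_t)secs) * LWS_US_PER_SEC);

	/* service: fire expired entries via the per-list callback and learn
	 * how many us remain until the next pending one */
	lws_usec_t next = __lws_sul_check(&pt->dll_timeout_owner,
					  lws_wsitimeout_sul_check_cb,
					  lws_now_usecs());

The same pattern is used for the hrtimer (sul_hrtimer on dll_hrtimer_owner) and the sequencer timeouts (seq->sul on seq_to_owner), and the per-list results feed __lws_event_service_get_earliest_wake() so the event loop sleeps until the soonest of them.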
@@ -24,7 +24,7 @@
void
__lws_remove_from_timeout_list(struct lws *wsi)
{
lws_dll2_remove(&wsi->dll_timeout);
lws_dll2_remove(&wsi->sul_timeout.list);
}

void

@@ -43,36 +43,7 @@ __lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
{
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

lws_dll2_remove(&wsi->dll_hrtimer);

if (us == LWS_SET_TIMER_USEC_CANCEL)
return;

wsi->pending_timer = lws_now_usecs() + us;

/*
 * we sort the hrtimer list with the earliest timeout first
 */

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
pt->dll_hrtimer_head.head) {
struct lws *w = lws_container_of(p, struct lws, dll_hrtimer);

assert(w->pending_timer); /* shouldn't be on the list otherwise */
if (w->pending_timer >= wsi->pending_timer) {
/* drop us in before this guy */
lws_dll2_add_before(&wsi->dll_hrtimer, &w->dll_hrtimer);

return;
}
} lws_end_foreach_dll_safe(p, tp);

/*
 * Either nobody on the list yet to compare him to, or he's the
 * longest timeout... stick him at the tail end
 */

lws_dll2_add_tail(&wsi->dll_hrtimer, &pt->dll_hrtimer_head);
__lws_sul_insert(&pt->dll_hrtimer_owner, &wsi->sul_hrtimer, us);
}

LWS_VISIBLE void

@@ -81,77 +52,50 @@ lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs)
__lws_set_timer_usecs(wsi, usecs);
}

static void
lws_hrtimer_sul_check_cb(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws, sul_hrtimer);

if (wsi->protocol &&
wsi->protocol->callback(wsi, LWS_CALLBACK_TIMER,
wsi->user_space, NULL, 0))
__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"hrtimer cb errored");
}

/* return 0 if nothing pending, or the number of us before the next event */

lws_usec_t
__lws_hrtimer_service(struct lws_context_per_thread *pt, lws_usec_t t)
{
struct lws *wsi;

lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
lws_dll2_get_head(&pt->dll_hrtimer_head)) {
wsi = lws_container_of(d, struct lws, dll_hrtimer);

/*
 * if we met one in the future, we are done, because the list
 * is sorted by time in the future.
 */
if (wsi->pending_timer > t)
break;

lws_set_timer_usecs(wsi, LWS_SET_TIMER_USEC_CANCEL);

/* it's time for the timer to be serviced */

if (wsi->protocol &&
wsi->protocol->callback(wsi, LWS_CALLBACK_TIMER,
wsi->user_space, NULL, 0))
__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS,
"timer cb errored");
} lws_end_foreach_dll_safe(d, d1);

/* return an estimate how many us until next timer hit */

if (!lws_dll2_get_head(&pt->dll_hrtimer_head))
return 0; /* there is nothing pending */

wsi = lws_container_of(lws_dll2_get_head(&pt->dll_hrtimer_head),
struct lws, dll_hrtimer);

t = lws_now_usecs();
if (wsi->pending_timer <= t) /* in the past */
return 1;

return wsi->pending_timer - t; /* at least 1 */
return __lws_sul_check(&pt->dll_hrtimer_owner,
lws_hrtimer_sul_check_cb, t);
}

void
__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
time_t now;

time(&now);
__lws_sul_insert(&pt->dll_timeout_owner, &wsi->sul_timeout,
((lws_usec_t)secs) * LWS_US_PER_SEC);

lwsl_debug("%s: %p: %d secs, reason %d\n", __func__, wsi, secs, reason);

wsi->pending_timeout_limit = secs;
wsi->pending_timeout_set = now;
wsi->pending_timeout = reason;

lws_dll2_remove(&wsi->dll_timeout);
if (!reason)
return;

lws_dll2_add_head(&wsi->dll_timeout, &pt->dll_timeout_owner);
}

LWS_VISIBLE void
void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

// lwsl_info("%s: %p: %d %d\n", __func__, wsi, reason, secs);
if (!secs) {
lws_remove_from_timeout_list(wsi);

return;
}

if (secs == LWS_TO_KILL_SYNC) {
lws_remove_from_timeout_list(wsi);

@@ -169,6 +113,80 @@ lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
lws_pt_unlock(pt);
}

void
lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
{
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

if (!us) {
lws_remove_from_timeout_list(wsi);

return;
}

lws_pt_lock(pt, __func__);
__lws_sul_insert(&pt->dll_timeout_owner, &wsi->sul_timeout, us);

lwsl_debug("%s: %p: %llu us, reason %d\n", __func__, wsi,
(unsigned long long)us, reason);

wsi->pending_timeout = reason;
lws_pt_unlock(pt);
}

static void
lws_wsitimeout_sul_check_cb(lws_sorted_usec_list_t *sul)
{
struct lws *wsi = lws_container_of(sul, struct lws, sul_timeout);
struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

lws_stats_atomic_bump(wsi->context, pt, LWSSTATS_C_TIMEOUTS, 1);

/* no need to log normal idle keepalive timeout */
// if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
#if defined(LWS_ROLE_H1) || defined(LWS_ROLE_H2)
lwsl_info("wsi %p: TIMEDOUT WAITING on %d "
"(did hdr %d, ah %p, wl %d\n",
(void *)wsi, wsi->pending_timeout,
wsi->hdr_parsing_completed, wsi->http.ah,
pt->http.ah_wait_list_length);
#if defined(LWS_WITH_CGI)
if (wsi->http.cgi)
lwsl_notice("CGI timeout: %s\n", wsi->http.cgi->summary);
#endif
#else
lwsl_info("wsi %p: TIMEDOUT WAITING on %d ", (void *)wsi,
wsi->pending_timeout);
#endif
/* cgi timeout */
if (wsi->pending_timeout != PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE)
/*
 * Since he failed a timeout, he already had a chance to
 * do something and was unable to... that includes
 * situations like half closed connections. So process
 * this "failed timeout" close as a violent death and
 * don't try to do protocol cleanup like flush partials.
 */
wsi->socket_is_permanently_unusable = 1;
if (lwsi_state(wsi) == LRS_WAITING_SSL && wsi->protocol)
wsi->protocol->callback(wsi,
LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
wsi->user_space,
(void *)"Timed out waiting SSL", 21);

__lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "timeout");
}

/* return 0 if nothing pending, or the number of us before the next event */

lws_usec_t
__lws_wsitimeout_service(struct lws_context_per_thread *pt, lws_usec_t t)
{
return __lws_sul_check(&pt->dll_timeout_owner,
lws_wsitimeout_sul_check_cb, t);
}

/* requires context + vh lock */

int

@@ -207,7 +225,9 @@ lws_timed_callback_vh_protocol(struct lws_vhost *vh,

p->protocol = prot;
p->reason = reason;
p->time = lws_now_secs() + secs;
p->time = (lws_now_usecs() / LWS_US_PER_SEC) + secs;

// lwsl_notice("%s: %s.%s %d\n", __func__, vh->name, prot->name, secs);

lws_vhost_lock(vh); /* vhost ---------------------------------------- */
p->next = vh->timed_vh_protocol_list;
@@ -100,10 +100,19 @@ int lws_open(const char *__file, int __oflag, ...)
LWS_VISIBLE lws_usec_t
lws_now_usecs(void)
{
#if defined(LWS_HAVE_CLOCK_GETTIME)
struct timespec ts;

if (clock_gettime(CLOCK_MONOTONIC, &ts))
return 0;

return (ts.tv_sec * LWS_US_PER_SEC) + (ts.tv_nsec / LWS_NS_PER_US);
#else
struct timeval now;

gettimeofday(&now, NULL);
return (now.tv_sec * 1000000ll) + now.tv_usec;
#endif
}
#endif

@@ -158,17 +167,6 @@ lws_now_secs(void)
return tv.tv_sec;
}

LWS_VISIBLE LWS_EXTERN int
lws_compare_time_t(struct lws_context *context, time_t t1, time_t t2)
{
if (t1 < context->time_discontiguity)
t1 += context->time_fixup;

if (t2 < context->time_discontiguity)
t2 += context->time_fixup;

return (int)(t1 - t2);
}
#endif
LWS_VISIBLE extern const char *
lws_canonical_hostname(struct lws_context *context)
@@ -79,12 +79,21 @@ lws_dll2_add_before(struct lws_dll2 *d, struct lws_dll2 *after)
return;
}

if (lws_dll2_is_detached(after)) {
assert(0); /* can't add after something detached */
return;
}

d->owner = owner;

/* we need to point to after */
/* we need to point forward to after */

d->next = after;

/* we need to point back to after->prev */

d->prev = after->prev;

/* guy that used to point to after, needs to point to us */

if (after->prev)

@@ -170,3 +179,22 @@ lws_dll2_owner_clear(struct lws_dll2_owner *d)
d->tail = NULL;
d->count = 0;
}

#if defined(_DEBUG)

void
lws_dll2_describe(lws_dll2_owner_t *owner, const char *desc)
{
int n = 1;

lwsl_notice("%s: %s: owner %p: count %d, head %p, tail %p\n",
__func__, desc, owner, owner->count, owner->head, owner->tail);

lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
lws_dll2_get_head(owner)) {
lwsl_notice("%s: %d: %p: owner %p, prev %p, next %p\n",
__func__, n++, p, p->owner, p->prev, p->next);
} lws_end_foreach_dll_safe(p, tp);
}

#endif
@@ -259,8 +259,6 @@ struct lws_context {
time_t last_timeout_check_s;
time_t last_ws_ping_pong_check_s;
time_t time_up;
time_t time_discontiguity;
time_t time_fixup;
const struct lws_plat_file_ops *fops;
struct lws_plat_file_ops fops_platform;
struct lws_context **pcontext_finalize;
@@ -28,6 +28,11 @@ __lws_event_service_get_earliest_wake(struct lws_context_per_thread *pt,
lws_usec_t t, us = 0;
char seen = 0;

t = __lws_wsitimeout_service(pt, usnow);
if (t) {
us = t;
seen = 1;
}
t = __lws_hrtimer_service(pt, usnow);
if (t && (!seen || t < us)) {
us = t;
@@ -35,8 +35,8 @@ lws_poll_listen_fd(struct lws_pollfd *fd)
LWS_EXTERN int
_lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
{
lws_usec_t timeout_us = ((lws_usec_t)timeout_ms) * LWS_US_PER_MS;
volatile struct lws_foreign_thread_pollfd *ftp, *next;
lws_usec_t timeout_us = timeout_ms * LWS_US_PER_MS;
volatile struct lws_context_per_thread *vpt;
struct lws_context_per_thread *pt;
int n = -1, m, c;
@@ -288,6 +288,7 @@ lws_dbus_add_timeout(DBusTimeout *t, void *data)
dbt->fire = ti + (ms < 1000);
dbt->timer_list.prev = NULL;
dbt->timer_list.next = NULL;
dbt->timer_list.owner = NULL;
lws_dll2_add_head(&dbt->timer_list, &pt->dbus.timer_list_owner);

ctx->timeouts++;
@@ -567,7 +567,7 @@ bail3:
lwsl_info("reason: %s\n", cce);
wsi->protocol->callback(wsi,
LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
wsi->user_space, (void *)cce, cce ? strlen(cce) : 0);
wsi->user_space, (void *)cce, strlen(cce));
wsi->already_did_cce = 1;
lws_close_free_wsi(wsi, LWS_CLOSE_STATUS_NOSTATUS, "cbail3");
return -1;
@@ -1507,8 +1507,7 @@ rops_periodic_checks_ws(struct lws_context *context, int tsi, time_t now)
!wsi->socket_is_permanently_unusable &&
!wsi->ws->send_check_ping &&
wsi->ws->time_next_ping_check &&
lws_compare_time_t(context, now,
wsi->ws->time_next_ping_check) >
(now - wsi->ws->time_next_ping_check) >
context->ws_ping_pong_interval) {

lwsl_info("%s: req pp on wsi %p\n",
@@ -320,7 +320,7 @@ tops_periodic_housekeeping_mbedtls(struct lws_context *context, time_t now)
{
int n;

n = lws_compare_time_t(context, now, context->tls.last_cert_check_s);
n = (now - context->tls.last_cert_check_s);
if ((!context->tls.last_cert_check_s || n > (24 * 60 * 60)) &&
!lws_tls_check_all_cert_lifetimes(context))
context->tls.last_cert_check_s = now;
@@ -511,7 +511,7 @@ tops_periodic_housekeeping_openssl(struct lws_context *context, time_t now)
{
int n;

n = lws_compare_time_t(context, now, context->tls.last_cert_check_s);
n = (now - context->tls.last_cert_check_s);
if ((!context->tls.last_cert_check_s || n > (24 * 60 * 60)) &&
!lws_tls_check_all_cert_lifetimes(context))
context->tls.last_cert_check_s = now;
@@ -226,7 +226,8 @@ int main(int argc, const char **argv)
info.options = 0;
}

if (concurrent > (int)LWS_ARRAY_SIZE(clients)) {
if (concurrent < 0 ||
concurrent > (int)LWS_ARRAY_SIZE(clients)) {
lwsl_err("%s: -c %d larger than max concurrency %d\n", __func__,
concurrent, (int)LWS_ARRAY_SIZE(clients));