sul: multiple timer domains
Adapt the pt sul owner list to be an array, and define two different lists: one that acts like before and is the default for existing users, and another that has the ability to cooperate with systemwide suspend, restricting the interval spent suspended so that the system will wake in time for the earliest thing on this wake-suspend sul list.

Clean the api a bit and add lws_sul_cancel() that only needs the sul as the argument.

Add a flag for client creation info to indicate that this client connection is important enough that, eg, validity checking it to detect silently dead connections should go on the wake-suspend sul list.  That flag is exposed in secure streams policy so it can be added to a streamtype with "swake_validity": true

Deprecate out the old vhost timer stuff that predates sul.  Add a flag LWS_WITH_DEPRECATED_THINGS in cmake so users can get it back temporarily before it will be removed in v4.2.

Adapt all remaining in-tree users of it to use explicit suls.
parent d5773c01be
commit 286cf4357a
57 changed files with 568 additions and 414 deletions
@@ -218,7 +218,7 @@ option(LWS_WITH_LEJP_CONF "With LEJP configuration parser as used by lwsws" OFF)
 option(LWS_WITH_ZLIB "Include zlib support (required for extensions)" OFF)
 option(LWS_WITH_BUNDLED_ZLIB "Use bundled zlib version (Windows only)" ${LWS_WITH_BUNDLED_ZLIB_DEFAULT})
 option(LWS_WITH_MINIZ "Use miniz instead of zlib" OFF)
 option(LWS_WITH_DEPRECATED_LWS_DLL "Migrate to lws_dll2 instead ASAP" OFF)
+option(LWS_WITH_DEPRECATED_THINGS "Temporary workaround for deprecated apis" OFF)
 option(LWS_WITH_SEQUENCER "lws_seq_t support" ON)
 option(LWS_WITH_EXTERNAL_POLL "Support external POLL integration using callback messages (not recommended)" OFF)
 option(LWS_WITH_LWS_DSH "Support lws_dsh_t Disordered Shared Heap" OFF)
@@ -60,3 +60,40 @@ In the case you destroy your object and need to cancel the scheduled callback, u
 lws_sul_schedule(context, 0, &sul_stagger, NULL, LWS_SET_TIMER_USEC_CANCEL);
 ```
 
+# lws_sul2 and system suspend
+
+In v4.1, alongside the existing `lws_sul` apis there is a refactor and additional
+functionality aimed at negotiating system suspend, while remaining completely
+backwards-compatible with v3.2+ lws_sul apis.
+
+Devicewide suspend is basically the withdrawal of CPU availability for an unbounded
+amount of time, so what may have been scheduled by the user code may miss its time
+slot because the cpu was down and nothing was getting serviced.  Whether that is
+actively desirable, OK, a big disaster, or a failure that will be corrected at other
+layers at the cost of, eg, some additional latency, depends on the required device
+behaviours, the function of the user code that was scheduled, and its meaning to
+the system.
+
+Before v4.1, lws just offered the same scheduling service for everything, both
+internal and arranged by user code, and had no way to know what is critical for the
+device to operate as intended, and so must force wake from suspend, or for which
+scheduled event 'failure [to get the event] is an option'.
+
+For example, locally-initiated periodic keepalive pings not happening may allow
+persistently dead (ie, no longer passing data) connections to go undetected, but
+eventually, when suspend ends for another reason, the locally-initiated PING probes
+will resume, the situation will be discovered and, if the connectivity allows,
+corrected.
+
+If the device's function can handle the latency of there being no connectivity in
+suspend under those conditions until it wakes for another reason, it's OK for these
+kinds of timeouts to be suppressed during suspend and basically take the power saving
+instead.  If for a particular device it's intolerable to ever have a silently dead
+connection for more than a very short time compared to suspend durations, then these
+kinds of timeouts must have the priority to wake the whole device from suspend so
+they continue to operate unimpeded.
+
+That is just one example; lws offers generic scheduler services the user code can
+exploit for any purpose, including mission-critical ones.  The changes give the user
+code a way to tell lws whether a particular scheduled event is important enough to
+the system operation to wake the system from devicewide suspend.
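As a minimal sketch of how the wake-capable domain added here can be used from user code (the callback, interval and `context` variable are illustrative, not from this commit):

```c
#include <libwebsockets.h>

static struct lws_context *context;	/* assumed set up elsewhere */
static lws_sorted_usec_list_t sul_keepalive;

static void
keepalive_cb(lws_sorted_usec_list_t *sul)
{
	/* ... do the mission-critical periodic work here ... */

	/*
	 * reschedule on the wake-capable owner: the suspend interval is
	 * trimmed so the device is awake again in time for this event
	 */
	lws_sul_schedule_wakesuspend(context, 0, &sul_keepalive,
				     keepalive_cb, 30 * LWS_US_PER_SEC);
}
```

Events scheduled with plain `lws_sul_schedule()` stay on the default owner and may simply be late if the device was suspended across their deadline.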
@@ -58,7 +58,9 @@ enum lws_client_connect_ssl_connection_flags {
 	 * HTTP/2: always possible... uses parallel streams
 	 */
 	LCCSCF_MUXABLE_STREAM			= (1 << 17),
-	LCCSCF_H2_PRIOR_KNOWLEDGE		= (1 << 18)
+	LCCSCF_H2_PRIOR_KNOWLEDGE		= (1 << 18),
+	LCCSCF_WAKE_SUSPEND__VALIDITY		= (1 << 19),
+	/* our validity checks are important enough to wake from suspend */
 };
 
 /** struct lws_client_connect_info - parameters to connect with when using
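For a direct client connection, the new flag is OR'd into `ssl_connection` like the other LCCSCF flags; a hedged sketch, with the endpoint and `context` as placeholders and other members elided:

```c
struct lws_client_connect_info i;

memset(&i, 0, sizeof(i));
i.context	 = context;		/* assumed existing lws_context */
i.address	 = "example.com";	/* placeholder endpoint */
i.port		 = 443;
/* this connection's validity pings should wake the device from suspend */
i.ssl_connection = LCCSCF_USE_SSL | LCCSCF_WAKE_SUSPEND__VALIDITY;

if (!lws_client_connect_via_info(&i))
	lwsl_err("client connect failed\n");
```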
@@ -116,6 +116,10 @@ enum {
 	/**< set up lws_system client cert */
 	LWSSSPOLF_LOCAL_SINK			= (1 << 13),
 	/**< expected to bind to a local sink only */
+	LWSSSPOLF_WAKE_SUSPEND__VALIDITY	= (1 << 14),
+	/**< this stream's idle validity checks are critical enough we
+	 * should arrange to wake from suspend to perform them
+	 */
 };
 
 typedef struct lws_ss_trust_store {
@@ -87,6 +87,8 @@ typedef struct lws_seq_info {
 	lws_seq_event_cb	cb;		/* seq callback */
 	const char		*name;		/* seq name */
 	const lws_retry_bo_t	*retry;		/* retry policy */
+	uint8_t			wakesuspend:1;	/* important enough to
+						 * wake system */
 } lws_seq_info_t;
 
 /**
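A sequencer opts in at creation time via this new bitfield; a sketch with hypothetical names (`my_seq`, `my_seq_cb`, `my_retry_policy` and `context` are assumptions, not from this commit):

```c
lws_seq_t *seq;
struct my_seq *ms;			/* hypothetical user object */
lws_seq_info_t i;

memset(&i, 0, sizeof(i));
i.context	= context;		/* assumed existing lws_context */
i.user_size	= sizeof(*ms);
i.puser		= (void **)&ms;
i.cb		= my_seq_cb;		/* hypothetical lws_seq_event_cb */
i.name		= "my-seq";
i.retry		= &my_retry_policy;	/* hypothetical lws_retry_bo_t */
i.wakesuspend	= 1;			/* timeouts should wake the device */

seq = lws_seq_create(&i);
```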
@@ -169,6 +169,10 @@ typedef struct lws_system_ops {
 	 * Start the check that proceeds asynchronously, and report the results
 	 * by calling lws_captive_portal_detect_result() api
 	 */
+
+	uint32_t	wake_latency_us;
+	/**< time taken for this device to wake from suspend, in us
+	 */
 } lws_system_ops_t;
 
 /**
@@ -146,6 +146,8 @@ lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us);
 LWS_VISIBLE LWS_EXTERN void
 lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs);
 
+#if defined(LWS_WITH_DEPRECATED_THINGS)
+
 /*
  * lws_timed_callback_vh_protocol() - calls back a protocol on a vhost after
  * the specified delay in seconds

@@ -155,6 +157,8 @@ lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs);
  * \param reason: callback reason
  * \param secs: how many seconds in the future to do the callback.
  *
+ * DEPRECATED since v4.1
+ *
  * Callback the specified protocol with a fake wsi pointing to the specified
  * vhost and protocol, with the specified reason, at the specified time in the
  * future.

@@ -168,7 +172,8 @@ lws_set_timer_usecs(struct lws *wsi, lws_usec_t usecs);
 LWS_VISIBLE LWS_EXTERN int
 lws_timed_callback_vh_protocol(struct lws_vhost *vh,
 			       const struct lws_protocols *prot,
-			       int reason, int secs);
+			       int reason, int secs)
+LWS_WARN_DEPRECATED;
 
 /*
  * lws_timed_callback_vh_protocol_us() - calls back a protocol on a vhost after

@@ -179,6 +184,8 @@ lws_timed_callback_vh_protocol(struct lws_vhost *vh,
  * \param reason: callback reason
  * \param us: how many us in the future to do the callback.
  *
+ * DEPRECATED since v4.1
+ *
  * Callback the specified protocol with a fake wsi pointing to the specified
  * vhost and protocol, with the specified reason, at the specified time in the
  * future.

@@ -192,7 +199,10 @@ lws_timed_callback_vh_protocol(struct lws_vhost *vh,
 LWS_VISIBLE LWS_EXTERN int
 lws_timed_callback_vh_protocol_us(struct lws_vhost *vh,
 				  const struct lws_protocols *prot, int reason,
-				  lws_usec_t us);
+				  lws_usec_t us)
+LWS_WARN_DEPRECATED;
+
+#endif
 
 struct lws_sorted_usec_list;
 
@@ -200,34 +210,98 @@ typedef void (*sul_cb_t)(struct lws_sorted_usec_list *sul);
 
 typedef struct lws_sorted_usec_list {
 	struct lws_dll2	list;	/* simplify the code by keeping this at start */
-	sul_cb_t	cb;
 	lws_usec_t	us;
+	sul_cb_t	cb;
+	uint32_t	latency_us;	/* us it may safely be delayed */
 } lws_sorted_usec_list_t;
 
+/*
+ * There are multiple sul owners to allow accounting for, a) events that must
+ * wake from suspend, and b) events that can be missed due to suspend
+ */
+#define LWS_COUNT_PT_SUL_OWNERS		2
+
+#define LWSSULLI_MISS_IF_SUSPENDED	0
+#define LWSSULLI_WAKE_IF_SUSPENDED	1
+
 /*
- * lws_sul_schedule() - schedule a callback
+ * lws_sul2_schedule() - schedule a callback
  *
  * \param context: the lws_context
 * \param tsi: the thread service index (usually 0)
+ * \param flags: LWSSULLI_...
 * \param sul: pointer to the sul element
- * \param cb: the scheduled callback
- * \param us: the delay before the callback arrives, or
- *		LWS_SET_TIMER_USEC_CANCEL to cancel it.
 *
 * Generic callback-at-a-later time function.  The callback happens on the
 * event loop thread context.
 *
 * Although the api has us resolution, the actual resolution depends on the
- * platform and is commonly 1ms.
+ * platform and may be, eg, 1ms.
 *
 * This doesn't allocate and doesn't fail.
 *
- * You can call it again with another us value to change the delay.
+ * If flags contains LWSSULLI_WAKE_IF_SUSPENDED, the scheduled event is placed
+ * on a sul owner list that, if the system has entered low power suspend mode,
+ * tries to arrange that the system should wake from platform suspend just
+ * before the event is due.  Scheduled events without this flag will be missed
+ * in the case the system is in suspend and nothing else happens to have woken
+ * it.
+ *
+ * You can call it again with another us value to change the delay or move the
+ * event to a different owner (ie, wake or miss on suspend).
 */
 LWS_VISIBLE LWS_EXTERN void
-lws_sul_schedule(struct lws_context *context, int tsi,
-		 lws_sorted_usec_list_t *sul, sul_cb_t cb, lws_usec_t us);
+lws_sul2_schedule(struct lws_context *context, int tsi, int flags,
+		  lws_sorted_usec_list_t *sul);
+
+/*
+ * lws_sul_cancel() - cancel scheduled callback
+ *
+ * \param sul: pointer to the sul element
+ *
+ * If it's scheduled, remove the sul from its owning sorted list.
+ * If not scheduled, it's a NOP.
+ */
+LWS_VISIBLE LWS_EXTERN void
+lws_sul_cancel(lws_sorted_usec_list_t *sul);
+
+/*
+ * lws_sul_earliest_wakeable_event() - get earliest wake-from-suspend event
+ *
+ * \param ctx: the lws context
+ * \param pearliest: pointer to lws_usec_t to take the result
+ *
+ * Either returns 1 if no pending event, or 0 and sets *pearliest to the
+ * MONOTONIC time of the current earliest next expected event.
+ */
+LWS_VISIBLE LWS_EXTERN int
+lws_sul_earliest_wakeable_event(struct lws_context *ctx, lws_usec_t *pearliest);
+
+/*
+ * For backwards compatibility
+ *
+ * If us is LWS_SET_TIMER_USEC_CANCEL, the sul is removed from the scheduler.
+ * New code can use lws_sul_cancel()
+ */
+
+#define lws_sul_schedule(ctx, tsi, sul, _cb, _us) {\
+	if ((lws_usec_t)_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL) \
+		lws_sul_cancel(sul); \
+	else { \
+		(sul)->cb = _cb; \
+		(sul)->us = lws_now_usecs() + _us; \
+		lws_sul2_schedule(ctx, tsi, LWSSULLI_MISS_IF_SUSPENDED, sul); \
+	}}
+
+#define lws_sul_schedule_wakesuspend(ctx, tsi, sul, _cb, _us) {\
+	if ((lws_usec_t)_us == (lws_usec_t)LWS_SET_TIMER_USEC_CANCEL) \
+		lws_sul_cancel(sul); \
+	else { \
+		(sul)->cb = _cb; \
+		(sul)->us = lws_now_usecs() + _us; \
+		lws_sul2_schedule(ctx, tsi, LWSSULLI_WAKE_IF_SUSPENDED, sul); \
+	}}
+
 /*
  * lws_validity_confirmed() - reset the validity timer for a network connection
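How a system integration might consume `lws_sul_earliest_wakeable_event()`: before committing to suspend, query the earliest wake-capable event and cap the suspend interval accordingly, allowing for the device's `wake_latency_us` from `lws_system_ops_t`.  A sketch; `platform_suspend_for_us()` is a hypothetical platform call, not part of lws:

```c
static void
try_to_suspend(struct lws_context *ctx, const lws_system_ops_t *ops)
{
	lws_usec_t earliest, budget;

	if (lws_sul_earliest_wakeable_event(ctx, &earliest)) {
		/* nothing on any wake-capable list: suspend unbounded */
		platform_suspend_for_us(0);
		return;
	}

	/* leave time for the platform itself to come back up */
	budget = earliest - lws_now_usecs() - ops->wake_latency_us;
	if (budget > 0)
		platform_suspend_for_us(budget);
	/* else: a wake-capable event is (nearly) due, don't suspend now */
}
```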
@@ -257,10 +331,9 @@ lws_validity_confirmed(struct lws *wsi);
 */
 
 LWS_VISIBLE LWS_EXTERN int
-__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
-		 lws_usec_t us);
+__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul);
 
 LWS_VISIBLE LWS_EXTERN lws_usec_t
-__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow);
+__lws_sul_service_ripe(lws_dll2_owner_t *own, int own_len, lws_usec_t usnow);
 
 ///@}
@@ -77,6 +77,9 @@ lws_client_connect_via_info(const struct lws_client_connect_info *i)
 	wsi->detlat.earliest_write_req_pre_write = lws_now_usecs();
 #endif
 
+	if (i->ssl_connection & LCCSCF_WAKE_SUSPEND__VALIDITY)
+		wsi->conn_validity_wakesuspend = 1;
+
 	wsi->vhost = NULL;
 	if (!i->vhost) {
 		struct lws_vhost *v = i->context->vhost_list;
@@ -77,6 +77,10 @@ enum lws_ssl_capable_status {
 	LWS_SSL_CAPABLE_MORE_SERVICE	= -4, /* general retry */
 };
 
+#define __lws_sul_insert_us(owner, sul, _us) \
+	(sul)->us = lws_now_usecs() + _us; \
+	__lws_sul_insert(owner, sul)
+
 
 /*
  *
@@ -267,12 +271,11 @@ struct client_info_stash {
 
 #define LWS_H2_FRAME_HEADER_LENGTH 9
 
-int
-__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
-		 lws_usec_t us);
-
 lws_usec_t
-__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow);
+__lws_sul_service_ripe(lws_dll2_owner_t *own, int num_own, lws_usec_t usnow);
+
+#if defined(LWS_WITH_DEPRECATED_THINGS)
 
 struct lws_timed_vh_protocol {
 	struct lws_timed_vh_protocol	*next;
@@ -283,6 +286,8 @@ struct lws_timed_vh_protocol {
 	int			tsi_req;
 };
 
+#endif
+
 /*
  * lws_dsh
 */
@@ -367,7 +372,7 @@ struct lws_context_per_thread {
 	lws_dll2_owner_t	ss_client_owner;
 #endif
 
-	struct lws_dll2_owner	pt_sul_owner;
+	struct lws_dll2_owner	pt_sul_owner[LWS_COUNT_PT_SUL_OWNERS];
 
 #if defined (LWS_WITH_SEQUENCER)
 	lws_sorted_usec_list_t	sul_seq_heartbeat;
@@ -572,7 +577,9 @@ struct lws_vhost {
 	struct lws_vhost_tls	tls;
 #endif
 
+#if defined(LWS_WITH_DEPRECATED_THINGS)
 	struct lws_timed_vh_protocol *timed_vh_protocol_list;
+#endif
 	void			*user;
 
 	int			listen_port;
@@ -800,6 +807,7 @@ struct lws {
 	unsigned int validity_hup:1;
 	unsigned int skip_fallback:1;
 	unsigned int file_desc:1;
+	unsigned int conn_validity_wakesuspend:1;
 
 	unsigned int could_have_pending:1; /* detect back-to-back writes */
 	unsigned int outer_will_close:1;
@@ -1160,8 +1168,10 @@ void
 lws_sum_stats(const struct lws_context *ctx, struct lws_conn_stats *cs);
 #endif
 
+#if defined(LWS_WITH_DEPRECATED_THINGS)
 int
 __lws_timed_callback_remove(struct lws_vhost *vh, struct lws_timed_vh_protocol *p);
+#endif
 
 int LWS_WARN_UNUSED_RESULT
 __insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi);
@@ -53,7 +53,8 @@ typedef struct lws_sequencer {
 	lws_usec_t	time_created;
 	lws_usec_t	timeout; /* 0 or time we timeout */
 
-	char		going_down;
+	uint8_t		going_down:1;
+	uint8_t		wakesuspend:1;
 } lws_seq_t;
 
 #define QUEUE_SANITY_LIMIT 10
@@ -77,8 +78,8 @@ lws_sul_seq_heartbeat_cb(lws_sorted_usec_list_t *sul)
 
 	/* schedule the next one */
 
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_seq_heartbeat,
-			 LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_seq_heartbeat, LWS_US_PER_SEC);
 }
 
 int
@@ -87,8 +88,8 @@ lws_seq_pt_init(struct lws_context_per_thread *pt)
 	pt->sul_seq_heartbeat.cb = lws_sul_seq_heartbeat_cb;
 
 	/* schedule the first heartbeat */
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_seq_heartbeat,
-			 LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_seq_heartbeat, LWS_US_PER_SEC);
 
 	return 0;
 }
@@ -106,6 +107,7 @@ lws_seq_create(lws_seq_info_t *i)
 	seq->pt = pt;
 	seq->name = i->name;
 	seq->retry = i->retry;
+	seq->wakesuspend = i->wakesuspend;
 
 	*i->puser = (void *)&seq[1];
 
@@ -242,7 +244,8 @@ lws_seq_queue_event(lws_seq_t *seq, lws_seq_events_t e, void *data, void *aux)
 	lws_dll2_add_tail(&seqe->seq_event_list, &seq->seq_event_owner);
 
 	seq->sul_pending.cb = lws_seq_sul_pending_cb;
-	__lws_sul_insert(&seq->pt->pt_sul_owner, &seq->sul_pending, 1);
+	__lws_sul_insert_us(&seq->pt->pt_sul_owner[seq->wakesuspend],
+			    &seq->sul_pending, 1);
 
 	lws_pt_unlock(seq->pt); /* } pt ------------------------------------- */
 
@@ -300,8 +303,10 @@ lws_seq_timeout_us(lws_seq_t *seq, lws_usec_t us)
 {
 	seq->sul_timeout.cb = lws_seq_sul_timeout_cb;
 	/* list is always at the very top of the sul */
-	return __lws_sul_insert(&seq->pt->pt_sul_owner,
+	__lws_sul_insert_us(&seq->pt->pt_sul_owner[seq->wakesuspend],
 			(lws_sorted_usec_list_t *)&seq->sul_timeout.list, us);
+
+	return 0;
 }
 
 lws_seq_t *
@@ -43,69 +43,57 @@ sul_compare(const lws_dll2_t *d, const lws_dll2_t *i)
 	return 0;
 }
 
+/*
+ * notice owner was chosen already, and sul->us was already computed
+ */
+
 int
-__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul,
-		 lws_usec_t us)
+__lws_sul_insert(lws_dll2_owner_t *own, lws_sorted_usec_list_t *sul)
 {
-	lws_usec_t now = lws_now_usecs();
 	lws_dll2_remove(&sul->list);
 
-	if (us == LWS_SET_TIMER_USEC_CANCEL) {
-		/* we are clearing the timeout */
-		sul->us = 0;
-
-		return 0;
-	}
-
-	sul->us = now + us;
+	assert(sul->cb);
 
 	/*
 	 * we sort the pt's list of sequencers with pending timeouts, so it's
-	 * cheap to check it every second
+	 * cheap to check it every poll wait
 	 */
 
 	lws_dll2_add_sorted(&sul->list, own, sul_compare);
 
 #if 0 // defined(_DEBUG)
 	{
 		lws_usec_t worst = 0;
 		int n = 1;
 
 		lwsl_info("%s: own %p: count %d\n", __func__, own, own->count);
 
 		lws_start_foreach_dll_safe(struct lws_dll2 *, p, tp,
					   lws_dll2_get_head(own)) {
 			lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)p;
 			lwsl_info("%s: %d: %llu (+%lld)\n", __func__, n++,
				  (unsigned long long)sul->us,
				  (long long)(sul->us - now));
 			if (sul->us < worst) {
 				lwsl_err("%s: wrongly sorted sul entry!\n",
					 __func__);
 				assert(0);
 			}
 			worst = sul->us;
 		} lws_end_foreach_dll_safe(p, tp);
 	}
 #endif
 
 	return 0;
 }
 
 void
-lws_sul_schedule(struct lws_context *context, int tsi,
-		 lws_sorted_usec_list_t *sul, sul_cb_t cb, lws_usec_t us)
+lws_sul_cancel(lws_sorted_usec_list_t *sul)
+{
+	lws_dll2_remove(&sul->list);
+
+	/* we are clearing the timeout and leaving ourselves detached */
+	sul->us = 0;
+}
+
+void
+lws_sul2_schedule(struct lws_context *context, int tsi, int flags,
+		  lws_sorted_usec_list_t *sul)
 {
 	struct lws_context_per_thread *pt = &context->pt[tsi];
 
-	sul->cb = cb;
-
-	__lws_sul_insert(&pt->pt_sul_owner, sul, us);
+	__lws_sul_insert(
+		&pt->pt_sul_owner[!!(flags & LWSSULLI_WAKE_IF_SUSPENDED)], sul);
 }
 
+/*
+ * own points to the first in an array of length own_len
+ *
+ * While any sul list owner has a "ripe", ie, ready to handle sul we do them
+ * strictly in order of sul time.  When nobody has a ripe sul we return 0, if
+ * actually nobody has any sul, or the interval between usnow and the next
+ * earliest scheduled event on any list.
+ */
+
 lws_usec_t
-__lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow)
+__lws_sul_service_ripe(lws_dll2_owner_t *own, int own_len, lws_usec_t usnow)
 {
 	struct lws_context_per_thread *pt = (struct lws_context_per_thread *)
 			lws_container_of(own, struct lws_context_per_thread,
@@ -114,36 +102,94 @@ __lws_sul_service_ripe(lws_dll2_owner_t *own, lws_usec_t usnow)
 	if (pt->attach_owner.count)
 		lws_system_do_attach(pt);
 
-	while (lws_dll2_get_head(own)) {
-
-		/* .list is always first member in lws_sorted_usec_list_t */
-		lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
-						lws_dll2_get_head(own);
-
-		assert(sul->us); /* shouldn't be on the list otherwise */
-
-		if (sul->us > usnow)
-			return sul->us - usnow;
-
-		/* his moment has come... remove him from timeout list */
-		lws_dll2_remove(&sul->list);
-		sul->us = 0;
-		pt->inside_lws_service = 1;
-		sul->cb(sul);
-		pt->inside_lws_service = 0;
-
-		/*
-		 * The callback may have done any mixture of delete
-		 * and add sul entries... eg, close a wsi may pull out
-		 * multiple entries making iterating it statefully
-		 * unsafe. Always restart at the current head of list.
-		 */
-	}
+	/* must be at least 1 */
+	assert(own_len);
 
 	/*
-	 * Nothing left to take care of in the list (cannot return 0 otherwise
-	 * because we will service anything equal to usnow rather than return)
+	 * Of the own_len sul owning lists, the earliest next sul could be on
+	 * any of them.  We have to find it and handle each in turn until no
+	 * ripe sul left on any owning list, and we can exit.
+	 *
+	 * This ensures the ripe sul are handled strictly in the right order no
+	 * matter which owning list they are on.
 	 */
+
+	do {
+		lws_sorted_usec_list_t *hit = NULL;
+		lws_usec_t lowest;
+		int n = 0;
+
+		for (n = 0; n < own_len; n++) {
+			lws_sorted_usec_list_t *sul;
+			if (!own[n].count)
+				continue;
+			sul = (lws_sorted_usec_list_t *)
+						lws_dll2_get_head(&own[n]);
+
+			if (!hit || sul->us <= lowest) {
+				hit = sul;
+				lowest = sul->us;
+			}
+		}
+
+		if (!hit)
+			return 0;
+
+		if (lowest > usnow)
+			return lowest - usnow;
+
+		/* his moment has come... remove him from his owning list */
+
+		lws_dll2_remove(&hit->list);
+		hit->us = 0;
+
+		pt->inside_lws_service = 1;
+		hit->cb(hit);
+		pt->inside_lws_service = 0;
+
+	} while (1);
+
+	/* unreachable */
+
 	return 0;
 }
 
+/*
+ * Earliest wakeable event on any pt
+ */
+
+int
+lws_sul_earliest_wakeable_event(struct lws_context *ctx, lws_usec_t *pearliest)
+{
+	struct lws_context_per_thread *pt;
+	int n = 0, hit = -1;
+	lws_usec_t lowest;
+
+	for (n = 0; n < ctx->count_threads; n++) {
+		pt = &ctx->pt[n];
+
+		lws_pt_lock(pt, __func__);
+
+		if (pt->pt_sul_owner[LWSSULLI_WAKE_IF_SUSPENDED].count) {
+			lws_sorted_usec_list_t *sul = (lws_sorted_usec_list_t *)
+				lws_dll2_get_head(&pt->pt_sul_owner[
+						LWSSULLI_WAKE_IF_SUSPENDED]);
+
+			if (hit == -1 || sul->us < lowest) {
+				hit = n;
+				lowest = sul->us;
+			}
+		}
+
+		lws_pt_unlock(pt);
+	}
+
+	if (hit == -1)
+		/* there is no pending event */
+		return 1;
+
+	*pearliest = lowest;
+
+	return 0;
+}
@@ -1098,13 +1098,14 @@ __lws_vhost_destroy2(struct lws_vhost *vh)
 	} lws_end_foreach_dll_safe(d, d1);
 #endif
 
+#if defined(LWS_WITH_DEPRECATED_THINGS)
 	/*
 	 * destroy any pending timed events
 	 */
 
 	while (vh->timed_vh_protocol_list)
 		__lws_timed_callback_remove(vh, vh->timed_vh_protocol_list);
-
+#endif
 	/*
 	 * let the protocols destroy the per-vhost protocol objects
 	 */
@@ -61,7 +61,8 @@ __lws_set_timer_usecs(struct lws *wsi, lws_usec_t us)
 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
 
 	wsi->sul_hrtimer.cb = lws_sul_hrtimer_cb;
-	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_hrtimer, us);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &wsi->sul_hrtimer, us);
 }
 
 void

@@ -126,8 +127,9 @@ __lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
 
 	wsi->sul_timeout.cb = lws_sul_wsitimeout_cb;
-	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_timeout,
-			 ((lws_usec_t)secs) * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &wsi->sul_timeout,
+			    ((lws_usec_t)secs) * LWS_US_PER_SEC);
 
 	lwsl_debug("%s: %p: %d secs, reason %d\n", __func__, wsi, secs, reason);
 
@@ -178,7 +180,8 @@ lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
 		return;
 
 	lws_pt_lock(pt, __func__);
-	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_timeout, us);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &wsi->sul_timeout, us);
 
 	lwsl_notice("%s: %p: %llu us, reason %d\n", __func__, wsi,
 		    (unsigned long long)us, reason);

@@ -187,6 +190,8 @@ lws_set_timeout_us(struct lws *wsi, enum pending_timeout reason, lws_usec_t us)
 	lws_pt_unlock(pt);
 }
 
+#if defined(LWS_WITH_DEPRECATED_THINGS)
+
 /* requires context + vh lock */
 
 int
@@ -276,6 +281,8 @@ lws_timed_callback_vh_protocol(struct lws_vhost *vh,
 					((lws_usec_t)secs) * LWS_US_PER_SEC);
 }
 
+#endif
+
 static void
 lws_validity_cb(lws_sorted_usec_list_t *sul)
 {

@@ -308,8 +315,9 @@ lws_validity_cb(lws_sorted_usec_list_t *sul)
 		assert(rbo->secs_since_valid_hangup > rbo->secs_since_valid_ping);
 
 		wsi->validity_hup = 1;
-		__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_validity,
-				 ((uint64_t)rbo->secs_since_valid_hangup -
+		__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
+				    &wsi->sul_validity,
+				    ((uint64_t)rbo->secs_since_valid_hangup -
 				     rbo->secs_since_valid_ping) * LWS_US_PER_SEC);
 	}
 
@@ -339,8 +347,9 @@ _lws_validity_confirmed_role(struct lws *wsi)
 		  rbo->secs_since_valid_ping,
 		  wsi->validity_hup);
 
-	__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_validity,
-			 ((uint64_t)(wsi->validity_hup ?
+	__lws_sul_insert_us(&pt->pt_sul_owner[!!wsi->conn_validity_wakesuspend],
+			    &wsi->sul_validity,
+			    ((uint64_t)(wsi->validity_hup ?
 				rbo->secs_since_valid_hangup :
 				rbo->secs_since_valid_ping)) * LWS_US_PER_SEC);
 }
@@ -62,7 +62,8 @@ lws_sul_stats_cb(lws_sorted_usec_list_t *sul)
 
 	lws_stats_log_dump(pt->context);
 
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_stats, 10 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_stats, 10 * LWS_US_PER_SEC);
 }
 #endif
 #if defined(LWS_WITH_PEER_LIMITS)

@@ -74,7 +75,8 @@ lws_sul_peer_limits_cb(lws_sorted_usec_list_t *sul)
 
 	lws_peer_cull_peer_wait_list(pt->context);
 
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_peer_limits, 10 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_peer_limits, 10 * LWS_US_PER_SEC);
 }
 #endif
 
@@ -742,13 +744,13 @@ lws_create_context(const struct lws_context_creation_info *info)
 
 #if defined(LWS_WITH_STATS)
 	context->pt[0].sul_stats.cb = lws_sul_stats_cb;
-	__lws_sul_insert(&context->pt[0].pt_sul_owner, &context->pt[0].sul_stats,
-			 10 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&context->pt[0].pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &context->pt[0].sul_stats, 10 * LWS_US_PER_SEC);
 #endif
 #if defined(LWS_WITH_PEER_LIMITS)
 	context->pt[0].sul_peer_limits.cb = lws_sul_peer_limits_cb;
-	__lws_sul_insert(&context->pt[0].pt_sul_owner,
-			 &context->pt[0].sul_peer_limits, 10 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&context->pt[0].pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &context->pt[0].sul_peer_limits, 10 * LWS_US_PER_SEC);
 #endif
 
 #if defined(LWS_HAVE_SYS_CAPABILITY_H) && defined(LWS_HAVE_LIBCAP)
@@ -157,7 +157,8 @@ lws_glib_hrtimer_cb(void *p)
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us) {
 		ms = us / LWS_US_PER_MS;
 		if (!ms)
@@ -32,7 +32,8 @@ lws_ev_hrtimer_cb(struct ev_loop *loop, struct ev_timer *watcher, int revents)
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us) {
 		ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
 		ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);

@@ -60,7 +61,8 @@ lws_ev_idle_cb(struct ev_loop *loop, struct ev_idle *handle, int revents)
 	/* account for hrtimer */
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us) {
 		ev_timer_set(&pt->ev.hrtimer, ((float)us) / 1000000.0, 0);
 		ev_timer_start(pt->ev.io_loop, &pt->ev.hrtimer);
@@ -32,7 +32,8 @@ lws_event_hrtimer_cb(int fd, short event, void *p)
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us) {
 		tv.tv_sec = us / LWS_US_PER_SEC;
 		tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);

@@ -76,7 +77,8 @@ lws_event_idle_timer_cb(int fd, short event, void *p)
 	/* account for hrtimer */
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us) {
 		tv.tv_sec = us / LWS_US_PER_SEC;
 		tv.tv_usec = us - (tv.tv_sec * LWS_US_PER_SEC);
@@ -36,7 +36,8 @@ lws_uv_sultimer_cb(uv_timer_t *timer
 	lws_usec_t us;
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us)
 		uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
 			       LWS_US_TO_MS(us), 0);

@@ -66,7 +67,8 @@ lws_uv_idle(uv_idle_t *handle
 	/* account for sultimer */
 
 	lws_pt_lock(pt, __func__);
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us)
 		uv_timer_start(&pt->uv.sultimer, lws_uv_sultimer_cb,
 			       LWS_US_TO_MS(us), 0);
@@ -110,7 +110,9 @@ again:
 
 	lws_pt_lock(pt, __func__);
 	/* don't stay in poll wait longer than next hr timeout */
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner,
+				    LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us && us < timeout_us)
 		timeout_us = us;
 
@@ -128,7 +128,9 @@ again:
 
 	lws_pt_lock(pt, __func__);
 	/* don't stay in poll wait longer than next hr timeout */
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner,
+				    LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us && us < timeout_us)
 		timeout_us = us;
 
@@ -75,7 +75,8 @@ lws_sul_plat_unix(lws_sorted_usec_list_t *sul)
 	lws_context_unlock(context);
 #endif
 
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_plat, 30 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_plat, 30 * LWS_US_PER_SEC);
 }
 #endif
 

@@ -141,8 +142,8 @@ lws_plat_init(struct lws_context *context,
 	/* we only need to do this on pt[0] */
 
 	context->pt[0].sul_plat.cb = lws_sul_plat_unix;
-	__lws_sul_insert(&context->pt[0].pt_sul_owner, &context->pt[0].sul_plat,
-			 30 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&context->pt[0].pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &context->pt[0].sul_plat, 30 * LWS_US_PER_SEC);
 #endif
 
 	return 0;
@@ -113,7 +113,7 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 	/*
 	 * service ripe scheduled events, and limit wait to next expected one
 	 */
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, us);
+	us = __lws_sul_service_ripe(pt->pt_sul_owner, LWS_COUNT_PT_SUL_OWNERS, us);
 	if (us && us < timeout_us)
 		timeout_us = us;
 
@@ -108,10 +108,8 @@ lws_spawn_piped_destroy(struct lws_spawn_piped **_lsp)
 
 	lws_dll2_remove(&lsp->dll);
 
-	lws_sul_schedule(lsp->info.vh->context, lsp->info.tsi, &lsp->sul,
-			 NULL, LWS_SET_TIMER_USEC_CANCEL);
-	lws_sul_schedule(lsp->info.vh->context, lsp->info.tsi, &lsp->sul_reap,
-			 NULL, LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&lsp->sul);
+	lws_sul_cancel(&lsp->sul_reap);
 
 	for (n = 0; n < 3; n++) {
 #if 0

@@ -188,8 +186,7 @@ lws_spawn_reap(struct lws_spawn_piped *lsp)
 
 	/* we reached the reap point, no need for timeout wait */
 
-	lws_sul_schedule(lsp->info.vh->context, lsp->info.tsi, &lsp->sul, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&lsp->sul);
 
 	/*
	 * All the stdwsi went down, nothing more is coming... it's over
@@ -133,7 +133,9 @@ _lws_plat_service_tsi(struct lws_context *context, int timeout_ms, int tsi)
 
 	lws_pt_lock(pt, __func__);
 	/* don't stay in poll wait longer than next hr timeout */
-	us = __lws_sul_service_ripe(&pt->pt_sul_owner, lws_now_usecs());
+	us = __lws_sul_service_ripe(pt->pt_sul_owner,
+				    LWS_COUNT_PT_SUL_OWNERS,
+				    lws_now_usecs());
 	if (us && us < timeout_us)
 		timeout_us = us;
 
@@ -125,14 +125,9 @@ lws_spawn_piped_destroy(struct lws_spawn_piped **_lsp)
 
 	lws_dll2_remove(&lsp->dll);
 
-	lws_sul_schedule(lsp->info.vh->context, lsp->info.tsi, &lsp->sul,
-			 NULL, LWS_SET_TIMER_USEC_CANCEL);
-
-	lws_sul_schedule(lsp->context, 0, &lsp->sul_reap, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
-
-	lws_sul_schedule(lsp->context, 0, &lsp->sul_poll, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&lsp->sul);
+	lws_sul_cancel(&lsp->sul_reap);
+	lws_sul_cancel(&lsp->sul_poll);
 
 	lwsl_warn("%s: deleting lsp\n", __func__);
 

@@ -192,8 +187,7 @@ lws_spawn_reap(struct lws_spawn_piped *lsp)
 
 	/* we reached the reap point, no need for timeout wait */
 
-	lws_sul_schedule(lsp->info.vh->context, lsp->info.tsi, &lsp->sul, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&lsp->sul);
 
 	/*
	 * All the stdwsi went down, nothing more is coming... it's over

@@ -517,8 +511,7 @@ lws_spawn_piped(const struct lws_spawn_piped_info *i)
 
 bail3:
 
-	lws_sul_schedule(context, 0, &lsp->sul_poll, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&lsp->sul_poll);
 
 	while (--n >= 0)
 		__remove_wsi_socket_from_fds(lsp->stdwsi[n]);
@@ -92,8 +92,8 @@ lws_cgi_sul_cb(lws_sorted_usec_list_t *sul)
 
 	lws_cgi_kill_terminated(pt);
 
-	__lws_sul_insert_us(&pt->pt_sul_owner, &pt->sul_cgi,
-			    3 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_cgi, 3 * LWS_US_PER_SEC);
 }
 
 static int

@@ -105,8 +105,8 @@ rops_pt_init_destroy_cgi(struct lws_context *context,
 
 		pt->sul_cgi.cb = lws_cgi_sul_cb;
 
-		__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_cgi,
-				 3 * LWS_US_PER_SEC);
+		__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+				    &pt->sul_cgi, 3 * LWS_US_PER_SEC);
 	} else
 		lws_dll2_remove(&pt->sul_cgi.list);
 
@@ -491,7 +491,7 @@ lws_dbus_sul_cb(lws_sorted_usec_list_t *sul)
 		}
 	} lws_end_foreach_dll_safe(rdt, nx);
 
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->dbus.sul,
+	lws_sul_schedule(pt->context, pt->tid, &pt->dbus.sul, lws_dbus_sul_cb,
 			 3 * LWS_US_PER_SEC);
 }
 

@@ -501,13 +501,10 @@ rops_pt_init_destroy_dbus(struct lws_context *context,
 			  struct lws_context_per_thread *pt, int destroy)
 {
 	if (!destroy) {
-
-		pt->dbus.sul.cb = lws_dbus_sul_cb;
-
-		__lws_sul_insert(&pt->pt_sul_owner, &pt->dbus.sul,
+		lws_sul_schedule(context, pt->tid, &pt->dbus.sul, lws_dbus_sul_cb,
 				 3 * LWS_US_PER_SEC);
 	} else
-		lws_dll2_remove(&pt->dbus.sul.list);
+		lws_sul_cancel(&pt->dbus.sul);
 
 	return 0;
 }
@@ -756,8 +756,7 @@ ads_known:
 	}
 
 conn_good:
-	lws_sul_schedule(wsi->context, 0, &wsi->sul_connect_timeout, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&wsi->sul_connect_timeout);
 	lwsl_info("%s: Connection started %p\n", __func__, wsi->dns_results);
 
 	/* the tcp connection has happened */

@@ -837,8 +836,7 @@ try_next_result_closesock:
 	wsi->desc.sockfd = LWS_SOCK_INVALID;
 
 try_next_result:
-	lws_sul_schedule(wsi->context, 0, &wsi->sul_connect_timeout,
-			 NULL, LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&wsi->sul_connect_timeout);
 	if (wsi->dns_results_next) {
 		wsi->dns_results_next = wsi->dns_results_next->ai_next;
 		if (wsi->dns_results_next)
@@ -1111,9 +1111,7 @@ bail1:
 				 * no need for ACK timeout wait
 				 * any more
 				 */
-				lws_sul_schedule(lws_get_context(w), 0,
-						 &w->mqtt->sul_qos1_puback_wait, NULL,
-						 LWS_SET_TIMER_USEC_CANCEL);
+				lws_sul_cancel(&w->mqtt->sul_qos1_puback_wait);
 
 				if (requested_close) {
 					__lws_close_free_wsi(w,

@@ -1745,8 +1743,9 @@ do_write:
 	/* For QoS1, if no PUBACK coming after 3s, we must RETRY the publish */
 
 	wsi->mqtt->sul_qos1_puback_wait.cb = lws_mqtt_publish_resend;
-	__lws_sul_insert(&pt->pt_sul_owner, &wsi->mqtt->sul_qos1_puback_wait,
-			 3 * LWS_USEC_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[wsi->conn_validity_wakesuspend],
+			    &wsi->mqtt->sul_qos1_puback_wait,
+			    3 * LWS_USEC_PER_SEC);
 
 	return 0;
 }
@@ -422,8 +422,7 @@ rops_close_role_mqtt(struct lws_context_per_thread *pt, struct lws *wsi)
 
 	c = &wsi->mqtt->client;
 
-	__lws_sul_insert(&pt->pt_sul_owner, &wsi->mqtt->sul_qos1_puback_wait,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&wsi->mqtt->sul_qos1_puback_wait);
 
 	lws_mqtt_str_free(&c->username);
 	lws_mqtt_str_free(&c->password);
@@ -362,8 +362,7 @@ lws_process_ws_upgrade2(struct lws *wsi)
 		 * validity checking
 		 */
 
-		__lws_sul_insert(&pt->pt_sul_owner, &wsi->sul_validity,
-				 LWS_SET_TIMER_USEC_CANCEL);
+		lws_sul_cancel(&wsi->sul_validity);
 	} else
 #endif
 	{
@@ -203,6 +203,14 @@ connection for retry + backoff
 The name of the trust store described in the `trust_stores` section to apply
 to validate the remote server cert.
 
+### `swake_validity`
+
+Set to `true` if this streamtype is important enough for the functioning of the
+device that its locally-initiated periodic connection validity checks, of the
+interval described in the associated retry / backoff selection, are important
+enough to wake the whole system from low power suspend so they happen on
+schedule.
+
 ## http transport
 
 ### `http_method`
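An illustrative streamtype entry using it (the streamtype name and the other keys shown are placeholders, not from this commit):

```
	"mystream": {
		"endpoint":		"example.com",
		"port":			443,
		"tls":			true,
		"retry":		"default",
		"swake_validity":	true
	}
```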
@@ -90,6 +90,7 @@ static const char * const lejp_tokens_policy[] = {
 	"s[].*.mqtt_will_message",
 	"s[].*.mqtt_will_qos",
 	"s[].*.mqtt_will_retain",
+	"s[].*.swake_validity",
 	"s[].*",
 };
 

@@ -155,6 +156,7 @@ typedef enum {
 	LSSPPT_MQTT_WILL_MESSAGE,
 	LSSPPT_MQTT_WILL_QOS,
 	LSSPPT_MQTT_WILL_RETAIN,
+	LSSPPT_SWAKE_VALIDITY,
 	LSSPPT_STREAMTYPES
 } policy_token_t;
 

@@ -498,6 +500,11 @@ lws_ss_policy_parser_cb(struct lejp_ctx *ctx, char reason)
 			a->curr[LTY_POLICY].p->flags |=
 					LWSSSPOLF_HTTP_X_WWW_FORM_URLENCODED;
 		break;
+	case LSSPPT_SWAKE_VALIDITY:
+		if (reason == LEJPCB_VAL_TRUE)
+			a->curr[LTY_POLICY].p->flags |=
+					LWSSSPOLF_WAKE_SUSPEND__VALIDITY;
+		break;
 
 	case LSSPPT_RETRYPTR:
 		bot = a->heads[LTY_BACKOFF].b;
@@ -231,8 +231,10 @@ secstream_h1(struct lws *wsi, enum lws_callback_reasons reason, void *user,
 
 		h->retry = 0;
 		h->seqstate = SSSEQ_CONNECTED;
-		lws_ss_set_timeout_us(h, LWS_SET_TIMER_USEC_CANCEL);
-		lws_ss_event_helper(h, LWSSSCS_CONNECTED);
+		lws_sul_cancel(&h->sul);
+		if (lws_ss_event_helper(h, LWSSSCS_CONNECTED)) {
+			return -1;
+		}
 
 		/*
 		 * Since it's an http transaction we initiated... this is
@@ -72,7 +72,7 @@ secstream_mqtt(struct lws *wsi, enum lws_callback_reasons reason, void *user,
 		h->wsi = wsi;
 		h->retry = 0;
 		h->seqstate = SSSEQ_CONNECTED;
-		lws_ss_set_timeout_us(h, LWS_SET_TIMER_USEC_CANCEL);
+		lws_sul_cancel(&h->sul);
 		lws_ss_event_helper(h, LWSSSCS_CONNECTED);
 		if (h->policy->u.mqtt.topic)
 			lws_callback_on_writable(wsi);
@@ -67,7 +67,7 @@ secstream_raw(struct lws *wsi, enum lws_callback_reasons reason, void *user,
 
 		h->retry = 0;
 		h->seqstate = SSSEQ_CONNECTED;
-		lws_ss_set_timeout_us(h, LWS_SET_TIMER_USEC_CANCEL);
+		lws_sul_cancel(&h->sul);
 		lws_ss_event_helper(h, LWSSSCS_CONNECTED);
 
 		lws_validity_confirmed(wsi);
@@ -67,8 +67,9 @@ secstream_ws(struct lws *wsi, enum lws_callback_reasons reason, void *user,
 	case LWS_CALLBACK_CLIENT_ESTABLISHED:
 		h->retry = 0;
 		h->seqstate = SSSEQ_CONNECTED;
-		lws_ss_set_timeout_us(h, LWS_SET_TIMER_USEC_CANCEL);
-		lws_ss_event_helper(h, LWSSSCS_CONNECTED);
+		lws_sul_cancel(&h->sul);
+		if (lws_ss_event_helper(h, LWSSSCS_CONNECTED))
+			return -1;
 		break;
 
 	case LWS_CALLBACK_CLIENT_RECEIVE:
@@ -401,8 +401,7 @@ lws_sspc_destroy(lws_sspc_handle_t **ph)
 
 	h->destroying = 1;
 
-	lws_sul_schedule(h->context, 0, &h->sul_retry, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&h->sul_retry);
 	lws_dll2_remove(&h->client_list);
 
 	if (h->dsh)
@@ -150,7 +150,9 @@ lws_ss_set_timeout_us(lws_ss_handle_t *h, lws_usec_t us)
 	struct lws_context_per_thread *pt = &h->context->pt[h->tsi];
 
 	h->sul.cb = lws_ss_timeout_sul_check_cb;
-	__lws_sul_insert(&pt->pt_sul_owner, &h->sul, us);
+	__lws_sul_insert_us(&pt->pt_sul_owner[
+		!!(h->policy->flags & LWSSSPOLF_WAKE_SUSPEND__VALIDITY)],
+		&h->sul, us);
 
 	return 0;
 }

@@ -273,6 +275,9 @@ lws_ss_client_connect(lws_ss_handle_t *h)
 		}
 	}
 
+	if (h->policy->flags & LWSSSPOLF_WAKE_SUSPEND__VALIDITY)
+		i.ssl_connection |= LCCSCF_WAKE_SUSPEND__VALIDITY;
+
 	i.address = ads;
 	i.port = port;
 	i.host = i.address;

@@ -525,7 +530,7 @@ lws_ss_destroy(lws_ss_handle_t **ppss)
 		pmd = pmd->next;
 	}
 
-	lws_sul_schedule(h->context, 0, &h->sul, NULL, LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&h->sul);
 
 	lws_free_set_NULL(h);
 }
@@ -334,8 +334,7 @@ callback_dhcpc(struct lws *wsi, enum lws_callback_reasons reason, void *user,
 		if (!r)
 			break;
 		r->wsi_raw = NULL;
-		lws_sul_schedule(r->context, 0, &r->sul_write, NULL,
-				 LWS_SET_TIMER_USEC_CANCEL);
+		lws_sul_cancel(&r->sul_write);
 		if (r->state != LDHC_BOUND) {
 			r->state = LDHC_INIT;
 			lws_retry_sul_schedule(r->context, 0, &r->sul_conn, &bo2,

@@ -553,10 +552,8 @@ broken:
 
 	/* clear timeouts related to the broadcast socket */
 
-	lws_sul_schedule(r->context, 0, &r->sul_write, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
-	lws_sul_schedule(r->context, 0, &r->sul_conn, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&r->sul_write);
+	lws_sul_cancel(&r->sul_conn);
 
 	lwsl_notice("%s: DHCP configured %s\n", __func__,
 		    (const char *)&r[1]);

@@ -650,8 +647,7 @@ retry_conn:
 
 #if 0
 cancel_conn_timer:
-	lws_sul_schedule(r->context, 0, &r->sul_conn, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&r->sul_conn);
 
 	return 0;
 #endif

@@ -665,10 +661,8 @@ lws_dhcpc_destroy(lws_dhcpc_req_t **pr)
 {
 	lws_dhcpc_req_t *r = *pr;
 
-	lws_sul_schedule(r->context, 0, &r->sul_conn, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
-	lws_sul_schedule(r->context, 0, &r->sul_write, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&r->sul_conn);
+	lws_sul_cancel(&r->sul_write);
 	if (r->wsi_raw)
 		lws_set_timeout(r->wsi_raw, 1, LWS_TO_KILL_ASYNC);
 
@@ -195,8 +195,7 @@ do_close:
 		v->wsi_udp = NULL;
 
 		/* cancel any pending write retry */
-		lws_sul_schedule(v->context, 0, &v->sul_write, NULL,
-				 LWS_SET_TIMER_USEC_CANCEL);
+		lws_sul_cancel(&v->sul_write);
 
 		if (v->set_time)
 			goto cancel_conn_timer;

@@ -280,8 +279,7 @@ retry_conn:
 
 
 cancel_conn_timer:
-	lws_sul_schedule(v->context, 0, &v->sul_conn, NULL,
-			 LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&v->sul_conn);
 
 	return 0;
 }
@@ -34,8 +34,9 @@ lws_sul_tls_cb(lws_sorted_usec_list_t *sul)
 
 	lws_tls_check_all_cert_lifetimes(pt->context);
 
-	__lws_sul_insert(&pt->pt_sul_owner, &pt->sul_tls,
-			 (lws_usec_t)24 * 3600 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&pt->pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &pt->sul_tls,
+			    (lws_usec_t)24 * 3600 * LWS_US_PER_SEC);
 }
 
 int

@@ -115,8 +116,9 @@ lws_context_init_server_ssl(const struct lws_context_creation_info *info,
 	/* check certs once a day */
 
 	context->pt[0].sul_tls.cb = lws_sul_tls_cb;
-	__lws_sul_insert(&context->pt[0].pt_sul_owner, &context->pt[0].sul_tls,
-			 (lws_usec_t)24 * 3600 * LWS_US_PER_SEC);
+	__lws_sul_insert_us(&context->pt[0].pt_sul_owner[LWSSULLI_MISS_IF_SUSPENDED],
+			    &context->pt[0].sul_tls,
+			    (lws_usec_t)24 * 3600 * LWS_US_PER_SEC);
 
 	return 0;
 }
@@ -335,7 +335,7 @@ int main(int argc, char **argv)
 	}
 
 	/* cancel the per-minute sul */
-	lws_sul_schedule(context, 0, &sul_lwsws, NULL, LWS_SET_TIMER_USEC_CANCEL);
+	lws_sul_cancel(&sul_lwsws);
 
 	lws_context_destroy(context);
 	(void)budget;
@@ -41,6 +41,8 @@ struct per_vhost_data__minimal {
 	struct lws_vhost *vhost;
 	const struct lws_protocols *protocol;
 
+	lws_sorted_usec_list_t sul;
+
 	struct per_session_data__minimal *pss_list; /* linked-list of live pss*/
 
 	struct lws_ring *ring; /* ringbuffer holding unsent messages */

@@ -60,9 +62,12 @@ __minimal_destroy_message(void *_msg)
 	msg->len = 0;
 }
 
-static int
-connect_client(struct per_vhost_data__minimal *vhd)
+static void
+sul_connect_attempt(struct lws_sorted_usec_list *sul)
 {
+	struct per_vhost_data__minimal *vhd =
+		lws_container_of(sul, struct per_vhost_data__minimal, sul);
+
 	vhd->i.context = vhd->context;
 	vhd->i.port = 443;
 	vhd->i.address = "libwebsockets.org";

@@ -75,7 +80,9 @@ connect_client(struct per_vhost_data__minimal *vhd)
 	vhd->i.local_protocol_name = "lws-minimal-proxy";
 	vhd->i.pwsi = &vhd->client_wsi;
 
-	return !lws_client_connect_via_info(&vhd->i);
+	if (!lws_client_connect_via_info(&vhd->i))
+		lws_sul_schedule(vhd->context, 0, &vhd->sul,
+				 sul_connect_attempt, 10 * LWS_US_PER_SEC);
 }
 
 static int

@@ -109,14 +116,12 @@ callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
 		if (!vhd->ring)
 			return 1;
 
-		if (connect_client(vhd))
-			lws_timed_callback_vh_protocol(vhd->vhost,
-						       vhd->protocol,
-						       LWS_CALLBACK_USER, 1);
+		sul_connect_attempt(&vhd->sul);
 		break;
 
 	case LWS_CALLBACK_PROTOCOL_DESTROY:
 		lws_ring_destroy(vhd->ring);
+		lws_sul_cancel(&vhd->sul);
 		break;
 
 	/* --- serving callbacks --- */

@@ -169,8 +174,8 @@ callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
 		lwsl_err("CLIENT_CONNECTION_ERROR: %s\n",
 			 in ? (char *)in : "(null)");
 		vhd->client_wsi = NULL;
-		lws_timed_callback_vh_protocol(vhd->vhost, vhd->protocol,
-					       LWS_CALLBACK_USER, 1);
+		lws_sul_schedule(vhd->context, 0, &vhd->sul,
+				 sul_connect_attempt, LWS_US_PER_SEC);
 		break;
 
 	case LWS_CALLBACK_CLIENT_ESTABLISHED:

@@ -214,18 +219,8 @@ callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
 
 	case LWS_CALLBACK_CLIENT_CLOSED:
 		vhd->client_wsi = NULL;
-		lws_timed_callback_vh_protocol(vhd->vhost, vhd->protocol,
-					       LWS_CALLBACK_USER, 1);
-		break;
-
-	/* rate-limited client connect retries */
-
-	case LWS_CALLBACK_USER:
-		lwsl_notice("%s: LWS_CALLBACK_USER\n", __func__);
-		if (connect_client(vhd))
-			lws_timed_callback_vh_protocol(vhd->vhost,
-						       vhd->protocol,
-						       LWS_CALLBACK_USER, 1);
+		lws_sul_schedule(vhd->context, 0, &vhd->sul,
+				 sul_connect_attempt, LWS_US_PER_SEC);
 		break;
 
 	default:
@@ -1,4 +1,4 @@
-project(lws-minimal-dbus-ws-proxy-testclient)
+project(lws-minimal-dbus-ws-proxy-testclient C)
 cmake_minimum_required(VERSION 2.8)
 find_package(libwebsockets CONFIG REQUIRED)
 list(APPEND CMAKE_MODULE_PATH ${LWS_CMAKE_DIR})

@@ -11,7 +11,7 @@ require_lws_config(LWS_ROLE_DBUS 1 requirements)
 require_lws_config(LWS_WITH_CLIENT 1 requirements)
 
 if (NOT MSVC AND NOT WIN32 AND requirements)
-	add_executable(${SAMP} ${SRCS})
+	add_executable(${PROJECT_NAME} minimal-dbus-ws-proxy-testclient.c)
 
 	if (NOT LWS_PLAT_FREERTOS)
 		find_package(PkgConfig QUIET)

@@ -21,11 +21,11 @@ if (NOT MSVC AND NOT WIN32 AND requirements)
 	endif()
 
 	include_directories("${LWS_DBUS_INCLUDE1}")
-
+	message("project ${PROJECT_NAME}")
 	if (websockets_shared)
-		target_link_libraries(${SAMP} websockets_shared)
-		add_dependencies(${SAMP} websockets_shared ${LWS_DBUS_LIB})
+		target_link_libraries(${PROJECT_NAME} websockets_shared)
+		add_dependencies(${PROJECT_NAME} websockets_shared ${LWS_DBUS_LIB})
 	else()
-		target_link_libraries(${SAMP} websockets ${LWS_DBUS_LIB})
+		target_link_libraries(${PROJECT_NAME} websockets ${LWS_DBUS_LIB})
 	endif()
 endif()
@ -43,6 +43,8 @@ enum lws_dbus_client_state {
|
|||
struct lws_dbus_ctx_wsproxy_client {
|
||||
struct lws_dbus_ctx ctx;
|
||||
|
||||
lws_sorted_usec_list_t sul;
|
||||
|
||||
enum lws_dbus_client_state state;
|
||||
};
|
||||
|
||||
|
@@ -309,84 +311,57 @@ bail:
	return ret;
}

/*
 * Stub lws protocol, just so we can get synchronous timers conveniently.
 *
 * Set up a 1Hz timer and if our connection state is suitable, use that
 * to write mirror protocol drawing packets to the proxied ws connection
 */

static int
callback_just_timer(struct lws *wsi, enum lws_callback_reasons reason,
		    void *user, void *in, size_t len)
static void
sul_timer(struct lws_sorted_usec_list *sul)
{
	char payload[64];
	const char *ws_pkt = payload;
	DBusMessage *msg;

	switch (reason) {
	case LWS_CALLBACK_PROTOCOL_INIT:
	case LWS_CALLBACK_USER:
		lwsl_info("%s: LWS_CALLBACK_USER\n", __func__);
		if (!dbus_ctx || dbus_ctx->state != LDCS_CONN_ONWARD)
			goto again;

	if (!dbus_ctx || dbus_ctx->state != LDCS_CONN_ONWARD)
		goto again;

		if (autoexit_budget > 0) {
			if (!--autoexit_budget) {
				lwsl_notice("reached autoexit budget\n");
				interrupted = 1;
				break;
			}
	if (autoexit_budget > 0) {
		if (!--autoexit_budget) {
			lwsl_notice("reached autoexit budget\n");
			interrupted = 1;
			return;
		}

		msg = dbus_message_new_method_call(THIS_BUSNAME, THIS_OBJECT,
						   THIS_INTERFACE, "Send");
		if (!msg)
			break;

		lws_snprintf(payload, sizeof(payload), "d #%06X %d %d %d %d;",
			     rand() & 0xffffff, rand() % 480, rand() % 300,
			     rand() % 480, rand() % 300);

		if (!dbus_message_append_args(msg, DBUS_TYPE_STRING, &ws_pkt,
					      DBUS_TYPE_INVALID)) {
			dbus_message_unref(msg);
			break;
		}

		if (!dbus_connection_send_with_reply(dbus_ctx->ctx.conn, msg,
						     &dbus_ctx->ctx.pc,
						     DBUS_TIMEOUT_USE_DEFAULT)) {
			lwsl_err("%s: unable to send\n", __func__);
			dbus_message_unref(msg);
			break;
		}

		dbus_message_unref(msg);
		dbus_pending_call_set_notify(dbus_ctx->ctx.pc,
					     pending_call_notify,
					     &dbus_ctx->ctx, NULL);
		count_tx++;

again:
		lws_timed_callback_vh_protocol(lws_get_vhost(wsi),
					       lws_get_protocol(wsi),
					       LWS_CALLBACK_USER, 2);
		break;
	default:
		break;
	}

	return 0;
	msg = dbus_message_new_method_call(THIS_BUSNAME, THIS_OBJECT,
					   THIS_INTERFACE, "Send");
	if (!msg)
		goto again;

	lws_snprintf(payload, sizeof(payload), "d #%06X %d %d %d %d;",
		     rand() & 0xffffff, rand() % 480, rand() % 300,
		     rand() % 480, rand() % 300);

	if (!dbus_message_append_args(msg, DBUS_TYPE_STRING, &ws_pkt,
				      DBUS_TYPE_INVALID)) {
		dbus_message_unref(msg);
		goto again;
	}

	if (!dbus_connection_send_with_reply(dbus_ctx->ctx.conn, msg,
					     &dbus_ctx->ctx.pc,
					     DBUS_TIMEOUT_USE_DEFAULT)) {
		lwsl_err("%s: unable to send\n", __func__);
		dbus_message_unref(msg);
		goto again;
	}

	dbus_message_unref(msg);
	dbus_pending_call_set_notify(dbus_ctx->ctx.pc,
				     pending_call_notify,
				     &dbus_ctx->ctx, NULL);
	count_tx++;

again:
	lws_sul_schedule(context, 0, &dbus_ctx->sul, sul_timer, 2 * LWS_US_PER_SEC);
}

static struct lws_protocols protocols[] = {
	{ "_just_timer", callback_just_timer, 0, 10, 0, NULL, 0 },
	{ }
};


int main(int argc, const char **argv)
{
	struct lws_vhost *vh;
@@ -413,7 +388,6 @@ int main(int argc, const char **argv)

	memset(&info, 0, sizeof info); /* otherwise uninitialized garbage */
	info.options = LWS_SERVER_OPTION_EXPLICIT_VHOSTS;
	info.protocols = protocols;
	context = lws_create_context(&info);
	if (!context) {
		lwsl_err("lws init failed\n");
@@ -431,6 +405,9 @@
	if (!dbus_ctx)
		goto bail1;

	lws_sul_schedule(context, 0, &dbus_ctx->sul, sul_timer, LWS_US_PER_SEC);


	if (remote_method_call(dbus_ctx))
		goto bail2;

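The hunks above replace the example's stub `_just_timer` protocol and its `LWS_CALLBACK_USER` plumbing with a plain sul callback. As a minimal sketch (not part of the commit; the `sul_tick`/`tick_cb` names are hypothetical), a periodic timer under the sul api is just a callback that re-arms itself:

```c
#include <libwebsockets.h>

static struct lws_context *context;	/* assumed already created at init */
static lws_sorted_usec_list_t sul_tick;	/* hypothetical embedded timer object */

static void
tick_cb(lws_sorted_usec_list_t *sul)
{
	/* ... the periodic work goes here ... */

	/* re-arm ourselves to fire again 2s from now */
	lws_sul_schedule(context, 0, sul, tick_cb, 2 * LWS_US_PER_SEC);
}

/* kick it off once, eg, right after lws_create_context():
 *	lws_sul_schedule(context, 0, &sul_tick, tick_cb, LWS_US_PER_SEC);
 */
```
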
@@ -115,8 +115,7 @@ callback_http(struct lws *wsi, enum lws_callback_reasons reason,
	case LWS_CALLBACK_CLOSED_CLIENT_HTTP:
		interrupted = 1;
		bad = status != 200;
		lws_sul_schedule(lws_get_context(wsi), 0, &pss->sul, NULL,
				 LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&pss->sul);
		lws_cancel_service(lws_get_context(wsi)); /* abort poll wait */
		break;

@@ -79,8 +79,7 @@ callback_http(struct lws *wsi, enum lws_callback_reasons reason, void *user,
	case LWS_CALLBACK_CLOSED_HTTP:
		if (!pss)
			break;
		lws_sul_schedule(lws_get_context(wsi), 0, &pss->sul, sul_cb,
				 LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&pss->sul);
		break;

	case LWS_CALLBACK_HTTP_WRITEABLE:

@@ -153,7 +153,7 @@ callback_raw_test(struct lws *wsi, enum lws_callback_reasons reason,

	case LWS_CALLBACK_RAW_CLOSE_FILE:
		lwsl_notice("LWS_CALLBACK_RAW_CLOSE_FILE\n");
		lws_sul_schedule(lws_get_context(wsi), 0, &vhd->sul, sul_cb, LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&vhd->sul);
		break;

	case LWS_CALLBACK_RAW_WRITEABLE_FILE:

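The small hunks above are all the same mechanical change: cancelling a sul no longer needs the full schedule call with the `LWS_SET_TIMER_USEC_CANCEL` sentinel, only the sul itself. A before/after sketch, using the `pss->sul` member from the examples above:

```c
/* before: cancel by scheduling with the magic cancel interval */
lws_sul_schedule(lws_get_context(wsi), 0, &pss->sul, NULL,
		 LWS_SET_TIMER_USEC_CANCEL);

/* after: the new single-argument helper added by this commit */
lws_sul_cancel(&pss->sul);
```
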
@@ -234,8 +234,7 @@ drain_end_cb(void *v)
	/*
	 * Put a hold on bringing in any more data
	 */
	lws_sul_schedule(context, 0, &m->sul, NULL,
			 LWS_SET_TIMER_USEC_CANCEL);
	lws_sul_cancel(&m->sul);
#endif
	/* destroy our copy of the handle */
	m->mh = NULL;

@@ -391,8 +390,7 @@ ss_avs_metadata_rx(void *userobj, const uint8_t *buf, size_t len, int flags)
	/*
	 * Put a hold on bringing in any more data
	 */
	lws_sul_schedule(context, 0, &m->sul, NULL,
			 LWS_SET_TIMER_USEC_CANCEL);
	lws_sul_cancel(&m->sul);
#endif
	/* destroy our copy of the handle */
	m->mh = NULL;

@@ -549,8 +547,7 @@ ss_avs_metadata_state(void *userobj, void *sh,
		lws_ss_request_tx(m->ss);
		break;
	case LWSSSCS_DISCONNECTED:
		lws_sul_schedule(context, 0, &m->sul, NULL,
				 LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&m->sul);
		//if (m->mh) {
		play_mp3(NULL, NULL, NULL);
		m->mh = NULL;

@@ -223,7 +223,7 @@ ss_avs_metadata_state(void *userobj, void *sh,
{
	ss_avs_metadata_t *m = (ss_avs_metadata_t *)userobj;
	struct lws_context *context = (struct lws_context *)m->opaque_data;
	// struct lws_context *context = (struct lws_context *)m->opaque_data;

	lwsl_user("%s: %s, ord 0x%x\n", __func__, lws_ss_state_name(state),
		  (unsigned int)ack);

@@ -243,13 +243,11 @@ ss_avs_metadata_state(void *userobj, void *sh,
	/* for this demo app, we want to exit on fail to connect */
	case LWSSSCS_DISCONNECTED:
		/* for this demo app, we want to exit after complete flow */
		lws_sul_schedule(context, 0, &m->sul, use_buffer_50ms,
				 LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&m->sul);
		interrupted = 1;
		break;
	case LWSSSCS_DESTROYING:
		lws_sul_schedule(context, 0, &m->sul, use_buffer_50ms,
				 LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&m->sul);
		break;
	default:
		break;

@@ -112,8 +112,7 @@ myss_state(void *userobj, void *sh, lws_ss_constate_t state,
		lws_sul_schedule(context, 0, &m->sul, txcb, RATE_US);
		break;
	case LWSSSCS_DISCONNECTED:
		lws_sul_schedule(context, 0, &m->sul, txcb,
				 LWS_SET_TIMER_USEC_CANCEL);
		lws_sul_cancel(&m->sul);
		break;
	case LWSSSCS_ALL_RETRIES_FAILED:
		/* if we're out of retries, we want to close the app and FAIL */

@@ -43,6 +43,8 @@ struct vhd_minimal_client_echo {
	struct lws_vhost *vhost;
	struct lws *client_wsi;

	lws_sorted_usec_list_t sul;

	int *interrupted;
	int *options;
	const char **url;

@@ -51,9 +53,11 @@ struct vhd_minimal_client_echo {
	int *port;
};

static int
connect_client(struct vhd_minimal_client_echo *vhd)
static void
sul_connect_attempt(struct lws_sorted_usec_list *sul)
{
	struct vhd_minimal_client_echo *vhd =
		lws_container_of(sul, struct vhd_minimal_client_echo, sul);
	struct lws_client_connect_info i;
	char host[128];

@@ -77,7 +81,9 @@ connect_client(struct vhd_minimal_client_echo *vhd)

	lwsl_user("connecting to %s:%d/%s\n", i.address, i.port, i.path);

	return !lws_client_connect_via_info(&i);
	if (!lws_client_connect_via_info(&i))
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, 10 * LWS_US_PER_SEC);
}

static void

@@ -90,13 +96,6 @@ __minimal_destroy_message(void *_msg)
	msg->len = 0;
}

static void
schedule_callback(struct lws *wsi, int reason, int secs)
{
	lws_timed_callback_vh_protocol(lws_get_vhost(wsi),
				       lws_get_protocol(wsi), reason, secs);
}

static int
callback_minimal_client_echo(struct lws *wsi, enum lws_callback_reasons reason,
			     void *user, void *in, size_t len)

@@ -142,8 +141,11 @@ callback_minimal_client_echo(struct lws *wsi, enum lws_callback_reasons reason,
			(const struct lws_protocol_vhost_options *)in,
			"iface")->value;

		if (connect_client(vhd))
			schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		sul_connect_attempt(&vhd->sul);
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		lws_sul_cancel(&vhd->sul);
		break;

	case LWS_CALLBACK_CLIENT_ESTABLISHED:

@@ -250,32 +252,18 @@ callback_minimal_client_echo(struct lws *wsi, enum lws_callback_reasons reason,
		lwsl_err("CLIENT_CONNECTION_ERROR: %s\n",
			 in ? (char *)in : "(null)");
		vhd->client_wsi = NULL;
		//schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		//if (*vhd->options & 1) {
			if (!*vhd->interrupted)
				*vhd->interrupted = 3;
			lws_cancel_service(lws_get_context(wsi));
		//}
		if (!*vhd->interrupted)
			*vhd->interrupted = 3;
		lws_cancel_service(lws_get_context(wsi));
		break;

	case LWS_CALLBACK_CLIENT_CLOSED:
		lwsl_user("LWS_CALLBACK_CLIENT_CLOSED\n");
		lws_ring_destroy(pss->ring);
		vhd->client_wsi = NULL;
		// schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		//if (*vhd->options & 1) {
			if (!*vhd->interrupted)
				*vhd->interrupted = 1 + pss->completed;
			lws_cancel_service(lws_get_context(wsi));
		// }
		break;

	/* rate-limited client connect retries */

	case LWS_CALLBACK_USER:
		lwsl_notice("%s: LWS_CALLBACK_USER\n", __func__);
		if (connect_client(vhd))
			schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		if (!*vhd->interrupted)
			*vhd->interrupted = 1 + pss->completed;
		lws_cancel_service(lws_get_context(wsi));
		break;

	default:

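In the client-echo conversion above, the rate-limited retry logic moves out of `LWS_CALLBACK_USER` into the sul callback itself, which recovers its enclosing object with `lws_container_of()`. A self-contained sketch of that shape (not the commit's code; the `my_client` struct and `sul_retry` names are hypothetical):

```c
#include <libwebsockets.h>

struct my_client {
	lws_sorted_usec_list_t	sul;		/* embedded scheduling element */
	struct lws_context	*context;
	struct lws		*wsi;
};

static void
sul_retry(lws_sorted_usec_list_t *sul)
{
	/* recover the enclosing struct from the embedded sul member */
	struct my_client *c = lws_container_of(sul, struct my_client, sul);

	/* ... attempt the client connection using c->context ... */

	/* on early failure, re-arm the same sul to retry in 10s */
	lws_sul_schedule(c->context, 0, &c->sul, sul_retry,
			 10 * LWS_US_PER_SEC);
}
```

Because the sul is embedded in the object it services, no separate user pointer is needed and `lws_sul_cancel(&c->sul)` at destroy time removes any pending retry.
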
@@ -65,6 +65,8 @@ struct vhd_minimal_pmd_bulk {
	struct lws_vhost *vhost;
	struct lws *client_wsi;

	lws_sorted_usec_list_t sul;

	int *interrupted;
	int *options;
};

@@ -78,9 +80,11 @@ static uint64_t rng(uint64_t *r)
	return *r;
}

static int
connect_client(struct vhd_minimal_pmd_bulk *vhd)
static void
sul_connect_attempt(struct lws_sorted_usec_list *sul)
{
	struct vhd_minimal_pmd_bulk *vhd =
		lws_container_of(sul, struct vhd_minimal_pmd_bulk, sul);
	struct lws_client_connect_info i;

	memset(&i, 0, sizeof(i));

@@ -96,14 +100,9 @@ connect_client(struct vhd_minimal_pmd_bulk *vhd)
	i.protocol = "lws-minimal-pmd-bulk";
	i.pwsi = &vhd->client_wsi;

	return !lws_client_connect_via_info(&i);
}

static void
schedule_callback(struct lws *wsi, int reason, int secs)
{
	lws_timed_callback_vh_protocol(lws_get_vhost(wsi),
				       lws_get_protocol(wsi), reason, secs);
	if (!lws_client_connect_via_info(&i))
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, 10 * LWS_US_PER_SEC);
}

static int

@@ -138,8 +137,11 @@ callback_minimal_pmd_bulk(struct lws *wsi, enum lws_callback_reasons reason,
			(const struct lws_protocol_vhost_options *)in,
			"options")->value;

		if (connect_client(vhd))
			schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		sul_connect_attempt(&vhd->sul);
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		lws_sul_cancel(&vhd->sul);
		break;

	case LWS_CALLBACK_CLIENT_ESTABLISHED:

@@ -253,20 +255,14 @@ callback_minimal_pmd_bulk(struct lws *wsi, enum lws_callback_reasons reason,
		lwsl_err("CLIENT_CONNECTION_ERROR: %s\n",
			 in ? (char *)in : "(null)");
		vhd->client_wsi = NULL;
		schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, LWS_US_PER_SEC);
		break;

	case LWS_CALLBACK_CLIENT_CLOSED:
		vhd->client_wsi = NULL;
		schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		break;

	/* rate-limited client connect retries */

	case LWS_CALLBACK_USER:
		lwsl_notice("%s: LWS_CALLBACK_USER\n", __func__);
		if (connect_client(vhd))
			schedule_callback(wsi, LWS_CALLBACK_USER, 1);
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, LWS_US_PER_SEC);
		break;

	default:

@@ -44,6 +44,8 @@ struct per_vhost_data__minimal {
	const struct lws_protocols *protocol;
	pthread_t pthread_spam[2];

	lws_sorted_usec_list_t sul;

	pthread_mutex_t lock_ring; /* serialize access to the ring buffer */
	struct lws_ring *ring; /* ringbuffer holding unsent messages */
	uint32_t tail;

@@ -130,9 +132,12 @@ wait:
	return NULL;
}

static int
connect_client(struct per_vhost_data__minimal *vhd)
static void
sul_connect_attempt(struct lws_sorted_usec_list *sul)
{
	struct per_vhost_data__minimal *vhd =
		lws_container_of(sul, struct per_vhost_data__minimal, sul);

	vhd->i.context = vhd->context;
	vhd->i.port = 7681;
	vhd->i.address = "localhost";

@@ -144,7 +149,9 @@ connect_client(struct per_vhost_data__minimal *vhd)
	vhd->i.protocol = "lws-minimal-broker";
	vhd->i.pwsi = &vhd->client_wsi;

	return !lws_client_connect_via_info(&vhd->i);
	if (!lws_client_connect_via_info(&vhd->i))
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, 10 * LWS_US_PER_SEC);
}

static int

@@ -188,9 +195,7 @@ callback_minimal_broker(struct lws *wsi, enum lws_callback_reasons reason,
			goto init_fail;
		}

		if (connect_client(vhd))
			lws_timed_callback_vh_protocol(vhd->vhost,
					vhd->protocol, LWS_CALLBACK_USER, 1);
		sul_connect_attempt(&vhd->sul);
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:

@@ -202,6 +207,7 @@ init_fail:
		if (vhd->ring)
			lws_ring_destroy(vhd->ring);

		lws_sul_cancel(&vhd->sul);
		pthread_mutex_destroy(&vhd->lock_ring);

		return r;

@@ -210,8 +216,8 @@ init_fail:
		lwsl_err("CLIENT_CONNECTION_ERROR: %s\n",
			 in ? (char *)in : "(null)");
		vhd->client_wsi = NULL;
		lws_timed_callback_vh_protocol(vhd->vhost,
				vhd->protocol, LWS_CALLBACK_USER, 1);
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, LWS_US_PER_SEC);
		break;

	/* --- client callbacks --- */

@@ -250,8 +256,8 @@ skip:
	case LWS_CALLBACK_CLIENT_CLOSED:
		vhd->client_wsi = NULL;
		vhd->established = 0;
		lws_timed_callback_vh_protocol(vhd->vhost, vhd->protocol,
					       LWS_CALLBACK_USER, 1);
		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_connect_attempt, LWS_US_PER_SEC);
		break;

	case LWS_CALLBACK_EVENT_WAIT_CANCELLED:

@@ -267,16 +273,6 @@ skip:
		lws_callback_on_writable(vhd->client_wsi);
		break;

	/* rate-limited client connect retries */

	case LWS_CALLBACK_USER:
		lwsl_notice("%s: LWS_CALLBACK_USER\n", __func__);
		if (connect_client(vhd))
			lws_timed_callback_vh_protocol(vhd->vhost,
						       vhd->protocol,
						       LWS_CALLBACK_USER, 1);
		break;

	default:
		break;
	}

@@ -34,6 +34,8 @@

struct per_vhost_data__minimal {
	struct lws_threadpool *tp;
	struct lws_context *context;
	lws_sorted_usec_list_t sul;
	const char *config;
};

@@ -133,6 +135,22 @@ task_function(void *user, enum lws_threadpool_task_status s)
	return LWS_TP_RETURN_CHECKING_IN;
}


static void
sul_tp_dump(struct lws_sorted_usec_list *sul)
{
	struct per_vhost_data__minimal *vhd =
		lws_container_of(sul, struct per_vhost_data__minimal, sul);
	/*
	 * in debug mode, dump the threadpool stat to the logs once
	 * a second
	 */
	lws_threadpool_dump(vhd->tp);
	lws_sul_schedule(vhd->context, 0, &vhd->sul,
			 sul_tp_dump, LWS_US_PER_SEC);
}


static int
callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
		 void *user, void *in, size_t len)

@@ -159,6 +177,8 @@ callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
		if (!vhd)
			return 1;

		vhd->context = lws_get_context(wsi);

		/* recover the pointer to the globals struct */
		pvo = lws_pvo_search(
			(const struct lws_protocol_vhost_options *)in,

@@ -179,27 +199,14 @@ callback_minimal(struct lws *wsi, enum lws_callback_reasons reason,
		if (!vhd->tp)
			return 1;

		lws_timed_callback_vh_protocol(lws_get_vhost(wsi),
					       lws_get_protocol(wsi),
					       LWS_CALLBACK_USER, 1);

		lws_sul_schedule(vhd->context, 0, &vhd->sul,
				 sul_tp_dump, LWS_US_PER_SEC);
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY:
		lws_threadpool_finish(vhd->tp);
		lws_threadpool_destroy(vhd->tp);
		break;

	case LWS_CALLBACK_USER:

		/*
		 * in debug mode, dump the threadpool stat to the logs once
		 * a second
		 */
		lws_threadpool_dump(vhd->tp);
		lws_timed_callback_vh_protocol(lws_get_vhost(wsi),
					       lws_get_protocol(wsi),
					       LWS_CALLBACK_USER, 1);
		lws_sul_cancel(&vhd->sul);
		break;

	case LWS_CALLBACK_ESTABLISHED:

@@ -47,6 +47,7 @@ struct vhd {
	struct lws_context *context;
	struct lws_vhost *vhost;
	const struct lws_protocols *protocol;
	lws_sorted_usec_list_t sul;
	int hide_vhosts;
	int tow_flag;
	int period_s;

@@ -58,8 +59,9 @@ struct vhd {
static const struct lws_protocols protocols[1];

static void
update(struct vhd *v)
update(struct lws_sorted_usec_list *sul)
{
	struct vhd *v = lws_container_of(sul, struct vhd, sul);
	struct lws_ss_filepath *fp;
	char contents[256], pure[256], *p = v->d.buf + LWS_PRE,
	     *end = v->d.buf + sizeof(v->d.buf) - LWS_PRE - 1;

@@ -97,6 +99,8 @@ update(struct vhd *v)
	v->d.length = p - (v->d.buf + LWS_PRE);

	lws_callback_on_writable_all_protocol(v->context, &protocols[0]);

	lws_sul_schedule(v->context, 0, &v->sul, update, v->period_s * LWS_US_PER_SEC);
}

static int

@@ -116,12 +120,9 @@ callback_lws_server_status(struct lws *wsi, enum lws_callback_reasons reason,
	case LWS_CALLBACK_ESTABLISHED:
		lwsl_info("%s: LWS_CALLBACK_ESTABLISHED\n", __func__);
		if (!v->clients++) {
			lws_timed_callback_vh_protocol(v->vhost, v->protocol,
					LWS_CALLBACK_USER, v->period_s);
			lws_sul_schedule(lws_get_context(wsi), 0, &v->sul, update, 1);
			lwsl_info("%s: starting updates\n", __func__);
		}
		update(v);

		break;

	case LWS_CALLBACK_CLOSED:

@@ -130,13 +131,6 @@ callback_lws_server_status(struct lws *wsi, enum lws_callback_reasons reason,

		break;

	case LWS_CALLBACK_USER:
		update(v);
		if (v->clients)
			lws_timed_callback_vh_protocol(v->vhost, v->protocol,
					LWS_CALLBACK_USER, v->period_s);
		break;

	case LWS_CALLBACK_PROTOCOL_INIT: /* per vhost */
		if (v)
			break;

@@ -171,8 +165,7 @@ callback_lws_server_status(struct lws *wsi, enum lws_callback_reasons reason,
		v->vhost = lws_get_vhost(wsi);
		v->protocol = lws_get_protocol(wsi);

		/* get the initial data */
		update(v);
		lws_sul_schedule(lws_get_context(wsi), 0, &v->sul, update, 1);
		break;

	case LWS_CALLBACK_PROTOCOL_DESTROY: /* per vhost */

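Two idioms recur in the hunks above for starting a sul flow off at `LWS_CALLBACK_PROTOCOL_INIT` time; both lines below are taken from the conversions in this commit (`vhd`, `v`, `wsi`, `sul_connect_attempt` and `update` are the examples' own names):

```c
/* 1) call the sul callback synchronously, handing it the embedded sul */
sul_connect_attempt(&vhd->sul);

/* 2) or schedule it to fire almost immediately (1us) from the event loop */
lws_sul_schedule(lws_get_context(wsi), 0, &v->sul, update, 1);
```

The difference is only whether the first pass runs inside the protocol callback or from the event loop shortly after; either way, the matching `LWS_CALLBACK_PROTOCOL_DESTROY` handler cancels any pending event with `lws_sul_cancel()`.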