Mirror of https://github.com/warmcat/libwebsockets.git (synced 2025-03-09 00:00:04 +01:00)

smp: fixes

commit d39ecd814a (parent 2203a5f019)
14 changed files with 205 additions and 84 deletions
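Nearly every hunk below follows the same shape: a function that used to take the per-service-thread (pt) lock internally is split into a "__"-prefixed form that assumes the caller already holds the lock, plus a public wrapper that acquires the lock, calls the "__" form and releases it. lws_pt_lock() also gains a reason argument (usually __func__), and the stats counters move to their own lws_pt_stats_lock()/lws_pt_stats_unlock(). The following is a minimal, self-contained sketch of that split, not lws code; all names in it are invented for illustration.

    /* Sketch of the locked-wrapper / lockless-"__" split this commit applies.
     * Invented names; compile with -lpthread. */
    #include <pthread.h>
    #include <stdio.h>

    struct per_thread {
    	pthread_mutex_t lock;   /* stands in for the pt lock */
    	int counter;            /* some state the lock protects */
    };

    /* "__" form: never locks, so other "__" helpers may call it freely */
    static void __thing_bump(struct per_thread *pt)
    {
    	pt->counter++;
    }

    /* public form: the only place the lock is taken for this operation */
    static void thing_bump(struct per_thread *pt)
    {
    	pthread_mutex_lock(&pt->lock);
    	__thing_bump(pt);
    	pthread_mutex_unlock(&pt->lock);
    }

    int main(void)
    {
    	struct per_thread pt = { PTHREAD_MUTEX_INITIALIZER, 0 };

    	thing_bump(&pt);                  /* external caller */

    	pthread_mutex_lock(&pt.lock);     /* already-locked path... */
    	__thing_bump(&pt);                /* ...uses only "__" helpers */
    	pthread_mutex_unlock(&pt.lock);

    	printf("counter %d\n", pt.counter);
    	return 0;
    }

The payoff appears in the private-header hunk further down, where lws_pt_lock() now asserts if the same thread tries to take the pt lock twice, so already-locked paths have to stay on the "__" variants.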
@@ -1495,7 +1495,7 @@ lws_h2_parser(struct lws *wsi, unsigned char *in, lws_filepos_t inlen,
 			 * the frame boundary, in the case there is already
 			 * more waiting leave it for next time around
 			 */
-			n = inlen + 1;
+			n = (int)inlen + 1;
 			if (n > (int)(h2n->length - h2n->count + 1)) {
 				n = h2n->length - h2n->count + 1;
 				lwsl_debug("---- restricting len to %d vs %ld\n", n, (long)inlen + 1);

@@ -90,7 +90,7 @@ lws_free_wsi(struct lws *wsi)
 	if (wsi->vhost && wsi->vhost->lserv_wsi == wsi)
 		wsi->vhost->lserv_wsi = NULL;

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);
 	ah = pt->ah_list;
 	while (ah) {
 		if (ah->in_use && ah->wsi == wsi) {

@@ -117,12 +117,14 @@ lws_free_wsi(struct lws *wsi)
 	}
 #endif

-	lws_pt_unlock(pt);
-
 	/* since we will destroy the wsi, make absolutely sure now */

-	lws_ssl_remove_wsi_from_buffered_list(wsi);
-	lws_remove_from_timeout_list(wsi);
+#if defined(LWS_WITH_OPENSSL)
+	__lws_ssl_remove_wsi_from_buffered_list(wsi);
+#endif
+	__lws_remove_from_timeout_list(wsi);
+
+	lws_pt_unlock(pt);

 	lws_libevent_destroy(wsi);

@@ -140,14 +142,11 @@ lws_should_be_on_timeout_list(struct lws *wsi)
 }

 void
-lws_remove_from_timeout_list(struct lws *wsi)
+__lws_remove_from_timeout_list(struct lws *wsi)
 {
-	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
-
 	if (!wsi->timeout_list_prev) /* ie, not part of the list */
 		return;

-	lws_pt_lock(pt);
 	/* if we have a next guy, set his prev to our prev */
 	if (wsi->timeout_list)
 		wsi->timeout_list->timeout_list_prev = wsi->timeout_list_prev;

@@ -157,12 +156,22 @@ lws_remove_from_timeout_list(struct lws *wsi)
 	/* we're out of the list, we should not point anywhere any more */
 	wsi->timeout_list_prev = NULL;
 	wsi->timeout_list = NULL;
 }

+void
+lws_remove_from_timeout_list(struct lws *wsi)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+	lws_pt_lock(pt, __func__);
+
+	__lws_remove_from_timeout_list(wsi);
+
+	lws_pt_unlock(pt);
+}
+
 static void
-lws_add_to_timeout_list(struct lws *wsi)
+__lws_add_to_timeout_list(struct lws *wsi)
 {
 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

@@ -183,6 +192,7 @@ lws_add_to_timeout_list(struct lws *wsi)
 LWS_VISIBLE void
 lws_set_timer(struct lws *wsi, int secs)
 {
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
 	time_t now;

 	if (secs < 0) {

@@ -201,16 +211,37 @@ lws_set_timer(struct lws *wsi, int secs)

 	if (!wsi->timer_active) {
 		wsi->timer_active = 1;
-		if (!wsi->pending_timeout)
-			lws_add_to_timeout_list(wsi);
+		if (!wsi->pending_timeout) {
+			lws_pt_lock(pt, __func__);
+			__lws_add_to_timeout_list(wsi);
+			lws_pt_unlock(pt);
+		}
 	}
 }

+void
+__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
+{
+	time_t now;
+
+	time(&now);
+
+	if (reason)
+		__lws_add_to_timeout_list(wsi);
+
+	lwsl_debug("%s: %p: %d secs\n", __func__, wsi, secs);
+	wsi->pending_timeout_limit = secs;
+	wsi->pending_timeout_set = now;
+	wsi->pending_timeout = reason;
+
+	if (!reason && !lws_should_be_on_timeout_list(wsi))
+		__lws_remove_from_timeout_list(wsi);
+}
+
 LWS_VISIBLE void
 lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
 {
 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
-	time_t now;

 	if (secs == LWS_TO_KILL_SYNC) {
 		lws_remove_from_timeout_list(wsi);

@@ -219,22 +250,11 @@ lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
 		return;
 	}

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);

-	time(&now);
-
-	if (reason)
-		lws_add_to_timeout_list(wsi);
-
-	lwsl_debug("%s: %p: %d secs\n", __func__, wsi, secs);
-	wsi->pending_timeout_limit = secs;
-	wsi->pending_timeout_set = now;
-	wsi->pending_timeout = reason;
+	__lws_set_timeout(wsi, reason, secs);

 	lws_pt_unlock(pt);
-
-	if (!reason && !lws_should_be_on_timeout_list(wsi))
-		lws_remove_from_timeout_list(wsi);
 }

 int

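With the hunks above, all the timeout bookkeeping lives in the new lockless __lws_set_timeout(); the public lws_set_timeout() just takes the pt lock around it, and paths that already hold the lock (the header-table reset further down in this diff) call __lws_set_timeout() directly. The sketch below only illustrates what that bookkeeping tracks, using the pending_timeout_set / pending_timeout_limit / pending_timeout fields seen above; the struct and helper names are simplified stand-ins, not the real lws types, and locking is assumed to be handled by the caller.

    /* Sketch of the timeout bookkeeping __lws_set_timeout() performs.
     * Simplified, invented names; not the lws structs. */
    #include <stdio.h>
    #include <time.h>

    struct conn {
    	time_t pending_timeout_set;   /* when the timeout was armed */
    	int    pending_timeout_limit; /* seconds allowed */
    	int    pending_timeout;       /* reason, 0 = no timeout armed */
    };

    /* caller is assumed to already hold whatever lock protects 'c' */
    static void __set_timeout(struct conn *c, int reason, int secs)
    {
    	time(&c->pending_timeout_set);
    	c->pending_timeout_limit = secs;
    	c->pending_timeout = reason;
    }

    /* what a service loop would check periodically */
    static int timed_out(const struct conn *c, time_t now)
    {
    	return c->pending_timeout &&
    	       now - c->pending_timeout_set >= c->pending_timeout_limit;
    }

    int main(void)
    {
    	struct conn c = { 0, 0, 0 };

    	__set_timeout(&c, 1 /* some reason */, 5);
    	printf("timed out now? %d\n", timed_out(&c, time(NULL)));
    	printf("timed out in 6s? %d\n", timed_out(&c, time(NULL) + 6));
    	return 0;
    }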
@@ -1451,6 +1471,7 @@ lws_latency(struct lws_context *context, struct lws *wsi, const char *action,
 LWS_VISIBLE int
 lws_rx_flow_control(struct lws *wsi, int _enable)
 {
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
 	int en = _enable;

 	lwsl_info("%s: %p 0x%x\n", __func__, wsi, _enable);

@@ -1465,6 +1486,8 @@ lws_rx_flow_control(struct lws *wsi, int _enable)
 		en |= LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT;
 	}

+	lws_pt_lock(pt, __func__);
+
 	/* any bit set in rxflow_bitmap DISABLEs rxflow control */
 	if (en & LWS_RXFLOW_REASON_APPLIES_ENABLE_BIT)
 		wsi->rxflow_bitmap &= ~(en & 0xff);

@@ -1473,7 +1496,7 @@ lws_rx_flow_control(struct lws *wsi, int _enable)

 	if ((LWS_RXFLOW_PENDING_CHANGE | (!wsi->rxflow_bitmap)) ==
 	    wsi->rxflow_change_to)
-		return 0;
+		goto skip;

 	wsi->rxflow_change_to = LWS_RXFLOW_PENDING_CHANGE | !wsi->rxflow_bitmap;

@@ -1481,8 +1504,15 @@ lws_rx_flow_control(struct lws *wsi, int _enable)
 		  wsi->rxflow_bitmap, en, wsi->rxflow_change_to);

 	if (_enable & LWS_RXFLOW_REASON_FLAG_PROCESS_NOW ||
-	    !wsi->rxflow_will_be_applied)
-		return _lws_rx_flow_control(wsi);
+	    !wsi->rxflow_will_be_applied) {
+		en = __lws_rx_flow_control(wsi);
+		lws_pt_unlock(pt);
+
+		return en;
+	}
+
+skip:
+	lws_pt_unlock(pt);

 	return 0;
 }

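Because lws_rx_flow_control() now takes the pt lock near the top, every exit path has to release it: the hunks above turn the early "return 0;" into "goto skip;" (where the unlock lives) and unlock explicitly before the early "return en;". A compact sketch of that unlock-on-every-exit idiom follows; the lock, state and helper below are stand-ins, not lws internals.

    /* Sketch of the "unlock on every exit" shape used above.
     * Invented names; compile with -lpthread. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int state;

    static int apply_change(void)	/* stand-in for the "__" apply step */
    {
    	return state ? 0 : -1;
    }

    static int change_flow(int enable, int process_now)
    {
    	int ret = 0;

    	pthread_mutex_lock(&lock);

    	if (state == enable)	/* nothing to do: leave via the unlock */
    		goto skip;

    	state = enable;

    	if (process_now) {	/* early exit still releases the lock */
    		ret = apply_change();
    		pthread_mutex_unlock(&lock);

    		return ret;
    	}

    skip:
    	pthread_mutex_unlock(&lock);

    	return ret;
    }

    int main(void)
    {
    	printf("%d\n", change_flow(1, 1));
    	printf("%d\n", change_flow(1, 0));
    	return 0;
    }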
@@ -1575,7 +1605,7 @@ int user_callback_handle_rxflow(lws_callback_function callback_function,
 	n = callback_function(wsi, reason, user, in, len);
 	wsi->rxflow_will_be_applied = 0;
 	if (!n)
-		n = _lws_rx_flow_control(wsi);
+		n = __lws_rx_flow_control(wsi);

 	return n;
 }

@@ -2109,14 +2139,14 @@ lws_close_reason(struct lws *wsi, enum lws_close_status status,
 }

 LWS_EXTERN int
-_lws_rx_flow_control(struct lws *wsi)
+__lws_rx_flow_control(struct lws *wsi)
 {
 	struct lws *wsic = wsi->child_list;

 	/* if he has children, do those if they were changed */
 	while (wsic) {
 		if (wsic->rxflow_change_to & LWS_RXFLOW_PENDING_CHANGE)
-			_lws_rx_flow_control(wsic);
+			__lws_rx_flow_control(wsic);

 		wsic = wsic->sibling_list;
 	}

@@ -2142,12 +2172,12 @@ _lws_rx_flow_control(struct lws *wsi)
 	/* adjust the pollfd for this wsi */

 	if (wsi->rxflow_change_to & LWS_RXFLOW_ALLOW) {
-		if (lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
+		if (__lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
 			lwsl_info("%s: fail\n", __func__);
 			return -1;
 		}
 	} else
-		if (lws_change_pollfd(wsi, LWS_POLLIN, 0))
+		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
 			return -1;

 	return 0;

@@ -3123,7 +3153,7 @@ lws_stats_log_dump(struct lws_context *context)

 		lwsl_notice("PT %d\n", n + 1);

-		lws_pt_lock(pt);
+		lws_pt_lock(pt, __func__);

 		lwsl_notice(" AH in use / max: %d / %d\n",
 			    pt->ah_count_in_use,

@@ -3185,23 +3215,23 @@ void
 lws_stats_atomic_bump(struct lws_context * context,
 		struct lws_context_per_thread *pt, int index, uint64_t bump)
 {
-	lws_pt_lock(pt);
+	lws_pt_stats_lock(pt);
 	context->lws_stats[index] += bump;
 	if (index != LWSSTATS_C_SERVICE_ENTRY)
 		context->updated = 1;
-	lws_pt_unlock(pt);
+	lws_pt_stats_unlock(pt);
 }

 void
 lws_stats_atomic_max(struct lws_context * context,
 		struct lws_context_per_thread *pt, int index, uint64_t val)
 {
-	lws_pt_lock(pt);
+	lws_pt_stats_lock(pt);
 	if (val > context->lws_stats[index]) {
 		context->lws_stats[index] = val;
 		context->updated = 1;
 	}
-	lws_pt_unlock(pt);
+	lws_pt_stats_unlock(pt);
 }

 #endif

@@ -224,7 +224,7 @@ typedef unsigned long long lws_intptr_t;

 #if LWS_MAX_SMP > 1

-#define lws_pthread_mutex(name) pthread_mutex_t name
+#define lws_pthread_mutex(name) pthread_mutex_t name;

 static LWS_INLINE void
 lws_pthread_mutex_init(pthread_mutex_t *lock)

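The lws_pthread_mutex() macro now supplies its own trailing semicolon, so declarations written with it drop theirs; the mirror-protocol hunks further down make exactly that adjustment. A plausible reason, stated here as an assumption since the non-SMP branch is not shown in this diff, is that a single-threaded build can then define the macro to expand to nothing without leaving a stray ";" inside a struct. A small sketch of that idea, with invented names and a hypothetical MAX_SMP switch:

    /* Sketch of why the macro carries the ';' itself.  The empty non-SMP
     * expansion below is an assumption for illustration only. */
    #include <pthread.h>
    #include <stdio.h>

    #define MAX_SMP 2                      /* pretend SMP build */

    #if MAX_SMP > 1
    #define my_pthread_mutex(name) pthread_mutex_t name;
    #else
    #define my_pthread_mutex(name)         /* assumed: vanishes entirely */
    #endif

    struct shared_thing {
    	my_pthread_mutex(lock)         /* note: no ';' at the use site */
    	int value;
    };

    int main(void)
    {
    	struct shared_thing t = { PTHREAD_MUTEX_INITIALIZER, 0 };

    	pthread_mutex_lock(&t.lock);
    	t.value++;
    	pthread_mutex_unlock(&t.lock);
    	printf("%d\n", t.value);
    	return 0;
    }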
lib/pollfd.c (29 changed lines)
@@ -253,7 +253,7 @@ insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
 				   wsi->user_space, (void *) &pa, 1))
 		return -1;

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);
 	pt->count_conns++;
 	insert_wsi(context, wsi);
 	wsi->position_in_fds_table = pt->fds_count;

@@ -326,7 +326,7 @@ remove_wsi_socket_from_fds(struct lws *wsi)
 	lws_libuv_io(wsi, LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE |
 			  LWS_EV_PREPARE_DELETION);

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);

 	lwsl_debug("%s: wsi=%p, sock=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
 		   __func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,

@@ -374,9 +374,8 @@ remove_wsi_socket_from_fds(struct lws *wsi)
 }

 int
-lws_change_pollfd(struct lws *wsi, int _and, int _or)
+__lws_change_pollfd(struct lws *wsi, int _and, int _or)
 {
-	struct lws_context_per_thread *pt;
 	struct lws_context *context;
 	struct lws_pollargs pa;
 	int ret = 0;

@@ -394,11 +393,7 @@ lws_change_pollfd(struct lws *wsi, int _and, int _or)
 				   wsi->user_space, (void *) &pa, 0))
 		return -1;

-	pt = &context->pt[(int)wsi->tsi];
-
-	lws_pt_lock(pt);
 	ret = _lws_change_pollfd(wsi, _and, _or, &pa);
-	lws_pt_unlock(pt);
 	if (wsi->vhost &&
 	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
 					      wsi->user_space, (void *) &pa, 0))

@@ -407,6 +402,21 @@ lws_change_pollfd(struct lws *wsi, int _and, int _or)
 	return ret;
 }

+int
+lws_change_pollfd(struct lws *wsi, int _and, int _or)
+{
+	struct lws_context_per_thread *pt;
+	int ret = 0;
+
+	pt = &wsi->context->pt[(int)wsi->tsi];
+
+	lws_pt_lock(pt, __func__);
+	ret = __lws_change_pollfd(wsi, _and, _or);
+	lws_pt_unlock(pt);
+
+	return ret;
+}
+
 LWS_VISIBLE int
 lws_callback_on_writable(struct lws *wsi)
 {

@@ -509,12 +519,13 @@ network_sock:
 		return -1;
 	}

-	if (lws_change_pollfd(wsi, 0, LWS_POLLOUT))
+	if (__lws_change_pollfd(wsi, 0, LWS_POLLOUT))
 		return -1;

 	return 1;
 }

+
 /*
  * stitch protocol choice into the vh protocol linked list
  * We always insert ourselves at the start of the list

@@ -811,6 +811,7 @@ struct allocated_headers {
 struct lws_context_per_thread {
 #if LWS_MAX_SMP > 1
 	pthread_mutex_t lock;
+	pthread_mutex_t lock_stats;
 #endif
 	struct lws_pollfd *fds;
 	volatile struct lws_foreign_thread_pollfd * volatile foreign_pfd_list;

@@ -826,6 +827,9 @@ struct lws_context_per_thread {
 	void *http_header_data;
 	struct allocated_headers *ah_list;
 	struct lws *ah_wait_list;
+#if defined(LWS_HAVE_PTHREAD_H)
+	const char *last_lock_reason;
+#endif
 	int ah_wait_list_length;
 #ifdef LWS_OPENSSL_SUPPORT
 	struct lws *pending_read_list; /* linked list */

@@ -868,7 +872,9 @@ struct lws_context_per_thread {

 	short ah_count_in_use;
 	unsigned char tid;
-	unsigned char lock_depth;
+#if LWS_MAX_SMP > 1
+	pthread_t lock_owner;
+#endif
 };

 struct lws_conn_stats {

@@ -2254,6 +2260,8 @@ LWS_EXTERN void
 lws_header_table_reset(struct lws *wsi, int autoservice);
 void
 _lws_header_table_reset(struct allocated_headers *ah);
+void
+__lws_header_table_reset(struct lws *wsi, int autoservice);

 void
 lws_header_table_force_to_detachable_state(struct lws *wsi);

@@ -2444,43 +2452,65 @@ lws_context_init_http2_ssl(struct lws_vhost *vhost);
 #endif

 #if LWS_MAX_SMP > 1

 static LWS_INLINE void
 lws_pt_mutex_init(struct lws_context_per_thread *pt)
 {
 	pthread_mutex_init(&pt->lock, NULL);
+	pthread_mutex_init(&pt->lock_stats, NULL);
 }

 static LWS_INLINE void
 lws_pt_mutex_destroy(struct lws_context_per_thread *pt)
 {
+	pthread_mutex_destroy(&pt->lock_stats);
 	pthread_mutex_destroy(&pt->lock);
 }

 static LWS_INLINE void
-lws_pt_lock(struct lws_context_per_thread *pt)
+lws_pt_lock(struct lws_context_per_thread *pt, const char *reason)
 {
-	if (!pt->lock_depth++)
-		pthread_mutex_lock(&pt->lock);
+	if (pt->lock_owner == pthread_self()) {
+		lwsl_err("tid %d: lock collision: already held for %s, reacquiring for %s\n", pt->tid, pt->last_lock_reason, reason);
+		assert(0);
+	}
+	pthread_mutex_lock(&pt->lock);
+	pt->last_lock_reason = reason;
+	pt->lock_owner = pthread_self();
+	//lwsl_notice("tid %d: lock %s\n", pt->tid, reason);
 }

 static LWS_INLINE void
 lws_pt_unlock(struct lws_context_per_thread *pt)
 {
-	if (!(--pt->lock_depth))
-		pthread_mutex_unlock(&pt->lock);
+	pt->last_lock_reason = "free";
+	pt->lock_owner = 0;
+	//lwsl_notice("tid %d: unlock %s\n", pt->tid, pt->last_lock_reason);
+	pthread_mutex_unlock(&pt->lock);
 }

+static LWS_INLINE void
+lws_pt_stats_lock(struct lws_context_per_thread *pt)
+{
+	pthread_mutex_lock(&pt->lock_stats);
+}
+
+static LWS_INLINE void
+lws_pt_stats_unlock(struct lws_context_per_thread *pt)
+{
+	pthread_mutex_unlock(&pt->lock_stats);
+}
+
 static LWS_INLINE void
 lws_context_lock(struct lws_context *context)
 {
-	if (!context->lock_depth++)
-		pthread_mutex_lock(&context->lock);
+	pthread_mutex_lock(&context->lock);
 }

 static LWS_INLINE void
 lws_context_unlock(struct lws_context *context)
 {
-	if (!(--context->lock_depth))
-		pthread_mutex_unlock(&context->lock);
+	pthread_mutex_unlock(&context->lock);
 }

 static LWS_INLINE void

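The rewritten lws_pt_lock() above drops the old lock_depth recursion counter: instead of tolerating nested acquisition it records the owning thread and a reason string, and asserts if the same thread tries to take the pt lock twice. That assertion is what forces the rest of this commit to route already-locked paths through the "__"-prefixed helpers. Below is a standalone sketch of the owner/reason bookkeeping; the struct and names are illustrative, not the lws ones, and the re-entry check is a best-effort debug aid rather than a synchronized read, as in the original.

    /* Sketch of a non-recursive lock that remembers its owner and the reason
     * it was taken, asserting on re-acquisition by the same thread.
     * Invented names; compile with -lpthread. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct pt {
    	pthread_mutex_t lock;
    	pthread_t lock_owner;
    	const char *last_lock_reason;
    	int owned;                 /* pthread_t has no portable "null" */
    };

    static void pt_lock(struct pt *pt, const char *reason)
    {
    	if (pt->owned && pthread_equal(pt->lock_owner, pthread_self())) {
    		fprintf(stderr, "lock collision: held for %s, wanted for %s\n",
    			pt->last_lock_reason, reason);
    		assert(0);
    	}
    	pthread_mutex_lock(&pt->lock);
    	pt->last_lock_reason = reason;
    	pt->lock_owner = pthread_self();
    	pt->owned = 1;
    }

    static void pt_unlock(struct pt *pt)
    {
    	pt->last_lock_reason = "free";
    	pt->owned = 0;
    	pthread_mutex_unlock(&pt->lock);
    }

    int main(void)
    {
    	struct pt pt = { .lock = PTHREAD_MUTEX_INITIALIZER,
    			 .last_lock_reason = "free" };

    	pt_lock(&pt, __func__);
    	/* pt_lock(&pt, "again"); would trip the assert here */
    	pt_unlock(&pt);
    	puts("ok");
    	return 0;
    }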
@@ -2499,12 +2529,14 @@ lws_vhost_unlock(struct lws_vhost *vhost)
 #else
 #define lws_pt_mutex_init(_a) (void)(_a)
 #define lws_pt_mutex_destroy(_a) (void)(_a)
-#define lws_pt_lock(_a) (void)(_a)
+#define lws_pt_lock(_a, b) (void)(_a)
 #define lws_pt_unlock(_a) (void)(_a)
 #define lws_context_lock(_a) (void)(_a)
 #define lws_context_unlock(_a) (void)(_a)
 #define lws_vhost_lock(_a) (void)(_a)
 #define lws_vhost_unlock(_a) (void)(_a)
+#define lws_pt_stats_lock(_a) (void)(_a)
+#define lws_pt_stats_unlock(_a) (void)(_a)
 #endif

 LWS_EXTERN int LWS_WARN_UNUSED_RESULT

@@ -2573,7 +2605,7 @@ lws_decode_ssl_error(void);
 #endif

 LWS_EXTERN int
-_lws_rx_flow_control(struct lws *wsi);
+__lws_rx_flow_control(struct lws *wsi);

 LWS_EXTERN int
 _lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa);

@@ -2736,6 +2768,16 @@ lws_peer_add_wsi(struct lws_context *context, struct lws_peer *peer,
 		 struct lws *wsi);
 #endif

+
+void
+__lws_remove_from_timeout_list(struct lws *wsi);
+void
+__lws_ssl_remove_wsi_from_buffered_list(struct lws *wsi);
+void
+__lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs);
+int
+__lws_change_pollfd(struct lws *wsi, int _and, int _or);
+
 #ifdef __cplusplus
 };
 #endif

@@ -119,7 +119,7 @@ _lws_header_table_reset(struct allocated_headers *ah)
 	// doesn't scrub the ah rxbuffer by default, parent must do if needed

 void
-lws_header_table_reset(struct lws *wsi, int autoservice)
+__lws_header_table_reset(struct lws *wsi, int autoservice)
 {
 	struct allocated_headers *ah = wsi->ah;
 	struct lws_context_per_thread *pt;

@@ -139,7 +139,7 @@ lws_header_table_reset(struct lws *wsi, int autoservice)
 	wsi->hdr_parsing_completed = 0;

 	/* while we hold the ah, keep a timeout on the wsi */
-	lws_set_timeout(wsi, PENDING_TIMEOUT_HOLDING_AH,
+	__lws_set_timeout(wsi, PENDING_TIMEOUT_HOLDING_AH,
 			wsi->vhost->timeout_secs_ah_idle);

 	time(&ah->assigned);

@@ -171,6 +171,18 @@ lws_header_table_reset(struct lws *wsi, int autoservice)
 	}
 }

+void
+lws_header_table_reset(struct lws *wsi, int autoservice)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+	lws_pt_lock(pt, __func__);
+
+	__lws_header_table_reset(wsi, autoservice);
+
+	lws_pt_unlock(pt);
+}
+
 static void
 _lws_header_ensure_we_are_on_waiting_list(struct lws *wsi)
 {

@@ -235,7 +247,7 @@ lws_header_table_attach(struct lws *wsi, int autoservice)
 		goto reset;
 	}

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);

 	n = pt->ah_count_in_use == context->max_http_header_pool;
 #if defined(LWS_WITH_PEER_LIMITS)

@@ -290,7 +302,7 @@ reset:
 	wsi->ah->rxpos = 0;
 	wsi->ah->rxlen = 0;

-	lws_header_table_reset(wsi, autoservice);
+	__lws_header_table_reset(wsi, autoservice);

 #ifndef LWS_NO_CLIENT
 	if (wsi->state == LWSS_CLIENT_UNCONNECTED)

@@ -336,7 +348,7 @@ int lws_header_table_detach(struct lws *wsi, int autoservice)
 	struct lws **pwsi, **pwsi_eligible;
 	time_t now;

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);
 	__lws_remove_from_ah_waiting_list(wsi);
 	lws_pt_unlock(pt);

@@ -360,7 +372,7 @@ int lws_header_table_detach(struct lws *wsi, int autoservice)
 		return 0;
 	}

-	lws_pt_lock(pt);
+	lws_pt_lock(pt, __func__);

 	/* we did have an ah attached */
 	time(&now);

@@ -431,7 +443,7 @@ int lws_header_table_detach(struct lws *wsi, int autoservice)
 	/* and reset the rx state */
 	ah->rxpos = 0;
 	ah->rxlen = 0;
-	lws_header_table_reset(wsi, autoservice);
+	__lws_header_table_reset(wsi, autoservice);
 #if defined(LWS_WITH_PEER_LIMITS)
 	if (wsi->peer)
 		wsi->peer->count_ah++;

@@ -1725,7 +1725,7 @@ upgrade_ws:

 		lwsl_info("%s: %p: inheriting ws ah (rxpos:%d, rxlen:%d)\n",
 			  __func__, wsi, wsi->ah->rxpos, wsi->ah->rxlen);
-		lws_pt_lock(pt);
+		lws_pt_lock(pt, __func__);

 		if (wsi->h2_stream_carries_ws)
 			lws_union_transition(wsi, LWSCM_HTTP2_WS_SERVING);

@@ -2971,6 +2971,12 @@ lws_interpret_incoming_packet(struct lws *wsi, unsigned char **buf, size_t len)
 			wsi->rxflow_pos += m;
 		}

+		/* process the byte */
+		m = lws_rx_sm(wsi, *(*buf)++);
+		if (m < 0)
+			return -1;
+		len--;
+
 		if (wsi->rxflow_buffer && wsi->rxflow_pos == wsi->rxflow_len) {
 			lwsl_debug("%s: %p flow buf: drained\n", __func__, wsi);
 			lws_free_set_NULL(wsi->rxflow_buffer);

@@ -2978,15 +2984,9 @@ lws_interpret_incoming_packet(struct lws *wsi, unsigned char **buf, size_t len)
 #ifdef LWS_NO_SERVER
 			m =
 #endif
-			_lws_rx_flow_control(wsi);
+			__lws_rx_flow_control(wsi);
 			/* m ignored, needed for NO_SERVER case */
 		}
-
-		/* process the byte */
-		m = lws_rx_sm(wsi, *(*buf)++);
-		if (m < 0)
-			return -1;
-		len--;
 	}

 	lwsl_parser("%s: exit with %d unused\n", __func__, (int)len);

@@ -811,6 +811,8 @@ lws_service_flag_pending(struct lws_context *context, int tsi)
 	struct lws *wsi;
 	int forced = 0;

+	lws_pt_lock(pt, __func__);
+
 	/* POLLIN faking */

 	/*

@@ -876,6 +878,8 @@ lws_service_flag_pending(struct lws_context *context, int tsi)
 		ah = ah->next;
 	}

+	lws_pt_unlock(pt);
+
 	return forced;
 }

@@ -1782,7 +1786,7 @@ drain:
 #ifdef LWS_NO_SERVER
 			n =
 #endif
-			_lws_rx_flow_control(wsi);
+			__lws_rx_flow_control(wsi);
 			/* n ignored, needed for NO_SERVER case */
 		}

@@ -41,8 +41,8 @@ lws_ssl_anybody_has_buffered_read_tsi(struct lws_context *context, int tsi)
 	return 0;
 }

-LWS_VISIBLE void
-lws_ssl_remove_wsi_from_buffered_list(struct lws *wsi)
+void
+__lws_ssl_remove_wsi_from_buffered_list(struct lws *wsi)
 {
 	struct lws_context *context = wsi->context;
 	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];

@@ -69,6 +69,16 @@ lws_ssl_remove_wsi_from_buffered_list(struct lws *wsi)
 	wsi->pending_read_list_next = NULL;
 }

+void
+lws_ssl_remove_wsi_from_buffered_list(struct lws *wsi)
+{
+	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
+
+	lws_pt_lock(pt, __func__);
+	__lws_ssl_remove_wsi_from_buffered_list(wsi);
+	lws_pt_unlock(pt);
+}
+
 #if defined(LWS_WITH_ESP32)
 int alloc_file(struct lws_context *context, const char *filename, uint8_t **buf,
 	       lws_filepos_t *amount)

@@ -57,7 +57,7 @@ struct a_message {

 struct mirror_instance {
 	struct mirror_instance *next;
-	lws_pthread_mutex(lock); /* protects all mirror instance data */
+	lws_pthread_mutex(lock) /* protects all mirror instance data */
 	struct per_session_data__lws_mirror *same_mi_pss_list;
 	/**< must hold the the per_vhost_data__lws_mirror.lock as well
 	 * to change mi list membership */

@@ -68,7 +68,7 @@ struct mirror_instance {
 };

 struct per_vhost_data__lws_mirror {
-	lws_pthread_mutex(lock); /* protects mi_list membership changes */
+	lws_pthread_mutex(lock) /* protects mi_list membership changes */
 	struct mirror_instance *mi_list;
 };

@@ -55,8 +55,8 @@ rm -rf $RPM_BUILD_ROOT
 /usr/bin/libwebsockets-test-fraggle
 /usr/bin/libwebsockets-test-fuzxy
 /usr/bin/libwebsockets-test-lejp
+/usr/bin/libwebsockets-test-server-pthreads
 /usr/bin/libwebsockets-test-server-libev
 /usr/bin/libwebsockets-test-server-libevent
 /usr/bin/libwebsockets-test-server-libuv
 /usr/bin/libwebsockets-test-server-v2.0
-/usr/bin/libwebsockets-test-sshd

@@ -417,6 +417,10 @@ callback_lws_mirror(struct lws *wsi, enum lws_callback_reasons reason,
 		/* get notified as soon as we can write again */
 		if (!justmirror)
 			lws_callback_on_writable(wsi);
+
+#if !defined(_WIN32) && !defined(WIN32)
+		usleep(250);
+#endif
 		break;

 	case LWS_CALLBACK_CLIENT_RECEIVE:

@@ -612,7 +612,7 @@ int callback_http(struct lws *wsi, enum lws_callback_reasons reason, void *user,
 				goto bail;
 			}
 			if (m) /* while still active, extend timeout */
-				lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_CONTENT, 5);
+				lws_set_timeout(wsi, PENDING_TIMEOUT_HTTP_CONTENT, 30);
 			sent += m;

 		} while (!lws_send_pipe_choked(wsi) && (sent < 1024 * 1024));

@@ -346,6 +346,12 @@ int main(int argc, char **argv)
 	info.extensions = exts;
 	info.max_http_header_pool = 4;

+	/* when doing slow benchmarks with thousands of concurrent
+	 * connections, we need wait longer
+	 */
+	info.timeout_secs = 30;
+	info.keepalive_timeout = 30;
+
 	context = lws_create_context(&info);
 	if (context == NULL) {
 		lwsl_err("libwebsocket init failed\n");

@@ -366,6 +372,8 @@ int main(int argc, char **argv)
 	 * initialized.
 	 */

+	lwsl_notice("Service thread count: %d\n", lws_get_count_threads(context));
+
 	for (n = 0; n < lws_get_count_threads(context); n++)
 		if (pthread_create(&pthread_service[n], NULL, thread_service,
 				   (void *)(lws_intptr_t)n))